clang 22.0.0git
CGStmt.cpp
Go to the documentation of this file.
1//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Stmt nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGDebugInfo.h"
14#include "CGOpenMPRuntime.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "CodeGenPGO.h"
18#include "TargetInfo.h"
19#include "clang/AST/Attr.h"
20#include "clang/AST/Expr.h"
21#include "clang/AST/Stmt.h"
28#include "llvm/ADT/ArrayRef.h"
29#include "llvm/ADT/DenseMap.h"
30#include "llvm/ADT/SmallSet.h"
31#include "llvm/ADT/StringExtras.h"
32#include "llvm/IR/Assumptions.h"
33#include "llvm/IR/DataLayout.h"
34#include "llvm/IR/InlineAsm.h"
35#include "llvm/IR/Intrinsics.h"
36#include "llvm/IR/MDBuilder.h"
37#include "llvm/Support/SaveAndRestore.h"
38#include <optional>
39
40using namespace clang;
41using namespace CodeGen;
42
43//===----------------------------------------------------------------------===//
44// Statement Emission
45//===----------------------------------------------------------------------===//
46
47namespace llvm {
48extern cl::opt<bool> EnableSingleByteCoverage;
49} // namespace llvm
50
52 if (CGDebugInfo *DI = getDebugInfo()) {
54 Loc = S->getBeginLoc();
55 DI->EmitLocation(Builder, Loc);
56
57 LastStopPoint = Loc;
58 }
59}
60
62 assert(S && "Null statement?");
63 PGO->setCurrentStmt(S);
64
65 // These statements have their own debug info handling.
66 if (EmitSimpleStmt(S, Attrs))
67 return;
68
69 // Check if we are generating unreachable code.
70 if (!HaveInsertPoint()) {
71 // If so, and the statement doesn't contain a label, then we do not need to
72 // generate actual code. This is safe because (1) the current point is
73 // unreachable, so we don't need to execute the code, and (2) we've already
74 // handled the statements which update internal data structures (like the
75 // local variable map) which could be used by subsequent statements.
76 if (!ContainsLabel(S)) {
77 // Verify that any decl statements were handled as simple, they may be in
78 // scope of subsequent reachable statements.
79 assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
80 PGO->markStmtMaybeUsed(S);
81 return;
82 }
83
84 // Otherwise, make a new block to hold the code.
86 }
87
88 // Generate a stoppoint if we are emitting debug info.
90
91 // Ignore all OpenMP directives except for simd if OpenMP with Simd is
92 // enabled.
93 if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
94 if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
96 return;
97 }
98 }
99
100 switch (S->getStmtClass()) {
102 case Stmt::CXXCatchStmtClass:
103 case Stmt::SEHExceptStmtClass:
104 case Stmt::SEHFinallyStmtClass:
105 case Stmt::MSDependentExistsStmtClass:
106 llvm_unreachable("invalid statement class to emit generically");
107 case Stmt::NullStmtClass:
108 case Stmt::CompoundStmtClass:
109 case Stmt::DeclStmtClass:
110 case Stmt::LabelStmtClass:
111 case Stmt::AttributedStmtClass:
112 case Stmt::GotoStmtClass:
113 case Stmt::BreakStmtClass:
114 case Stmt::ContinueStmtClass:
115 case Stmt::DefaultStmtClass:
116 case Stmt::CaseStmtClass:
117 case Stmt::SEHLeaveStmtClass:
118 case Stmt::SYCLKernelCallStmtClass:
119 llvm_unreachable("should have emitted these statements as simple");
120
121#define STMT(Type, Base)
122#define ABSTRACT_STMT(Op)
123#define EXPR(Type, Base) \
124 case Stmt::Type##Class:
125#include "clang/AST/StmtNodes.inc"
126 {
127 // Remember the block we came in on.
128 llvm::BasicBlock *incoming = Builder.GetInsertBlock();
129 assert(incoming && "expression emission must have an insertion point");
130
132
133 llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
134 assert(outgoing && "expression emission cleared block!");
135
136 // The expression emitters assume (reasonably!) that the insertion
137 // point is always set. To maintain that, the call-emission code
138 // for noreturn functions has to enter a new block with no
139 // predecessors. We want to kill that block and mark the current
140 // insertion point unreachable in the common case of a call like
141 // "exit();". Since expression emission doesn't otherwise create
142 // blocks with no predecessors, we can just test for that.
143 // However, we must be careful not to do this to our incoming
144 // block, because *statement* emission does sometimes create
145 // reachable blocks which will have no predecessors until later in
146 // the function. This occurs with, e.g., labels that are not
147 // reachable by fallthrough.
148 if (incoming != outgoing && outgoing->use_empty()) {
149 outgoing->eraseFromParent();
150 Builder.ClearInsertionPoint();
151 }
152 break;
153 }
154
155 case Stmt::IndirectGotoStmtClass:
157
158 case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
159 case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
160 case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
161 case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;
162
163 case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
164
165 case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
166 case Stmt::GCCAsmStmtClass: // Intentional fall-through.
167 case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
168 case Stmt::CoroutineBodyStmtClass:
170 break;
171 case Stmt::CoreturnStmtClass:
173 break;
174 case Stmt::CapturedStmtClass: {
175 const CapturedStmt *CS = cast<CapturedStmt>(S);
177 }
178 break;
179 case Stmt::ObjCAtTryStmtClass:
181 break;
182 case Stmt::ObjCAtCatchStmtClass:
183 llvm_unreachable(
184 "@catch statements should be handled by EmitObjCAtTryStmt");
185 case Stmt::ObjCAtFinallyStmtClass:
186 llvm_unreachable(
187 "@finally statements should be handled by EmitObjCAtTryStmt");
188 case Stmt::ObjCAtThrowStmtClass:
190 break;
191 case Stmt::ObjCAtSynchronizedStmtClass:
193 break;
194 case Stmt::ObjCForCollectionStmtClass:
196 break;
197 case Stmt::ObjCAutoreleasePoolStmtClass:
199 break;
200
201 case Stmt::CXXTryStmtClass:
203 break;
204 case Stmt::CXXForRangeStmtClass:
206 break;
207 case Stmt::SEHTryStmtClass:
209 break;
210 case Stmt::OMPMetaDirectiveClass:
212 break;
213 case Stmt::OMPCanonicalLoopClass:
215 break;
216 case Stmt::OMPParallelDirectiveClass:
218 break;
219 case Stmt::OMPSimdDirectiveClass:
221 break;
222 case Stmt::OMPTileDirectiveClass:
224 break;
225 case Stmt::OMPStripeDirectiveClass:
227 break;
228 case Stmt::OMPUnrollDirectiveClass:
230 break;
231 case Stmt::OMPReverseDirectiveClass:
233 break;
234 case Stmt::OMPInterchangeDirectiveClass:
236 break;
237 case Stmt::OMPFuseDirectiveClass:
239 break;
240 case Stmt::OMPForDirectiveClass:
242 break;
243 case Stmt::OMPForSimdDirectiveClass:
245 break;
246 case Stmt::OMPSectionsDirectiveClass:
248 break;
249 case Stmt::OMPSectionDirectiveClass:
251 break;
252 case Stmt::OMPSingleDirectiveClass:
254 break;
255 case Stmt::OMPMasterDirectiveClass:
257 break;
258 case Stmt::OMPCriticalDirectiveClass:
260 break;
261 case Stmt::OMPParallelForDirectiveClass:
263 break;
264 case Stmt::OMPParallelForSimdDirectiveClass:
266 break;
267 case Stmt::OMPParallelMasterDirectiveClass:
269 break;
270 case Stmt::OMPParallelSectionsDirectiveClass:
272 break;
273 case Stmt::OMPTaskDirectiveClass:
275 break;
276 case Stmt::OMPTaskyieldDirectiveClass:
278 break;
279 case Stmt::OMPErrorDirectiveClass:
281 break;
282 case Stmt::OMPBarrierDirectiveClass:
284 break;
285 case Stmt::OMPTaskwaitDirectiveClass:
287 break;
288 case Stmt::OMPTaskgroupDirectiveClass:
290 break;
291 case Stmt::OMPFlushDirectiveClass:
293 break;
294 case Stmt::OMPDepobjDirectiveClass:
296 break;
297 case Stmt::OMPScanDirectiveClass:
299 break;
300 case Stmt::OMPOrderedDirectiveClass:
302 break;
303 case Stmt::OMPAtomicDirectiveClass:
305 break;
306 case Stmt::OMPTargetDirectiveClass:
308 break;
309 case Stmt::OMPTeamsDirectiveClass:
311 break;
312 case Stmt::OMPCancellationPointDirectiveClass:
314 break;
315 case Stmt::OMPCancelDirectiveClass:
317 break;
318 case Stmt::OMPTargetDataDirectiveClass:
320 break;
321 case Stmt::OMPTargetEnterDataDirectiveClass:
323 break;
324 case Stmt::OMPTargetExitDataDirectiveClass:
326 break;
327 case Stmt::OMPTargetParallelDirectiveClass:
329 break;
330 case Stmt::OMPTargetParallelForDirectiveClass:
332 break;
333 case Stmt::OMPTaskLoopDirectiveClass:
335 break;
336 case Stmt::OMPTaskLoopSimdDirectiveClass:
338 break;
339 case Stmt::OMPMasterTaskLoopDirectiveClass:
341 break;
342 case Stmt::OMPMaskedTaskLoopDirectiveClass:
344 break;
345 case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
348 break;
349 case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
352 break;
353 case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
356 break;
357 case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
360 break;
361 case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
364 break;
365 case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
368 break;
369 case Stmt::OMPDistributeDirectiveClass:
371 break;
372 case Stmt::OMPTargetUpdateDirectiveClass:
374 break;
375 case Stmt::OMPDistributeParallelForDirectiveClass:
378 break;
379 case Stmt::OMPDistributeParallelForSimdDirectiveClass:
382 break;
383 case Stmt::OMPDistributeSimdDirectiveClass:
385 break;
386 case Stmt::OMPTargetParallelForSimdDirectiveClass:
389 break;
390 case Stmt::OMPTargetSimdDirectiveClass:
392 break;
393 case Stmt::OMPTeamsDistributeDirectiveClass:
395 break;
396 case Stmt::OMPTeamsDistributeSimdDirectiveClass:
399 break;
400 case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
403 break;
404 case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
407 break;
408 case Stmt::OMPTargetTeamsDirectiveClass:
410 break;
411 case Stmt::OMPTargetTeamsDistributeDirectiveClass:
414 break;
415 case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
418 break;
419 case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
422 break;
423 case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
426 break;
427 case Stmt::OMPInteropDirectiveClass:
429 break;
430 case Stmt::OMPDispatchDirectiveClass:
431 CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
432 break;
433 case Stmt::OMPScopeDirectiveClass:
435 break;
436 case Stmt::OMPMaskedDirectiveClass:
438 break;
439 case Stmt::OMPGenericLoopDirectiveClass:
441 break;
442 case Stmt::OMPTeamsGenericLoopDirectiveClass:
444 break;
445 case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
448 break;
449 case Stmt::OMPParallelGenericLoopDirectiveClass:
452 break;
453 case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
456 break;
457 case Stmt::OMPParallelMaskedDirectiveClass:
459 break;
460 case Stmt::OMPAssumeDirectiveClass:
462 break;
463 case Stmt::OpenACCComputeConstructClass:
465 break;
466 case Stmt::OpenACCLoopConstructClass:
468 break;
469 case Stmt::OpenACCCombinedConstructClass:
471 break;
472 case Stmt::OpenACCDataConstructClass:
474 break;
475 case Stmt::OpenACCEnterDataConstructClass:
477 break;
478 case Stmt::OpenACCExitDataConstructClass:
480 break;
481 case Stmt::OpenACCHostDataConstructClass:
483 break;
484 case Stmt::OpenACCWaitConstructClass:
486 break;
487 case Stmt::OpenACCInitConstructClass:
489 break;
490 case Stmt::OpenACCShutdownConstructClass:
492 break;
493 case Stmt::OpenACCSetConstructClass:
495 break;
496 case Stmt::OpenACCUpdateConstructClass:
498 break;
499 case Stmt::OpenACCAtomicConstructClass:
501 break;
502 case Stmt::OpenACCCacheConstructClass:
504 break;
505 }
506}
507
510 switch (S->getStmtClass()) {
511 default:
512 return false;
513 case Stmt::NullStmtClass:
514 break;
515 case Stmt::CompoundStmtClass:
517 break;
518 case Stmt::DeclStmtClass:
520 break;
521 case Stmt::LabelStmtClass:
523 break;
524 case Stmt::AttributedStmtClass:
526 break;
527 case Stmt::GotoStmtClass:
529 break;
530 case Stmt::BreakStmtClass:
532 break;
533 case Stmt::ContinueStmtClass:
535 break;
536 case Stmt::DefaultStmtClass:
538 break;
539 case Stmt::CaseStmtClass:
540 EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
541 break;
542 case Stmt::SEHLeaveStmtClass:
544 break;
545 case Stmt::SYCLKernelCallStmtClass:
546 // SYCL kernel call statements are generated as wrappers around the body
547 // of functions declared with the sycl_kernel_entry_point attribute. Such
548 // functions are used to specify how a SYCL kernel (a function object) is
549 // to be invoked; the SYCL kernel call statement contains a transformed
550 // variation of the function body and is used to generate a SYCL kernel
551 // caller function; a function that serves as the device side entry point
552 // used to execute the SYCL kernel. The sycl_kernel_entry_point attributed
553 // function is invoked by host code in order to trigger emission of the
554 // device side SYCL kernel caller function and to generate metadata needed
555 // by SYCL run-time library implementations; the function is otherwise
556 // intended to have no effect. As such, the function body is not evaluated
557 // as part of the invocation during host compilation (and the function
558 // should not be called or emitted during device compilation); the SYCL
559 // kernel call statement is thus handled as a null statement for the
560 // purpose of code generation.
561 break;
562 }
563 return true;
564}
565
566/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
567/// this captures the expression result of the last sub-statement and returns it
568/// (for use by the statement expression extension).
570 AggValueSlot AggSlot) {
571 PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
572 "LLVM IR generation of compound statement ('{}')");
573
574 // Keep track of the current cleanup stack depth, including debug scopes.
576
577 return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
578}
579
582 bool GetLast,
583 AggValueSlot AggSlot) {
584
585 const Stmt *ExprResult = S.getStmtExprResult();
586 assert((!GetLast || (GetLast && ExprResult)) &&
587 "If GetLast is true then the CompoundStmt must have a StmtExprResult");
588
589 Address RetAlloca = Address::invalid();
590
591 for (auto *CurStmt : S.body()) {
592 if (GetLast && ExprResult == CurStmt) {
593 // We have to special case labels here. They are statements, but when put
594 // at the end of a statement expression, they yield the value of their
595 // subexpression. Handle this by walking through all labels we encounter,
596 // emitting them before we evaluate the subexpr.
597 // Similar issues arise for attributed statements.
598 while (!isa<Expr>(ExprResult)) {
599 if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
600 EmitLabel(LS->getDecl());
601 ExprResult = LS->getSubStmt();
602 } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
603 // FIXME: Update this if we ever have attributes that affect the
604 // semantics of an expression.
605 ExprResult = AS->getSubStmt();
606 } else {
607 llvm_unreachable("unknown value statement");
608 }
609 }
610
612
613 const Expr *E = cast<Expr>(ExprResult);
614 QualType ExprTy = E->getType();
615 if (hasAggregateEvaluationKind(ExprTy)) {
616 EmitAggExpr(E, AggSlot);
617 } else {
618 // We can't return an RValue here because there might be cleanups at
619 // the end of the StmtExpr. Because of that, we have to emit the result
620 // here into a temporary alloca.
621 RetAlloca = CreateMemTemp(ExprTy);
622 EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
623 /*IsInit*/ false);
624 }
625 } else {
626 EmitStmt(CurStmt);
627 }
628 }
629
630 return RetAlloca;
631}
632
634 llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
635
636 // If there is a cleanup stack, then we it isn't worth trying to
637 // simplify this block (we would need to remove it from the scope map
638 // and cleanup entry).
639 if (!EHStack.empty())
640 return;
641
642 // Can only simplify direct branches.
643 if (!BI || !BI->isUnconditional())
644 return;
645
646 // Can only simplify empty blocks.
647 if (BI->getIterator() != BB->begin())
648 return;
649
650 BB->replaceAllUsesWith(BI->getSuccessor(0));
651 BI->eraseFromParent();
652 BB->eraseFromParent();
653}
654
655void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
656 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
657
658 // Fall out of the current block (if necessary).
659 EmitBranch(BB);
660
661 if (IsFinished && BB->use_empty()) {
662 delete BB;
663 return;
664 }
665
666 // Place the block after the current block, if possible, or else at
667 // the end of the function.
668 if (CurBB && CurBB->getParent())
669 CurFn->insert(std::next(CurBB->getIterator()), BB);
670 else
671 CurFn->insert(CurFn->end(), BB);
672 Builder.SetInsertPoint(BB);
673}
674
675void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
676 // Emit a branch from the current block to the target one if this
677 // was a real block. If this was just a fall-through block after a
678 // terminator, don't emit it.
679 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
680
681 if (!CurBB || CurBB->getTerminator()) {
682 // If there is no insert point or the previous block is already
683 // terminated, don't touch it.
684 } else {
685 // Otherwise, create a fall-through branch.
686 Builder.CreateBr(Target);
687 }
688
689 Builder.ClearInsertionPoint();
690}
691
692void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
693 bool inserted = false;
694 for (llvm::User *u : block->users()) {
695 if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
696 CurFn->insert(std::next(insn->getParent()->getIterator()), block);
697 inserted = true;
698 break;
699 }
700 }
701
702 if (!inserted)
703 CurFn->insert(CurFn->end(), block);
704
705 Builder.SetInsertPoint(block);
706}
707
710 JumpDest &Dest = LabelMap[D];
711 if (Dest.isValid()) return Dest;
712
713 // Create, but don't insert, the new block.
714 Dest = JumpDest(createBasicBlock(D->getName()),
717 return Dest;
718}
719
721 // Add this label to the current lexical scope if we're within any
722 // normal cleanups. Jumps "in" to this label --- when permitted by
723 // the language --- may need to be routed around such cleanups.
724 if (EHStack.hasNormalCleanups() && CurLexicalScope)
725 CurLexicalScope->addLabel(D);
726
727 JumpDest &Dest = LabelMap[D];
728
729 // If we didn't need a forward reference to this label, just go
730 // ahead and create a destination at the current scope.
731 if (!Dest.isValid()) {
733
734 // Otherwise, we need to give this label a target depth and remove
735 // it from the branch-fixups list.
736 } else {
737 assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
738 Dest.setScopeDepth(EHStack.stable_begin());
740 }
741
742 EmitBlock(Dest.getBlock());
743
744 // Emit debug info for labels.
745 if (CGDebugInfo *DI = getDebugInfo()) {
746 if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
747 DI->setLocation(D->getLocation());
748 DI->EmitLabel(D, Builder);
749 }
750 }
751
753}
754
755/// Change the cleanup scope of the labels in this lexical scope to
756/// match the scope of the enclosing context.
758 assert(!Labels.empty());
759 EHScopeStack::stable_iterator innermostScope
760 = CGF.EHStack.getInnermostNormalCleanup();
761
762 // Change the scope depth of all the labels.
763 for (const LabelDecl *Label : Labels) {
764 assert(CGF.LabelMap.count(Label));
765 JumpDest &dest = CGF.LabelMap.find(Label)->second;
766 assert(dest.getScopeDepth().isValid());
767 assert(innermostScope.encloses(dest.getScopeDepth()));
768 dest.setScopeDepth(innermostScope);
769 }
770
771 // Reparent the labels if the new scope also has cleanups.
772 if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
773 ParentScope->Labels.append(Labels.begin(), Labels.end());
774 }
775}
776
777
779 EmitLabel(S.getDecl());
780
781 // IsEHa - emit eha.scope.begin if it's a side entry of a scope
782 if (getLangOpts().EHAsynch && S.isSideEntry())
784
785 EmitStmt(S.getSubStmt());
786}
787
789 bool nomerge = false;
790 bool noinline = false;
791 bool alwaysinline = false;
792 bool noconvergent = false;
793 HLSLControlFlowHintAttr::Spelling flattenOrBranch =
794 HLSLControlFlowHintAttr::SpellingNotCalculated;
795 const CallExpr *musttail = nullptr;
796 const AtomicAttr *AA = nullptr;
797
798 for (const auto *A : S.getAttrs()) {
799 switch (A->getKind()) {
800 default:
801 break;
802 case attr::NoMerge:
803 nomerge = true;
804 break;
805 case attr::NoInline:
806 noinline = true;
807 break;
808 case attr::AlwaysInline:
809 alwaysinline = true;
810 break;
811 case attr::NoConvergent:
812 noconvergent = true;
813 break;
814 case attr::MustTail: {
815 const Stmt *Sub = S.getSubStmt();
816 const ReturnStmt *R = cast<ReturnStmt>(Sub);
817 musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
818 } break;
819 case attr::CXXAssume: {
820 const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
821 if (getLangOpts().CXXAssumptions && Builder.GetInsertBlock() &&
822 !Assumption->HasSideEffects(getContext())) {
823 llvm::Value *AssumptionVal = EmitCheckedArgForAssume(Assumption);
824 Builder.CreateAssumption(AssumptionVal);
825 }
826 } break;
827 case attr::Atomic:
828 AA = cast<AtomicAttr>(A);
829 break;
830 case attr::HLSLControlFlowHint: {
831 flattenOrBranch = cast<HLSLControlFlowHintAttr>(A)->getSemanticSpelling();
832 } break;
833 }
834 }
835 SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
836 SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
837 SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
838 SaveAndRestore save_noconvergent(InNoConvergentAttributedStmt, noconvergent);
839 SaveAndRestore save_musttail(MustTailCall, musttail);
840 SaveAndRestore save_flattenOrBranch(HLSLControlFlowAttr, flattenOrBranch);
841 CGAtomicOptionsRAII AORAII(CGM, AA);
842 EmitStmt(S.getSubStmt(), S.getAttrs());
843}
844
846 // If this code is reachable then emit a stop point (if generating
847 // debug info). We have to do this ourselves because we are on the
848 // "simple" statement path.
849 if (HaveInsertPoint())
850 EmitStopPoint(&S);
851
854}
855
856
859 if (const LabelDecl *Target = S.getConstantTarget()) {
861 return;
862 }
863
864 // Ensure that we have an i8* for our PHI node.
865 llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
866 Int8PtrTy, "addr");
867 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
868
869 // Get the basic block for the indirect goto.
870 llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
871
872 // The first instruction in the block has to be the PHI for the switch dest,
873 // add an entry for this branch.
874 cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
875
876 EmitBranch(IndGotoBB);
877 if (CurBB && CurBB->getTerminator())
878 addInstToCurrentSourceAtom(CurBB->getTerminator(), nullptr);
879}
880
882 const Stmt *Else = S.getElse();
883
884 // The else branch of a consteval if statement is always the only branch that
885 // can be runtime evaluated.
886 if (S.isConsteval()) {
887 const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : Else;
888 if (Executed) {
889 RunCleanupsScope ExecutedScope(*this);
890 EmitStmt(Executed);
891 }
892 return;
893 }
894
895 // C99 6.8.4.1: The first substatement is executed if the expression compares
896 // unequal to 0. The condition must be a scalar type.
897 LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
898 ApplyDebugLocation DL(*this, S.getCond());
899
900 if (S.getInit())
901 EmitStmt(S.getInit());
902
903 if (S.getConditionVariable())
905
906 // If the condition constant folds and can be elided, try to avoid emitting
907 // the condition and the dead arm of the if/else.
908 bool CondConstant;
909 if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
910 S.isConstexpr())) {
911 // Figure out which block (then or else) is executed.
912 const Stmt *Executed = S.getThen();
913 const Stmt *Skipped = Else;
914 if (!CondConstant) // Condition false?
915 std::swap(Executed, Skipped);
916
917 // If the skipped block has no labels in it, just emit the executed block.
918 // This avoids emitting dead code and simplifies the CFG substantially.
919 if (S.isConstexpr() || !ContainsLabel(Skipped)) {
920 if (CondConstant)
922 if (Executed) {
924 RunCleanupsScope ExecutedScope(*this);
925 EmitStmt(Executed);
926 }
927 PGO->markStmtMaybeUsed(Skipped);
928 return;
929 }
930 }
931
932 // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
933 // the conditional branch.
934 llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
935 llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
936 llvm::BasicBlock *ElseBlock = ContBlock;
937 if (Else)
938 ElseBlock = createBasicBlock("if.else");
939
940 // Prefer the PGO based weights over the likelihood attribute.
941 // When the build isn't optimized the metadata isn't used, so don't generate
942 // it.
943 // Also, differentiate between disabled PGO and a never executed branch with
944 // PGO. Assuming PGO is in use:
945 // - we want to ignore the [[likely]] attribute if the branch is never
946 // executed,
947 // - assuming the profile is poor, preserving the attribute may still be
948 // beneficial.
949 // As an approximation, preserve the attribute only if both the branch and the
950 // parent context were not executed.
952 uint64_t ThenCount = getProfileCount(S.getThen());
953 if (!ThenCount && !getCurrentProfileCount() &&
954 CGM.getCodeGenOpts().OptimizationLevel)
955 LH = Stmt::getLikelihood(S.getThen(), Else);
956
957 // When measuring MC/DC, always fully evaluate the condition up front using
958 // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
959 // executing the body of the if.then or if.else. This is useful for when
960 // there is a 'return' within the body, but this is particularly beneficial
961 // when one if-stmt is nested within another if-stmt so that all of the MC/DC
962 // updates are kept linear and consistent.
963 if (!CGM.getCodeGenOpts().MCDCCoverage) {
964 EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH,
965 /*ConditionalOp=*/nullptr,
966 /*ConditionalDecl=*/S.getConditionVariable());
967 } else {
968 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
970 Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
971 }
972
973 // Emit the 'then' code.
974 EmitBlock(ThenBlock);
977 else
979 {
980 RunCleanupsScope ThenScope(*this);
981 EmitStmt(S.getThen());
982 }
983 EmitBranch(ContBlock);
984
985 // Emit the 'else' code if present.
986 if (Else) {
987 {
988 // There is no need to emit line number for an unconditional branch.
989 auto NL = ApplyDebugLocation::CreateEmpty(*this);
990 EmitBlock(ElseBlock);
991 }
992 // When single byte coverage mode is enabled, add a counter to else block.
995 {
996 RunCleanupsScope ElseScope(*this);
997 EmitStmt(Else);
998 }
999 {
1000 // There is no need to emit line number for an unconditional branch.
1001 auto NL = ApplyDebugLocation::CreateEmpty(*this);
1002 EmitBranch(ContBlock);
1003 }
1004 }
1005
1006 // Emit the continuation block for code after the if.
1007 EmitBlock(ContBlock, true);
1008
1009 // When single byte coverage mode is enabled, add a counter to continuation
1010 // block.
1013}
1014
1015bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
1016 bool HasEmptyBody) {
1017 if (CGM.getCodeGenOpts().getFiniteLoops() ==
1019 return false;
1020
1021 // Now apply rules for plain C (see 6.8.5.6 in C11).
1022 // Loops with constant conditions do not have to make progress in any C
1023 // version.
1024 // As an extension, we consisider loops whose constant expression
1025 // can be constant-folded.
1027 bool CondIsConstInt =
1028 !ControllingExpression ||
1029 (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
1030 Result.Val.isInt());
1031
1032 bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
1033 Result.Val.getInt().getBoolValue());
1034
1035 // Loops with non-constant conditions must make progress in C11 and later.
1036 if (getLangOpts().C11 && !CondIsConstInt)
1037 return true;
1038
1039 // [C++26][intro.progress] (DR)
1040 // The implementation may assume that any thread will eventually do one of the
1041 // following:
1042 // [...]
1043 // - continue execution of a trivial infinite loop ([stmt.iter.general]).
1044 if (CGM.getCodeGenOpts().getFiniteLoops() ==
1047 if (HasEmptyBody && CondIsTrue) {
1048 CurFn->removeFnAttr(llvm::Attribute::MustProgress);
1049 return false;
1050 }
1051 return true;
1052 }
1053 return false;
1054}
1055
1056// [C++26][stmt.iter.general] (DR)
1057// A trivially empty iteration statement is an iteration statement matching one
1058// of the following forms:
1059// - while ( expression ) ;
1060// - while ( expression ) { }
1061// - do ; while ( expression ) ;
1062// - do { } while ( expression ) ;
1063// - for ( init-statement expression(opt); ) ;
1064// - for ( init-statement expression(opt); ) { }
1065template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
1066 if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
1067 if (S.getInc())
1068 return false;
1069 }
1070 const Stmt *Body = S.getBody();
1071 if (!Body || isa<NullStmt>(Body))
1072 return true;
1073 if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
1074 return Compound->body_empty();
1075 return false;
1076}
1077
1079 ArrayRef<const Attr *> WhileAttrs) {
1080 // Emit the header for the loop, which will also become
1081 // the continue target.
1082 JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
1083 EmitBlock(LoopHeader.getBlock());
1084
1085 if (CGM.shouldEmitConvergenceTokens())
1086 ConvergenceTokenStack.push_back(
1087 emitConvergenceLoopToken(LoopHeader.getBlock()));
1088
1089 // Create an exit block for when the condition fails, which will
1090 // also become the break target.
1092
1093 // Store the blocks to use for break and continue.
1094 BreakContinueStack.push_back(BreakContinue(S, LoopExit, LoopHeader));
1095
1096 // C++ [stmt.while]p2:
1097 // When the condition of a while statement is a declaration, the
1098 // scope of the variable that is declared extends from its point
1099 // of declaration (3.3.2) to the end of the while statement.
1100 // [...]
1101 // The object created in a condition is destroyed and created
1102 // with each iteration of the loop.
1103 RunCleanupsScope ConditionScope(*this);
1104
1105 if (S.getConditionVariable())
1107
1108 // Evaluate the conditional in the while header. C99 6.8.5.1: The
1109 // evaluation of the controlling expression takes place before each
1110 // execution of the loop body.
1111 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1112
1114
1115 // while(1) is common, avoid extra exit blocks. Be sure
1116 // to correctly handle break/continue though.
1117 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
1118 bool EmitBoolCondBranch = !C || !C->isOne();
1119 const SourceRange &R = S.getSourceRange();
1120 LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
1121 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
1124
1125 // When single byte coverage mode is enabled, add a counter to loop condition.
1128
1129 // As long as the condition is true, go to the loop body.
1130 llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
1131 if (EmitBoolCondBranch) {
1132 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1133 if (ConditionScope.requiresCleanups())
1134 ExitBlock = createBasicBlock("while.exit");
1135 llvm::MDNode *Weights =
1136 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1137 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1138 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1139 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1140 auto *I = Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);
1141 // Key Instructions: Emit the condition and branch as separate source
1142 // location atoms otherwise we may omit a step onto the loop condition in
1143 // favour of the `while` keyword.
1144 // FIXME: We could have the branch as the backup location for the condition,
1145 // which would probably be a better experience. Explore this later.
1146 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
1147 addInstToNewSourceAtom(CondI, nullptr);
1148 addInstToNewSourceAtom(I, nullptr);
1149
1150 if (ExitBlock != LoopExit.getBlock()) {
1151 EmitBlock(ExitBlock);
1153 }
1154 } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
1155 CGM.getDiags().Report(A->getLocation(),
1156 diag::warn_attribute_has_no_effect_on_infinite_loop)
1157 << A << A->getRange();
1158 CGM.getDiags().Report(
1159 S.getWhileLoc(),
1160 diag::note_attribute_has_no_effect_on_infinite_loop_here)
1162 }
1163
1164 // Emit the loop body. We have to emit this in a cleanup scope
1165 // because it might be a singleton DeclStmt.
1166 {
1167 RunCleanupsScope BodyScope(*this);
1168 EmitBlock(LoopBody);
1169 // When single byte coverage mode is enabled, add a counter to the body.
1172 else
1174 EmitStmt(S.getBody());
1175 }
1176
1177 BreakContinueStack.pop_back();
1178
1179 // Immediately force cleanup.
1180 ConditionScope.ForceCleanup();
1181
1182 EmitStopPoint(&S);
1183 // Branch to the loop header again.
1184 EmitBranch(LoopHeader.getBlock());
1185
1186 LoopStack.pop();
1187
1188 // Emit the exit block.
1189 EmitBlock(LoopExit.getBlock(), true);
1190
1191 // The LoopHeader typically is just a branch if we skipped emitting
1192 // a branch, try to erase it.
1193 if (!EmitBoolCondBranch)
1194 SimplifyForwardingBlocks(LoopHeader.getBlock());
1195
1196 // When single byte coverage mode is enabled, add a counter to continuation
1197 // block.
1200
1201 if (CGM.shouldEmitConvergenceTokens())
1202 ConvergenceTokenStack.pop_back();
1203}
1204
1206 ArrayRef<const Attr *> DoAttrs) {
1208 JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
1209
1210 uint64_t ParentCount = getCurrentProfileCount();
1211
1212 // Store the blocks to use for break and continue.
1213 BreakContinueStack.push_back(BreakContinue(S, LoopExit, LoopCond));
1214
1215 // Emit the body of the loop.
1216 llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
1217
1219 EmitBlockWithFallThrough(LoopBody, S.getBody());
1220 else
1221 EmitBlockWithFallThrough(LoopBody, &S);
1222
1223 if (CGM.shouldEmitConvergenceTokens())
1224 ConvergenceTokenStack.push_back(emitConvergenceLoopToken(LoopBody));
1225
1226 {
1227 RunCleanupsScope BodyScope(*this);
1228 EmitStmt(S.getBody());
1229 }
1230
1231 EmitBlock(LoopCond.getBlock());
1232 // When single byte coverage mode is enabled, add a counter to loop condition.
1235
1236 // C99 6.8.5.2: "The evaluation of the controlling expression takes place
1237 // after each execution of the loop body."
1238
1239 // Evaluate the conditional in the while header.
1240 // C99 6.8.5p2/p4: The first substatement is executed if the expression
1241 // compares unequal to 0. The condition must be a scalar type.
1242 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1243
1244 BreakContinueStack.pop_back();
1245
1246 // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
1247 // to correctly handle break/continue though.
1248 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
1249 bool EmitBoolCondBranch = !C || !C->isZero();
1250
1251 const SourceRange &R = S.getSourceRange();
1252 LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
1256
1257 // As long as the condition is true, iterate the loop.
1258 if (EmitBoolCondBranch) {
1259 uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
1260 auto *I = Builder.CreateCondBr(
1261 BoolCondVal, LoopBody, LoopExit.getBlock(),
1262 createProfileWeightsForLoop(S.getCond(), BackedgeCount));
1263
1264 // Key Instructions: Emit the condition and branch as separate source
1265 // location atoms otherwise we may omit a step onto the loop condition in
1266 // favour of the closing brace.
1267 // FIXME: We could have the branch as the backup location for the condition,
1268 // which would probably be a better experience (no jumping to the brace).
1269 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
1270 addInstToNewSourceAtom(CondI, nullptr);
1271 addInstToNewSourceAtom(I, nullptr);
1272 }
1273
1274 LoopStack.pop();
1275
1276 // Emit the exit block.
1277 EmitBlock(LoopExit.getBlock());
1278
1279 // The DoCond block typically is just a branch if we skipped
1280 // emitting a branch, try to erase it.
1281 if (!EmitBoolCondBranch)
1283
1284 // When single byte coverage mode is enabled, add a counter to continuation
1285 // block.
1288
1289 if (CGM.shouldEmitConvergenceTokens())
1290 ConvergenceTokenStack.pop_back();
1291}
1292
1294 ArrayRef<const Attr *> ForAttrs) {
1296
1297 std::optional<LexicalScope> ForScope;
1299 ForScope.emplace(*this, S.getSourceRange());
1300
1301 // Evaluate the first part before the loop.
1302 if (S.getInit())
1303 EmitStmt(S.getInit());
1304
1305 // Start the loop with a block that tests the condition.
1306 // If there's an increment, the continue scope will be overwritten
1307 // later.
1308 JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
1309 llvm::BasicBlock *CondBlock = CondDest.getBlock();
1310 EmitBlock(CondBlock);
1311
1312 if (CGM.shouldEmitConvergenceTokens())
1313 ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));
1314
1315 const SourceRange &R = S.getSourceRange();
1316 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
1320
1321 // Create a cleanup scope for the condition variable cleanups.
1322 LexicalScope ConditionScope(*this, S.getSourceRange());
1323
1324 // If the for loop doesn't have an increment we can just use the condition as
1325 // the continue block. Otherwise, if there is no condition variable, we can
1326 // form the continue block now. If there is a condition variable, we can't
1327 // form the continue block until after we've emitted the condition, because
1328 // the condition is in scope in the increment, but Sema's jump diagnostics
1329 // ensure that there are no continues from the condition variable that jump
1330 // to the loop increment.
1331 JumpDest Continue;
1332 if (!S.getInc())
1333 Continue = CondDest;
1334 else if (!S.getConditionVariable())
1335 Continue = getJumpDestInCurrentScope("for.inc");
1336 BreakContinueStack.push_back(BreakContinue(S, LoopExit, Continue));
1337
1338 if (S.getCond()) {
1339 // If the for statement has a condition scope, emit the local variable
1340 // declaration.
1341 if (S.getConditionVariable()) {
1343
1344 // We have entered the condition variable's scope, so we're now able to
1345 // jump to the continue block.
1346 Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
1347 BreakContinueStack.back().ContinueBlock = Continue;
1348 }
1349
1350 // When single byte coverage mode is enabled, add a counter to loop
1351 // condition.
1354
1355 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1356 // If there are any cleanups between here and the loop-exit scope,
1357 // create a block to stage a loop exit along.
1358 if (ForScope && ForScope->requiresCleanups())
1359 ExitBlock = createBasicBlock("for.cond.cleanup");
1360
1361 // As long as the condition is true, iterate the loop.
1362 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
1363
1364 // C99 6.8.5p2/p4: The first substatement is executed if the expression
1365 // compares unequal to 0. The condition must be a scalar type.
1366 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1367
1369
1370 llvm::MDNode *Weights =
1371 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1372 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1373 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1374 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1375
1376 auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
1377 // Key Instructions: Emit the condition and branch as separate atoms to
1378 // match existing loop stepping behaviour. FIXME: We could have the branch
1379 // as the backup location for the condition, which would probably be a
1380 // better experience (no jumping to the brace).
1381 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
1382 addInstToNewSourceAtom(CondI, nullptr);
1383 addInstToNewSourceAtom(I, nullptr);
1384
1385 if (ExitBlock != LoopExit.getBlock()) {
1386 EmitBlock(ExitBlock);
1388 }
1389
1390 EmitBlock(ForBody);
1391 } else {
1392 // Treat it as a non-zero constant. Don't even create a new block for the
1393 // body, just fall into it.
1394 }
1395
1396 // When single byte coverage mode is enabled, add a counter to the body.
1399 else
1401 {
1402 // Create a separate cleanup scope for the body, in case it is not
1403 // a compound statement.
1404 RunCleanupsScope BodyScope(*this);
1405 EmitStmt(S.getBody());
1406 }
1407
1408 // The last block in the loop's body (which unconditionally branches to the
1409 // `inc` block if there is one).
1410 auto *FinalBodyBB = Builder.GetInsertBlock();
1411
1412 // If there is an increment, emit it next.
1413 if (S.getInc()) {
1414 EmitBlock(Continue.getBlock());
1415 EmitStmt(S.getInc());
1418 }
1419
1420 BreakContinueStack.pop_back();
1421
1422 ConditionScope.ForceCleanup();
1423
1424 EmitStopPoint(&S);
1425 EmitBranch(CondBlock);
1426
1427 if (ForScope)
1428 ForScope->ForceCleanup();
1429
1430 LoopStack.pop();
1431
1432 // Emit the fall-through block.
1433 EmitBlock(LoopExit.getBlock(), true);
1434
1435 // When single byte coverage mode is enabled, add a counter to continuation
1436 // block.
1439
1440 if (CGM.shouldEmitConvergenceTokens())
1441 ConvergenceTokenStack.pop_back();
1442
1443 if (FinalBodyBB) {
1444 // Key Instructions: We want the for closing brace to be step-able on to
1445 // match existing behaviour.
1446 addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
1447 }
1448}
1449
1450void
1452 ArrayRef<const Attr *> ForAttrs) {
1454
1455 LexicalScope ForScope(*this, S.getSourceRange());
1456
1457 // Evaluate the first pieces before the loop.
1458 if (S.getInit())
1459 EmitStmt(S.getInit());
1462 EmitStmt(S.getEndStmt());
1463
1464 // Start the loop with a block that tests the condition.
1465 // If there's an increment, the continue scope will be overwritten
1466 // later.
1467 llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
1468 EmitBlock(CondBlock);
1469
1470 if (CGM.shouldEmitConvergenceTokens())
1471 ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));
1472
1473 const SourceRange &R = S.getSourceRange();
1474 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
1477
1478 // If there are any cleanups between here and the loop-exit scope,
1479 // create a block to stage a loop exit along.
1480 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1481 if (ForScope.requiresCleanups())
1482 ExitBlock = createBasicBlock("for.cond.cleanup");
1483
1484 // The loop body, consisting of the specified body and the loop variable.
1485 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
1486
1487 // The body is executed if the expression, contextually converted
1488 // to bool, is true.
1489 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1490 llvm::MDNode *Weights =
1491 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1492 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1493 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1494 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1495 auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
1496 // Key Instructions: Emit the condition and branch as separate atoms to
1497 // match existing loop stepping behaviour. FIXME: We could have the branch as
1498 // the backup location for the condition, which would probably be a better
1499 // experience.
1500 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
1501 addInstToNewSourceAtom(CondI, nullptr);
1502 addInstToNewSourceAtom(I, nullptr);
1503
1504 if (ExitBlock != LoopExit.getBlock()) {
1505 EmitBlock(ExitBlock);
1507 }
1508
1509 EmitBlock(ForBody);
1512 else
1514
1515 // Create a block for the increment. In case of a 'continue', we jump there.
1516 JumpDest Continue = getJumpDestInCurrentScope("for.inc");
1517
1518 // Store the blocks to use for break and continue.
1519 BreakContinueStack.push_back(BreakContinue(S, LoopExit, Continue));
1520
1521 {
1522 // Create a separate cleanup scope for the loop variable and body.
1523 LexicalScope BodyScope(*this, S.getSourceRange());
1525 EmitStmt(S.getBody());
1526 }
1527 // The last block in the loop's body (which unconditionally branches to the
1528 // `inc` block if there is one).
1529 auto *FinalBodyBB = Builder.GetInsertBlock();
1530
1531 EmitStopPoint(&S);
1532 // If there is an increment, emit it next.
1533 EmitBlock(Continue.getBlock());
1534 EmitStmt(S.getInc());
1535
1536 BreakContinueStack.pop_back();
1537
1538 EmitBranch(CondBlock);
1539
1540 ForScope.ForceCleanup();
1541
1542 LoopStack.pop();
1543
1544 // Emit the fall-through block.
1545 EmitBlock(LoopExit.getBlock(), true);
1546
1547 // When single byte coverage mode is enabled, add a counter to continuation
1548 // block.
1551
1552 if (CGM.shouldEmitConvergenceTokens())
1553 ConvergenceTokenStack.pop_back();
1554
1555 if (FinalBodyBB) {
1556 // We want the for closing brace to be step-able on to match existing
1557 // behaviour.
1558 addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
1559 }
1560}
1561
1562void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
1563 if (RV.isScalar()) {
1564 Builder.CreateStore(RV.getScalarVal(), ReturnValue);
1565 } else if (RV.isAggregate()) {
1566 LValue Dest = MakeAddrLValue(ReturnValue, Ty);
1569 } else {
1571 /*init*/ true);
1572 }
1574}
1575
1576namespace {
1577// RAII struct used to save and restore a return statment's result expression.
1578struct SaveRetExprRAII {
1579 SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
1580 : OldRetExpr(CGF.RetExpr), CGF(CGF) {
1581 CGF.RetExpr = RetExpr;
1582 }
1583 ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
1584 const Expr *OldRetExpr;
1585 CodeGenFunction &CGF;
1586};
1587} // namespace
1588
1589/// Determine if the given call uses the swiftasync calling convention.
1590static bool isSwiftAsyncCallee(const CallExpr *CE) {
1591 auto calleeQualType = CE->getCallee()->getType();
1592 const FunctionType *calleeType = nullptr;
1593 if (calleeQualType->isFunctionPointerType() ||
1594 calleeQualType->isFunctionReferenceType() ||
1595 calleeQualType->isBlockPointerType() ||
1596 calleeQualType->isMemberFunctionPointerType()) {
1597 calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
1598 } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
1599 calleeType = ty;
1600 } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
1601 if (auto methodDecl = CMCE->getMethodDecl()) {
1602 // getMethodDecl() doesn't handle member pointers at the moment.
1603 calleeType = methodDecl->getType()->castAs<FunctionType>();
1604 } else {
1605 return false;
1606 }
1607 } else {
1608 return false;
1609 }
1610 return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
1611}
1612
1613/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
1614/// if the function returns void, or may be missing one if the function returns
1615/// non-void. Fun stuff :).
1618 if (requiresReturnValueCheck()) {
1619 llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
1620 auto *SLocPtr =
1621 new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
1622 llvm::GlobalVariable::PrivateLinkage, SLoc);
1623 SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1624 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
1625 assert(ReturnLocation.isValid() && "No valid return location");
1626 Builder.CreateStore(SLocPtr, ReturnLocation);
1627 }
1628
1629 // Returning from an outlined SEH helper is UB, and we already warn on it.
1630 if (IsOutlinedSEHHelper) {
1631 Builder.CreateUnreachable();
1632 Builder.ClearInsertionPoint();
1633 }
1634
1635 // Emit the result value, even if unused, to evaluate the side effects.
1636 const Expr *RV = S.getRetValue();
1637
1638 // Record the result expression of the return statement. The recorded
1639 // expression is used to determine whether a block capture's lifetime should
1640 // end at the end of the full expression as opposed to the end of the scope
1641 // enclosing the block expression.
1642 //
1643 // This permits a small, easily-implemented exception to our over-conservative
1644 // rules about not jumping to statements following block literals with
1645 // non-trivial cleanups.
1646 SaveRetExprRAII SaveRetExpr(RV, *this);
1647
1648 RunCleanupsScope cleanupScope(*this);
1649 if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
1650 RV = EWC->getSubExpr();
1651
1652 // If we're in a swiftasynccall function, and the return expression is a
1653 // call to a swiftasynccall function, mark the call as the musttail call.
1654 std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
1655 if (RV && CurFnInfo &&
1656 CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
1657 if (auto CE = dyn_cast<CallExpr>(RV)) {
1658 if (isSwiftAsyncCallee(CE)) {
1659 SaveMustTail.emplace(MustTailCall, CE);
1660 }
1661 }
1662 }
1663
1664 // FIXME: Clean this up by using an LValue for ReturnTemp,
1665 // EmitStoreThroughLValue, and EmitAnyExpr.
1666 // Check if the NRVO candidate was not globalized in OpenMP mode.
1667 if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
1669 (!getLangOpts().OpenMP ||
1670 !CGM.getOpenMPRuntime()
1671 .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
1672 .isValid())) {
1673 // Apply the named return value optimization for this return statement,
1674 // which means doing nothing: the appropriate result has already been
1675 // constructed into the NRVO variable.
1676
1677 // If there is an NRVO flag for this variable, set it to 1 into indicate
1678 // that the cleanup code should not destroy the variable.
1679 if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
1680 Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
1681 } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
1682 // Make sure not to return anything, but evaluate the expression
1683 // for side effects.
1684 if (RV) {
1685 EmitAnyExpr(RV);
1686 }
1687 } else if (!RV) {
1688 // Do nothing (return value is left uninitialized)
1689 } else if (FnRetTy->isReferenceType()) {
1690 // If this function returns a reference, take the address of the expression
1691 // rather than the value.
1693 auto *I = Builder.CreateStore(Result.getScalarVal(), ReturnValue);
1694 addInstToCurrentSourceAtom(I, I->getValueOperand());
1695 } else {
1696 switch (getEvaluationKind(RV->getType())) {
1697 case TEK_Scalar: {
1698 llvm::Value *Ret = EmitScalarExpr(RV);
1699 if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
1701 /*isInit*/ true);
1702 } else {
1703 auto *I = Builder.CreateStore(Ret, ReturnValue);
1704 addInstToCurrentSourceAtom(I, I->getValueOperand());
1705 }
1706 break;
1707 }
1708 case TEK_Complex:
1710 /*isInit*/ true);
1711 break;
1712 case TEK_Aggregate:
1719 break;
1720 }
1721 }
1722
1723 ++NumReturnExprs;
1724 if (!RV || RV->isEvaluatable(getContext()))
1725 ++NumSimpleReturnExprs;
1726
1727 cleanupScope.ForceCleanup();
1729}
1730
1732 // As long as debug info is modeled with instructions, we have to ensure we
1733 // have a place to insert here and write the stop point here.
1734 if (HaveInsertPoint())
1735 EmitStopPoint(&S);
1736
1737 for (const auto *I : S.decls())
1738 EmitDecl(*I, /*EvaluateConditionDecl=*/true);
1739}
1740
1742 -> const BreakContinue * {
1743 if (!S.hasLabelTarget())
1744 return &BreakContinueStack.back();
1745
1746 const Stmt *LoopOrSwitch = S.getNamedLoopOrSwitch();
1747 assert(LoopOrSwitch && "break/continue target not set?");
1748 for (const BreakContinue &BC : llvm::reverse(BreakContinueStack))
1749 if (BC.LoopOrSwitch == LoopOrSwitch)
1750 return &BC;
1751
1752 llvm_unreachable("break/continue target not found");
1753}
1754
1756 assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
1757
1758 // If this code is reachable then emit a stop point (if generating
1759 // debug info). We have to do this ourselves because we are on the
1760 // "simple" statement path.
1761 if (HaveInsertPoint())
1762 EmitStopPoint(&S);
1763
1766}
1767
1769 assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1770
1771 // If this code is reachable then emit a stop point (if generating
1772 // debug info). We have to do this ourselves because we are on the
1773 // "simple" statement path.
1774 if (HaveInsertPoint())
1775 EmitStopPoint(&S);
1776
1779}
1780
1781/// EmitCaseStmtRange - If case statement range is not too big then
1782/// add multiple cases to switch instruction, one for each value within
1783/// the range. If range is too big then emit "if" condition check.
1785 ArrayRef<const Attr *> Attrs) {
1786 assert(S.getRHS() && "Expected RHS value in CaseStmt");
1787
1788 llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1789 llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1790
1791 // Emit the code for this case. We do this first to make sure it is
1792 // properly chained from our predecessor before generating the
1793 // switch machinery to enter this block.
1794 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1795 EmitBlockWithFallThrough(CaseDest, &S);
1796 EmitStmt(S.getSubStmt());
1797
1798 // If range is empty, do nothing.
1799 if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1800 return;
1801
1803 llvm::APInt Range = RHS - LHS;
1804 // FIXME: parameters such as this should not be hardcoded.
1805 if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1806 // Range is small enough to add multiple switch instruction cases.
1807 uint64_t Total = getProfileCount(&S);
1808 unsigned NCases = Range.getZExtValue() + 1;
1809 // We only have one region counter for the entire set of cases here, so we
1810 // need to divide the weights evenly between the generated cases, ensuring
1811 // that the total weight is preserved. E.g., a weight of 5 over three cases
1812 // will be distributed as weights of 2, 2, and 1.
1813 uint64_t Weight = Total / NCases, Rem = Total % NCases;
1814 for (unsigned I = 0; I != NCases; ++I) {
1815 if (SwitchWeights)
1816 SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1817 else if (SwitchLikelihood)
1818 SwitchLikelihood->push_back(LH);
1819
1820 if (Rem)
1821 Rem--;
1822 SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1823 ++LHS;
1824 }
1825 return;
1826 }
1827
1828 // The range is too big. Emit "if" condition into a new block,
1829 // making sure to save and restore the current insertion point.
1830 llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1831
1832 // Push this test onto the chain of range checks (which terminates
1833 // in the default basic block). The switch's default will be changed
1834 // to the top of this chain after switch emission is complete.
1835 llvm::BasicBlock *FalseDest = CaseRangeBlock;
1836 CaseRangeBlock = createBasicBlock("sw.caserange");
1837
1838 CurFn->insert(CurFn->end(), CaseRangeBlock);
1839 Builder.SetInsertPoint(CaseRangeBlock);
1840
1841 // Emit range check.
1842 llvm::Value *Diff =
1843 Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1844 llvm::Value *Cond =
1845 Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1846
1847 llvm::MDNode *Weights = nullptr;
1848 if (SwitchWeights) {
1849 uint64_t ThisCount = getProfileCount(&S);
1850 uint64_t DefaultCount = (*SwitchWeights)[0];
1851 Weights = createProfileWeights(ThisCount, DefaultCount);
1852
1853 // Since we're chaining the switch default through each large case range, we
1854 // need to update the weight for the default, ie, the first case, to include
1855 // this case.
1856 (*SwitchWeights)[0] += ThisCount;
1857 } else if (SwitchLikelihood)
1858 Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1859
1860 Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1861
1862 // Restore the appropriate insertion point.
1863 if (RestoreBB)
1864 Builder.SetInsertPoint(RestoreBB);
1865 else
1866 Builder.ClearInsertionPoint();
1867}
1868
1870 ArrayRef<const Attr *> Attrs) {
1871 // If there is no enclosing switch instance that we're aware of, then this
1872 // case statement and its block can be elided. This situation only happens
1873 // when we've constant-folded the switch, are emitting the constant case,
1874 // and part of the constant case includes another case statement. For
1875 // instance: switch (4) { case 4: do { case 5: } while (1); }
1876 if (!SwitchInsn) {
1877 EmitStmt(S.getSubStmt());
1878 return;
1879 }
1880
1881 // Handle case ranges.
1882 if (S.getRHS()) {
1883 EmitCaseStmtRange(S, Attrs);
1884 return;
1885 }
1886
1887 llvm::ConstantInt *CaseVal =
1889
1890 // Emit debuginfo for the case value if it is an enum value.
1891 const ConstantExpr *CE;
1892 if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1893 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1894 else
1895 CE = dyn_cast<ConstantExpr>(S.getLHS());
1896 if (CE) {
1897 if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1898 if (CGDebugInfo *Dbg = getDebugInfo())
1899 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1900 Dbg->EmitGlobalVariable(DE->getDecl(),
1901 APValue(llvm::APSInt(CaseVal->getValue())));
1902 }
1903
1904 if (SwitchLikelihood)
1905 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1906
1907 // If the body of the case is just a 'break', try to not emit an empty block.
1908 // If we're profiling or we're not optimizing, leave the block in for better
1909 // debug and coverage analysis.
1910 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1911 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1913 JumpDest Block = BreakContinueStack.back().BreakBlock;
1914
1915 // Only do this optimization if there are no cleanups that need emitting.
1917 if (SwitchWeights)
1918 SwitchWeights->push_back(getProfileCount(&S));
1919 SwitchInsn->addCase(CaseVal, Block.getBlock());
1920
1921 // If there was a fallthrough into this case, make sure to redirect it to
1922 // the end of the switch as well.
1923 if (Builder.GetInsertBlock()) {
1924 Builder.CreateBr(Block.getBlock());
1925 Builder.ClearInsertionPoint();
1926 }
1927 return;
1928 }
1929 }
1930
1931 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1932 EmitBlockWithFallThrough(CaseDest, &S);
1933 if (SwitchWeights)
1934 SwitchWeights->push_back(getProfileCount(&S));
1935 SwitchInsn->addCase(CaseVal, CaseDest);
1936
1937 // Recursively emitting the statement is acceptable, but is not wonderful for
1938 // code where we have many case statements nested together, i.e.:
1939 // case 1:
1940 // case 2:
1941 // case 3: etc.
1942 // Handling this recursively will create a new block for each case statement
1943 // that falls through to the next case which is IR intensive. It also causes
1944 // deep recursion which can run into stack depth limitations. Handle
1945 // sequential non-range case statements specially.
1946 //
1947 // TODO When the next case has a likelihood attribute the code returns to the
1948 // recursive algorithm. Maybe improve this case if it becomes common practice
1949 // to use a lot of attributes.
1950 const CaseStmt *CurCase = &S;
1951 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1952
1953 // Otherwise, iteratively add consecutive cases to this switch stmt.
1954 while (NextCase && NextCase->getRHS() == nullptr) {
1955 CurCase = NextCase;
1956 llvm::ConstantInt *CaseVal =
1957 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1958
1959 if (SwitchWeights)
1960 SwitchWeights->push_back(getProfileCount(NextCase));
1961 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1962 CaseDest = createBasicBlock("sw.bb");
1963 EmitBlockWithFallThrough(CaseDest, CurCase);
1964 }
1965 // Since this loop is only executed when the CaseStmt has no attributes
1966 // use a hard-coded value.
1967 if (SwitchLikelihood)
1968 SwitchLikelihood->push_back(Stmt::LH_None);
1969
1970 SwitchInsn->addCase(CaseVal, CaseDest);
1971 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1972 }
1973
1974 // Generate a stop point for debug info if the case statement is
1975 // followed by a default statement. A fallthrough case before a
1976 // default case gets its own branch target.
1977 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1978 EmitStopPoint(CurCase);
1979
1980 // Normal default recursion for non-cases.
1981 EmitStmt(CurCase->getSubStmt());
1982}
1983
1985 ArrayRef<const Attr *> Attrs) {
1986 // If there is no enclosing switch instance that we're aware of, then this
1987 // default statement can be elided. This situation only happens when we've
1988 // constant-folded the switch.
1989 if (!SwitchInsn) {
1990 EmitStmt(S.getSubStmt());
1991 return;
1992 }
1993
1994 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1995 assert(DefaultBlock->empty() &&
1996 "EmitDefaultStmt: Default block already defined?");
1997
1998 if (SwitchLikelihood)
1999 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
2000
2001 EmitBlockWithFallThrough(DefaultBlock, &S);
2002
2003 EmitStmt(S.getSubStmt());
2004}
2005
2006/// CollectStatementsForCase - Given the body of a 'switch' statement and a
2007/// constant value that is being switched on, see if we can dead code eliminate
2008/// the body of the switch to a simple series of statements to emit. Basically,
2009/// on a switch (5) we want to find these statements:
2010/// case 5:
2011/// printf(...); <--
2012/// ++i; <--
2013/// break;
2014///
2015/// and add them to the ResultStmts vector. If it is unsafe to do this
2016/// transformation (for example, one of the elided statements contains a label
2017/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
2018/// should include statements after it (e.g. the printf() line is a substmt of
2019/// the case) then return CSFC_FallThrough. If we handled it and found a break
2020/// statement, then return CSFC_Success.
2021///
2022/// If Case is non-null, then we are looking for the specified case, checking
2023/// that nothing we jump over contains labels. If Case is null, then we found
2024/// the case and are looking for the break.
2025///
2026/// If the recursive walk actually finds our Case, then we set FoundCase to
2027/// true.
2028///
// NOTE(review): the listing's numbering jumps 2028 -> 2031 here, so the
// function header (presumably "static CSFC_Result CollectStatementsForCase(
// const Stmt *S," given the recursive calls below) was dropped by the
// export -- confirm against upstream CGStmt.cpp.
2031 const SwitchCase *Case,
2032 bool &FoundCase,
2033 SmallVectorImpl<const Stmt*> &ResultStmts) {
2034 // If this is a null statement, just succeed.
2035 if (!S)
2036 return Case ? CSFC_Success : CSFC_FallThrough;
2037
2038 // If this is the switchcase (case 4: or default) that we're looking for, then
2039 // we're in business. Just add the substatement.
2040 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
2041 if (S == Case) {
2042 FoundCase = true;
// Found it: from here on we recurse with Case == nullptr, i.e. we are now
// collecting live statements until a 'break' is seen.
2043 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
2044 ResultStmts);
2045 }
2046
2047 // Otherwise, this is some other case or default statement, just ignore it.
2048 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
2049 ResultStmts);
2050 }
2051
2052 // If we are in the live part of the code and we found our break statement,
2053 // return a success!
2054 if (!Case && isa<BreakStmt>(S))
2055 return CSFC_Success;
2056
2057 // If this is a switch statement, then it might contain the SwitchCase, the
2058 // break, or neither.
2059 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
2060 // Handle this as two cases: we might be looking for the SwitchCase (if so
2061 // the skipped statements must be skippable) or we might already have it.
2062 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
2063 bool StartedInLiveCode = FoundCase;
2064 unsigned StartSize = ResultStmts.size();
2065
2066 // If we've not found the case yet, scan through looking for it.
2067 if (Case) {
2068 // Keep track of whether we see a skipped declaration. The code could be
2069 // using the declaration even if it is skipped, so we can't optimize out
2070 // the decl if the kept statements might refer to it.
2071 bool HadSkippedDecl = false;
2072
2073 // If we're looking for the case, just see if we can skip each of the
2074 // substatements.
2075 for (; Case && I != E; ++I) {
2076 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
2077
2078 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
2079 case CSFC_Failure: return CSFC_Failure;
2080 case CSFC_Success:
2081 // A successful result means that either 1) that the statement doesn't
2082 // have the case and is skippable, or 2) does contain the case value
2083 // and also contains the break to exit the switch. In the later case,
2084 // we just verify the rest of the statements are elidable.
2085 if (FoundCase) {
2086 // If we found the case and skipped declarations, we can't do the
2087 // optimization.
2088 if (HadSkippedDecl)
2089 return CSFC_Failure;
2090
// Everything after the break must not contain a reachable label,
// otherwise eliding it would break a goto elsewhere.
2091 for (++I; I != E; ++I)
2092 if (CodeGenFunction::ContainsLabel(*I, true))
2093 return CSFC_Failure;
2094 return CSFC_Success;
2095 }
2096 break;
2097 case CSFC_FallThrough:
2098 // If we have a fallthrough condition, then we must have found the
2099 // case started to include statements. Consider the rest of the
2100 // statements in the compound statement as candidates for inclusion.
2101 assert(FoundCase && "Didn't find case but returned fallthrough?");
2102 // We recursively found Case, so we're not looking for it anymore.
2103 Case = nullptr;
2104
2105 // If we found the case and skipped declarations, we can't do the
2106 // optimization.
2107 if (HadSkippedDecl)
2108 return CSFC_Failure;
2109 break;
2110 }
2111 }
2112
// The whole compound statement was scanned without finding Case: it is
// entirely skippable (each child already passed the label checks above).
2113 if (!FoundCase)
2114 return CSFC_Success;
2115
2116 assert(!HadSkippedDecl && "fallthrough after skipping decl");
2117 }
2118
2119 // If we have statements in our range, then we know that the statements are
2120 // live and need to be added to the set of statements we're tracking.
2121 bool AnyDecls = false;
2122 for (; I != E; ++I) {
// NOTE(review): numbering jumps 2122 -> 2124; the dropped line 2123
// presumably updates AnyDecls (it is never assigned true in the visible
// code, yet is tested below) -- verify against upstream.
2124
2125 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
2126 case CSFC_Failure: return CSFC_Failure;
2127 case CSFC_FallThrough:
2128 // A fallthrough result means that the statement was simple and just
2129 // included in ResultStmt, keep adding them afterwards.
2130 break;
2131 case CSFC_Success:
2132 // A successful result means that we found the break statement and
2133 // stopped statement inclusion. We just ensure that any leftover stmts
2134 // are skippable and return success ourselves.
2135 for (++I; I != E; ++I)
2136 if (CodeGenFunction::ContainsLabel(*I, true))
2137 return CSFC_Failure;
2138 return CSFC_Success;
2139 }
2140 }
2141
2142 // If we're about to fall out of a scope without hitting a 'break;', we
2143 // can't perform the optimization if there were any decls in that scope
2144 // (we'd lose their end-of-lifetime).
2145 if (AnyDecls) {
2146 // If the entire compound statement was live, there's one more thing we
2147 // can try before giving up: emit the whole thing as a single statement.
2148 // We can do that unless the statement contains a 'break;'.
2149 // FIXME: Such a break must be at the end of a construct within this one.
2150 // We could emit this by just ignoring the BreakStmts entirely.
2151 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
// Drop the individually-collected children and emit the compound
// statement as one unit so decl lifetimes stay correct.
2152 ResultStmts.resize(StartSize);
2153 ResultStmts.push_back(S);
2154 } else {
2155 return CSFC_Failure;
2156 }
2157 }
2158
2159 return CSFC_FallThrough;
2160 }
2161
2162 // Okay, this is some other statement that we don't handle explicitly, like a
2163 // for statement or increment etc. If we are skipping over this statement,
2164 // just verify it doesn't have labels, which would make it invalid to elide.
2165 if (Case) {
2166 if (CodeGenFunction::ContainsLabel(S, true))
2167 return CSFC_Failure;
2168 return CSFC_Success;
2169 }
2170
2171 // Otherwise, we want to include this statement. Everything is cool with that
2172 // so long as it doesn't contain a break out of the switch we're in.
// NOTE(review): numbering jumps 2172 -> 2174; the dropped line presumably
// performs the containsBreak check described by the comment above --
// verify against upstream.
2174
2175 // Otherwise, everything is great. Include the statement and tell the caller
2176 // that we fall through and include the next statement as well.
2177 ResultStmts.push_back(S);
2178 return CSFC_FallThrough;
2179}
2180
2181/// FindCaseStatementsForValue - Find the case statement being jumped to and
2182/// then invoke CollectStatementsForCase to find the list of statements to emit
2183/// for a switch on constant. See the comment above CollectStatementsForCase
2184/// for more details.
// NOTE(review): numbering jumps 2184 -> 2186; the header line (presumably
// "static bool FindCaseStatementsForValue(const SwitchStmt &S," given the
// call site in the switch-emission code below) was dropped by the export.
2186 const llvm::APSInt &ConstantCondValue,
2187 SmallVectorImpl<const Stmt*> &ResultStmts,
2188 ASTContext &C,
2189 const SwitchCase *&ResultCase) {
2190 // First step, find the switch case that is being branched to. We can do this
2191 // efficiently by scanning the SwitchCase list.
2192 const SwitchCase *Case = S.getSwitchCaseList();
2193 const DefaultStmt *DefaultCase = nullptr;
2194
2195 for (; Case; Case = Case->getNextSwitchCase()) {
2196 // It's either a default or case. Just remember the default statement in
2197 // case we're not jumping to any numbered cases.
2198 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
2199 DefaultCase = DS;
2200 continue;
2201 }
2202
2203 // Check to see if this case is the one we're looking for.
2204 const CaseStmt *CS = cast<CaseStmt>(Case);
2205 // Don't handle case ranges yet.
2206 if (CS->getRHS()) return false;
2207
2208 // If we found our case, remember it as 'case'.
2209 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
2210 break;
2211 }
2212
2213 // If we didn't find a matching case, we use a default if it exists, or we
2214 // elide the whole switch body!
2215 if (!Case) {
2216 // It is safe to elide the body of the switch if it doesn't contain labels
2217 // etc. If it is safe, return successfully with an empty ResultStmts list.
2218 if (!DefaultCase)
// NOTE(review): numbering jumps 2218 -> 2220; the statement guarded by
// the 'if (!DefaultCase)' above (per the comment, the "safe to elide"
// return) was dropped by the export -- verify against upstream.
2220 Case = DefaultCase;
2221 }
2222
2223 // Ok, we know which case is being jumped to, try to collect all the
2224 // statements that follow it. This can fail for a variety of reasons. Also,
2225 // check to see that the recursive walk actually found our case statement.
2226 // Insane cases like this can fail to find it in the recursive walk since we
2227 // don't handle every stmt kind:
2228 // switch (4) {
2229 // while (1) {
2230 // case 4: ...
2231 bool FoundCase = false;
2232 ResultCase = Case;
2233 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
2234 ResultStmts) != CSFC_Failure &&
2235 FoundCase;
2236}
2237
// Compute branch-weight metadata values for a switch whose cases carry
// [[likely]]/[[unlikely]] attributes. Returns std::nullopt when weighting
// would be pointless (fewer than two destinations, or no attribute used).
2238static std::optional<SmallVector<uint64_t, 16>>
// NOTE(review): numbering jumps 2238 -> 2240; the dropped line presumably
// declares the parameter list (an ArrayRef of Stmt::Likelihood named
// 'Likelihoods', as used throughout the body) -- verify against upstream.
2240 // Are there enough branches to weight them?
2241 if (Likelihoods.size() <= 1)
2242 return std::nullopt;
2243
// First pass: count how many cases fall into each likelihood class.
2244 uint64_t NumUnlikely = 0;
2245 uint64_t NumNone = 0;
2246 uint64_t NumLikely = 0;
2247 for (const auto LH : Likelihoods) {
2248 switch (LH) {
2249 case Stmt::LH_Unlikely:
2250 ++NumUnlikely;
2251 break;
2252 case Stmt::LH_None:
2253 ++NumNone;
2254 break;
2255 case Stmt::LH_Likely:
2256 ++NumLikely;
2257 break;
2258 }
2259 }
2260
2261 // Is there a likelihood attribute used?
2262 if (NumUnlikely == 0 && NumLikely == 0)
2263 return std::nullopt;
2264
2265 // When multiple cases share the same code they can be combined during
2266 // optimization. In that case the weights of the branch will be the sum of
2267 // the individual weights. Make sure the combined sum of all neutral cases
2268 // doesn't exceed the value of a single likely attribute.
2269 // The additions both avoid divisions by 0 and make sure the weights of None
2270 // don't exceed the weight of Likely.
2271 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2272 const uint64_t None = Likely / (NumNone + 1);
2273 const uint64_t Unlikely = 0;
2274
// NOTE(review): numbering jumps 2274 -> 2276; the dropped line presumably
// declares 'Result' (a SmallVector<uint64_t, 16>, matching the function's
// return type) -- verify against upstream.
2276 Result.reserve(Likelihoods.size());
// Second pass: map each case's likelihood class to its computed weight,
// preserving case order (index 0 is the default destination at call sites).
2277 for (const auto LH : Likelihoods) {
2278 switch (LH) {
2279 case Stmt::LH_Unlikely:
2280 Result.push_back(Unlikely);
2281 break;
2282 case Stmt::LH_None:
2283 Result.push_back(None);
2284 break;
2285 case Stmt::LH_Likely:
2286 Result.push_back(Likely);
2287 break;
2288 }
2289 }
2290
2291 return Result;
2292}
2293
// NOTE(review): numbering jumps 2292 -> 2295, so this block's function header
// was dropped by the export. From the body (S.getCond()/S.getBody(),
// SwitchInsn, CaseRangeBlock) this is presumably
// CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) -- confirm upstream.
2295 // Handle nested switch statements.
// Save the per-switch state so an enclosing switch can be restored on exit.
2296 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2297 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2298 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2299 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2300
2301 // See if we can constant fold the condition of the switch and therefore only
2302 // emit the live case statement (if any) of the switch.
2303 llvm::APSInt ConstantCondValue;
2304 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
// NOTE(review): line 2305 dropped -- presumably the declaration of
// 'CaseStmts' (a SmallVector of const Stmt*) used just below.
2306 const SwitchCase *Case = nullptr;
2307 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2308 getContext(), Case)) {
2309 if (Case)
// NOTE(review): line 2310 (the statement guarded by 'if (Case)') dropped.
2311 RunCleanupsScope ExecutedScope(*this);
2312
2313 if (S.getInit())
2314 EmitStmt(S.getInit());
2315
2316 // Emit the condition variable if needed inside the entire cleanup scope
2317 // used by this special case for constant folded switches.
2318 if (S.getConditionVariable())
2319 EmitDecl(*S.getConditionVariable(), /*EvaluateConditionDecl=*/true);
2320
2321 // At this point, we are no longer "within" a switch instance, so
2322 // we can temporarily enforce this to ensure that any embedded case
2323 // statements are not emitted.
2324 SwitchInsn = nullptr;
2325
2326 // Okay, we can dead code eliminate everything except this case. Emit the
2327 // specified series of statements and we're good.
2328 for (const Stmt *CaseStmt : CaseStmts)
// NOTE(review): lines 2329-2330 (the body of this loop, presumably
// emitting each collected statement) were dropped by the export.
2331 PGO->markStmtMaybeUsed(S.getBody());
2332
2333 // Now we want to restore the saved switch instance so that nested
2334 // switches continue to function properly
2335 SwitchInsn = SavedSwitchInsn;
2336
2337 return;
2338 }
2339 }
2340
2341 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2342
2343 RunCleanupsScope ConditionScope(*this);
2344
2345 if (S.getInit())
2346 EmitStmt(S.getInit());
2347
2348 if (S.getConditionVariable())
// NOTE(review): line 2349 (the statement guarded by the condition-variable
// check above) was dropped by the export.
2350 llvm::Value *CondV = EmitScalarExpr(S.getCond());
// NOTE(review): line 2351 dropped.
2352
2353 // Create basic block to hold stuff that comes after switch
2354 // statement. We also need to create a default block now so that
2355 // explicit case ranges tests can have a place to jump to on
2356 // failure.
2357 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2358 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
2359 addInstToNewSourceAtom(SwitchInsn, CondV);
2360
// Propagate an HLSL [branch]/[flatten] hint onto the switch instruction.
2361 if (HLSLControlFlowAttr != HLSLControlFlowHintAttr::SpellingNotCalculated) {
2362 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2363 llvm::ConstantInt *BranchHintConstant =
// NOTE(review): line 2364 (the condition feeding this ?: -- presumably a
// comparison of HLSLControlFlowAttr against the 'branch' spelling) was
// dropped by the export.
2365 HLSLControlFlowHintAttr::Spelling::Microsoft_branch
2366 ? llvm::ConstantInt::get(CGM.Int32Ty, 1)
2367 : llvm::ConstantInt::get(CGM.Int32Ty, 2);
2368 llvm::Metadata *Vals[] = {MDHelper.createString("hlsl.controlflow.hint"),
2369 MDHelper.createConstant(BranchHintConstant)};
2370 SwitchInsn->setMetadata("hlsl.controlflow.hint",
2371 llvm::MDNode::get(CGM.getLLVMContext(), Vals));
2372 }
2373
// With PGO data, collect real edge counts; otherwise (when optimizing)
// fall back to recording [[likely]]/[[unlikely]] info per case.
2374 if (PGO->haveRegionCounts()) {
2375 // Walk the SwitchCase list to find how many there are.
2376 uint64_t DefaultCount = 0;
2377 unsigned NumCases = 0;
2378 for (const SwitchCase *Case = S.getSwitchCaseList();
2379 Case;
2380 Case = Case->getNextSwitchCase()) {
2381 if (isa<DefaultStmt>(Case))
2382 DefaultCount = getProfileCount(Case);
2383 NumCases += 1;
2384 }
2385 SwitchWeights = new SmallVector<uint64_t, 16>();
2386 SwitchWeights->reserve(NumCases);
2387 // The default needs to be first. We store the edge count, so we already
2388 // know the right weight.
2389 SwitchWeights->push_back(DefaultCount);
2390 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2391 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2392 // Initialize the default case.
2393 SwitchLikelihood->push_back(Stmt::LH_None);
2394 }
2395
2396 CaseRangeBlock = DefaultBlock;
2397
2398 // Clear the insertion point to indicate we are in unreachable code.
2399 Builder.ClearInsertionPoint();
2400
2401 // All break statements jump to NextBlock. If BreakContinueStack is non-empty
2402 // then reuse last ContinueBlock.
2403 JumpDest OuterContinue;
2404 if (!BreakContinueStack.empty())
2405 OuterContinue = BreakContinueStack.back().ContinueBlock;
2406
2407 BreakContinueStack.push_back(BreakContinue(S, SwitchExit, OuterContinue));
2408
2409 // Emit switch body.
2410 EmitStmt(S.getBody());
2411
2412 BreakContinueStack.pop_back();
2413
2414 // Update the default block in case explicit case range tests have
2415 // been chained on top.
2416 SwitchInsn->setDefaultDest(CaseRangeBlock);
2417
2418 // If a default was never emitted:
2419 if (!DefaultBlock->getParent()) {
2420 // If we have cleanups, emit the default block so that there's a
2421 // place to jump through the cleanups from.
2422 if (ConditionScope.requiresCleanups()) {
2423 EmitBlock(DefaultBlock);
2424
2425 // Otherwise, just forward the default block to the switch end.
2426 } else {
2427 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2428 delete DefaultBlock;
2429 }
2430 }
2431
2432 ConditionScope.ForceCleanup();
2433
2434 // Emit continuation.
2435 EmitBlock(SwitchExit.getBlock(), true);
// NOTE(review): line 2436 was dropped by the export.
2437
2438 // If the switch has a condition wrapped by __builtin_unpredictable,
2439 // create metadata that specifies that the switch is unpredictable.
2440 // Don't bother if not optimizing because that metadata would not be used.
2441 auto *Call = dyn_cast<CallExpr>(S.getCond());
2442 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2443 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2444 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2445 llvm::MDBuilder MDHelper(getLLVMContext());
2446 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2447 MDHelper.createUnpredictable());
2448 }
2449 }
2450
// Attach !prof branch weights from PGO counts or likelihood attributes,
// then free the per-switch scratch vectors.
2451 if (SwitchWeights) {
2452 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2453 "switch weights do not match switch cases");
2454 // If there's only one jump destination there's no sense weighting it.
2455 if (SwitchWeights->size() > 1)
2456 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2457 createProfileWeights(*SwitchWeights));
2458 delete SwitchWeights;
2459 } else if (SwitchLikelihood) {
2460 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2461 "switch likelihoods do not match switch cases");
2462 std::optional<SmallVector<uint64_t, 16>> LHW =
2463 getLikelihoodWeights(*SwitchLikelihood);
2464 if (LHW) {
2465 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2466 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2467 createProfileWeights(*LHW));
2468 }
2469 delete SwitchLikelihood;
2470 }
// Restore the enclosing switch's state saved at entry.
2471 SwitchInsn = SavedSwitchInsn;
2472 SwitchWeights = SavedSwitchWeights;
2473 SwitchLikelihood = SavedSwitchLikelihood;
2474 CaseRangeBlock = SavedCRBlock;
2475}
2476
2477/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
2478/// as using a particular register add that as a constraint that will be used
2479/// in this asm stmt.
2480static std::string
2481AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
// NOTE(review): numbering jumps 2481 -> 2483; the dropped parameter line
// presumably declares 'Target' and 'CGM' (both are used in the body below)
// -- verify against upstream.
2483 const AsmStmt &Stmt, const bool EarlyClobber,
2484 std::string *GCCReg = nullptr) {
// Only a DeclRefExpr naming a 'register' variable with an asm("reg") label
// qualifies; any other operand keeps its original constraint unchanged.
2485 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2486 if (!AsmDeclRef)
2487 return Constraint;
2488 const ValueDecl &Value = *AsmDeclRef->getDecl();
2489 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2490 if (!Variable)
2491 return Constraint;
2492 if (Variable->getStorageClass() != SC_Register)
2493 return Constraint;
2494 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2495 if (!Attr)
2496 return Constraint;
2497 StringRef Register = Attr->getLabel();
2498 assert(Target.isValidGCCRegisterName(Register));
2499 // We're using validateOutputConstraint here because we only care if
2500 // this is a register constraint.
2501 TargetInfo::ConstraintInfo Info(Constraint, "");
2502 if (Target.validateOutputConstraint(Info) &&
2503 !Info.allowsRegister()) {
2504 CGM.ErrorUnsupported(&Stmt, "__asm__");
2505 return Constraint;
2506 }
2507 // Canonicalize the register here before returning it.
2508 Register = Target.getNormalizedGCCRegisterName(Register);
2509 if (GCCReg != nullptr)
2510 *GCCReg = Register.str();
// Emit an explicit-register constraint, e.g. "{eax}" (with "&" prepended
// for early-clobber outputs).
2511 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2512}
2513
// Lower an lvalue asm operand: load it into a register when the constraint
// allows (possibly through an integer of the same bit-width), otherwise pass
// its address and mark the constraint as indirect ('*').
2514std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2515 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2516 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2517 if (Info.allowsRegister() || !Info.allowsMemory()) {
// NOTE(review): numbering jumps 2517 -> 2519; the dropped line presumably
// guards this scalar load (e.g. a scalar-evaluation-kind check on
// InputType, given the aggregate path below) -- verify against upstream.
2519 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2520
// Non-scalar case: small power-of-two-sized (or target-scalarizable)
// aggregates can still go in a register by loading them as an iN integer.
2521 llvm::Type *Ty = ConvertType(InputType);
2522 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2523 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2524 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2525 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2526
2527 return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
2528 nullptr};
2529 }
2530 }
2531
// Memory operand: pass the pointer and flag the constraint as indirect.
2532 Address Addr = InputValue.getAddress();
2533 ConstraintStr += '*';
2534 return {InputValue.getPointer(*this), Addr.getElementType()};
2535}
2536
// Lower a general asm input operand: try an immediate constant first when the
// constraint demands one, then a scalar register value, and finally fall back
// to an lvalue (register-or-memory) via EmitAsmInputLValue.
2537std::pair<llvm::Value *, llvm::Type *>
2538CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2539 const Expr *InputExpr,
2540 std::string &ConstraintStr) {
2541 // If this can't be a register or memory, i.e., has to be a constant
2542 // (immediate or symbolic), try to emit it as such.
2543 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2544 if (Info.requiresImmediateConstant()) {
2545 Expr::EvalResult EVResult;
2546 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2547
2548 llvm::APSInt IntResult;
2549 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2550 getContext()))
2551 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2552 }
2553
2554 Expr::EvalResult Result;
2555 if (InputExpr->EvaluateAsInt(Result, getContext()))
2556 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2557 nullptr};
2558 }
2559
2560 if (Info.allowsRegister() || !Info.allowsMemory())
// NOTE(review): numbering jumps 2560 -> 2562; the dropped line presumably
// guards this return (e.g. a scalar-evaluation-kind check on the input's
// type, mirroring EmitAsmInputLValue above) -- verify against upstream.
2562 return {EmitScalarExpr(InputExpr), nullptr};
// 'this' is an rvalue pointer; it cannot be emitted as an lvalue below.
2563 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2564 return {EmitScalarExpr(InputExpr), nullptr};
2565 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2566 LValue Dest = EmitLValue(InputExpr);
2567 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2568 InputExpr->getExprLoc());
2569}
2570
2571/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2572/// asm call instruction. The !srcloc MDNode contains a list of constant
2573/// integers which are the source locations of the start of each line in the
2574/// asm.
2575static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2576 CodeGenFunction &CGF) {
// NOTE(review): numbering jumps 2576 -> 2578; the dropped line presumably
// declares 'Locs' (a SmallVector of llvm::Metadata*, used below) --
// verify against upstream.
2578 // Add the location of the first line to the MDNode.
2579 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2580 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())))
2581 StringRef StrVal = Str->getString();
2582 if (!StrVal.empty()) {
// NOTE(review): numbering jumps 2582 -> 2584; the dropped line presumably
// declares 'SM' (a SourceManager reference, used by getLocationOfByte
// below) -- verify against upstream.
2584 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2585 unsigned StartToken = 0;
2586 unsigned ByteOffset = 0;
2587
2588 // Add the location of the start of each subsequent line of the asm to the
2589 // MDNode.
2590 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2591 if (StrVal[i] != '\n') continue;
2592 SourceLocation LineLoc = Str->getLocationOfByte(
2593 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2594 Locs.push_back(llvm::ConstantAsMetadata::get(
2595 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2596 }
2597 }
2598
2599 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2600}
2601
2602static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2603 bool HasUnwindClobber, bool ReadOnly,
2604 bool ReadNone, bool NoMerge, bool NoConvergent,
2605 const AsmStmt &S,
2606 const std::vector<llvm::Type *> &ResultRegTypes,
2607 const std::vector<llvm::Type *> &ArgElemTypes,
2608 CodeGenFunction &CGF,
2609 std::vector<llvm::Value *> &RegResults) {
2610 if (!HasUnwindClobber)
2611 Result.addFnAttr(llvm::Attribute::NoUnwind);
2612
2613 if (NoMerge)
2614 Result.addFnAttr(llvm::Attribute::NoMerge);
2615 // Attach readnone and readonly attributes.
2616 if (!HasSideEffect) {
2617 if (ReadNone)
2618 Result.setDoesNotAccessMemory();
2619 else if (ReadOnly)
2620 Result.setOnlyReadsMemory();
2621 }
2622
2623 // Add elementtype attribute for indirect constraints.
2624 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2625 if (Pair.value()) {
2626 auto Attr = llvm::Attribute::get(
2627 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2628 Result.addParamAttr(Pair.index(), Attr);
2629 }
2630 }
2631
2632 // Slap the source location of the inline asm into a !srcloc metadata on the
2633 // call.
2634 const StringLiteral *SL;
2635 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S);
2636 gccAsmStmt &&
2637 (SL = dyn_cast<StringLiteral>(gccAsmStmt->getAsmStringExpr()))) {
2638 Result.setMetadata("srcloc", getAsmSrcLocInfo(SL, CGF));
2639 } else {
2640 // At least put the line number on MS inline asm blobs and GCC asm constexpr
2641 // strings.
2642 llvm::Constant *Loc =
2643 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2644 Result.setMetadata("srcloc",
2645 llvm::MDNode::get(CGF.getLLVMContext(),
2646 llvm::ConstantAsMetadata::get(Loc)));
2647 }
2648
2649 // Make inline-asm calls Key for the debug info feature Key Instructions.
2650 CGF.addInstToNewSourceAtom(&Result, nullptr);
2651
2652 if (!NoConvergent && CGF.getLangOpts().assumeFunctionsAreConvergent())
2653 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2654 // convergent (meaning, they may call an intrinsically convergent op, such
2655 // as bar.sync, and so can't have certain optimizations applied around
2656 // them) unless it's explicitly marked 'noconvergent'.
2657 Result.addFnAttr(llvm::Attribute::Convergent);
2658 // Extract all of the register value results from the asm.
2659 if (ResultRegTypes.size() == 1) {
2660 RegResults.push_back(&Result);
2661 } else {
2662 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2663 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2664 RegResults.push_back(Tmp);
2665 }
2666 }
2667}
2668
// Store the asm call's register results back through their output lvalues,
// applying range assumptions and any needed integer/pointer/vector
// conversions along the way.
2669static void
// NOTE(review): numbering jumps 2669 -> 2671; the dropped line presumably
// carries the function name and leading parameters (CGF and the AsmStmt 'S'
// used below) -- verify against upstream.
2671 const llvm::ArrayRef<llvm::Value *> RegResults,
2672 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2673 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2674 const llvm::ArrayRef<LValue> ResultRegDests,
2675 const llvm::ArrayRef<QualType> ResultRegQualTys,
2676 const llvm::BitVector &ResultTypeRequiresCast,
2677 const std::vector<std::optional<std::pair<unsigned, unsigned>>>
2678 &ResultBounds) {
2679 CGBuilderTy &Builder = CGF.Builder;
2680 CodeGenModule &CGM = CGF.CGM;
2681 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2682
2683 assert(RegResults.size() == ResultRegTypes.size());
2684 assert(RegResults.size() == ResultTruncRegTypes.size());
2685 assert(RegResults.size() == ResultRegDests.size());
2686 // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2687 // in which case its size may grow.
2688 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2689 assert(ResultBounds.size() <= ResultRegDests.size());
2690
2691 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2692 llvm::Value *Tmp = RegResults[i];
2693 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2694
// When the constraint promises a bounded output value, emit an
// llvm.assume of (Tmp < UpperBound) so the optimizer can exploit it.
2695 if ((i < ResultBounds.size()) && ResultBounds[i].has_value()) {
2696 const auto [LowerBound, UpperBound] = ResultBounds[i].value();
2697 // FIXME: Support for nonzero lower bounds not yet implemented.
2698 assert(LowerBound == 0 && "Output operand lower bound is not zero.");
2699 llvm::Constant *UpperBoundConst =
2700 llvm::ConstantInt::get(Tmp->getType(), UpperBound);
2701 llvm::Value *IsBooleanValue =
2702 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, UpperBoundConst);
2703 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2704 Builder.CreateCall(FnAssume, IsBooleanValue);
2705 }
2706
2707 // If the result type of the LLVM IR asm doesn't match the result type of
2708 // the expression, do the conversion.
2709 if (ResultRegTypes[i] != TruncTy) {
2710
2711 // Truncate the integer result to the right size, note that TruncTy can be
2712 // a pointer.
2713 if (TruncTy->isFloatingPointTy())
2714 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2715 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2716 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2717 Tmp = Builder.CreateTrunc(
2718 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2719 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2720 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2721 uint64_t TmpSize =
2722 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2723 Tmp = Builder.CreatePtrToInt(
2724 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2725 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2726 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2727 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2728 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2729 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2730 }
2731 }
2732
2733 ApplyAtomGroup Grp(CGF.getDebugInfo());
2734 LValue Dest = ResultRegDests[i];
2735 // ResultTypeRequiresCast elements correspond to the first
2736 // ResultTypeRequiresCast.size() elements of RegResults.
2737 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2738 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2739 Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
2740 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2741 llvm::StoreInst *S = Builder.CreateStore(Tmp, A);
2742 CGF.addInstToCurrentSourceAtom(S, S->getValueOperand());
2743 continue;
2744 }
2745
// Otherwise reinterpret the destination as an unsigned integer of the
// same width; if no such type exists this register store is an error.
2746 QualType Ty =
2747 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2748 if (Ty.isNull()) {
2749 const Expr *OutExpr = S.getOutputExpr(i);
2750 CGM.getDiags().Report(OutExpr->getExprLoc(),
2751 diag::err_store_value_to_reg);
2752 return;
2753 }
2754 Dest = CGF.MakeAddrLValue(A, Ty);
2755 }
2756 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2757 }
2758}
2759
// NOTE(review): the listing's numbering jumps 2758 -> 2761, so this helper's
// first signature line was dropped by the export; from the call site below
// ("EmitHipStdParUnsupportedAsm(this, S)") it presumably reads
// "static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF," --
// verify against upstream.
//
// For HIPStdPar device compilation, replace an asm statement whose
// constraints the target cannot validate with a call to a marker function
// carrying the asm text, instead of failing outright.
2761 const AsmStmt &S) {
2762 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2763
2764 std::string Asm;
2765 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2766 Asm = GCCAsm->getAsmString();
2767
2768 auto &Ctx = CGF->CGM.getLLVMContext();
2769
// Pass the asm text itself as the argument so the failure is diagnosable
// later from the IR.
2770 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2771 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2772 {StrTy->getType()}, false);
2773 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2774
2775 CGF->Builder.CreateCall(UBF, {StrTy});
2776}
2777
2779 // Pop all cleanup blocks at the end of the asm statement.
2780 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2781
2782 // Assemble the final asm string.
2783 std::string AsmString = S.generateAsmString(getContext());
2784
2785 // Get all the output and input constraints together.
2786 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2787 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2788
2789 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2790 bool IsValidTargetAsm = true;
2791 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2792 StringRef Name;
2793 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2794 Name = GAS->getOutputName(i);
2796 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2797 if (IsHipStdPar && !IsValid)
2798 IsValidTargetAsm = false;
2799 else
2800 assert(IsValid && "Failed to parse output constraint");
2801 OutputConstraintInfos.push_back(Info);
2802 }
2803
2804 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2805 StringRef Name;
2806 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2807 Name = GAS->getInputName(i);
2809 bool IsValid =
2810 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2811 if (IsHipStdPar && !IsValid)
2812 IsValidTargetAsm = false;
2813 else
2814 assert(IsValid && "Failed to parse input constraint");
2815 InputConstraintInfos.push_back(Info);
2816 }
2817
2818 if (!IsValidTargetAsm)
2819 return EmitHipStdParUnsupportedAsm(this, S);
2820
2821 std::string Constraints;
2822
2823 std::vector<LValue> ResultRegDests;
2824 std::vector<QualType> ResultRegQualTys;
2825 std::vector<llvm::Type *> ResultRegTypes;
2826 std::vector<llvm::Type *> ResultTruncRegTypes;
2827 std::vector<llvm::Type *> ArgTypes;
2828 std::vector<llvm::Type *> ArgElemTypes;
2829 std::vector<llvm::Value*> Args;
2830 llvm::BitVector ResultTypeRequiresCast;
2831 std::vector<std::optional<std::pair<unsigned, unsigned>>> ResultBounds;
2832
2833 // Keep track of inout constraints.
2834 std::string InOutConstraints;
2835 std::vector<llvm::Value*> InOutArgs;
2836 std::vector<llvm::Type*> InOutArgTypes;
2837 std::vector<llvm::Type*> InOutArgElemTypes;
2838
2839 // Keep track of out constraints for tied input operand.
2840 std::vector<std::string> OutputConstraints;
2841
2842 // Keep track of defined physregs.
2843 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2844
2845 // An inline asm can be marked readonly if it meets the following conditions:
2846 // - it doesn't have any sideeffects
2847 // - it doesn't clobber memory
2848 // - it doesn't return a value by-reference
2849 // It can be marked readnone if it doesn't have any input memory constraints
2850 // in addition to meeting the conditions listed above.
2851 bool ReadOnly = true, ReadNone = true;
2852
2853 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2854 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2855
2856 // Simplify the output constraint.
2857 std::string OutputConstraint(S.getOutputConstraint(i));
2858 OutputConstraint = getTarget().simplifyConstraint(
2859 StringRef(OutputConstraint).substr(1), &OutputConstraintInfos);
2860
2861 const Expr *OutExpr = S.getOutputExpr(i);
2862 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2863
2864 std::string GCCReg;
2865 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2866 getTarget(), CGM, S,
2867 Info.earlyClobber(),
2868 &GCCReg);
2869 // Give an error on multiple outputs to same physreg.
2870 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2871 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2872
2873 OutputConstraints.push_back(OutputConstraint);
2874 LValue Dest = EmitLValue(OutExpr);
2875 if (!Constraints.empty())
2876 Constraints += ',';
2877
2878 // If this is a register output, then make the inline asm return it
2879 // by-value. If this is a memory result, return the value by-reference.
2880 QualType QTy = OutExpr->getType();
2881 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2883 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2884
2885 Constraints += "=" + OutputConstraint;
2886 ResultRegQualTys.push_back(QTy);
2887 ResultRegDests.push_back(Dest);
2888
2889 ResultBounds.emplace_back(Info.getOutputOperandBounds());
2890
2891 llvm::Type *Ty = ConvertTypeForMem(QTy);
2892 const bool RequiresCast = Info.allowsRegister() &&
2894 Ty->isAggregateType());
2895
2896 ResultTruncRegTypes.push_back(Ty);
2897 ResultTypeRequiresCast.push_back(RequiresCast);
2898
2899 if (RequiresCast) {
2900 unsigned Size = getContext().getTypeSize(QTy);
2901 if (Size)
2902 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2903 else
2904 CGM.Error(OutExpr->getExprLoc(), "output size should not be zero");
2905 }
2906 ResultRegTypes.push_back(Ty);
2907 // If this output is tied to an input, and if the input is larger, then
2908 // we need to set the actual result type of the inline asm node to be the
2909 // same as the input type.
2910 if (Info.hasMatchingInput()) {
2911 unsigned InputNo;
2912 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2913 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2914 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2915 break;
2916 }
2917 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2918
2919 QualType InputTy = S.getInputExpr(InputNo)->getType();
2920 QualType OutputType = OutExpr->getType();
2921
2922 uint64_t InputSize = getContext().getTypeSize(InputTy);
2923 if (getContext().getTypeSize(OutputType) < InputSize) {
2924 // Form the asm to return the value as a larger integer or fp type.
2925 ResultRegTypes.back() = ConvertType(InputTy);
2926 }
2927 }
2928 if (llvm::Type* AdjTy =
2929 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2930 ResultRegTypes.back()))
2931 ResultRegTypes.back() = AdjTy;
2932 else {
2933 CGM.getDiags().Report(S.getAsmLoc(),
2934 diag::err_asm_invalid_type_in_input)
2935 << OutExpr->getType() << OutputConstraint;
2936 }
2937
2938 // Update largest vector width for any vector types.
2939 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2940 LargestVectorWidth =
2941 std::max((uint64_t)LargestVectorWidth,
2942 VT->getPrimitiveSizeInBits().getKnownMinValue());
2943 } else {
2944 Address DestAddr = Dest.getAddress();
2945 // Matrix types in memory are represented by arrays, but accessed through
2946 // vector pointers, with the alignment specified on the access operation.
2947 // For inline assembly, update pointer arguments to use vector pointers.
2948 // Otherwise there will be a mis-match if the matrix is also an
2949 // input-argument which is represented as vector.
2950 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2951 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2952
2953 ArgTypes.push_back(DestAddr.getType());
2954 ArgElemTypes.push_back(DestAddr.getElementType());
2955 Args.push_back(DestAddr.emitRawPointer(*this));
2956 Constraints += "=*";
2957 Constraints += OutputConstraint;
2958 ReadOnly = ReadNone = false;
2959 }
2960
2961 if (Info.isReadWrite()) {
2962 InOutConstraints += ',';
2963
2964 const Expr *InputExpr = S.getOutputExpr(i);
2965 llvm::Value *Arg;
2966 llvm::Type *ArgElemType;
2967 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2968 Info, Dest, InputExpr->getType(), InOutConstraints,
2969 InputExpr->getExprLoc());
2970
2971 if (llvm::Type* AdjTy =
2972 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2973 Arg->getType()))
2974 Arg = Builder.CreateBitCast(Arg, AdjTy);
2975
2976 // Update largest vector width for any vector types.
2977 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2978 LargestVectorWidth =
2979 std::max((uint64_t)LargestVectorWidth,
2980 VT->getPrimitiveSizeInBits().getKnownMinValue());
2981 // Only tie earlyclobber physregs.
2982 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2983 InOutConstraints += llvm::utostr(i);
2984 else
2985 InOutConstraints += OutputConstraint;
2986
2987 InOutArgTypes.push_back(Arg->getType());
2988 InOutArgElemTypes.push_back(ArgElemType);
2989 InOutArgs.push_back(Arg);
2990 }
2991 }
2992
2993 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2994 // to the return value slot. Only do this when returning in registers.
2995 if (isa<MSAsmStmt>(&S)) {
2996 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2997 if (RetAI.isDirect() || RetAI.isExtend()) {
2998 // Make a fake lvalue for the return value slot.
3000 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
3001 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
3002 ResultRegDests, AsmString, S.getNumOutputs());
3003 SawAsmBlock = true;
3004 }
3005 }
3006
3007 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
3008 const Expr *InputExpr = S.getInputExpr(i);
3009
3010 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
3011
3012 if (Info.allowsMemory())
3013 ReadNone = false;
3014
3015 if (!Constraints.empty())
3016 Constraints += ',';
3017
3018 // Simplify the input constraint.
3019 std::string InputConstraint(S.getInputConstraint(i));
3020 InputConstraint =
3021 getTarget().simplifyConstraint(InputConstraint, &OutputConstraintInfos);
3022
3023 InputConstraint = AddVariableConstraints(
3024 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
3025 getTarget(), CGM, S, false /* No EarlyClobber */);
3026
3027 std::string ReplaceConstraint (InputConstraint);
3028 llvm::Value *Arg;
3029 llvm::Type *ArgElemType;
3030 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
3031
3032 // If this input argument is tied to a larger output result, extend the
3033 // input to be the same size as the output. The LLVM backend wants to see
3034 // the input and output of a matching constraint be the same size. Note
3035 // that GCC does not define what the top bits are here. We use zext because
3036 // that is usually cheaper, but LLVM IR should really get an anyext someday.
3037 if (Info.hasTiedOperand()) {
3038 unsigned Output = Info.getTiedOperand();
3039 QualType OutputType = S.getOutputExpr(Output)->getType();
3040 QualType InputTy = InputExpr->getType();
3041
3042 if (getContext().getTypeSize(OutputType) >
3043 getContext().getTypeSize(InputTy)) {
3044 // Use ptrtoint as appropriate so that we can do our extension.
3045 if (isa<llvm::PointerType>(Arg->getType()))
3046 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
3047 llvm::Type *OutputTy = ConvertType(OutputType);
3048 if (isa<llvm::IntegerType>(OutputTy))
3049 Arg = Builder.CreateZExt(Arg, OutputTy);
3050 else if (isa<llvm::PointerType>(OutputTy))
3051 Arg = Builder.CreateZExt(Arg, IntPtrTy);
3052 else if (OutputTy->isFloatingPointTy())
3053 Arg = Builder.CreateFPExt(Arg, OutputTy);
3054 }
3055 // Deal with the tied operands' constraint code in adjustInlineAsmType.
3056 ReplaceConstraint = OutputConstraints[Output];
3057 }
3058 if (llvm::Type* AdjTy =
3059 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
3060 Arg->getType()))
3061 Arg = Builder.CreateBitCast(Arg, AdjTy);
3062 else
3063 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
3064 << InputExpr->getType() << InputConstraint;
3065
3066 // Update largest vector width for any vector types.
3067 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
3068 LargestVectorWidth =
3069 std::max((uint64_t)LargestVectorWidth,
3070 VT->getPrimitiveSizeInBits().getKnownMinValue());
3071
3072 ArgTypes.push_back(Arg->getType());
3073 ArgElemTypes.push_back(ArgElemType);
3074 Args.push_back(Arg);
3075 Constraints += InputConstraint;
3076 }
3077
3078 // Append the "input" part of inout constraints.
3079 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
3080 ArgTypes.push_back(InOutArgTypes[i]);
3081 ArgElemTypes.push_back(InOutArgElemTypes[i]);
3082 Args.push_back(InOutArgs[i]);
3083 }
3084 Constraints += InOutConstraints;
3085
3086 // Labels
3088 llvm::BasicBlock *Fallthrough = nullptr;
3089 bool IsGCCAsmGoto = false;
3090 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
3091 IsGCCAsmGoto = GS->isAsmGoto();
3092 if (IsGCCAsmGoto) {
3093 for (const auto *E : GS->labels()) {
3094 JumpDest Dest = getJumpDestForLabel(E->getLabel());
3095 Transfer.push_back(Dest.getBlock());
3096 if (!Constraints.empty())
3097 Constraints += ',';
3098 Constraints += "!i";
3099 }
3100 Fallthrough = createBasicBlock("asm.fallthrough");
3101 }
3102 }
3103
3104 bool HasUnwindClobber = false;
3105
3106 // Clobbers
3107 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
3108 std::string Clobber = S.getClobber(i);
3109
3110 if (Clobber == "memory")
3111 ReadOnly = ReadNone = false;
3112 else if (Clobber == "unwind") {
3113 HasUnwindClobber = true;
3114 continue;
3115 } else if (Clobber != "cc") {
3116 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
3117 if (CGM.getCodeGenOpts().StackClashProtector &&
3118 getTarget().isSPRegName(Clobber)) {
3119 CGM.getDiags().Report(S.getAsmLoc(),
3120 diag::warn_stack_clash_protection_inline_asm);
3121 }
3122 }
3123
3124 if (isa<MSAsmStmt>(&S)) {
3125 if (Clobber == "eax" || Clobber == "edx") {
3126 if (Constraints.find("=&A") != std::string::npos)
3127 continue;
3128 std::string::size_type position1 =
3129 Constraints.find("={" + Clobber + "}");
3130 if (position1 != std::string::npos) {
3131 Constraints.insert(position1 + 1, "&");
3132 continue;
3133 }
3134 std::string::size_type position2 = Constraints.find("=A");
3135 if (position2 != std::string::npos) {
3136 Constraints.insert(position2 + 1, "&");
3137 continue;
3138 }
3139 }
3140 }
3141 if (!Constraints.empty())
3142 Constraints += ',';
3143
3144 Constraints += "~{";
3145 Constraints += Clobber;
3146 Constraints += '}';
3147 }
3148
3149 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
3150 "unwind clobber can't be used with asm goto");
3151
3152 // Add machine specific clobbers
3153 std::string_view MachineClobbers = getTarget().getClobbers();
3154 if (!MachineClobbers.empty()) {
3155 if (!Constraints.empty())
3156 Constraints += ',';
3157 Constraints += MachineClobbers;
3158 }
3159
3160 llvm::Type *ResultType;
3161 if (ResultRegTypes.empty())
3162 ResultType = VoidTy;
3163 else if (ResultRegTypes.size() == 1)
3164 ResultType = ResultRegTypes[0];
3165 else
3166 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
3167
3168 llvm::FunctionType *FTy =
3169 llvm::FunctionType::get(ResultType, ArgTypes, false);
3170
3171 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3172
3173 llvm::InlineAsm::AsmDialect GnuAsmDialect =
3174 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
3175 ? llvm::InlineAsm::AD_ATT
3176 : llvm::InlineAsm::AD_Intel;
3177 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
3178 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3179
3180 llvm::InlineAsm *IA = llvm::InlineAsm::get(
3181 FTy, AsmString, Constraints, HasSideEffect,
3182 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
3183 std::vector<llvm::Value*> RegResults;
3184 llvm::CallBrInst *CBR;
3185 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3186 CBRRegResults;
3187 if (IsGCCAsmGoto) {
3188 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
3189 EmitBlock(Fallthrough);
3190 UpdateAsmCallInst(*CBR, HasSideEffect, /*HasUnwindClobber=*/false, ReadOnly,
3191 ReadNone, InNoMergeAttributedStmt,
3192 InNoConvergentAttributedStmt, S, ResultRegTypes,
3193 ArgElemTypes, *this, RegResults);
3194 // Because we are emitting code top to bottom, we don't have enough
3195 // information at this point to know precisely whether we have a critical
3196 // edge. If we have outputs, split all indirect destinations.
3197 if (!RegResults.empty()) {
3198 unsigned i = 0;
3199 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3200 llvm::Twine SynthName = Dest->getName() + ".split";
3201 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
3202 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3203 Builder.SetInsertPoint(SynthBB);
3204
3205 if (ResultRegTypes.size() == 1) {
3206 CBRRegResults[SynthBB].push_back(CBR);
3207 } else {
3208 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3209 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
3210 CBRRegResults[SynthBB].push_back(Tmp);
3211 }
3212 }
3213
3214 EmitBranch(Dest);
3215 EmitBlock(SynthBB);
3216 CBR->setIndirectDest(i++, SynthBB);
3217 }
3218 }
3219 } else if (HasUnwindClobber) {
3220 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
3221 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/true,
3222 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3223 InNoConvergentAttributedStmt, S, ResultRegTypes,
3224 ArgElemTypes, *this, RegResults);
3225 } else {
3226 llvm::CallInst *Result =
3227 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
3228 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/false,
3229 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3230 InNoConvergentAttributedStmt, S, ResultRegTypes,
3231 ArgElemTypes, *this, RegResults);
3232 }
3233
3234 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3235 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3236 ResultBounds);
3237
3238 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
3239 // different insertion point; one for each indirect destination and with
3240 // CBRRegResults rather than RegResults.
3241 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3242 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3243 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3244 Builder.SetInsertPoint(Succ, --(Succ->end()));
3245 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
3246 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3247 ResultTypeRequiresCast, ResultBounds);
3248 }
3249 }
3250}
3251
3253 const RecordDecl *RD = S.getCapturedRecordDecl();
3255
3256 // Initialize the captured struct.
3257 LValue SlotLV =
3258 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
3259
3260 RecordDecl::field_iterator CurField = RD->field_begin();
3262 E = S.capture_init_end();
3263 I != E; ++I, ++CurField) {
3264 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
3265 if (CurField->hasCapturedVLAType()) {
3266 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
3267 } else {
3268 EmitInitializerForField(*CurField, LV, *I);
3269 }
3270 }
3271
3272 return SlotLV;
3273}
3274
3275/// Generate an outlined function for the body of a CapturedStmt, store any
3276/// captured variables into the captured struct, and call the outlined function.
3277llvm::Function *
3279 LValue CapStruct = InitCapturedStruct(S);
3280
3281 // Emit the CapturedDecl
3282 CodeGenFunction CGF(CGM, true);
3283 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3284 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3285 delete CGF.CapturedStmtInfo;
3286
3287 // Emit call to the helper function.
3288 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
3289
3290 return F;
3291}
3292
3294 LValue CapStruct = InitCapturedStruct(S);
3295 return CapStruct.getAddress();
3296}
3297
3298/// Creates the outlined function for a CapturedStmt.
3299llvm::Function *
3301 assert(CapturedStmtInfo &&
3302 "CapturedStmtInfo should be set when generating the captured function");
3303 const CapturedDecl *CD = S.getCapturedDecl();
3304 const RecordDecl *RD = S.getCapturedRecordDecl();
3305 SourceLocation Loc = S.getBeginLoc();
3306 assert(CD->hasBody() && "missing CapturedDecl body");
3307
3308 // Build the argument list.
3309 ASTContext &Ctx = CGM.getContext();
3310 FunctionArgList Args;
3311 Args.append(CD->param_begin(), CD->param_end());
3312
3313 // Create the function declaration.
3314 const CGFunctionInfo &FuncInfo =
3315 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
3316 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
3317
3318 llvm::Function *F =
3319 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3320 CapturedStmtInfo->getHelperName(), &CGM.getModule());
3321 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
3322 if (CD->isNothrow())
3323 F->addFnAttr(llvm::Attribute::NoUnwind);
3324
3325 // Generate the function.
3326 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
3327 CD->getBody()->getBeginLoc());
3328 // Set the context parameter in CapturedStmtInfo.
3329 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
3330 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
3331
3332 // Initialize variable-length arrays.
3334 CapturedStmtInfo->getContextValue(), Ctx.getCanonicalTagType(RD));
3335 for (auto *FD : RD->fields()) {
3336 if (FD->hasCapturedVLAType()) {
3337 auto *ExprArg =
3339 .getScalarVal();
3340 auto VAT = FD->getCapturedVLAType();
3341 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3342 }
3343 }
3344
3345 // If 'this' is captured, load it into CXXThisValue.
3346 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3347 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3348 LValue ThisLValue = EmitLValueForField(Base, FD);
3349 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3350 }
3351
3352 PGO->assignRegionCounters(GlobalDecl(CD), F);
3353 CapturedStmtInfo->EmitBody(*this, CD->getBody());
3355
3356 return F;
3357}
3358
3359// Returns the first convergence entry/loop/anchor instruction found in |BB|.
3360// std::nullptr otherwise.
3361static llvm::ConvergenceControlInst *getConvergenceToken(llvm::BasicBlock *BB) {
3362 for (auto &I : *BB) {
3363 if (auto *CI = dyn_cast<llvm::ConvergenceControlInst>(&I))
3364 return CI;
3365 }
3366 return nullptr;
3367}
3368
3369llvm::CallBase *
3370CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input) {
3371 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3372 assert(ParentToken);
3373
3374 llvm::Value *bundleArgs[] = {ParentToken};
3375 llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
3376 auto *Output = llvm::CallBase::addOperandBundle(
3377 Input, llvm::LLVMContext::OB_convergencectrl, OB, Input->getIterator());
3378 Input->replaceAllUsesWith(Output);
3379 Input->eraseFromParent();
3380 return Output;
3381}
3382
3383llvm::ConvergenceControlInst *
3384CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB) {
3385 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3386 assert(ParentToken);
3387 return llvm::ConvergenceControlInst::CreateLoop(*BB, ParentToken);
3388}
3389
3390llvm::ConvergenceControlInst *
3391CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3392 llvm::BasicBlock *BB = &F->getEntryBlock();
3393 llvm::ConvergenceControlInst *Token = getConvergenceToken(BB);
3394 if (Token)
3395 return Token;
3396
3397 // Adding a convergence token requires the function to be marked as
3398 // convergent.
3399 F->setConvergent();
3400 return llvm::ConvergenceControlInst::CreateEntry(*BB);
3401}
#define V(N, I)
Defines enum values for all the target-independent builtin functions.
static std::string AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, const TargetInfo &Target, CodeGenModule &CGM, const AsmStmt &Stmt, const bool EarlyClobber, std::string *GCCReg=nullptr)
AddVariableConstraints - Look at AsmExpr and if it is a variable declared as using a particular regis...
Definition CGStmt.cpp:2481
static bool FindCaseStatementsForValue(const SwitchStmt &S, const llvm::APSInt &ConstantCondValue, SmallVectorImpl< const Stmt * > &ResultStmts, ASTContext &C, const SwitchCase *&ResultCase)
FindCaseStatementsForValue - Find the case statement being jumped to and then invoke CollectStatement...
Definition CGStmt.cpp:2185
static llvm::ConvergenceControlInst * getConvergenceToken(llvm::BasicBlock *BB)
Definition CGStmt.cpp:3361
static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF, const AsmStmt &S)
Definition CGStmt.cpp:2760
static std::optional< SmallVector< uint64_t, 16 > > getLikelihoodWeights(ArrayRef< Stmt::Likelihood > Likelihoods)
Definition CGStmt.cpp:2239
static llvm::MDNode * getAsmSrcLocInfo(const StringLiteral *Str, CodeGenFunction &CGF)
getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline asm call instruction.
Definition CGStmt.cpp:2575
static bool isSwiftAsyncCallee(const CallExpr *CE)
Determine if the given call uses the swiftasync calling convention.
Definition CGStmt.cpp:1590
static CSFC_Result CollectStatementsForCase(const Stmt *S, const SwitchCase *Case, bool &FoundCase, SmallVectorImpl< const Stmt * > &ResultStmts)
Definition CGStmt.cpp:2030
static void EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S, const llvm::ArrayRef< llvm::Value * > RegResults, const llvm::ArrayRef< llvm::Type * > ResultRegTypes, const llvm::ArrayRef< llvm::Type * > ResultTruncRegTypes, const llvm::ArrayRef< LValue > ResultRegDests, const llvm::ArrayRef< QualType > ResultRegQualTys, const llvm::BitVector &ResultTypeRequiresCast, const std::vector< std::optional< std::pair< unsigned, unsigned > > > &ResultBounds)
Definition CGStmt.cpp:2670
static bool hasEmptyLoopBody(const LoopStmt &S)
Definition CGStmt.cpp:1065
CSFC_Result
CollectStatementsForCase - Given the body of a 'switch' statement and a constant value that is being ...
Definition CGStmt.cpp:2029
@ CSFC_Failure
Definition CGStmt.cpp:2029
@ CSFC_Success
Definition CGStmt.cpp:2029
@ CSFC_FallThrough
Definition CGStmt.cpp:2029
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect, bool HasUnwindClobber, bool ReadOnly, bool ReadNone, bool NoMerge, bool NoConvergent, const AsmStmt &S, const std::vector< llvm::Type * > &ResultRegTypes, const std::vector< llvm::Type * > &ArgElemTypes, CodeGenFunction &CGF, std::vector< llvm::Value * > &RegResults)
Definition CGStmt.cpp:2602
llvm::MachO::Target Target
Definition MachO.h:51
#define SM(sm)
Defines the PrettyStackTraceEntry class, which is used to make crashes give more contextual informati...
Defines the SourceManager interface.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition APValue.h:122
bool toIntegralConstant(APSInt &Result, QualType SrcTy, const ASTContext &Ctx) const
Try to convert this value to an integral constant.
Definition APValue.cpp:963
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
SourceManager & getSourceManager()
Definition ASTContext.h:833
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth,...
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CanQualType VoidTy
CanQualType getCanonicalTagType(const TagDecl *TD) const
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition Stmt.h:3236
std::string getInputConstraint(unsigned i) const
getInputConstraint - Return the specified input constraint.
Definition Stmt.cpp:477
bool isVolatile() const
Definition Stmt.h:3272
std::string getOutputConstraint(unsigned i) const
getOutputConstraint - Return the constraint string for the specified output operand.
Definition Stmt.cpp:461
SourceLocation getAsmLoc() const
Definition Stmt.h:3266
const Expr * getInputExpr(unsigned i) const
Definition Stmt.cpp:485
unsigned getNumClobbers() const
Definition Stmt.h:3317
const Expr * getOutputExpr(unsigned i) const
Definition Stmt.cpp:469
unsigned getNumOutputs() const
Definition Stmt.h:3285
std::string generateAsmString(const ASTContext &C) const
Assemble final IR asm string.
Definition Stmt.cpp:453
unsigned getNumInputs() const
Definition Stmt.h:3307
std::string getClobber(unsigned i) const
Definition Stmt.cpp:493
Attr - This represents one attribute.
Definition Attr.h:44
Represents an attribute applied to a statement.
Definition Stmt.h:2203
Stmt * getSubStmt()
Definition Stmt.h:2239
ArrayRef< const Attr * > getAttrs() const
Definition Stmt.h:2235
BreakStmt - This represents a break.
Definition Stmt.h:3135
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition StmtCXX.h:135
DeclStmt * getBeginStmt()
Definition StmtCXX.h:163
DeclStmt * getLoopVarStmt()
Definition StmtCXX.h:169
DeclStmt * getEndStmt()
Definition StmtCXX.h:166
DeclStmt * getRangeStmt()
Definition StmtCXX.h:162
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
Expr * getCallee()
Definition Expr.h:3024
Represents the body of a CapturedStmt, and serves as its DeclContext.
Definition Decl.h:4926
ImplicitParamDecl * getContextParam() const
Retrieve the parameter containing captured variables.
Definition Decl.h:4984
bool isNothrow() const
Definition Decl.cpp:5621
param_iterator param_end() const
Retrieve an iterator one past the last parameter decl.
Definition Decl.h:5001
param_iterator param_begin() const
Retrieve an iterator pointing to the first parameter decl.
Definition Decl.h:4999
Stmt * getBody() const override
getBody - If this Decl represents a declaration for a body of code, such as a function or method defi...
Definition Decl.cpp:5618
This captures a statement into a function.
Definition Stmt.h:3886
CapturedDecl * getCapturedDecl()
Retrieve the outlined function declaration.
Definition Stmt.cpp:1455
const RecordDecl * getCapturedRecordDecl() const
Retrieve the record declaration for captured variables.
Definition Stmt.h:4007
capture_init_iterator capture_init_begin()
Retrieve the first initialization argument.
Definition Stmt.h:4063
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.h:4081
capture_init_iterator capture_init_end()
Retrieve the iterator pointing one past the last initialization argument.
Definition Stmt.h:4073
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition Stmt.h:4050
CapturedRegionKind getCapturedRegionKind() const
Retrieve the captured region kind.
Definition Stmt.cpp:1470
CaseStmt - Represent a case statement.
Definition Stmt.h:1920
Stmt * getSubStmt()
Definition Stmt.h:2033
Expr * getLHS()
Definition Stmt.h:2003
Expr * getRHS()
Definition Stmt.h:2015
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
static Address invalid()
Definition Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
An aggregate value slot.
Definition CGValue.h:504
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition CGValue.h:587
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition CGDebugInfo.h:59
CGFunctionInfo - Class to encapsulate the information about a function definition.
API for captured statement code generation.
RAII for correct setting/restoring of CapturedStmtInfo.
void rescopeLabels()
Change the cleanup scope of the labels in this lexical scope to match the scope of the enclosing cont...
Definition CGStmt.cpp:757
void ForceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
void ForceCleanup(std::initializer_list< llvm::Value ** > ValuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
bool requiresCleanups() const
Determine whether this scope requires any cleanups.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitOMPParallelMaskedTaskLoopDirective(const OMPParallelMaskedTaskLoopDirective &S)
void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S)
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S)
void EmitCXXTryStmt(const CXXTryStmt &S)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S)
Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
void EmitCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1451
void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S)
void EmitOMPScanDirective(const OMPScanDirective &S)
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
void EmitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &S)
LValue InitCapturedStruct(const CapturedStmt &S)
Definition CGStmt.cpp:3252
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
CGCapturedStmtInfo * CapturedStmtInfo
void EmitOMPDistributeDirective(const OMPDistributeDirective &S)
void EmitOMPParallelForDirective(const OMPParallelForDirective &S)
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
Definition CGCall.cpp:5087
void EmitOMPMasterDirective(const OMPMasterDirective &S)
void EmitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &S)
void EmitOpenACCInitConstruct(const OpenACCInitConstruct &S)
void EmitOMPFlushDirective(const OMPFlushDirective &S)
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S)
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
Definition CGStmt.cpp:709
void EmitCoreturnStmt(const CoreturnStmt &S)
void EmitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &S)
SmallVector< llvm::ConvergenceControlInst *, 4 > ConvergenceTokenStack
Stack to track the controlled convergence tokens.
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
Definition CGObjC.cpp:2129
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S)
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3689
bool EmitSimpleStmt(const Stmt *S, ArrayRef< const Attr * > Attrs)
EmitSimpleStmt - Try to emit a "simple" statement which does not necessarily require an insertion poi...
Definition CGStmt.cpp:508
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S)
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
const LangOptions & getLangOpts() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
Definition CGExpr.cpp:686
void EmitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &S)
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void EmitOpenACCWaitConstruct(const OpenACCWaitConstruct &S)
void EmitBlockAfterUses(llvm::BasicBlock *BB)
EmitBlockAfterUses - Emit the given block somewhere hopefully near its uses, and leave the insertion ...
Definition CGStmt.cpp:692
void SimplifyForwardingBlocks(llvm::BasicBlock *BB)
SimplifyForwardingBlocks - If the given basic block is only a branch to another basic block,...
Definition CGStmt.cpp:633
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
void EmitOMPScopeDirective(const OMPScopeDirective &S)
LValue MakeAddrLValueWithoutTBAA(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
JumpDest ReturnBlock
ReturnBlock - Unified return block.
void EmitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &S)
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field, bool IsInBounds=true)
Definition CGExpr.cpp:5293
const TargetInfo & getTarget() const
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
Definition CGStmt.cpp:581
void EmitGotoStmt(const GotoStmt &S)
Definition CGStmt.cpp:845
void EmitOMPDepobjDirective(const OMPDepobjDirective &S)
void EmitOMPMetaDirective(const OMPMetaDirective &S)
void EmitOMPCriticalDirective(const OMPCriticalDirective &S)
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:244
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S)
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2377
void EmitOMPCancelDirective(const OMPCancelDirective &S)
const Expr * RetExpr
If a return statement is being visited, this holds the return statement's result expression.
void EmitOMPBarrierDirective(const OMPBarrierDirective &S)
void EmitOMPOrderedDirective(const OMPOrderedDirective &S)
void EmitForStmt(const ForStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1293
void EmitOMPSectionsDirective(const OMPSectionsDirective &S)
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S)
Definition CGObjC.cpp:3688
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitOMPInteropDirective(const OMPInteropDirective &S)
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S)
void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S)
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:225
void EmitWhileStmt(const WhileStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1078
void EmitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &S)
void EmitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &S)
Emit combined directive 'target parallel loop' as if its constituent constructs are 'target',...
void EmitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &S)
void ResolveBranchFixups(llvm::BasicBlock *Target)
void EmitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &S)
void EmitOMPMaskedDirective(const OMPMaskedDirective &S)
bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody)
Returns true if a loop must make progress, which means the mustprogress attribute can be added.
Definition CGStmt.cpp:1015
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S)
void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S)
void EmitOMPReverseDirective(const OMPReverseDirective &S)
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S)
void EmitOpenACCDataConstruct(const OpenACCDataConstruct &S)
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
Definition CGExpr.cpp:5467
void EmitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &S)
void EmitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &S)
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
Definition CGObjC.cpp:2125
const TargetCodeGenInfo & getTargetHooks() const
void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S)
void EmitSEHLeaveStmt(const SEHLeaveStmt &S)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S)
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
Definition CGCall.cpp:5015
void EmitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &S)
void EmitCoroutineBody(const CoroutineBodyStmt &S)
void EmitOMPParallelDirective(const OMPParallelDirective &S)
void EmitOMPTaskDirective(const OMPTaskDirective &S)
void EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S)
void EmitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &S)
void EmitOMPAssumeDirective(const OMPAssumeDirective &S)
void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S)
void EmitStopPoint(const Stmt *S)
EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
Definition CGStmt.cpp:51
void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S)
void EmitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &S)
void EmitIfStmt(const IfStmt &S)
Definition CGStmt.cpp:881
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2574
void EmitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &S)
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S)
Definition CGObjC.cpp:2133
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition CGStmt.cpp:569
void EmitOpenACCCacheConstruct(const OpenACCCacheConstruct &S)
void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S)
void EmitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &S)
void EmitOMPFuseDirective(const OMPFuseDirective &S)
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init)
Definition CGClass.cpp:682
void EmitAsmStmt(const AsmStmt &S)
Definition CGStmt.cpp:2778
void EmitDefaultStmt(const DefaultStmt &S, ArrayRef< const Attr * > Attrs)
Definition CGStmt.cpp:1984
void EmitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &S)
void EmitSwitchStmt(const SwitchStmt &S)
Definition CGStmt.cpp:2294
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
Definition CGExpr.cpp:295
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:266
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs={})
EmitStmt - Emit the code for the statement.
Definition CGStmt.cpp:61
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S)
uint64_t getCurrentProfileCount()
Get the profiler's current count.
llvm::Type * ConvertTypeForMem(QualType T)
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S)
void EmitOMPTargetDirective(const OMPTargetDirective &S)
void EmitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &S)
llvm::Function * EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K)
Generate an outlined function for the body of a CapturedStmt, store any captured variables into the c...
Definition CGStmt.cpp:3278
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void EmitOMPTeamsDirective(const OMPTeamsDirective &S)
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D)
Emit simple code for OpenMP directives in Simd-only mode.
HLSLControlFlowHintAttr::Spelling HLSLControlFlowAttr
HLSL Branch attribute.
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitCaseStmt(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
Definition CGStmt.cpp:1869
void EmitOMPErrorDirective(const OMPErrorDirective &S)
void EmitBreakStmt(const BreakStmt &S)
Definition CGStmt.cpp:1755
void EmitOMPParallelMaskedTaskLoopSimdDirective(const OMPParallelMaskedTaskLoopSimdDirective &S)
void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S)
void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S)
void EmitDoStmt(const DoStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1205
void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S)
Address GenerateCapturedStmtArgument(const CapturedStmt &S)
Definition CGStmt.cpp:3293
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition CGStmt.cpp:675
void EmitOMPSimdDirective(const OMPSimdDirective &S)
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition CGExpr.cpp:188
void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S)
void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S)
void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
const BreakContinue * GetDestForLoopControlStmt(const LoopControlStmt &S)
Definition CGStmt.cpp:1741
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
llvm::BasicBlock * GetIndirectGotoBlock()
void EmitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &S)
void EmitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &S)
void EmitOMPUnrollDirective(const OMPUnrollDirective &S)
void EmitOMPStripeDirective(const OMPStripeDirective &S)
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
static bool hasAggregateEvaluationKind(QualType T)
void EmitCaseStmtRange(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
EmitCaseStmtRange - If case statement range is not too big then add multiple cases to switch instruct...
Definition CGStmt.cpp:1784
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitOMPSingleDirective(const OMPSingleDirective &S)
void EmitReturnStmt(const ReturnStmt &S)
EmitReturnStmt - Note that due to GCC extensions, this can have an operand if the function returns vo...
Definition CGStmt.cpp:1616
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
llvm::Function * GenerateCapturedStmtFunction(const CapturedStmt &S)
Creates the outlined function for a CapturedStmt.
Definition CGStmt.cpp:3300
const CGFunctionInfo * CurFnInfo
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitDeclStmt(const DeclStmt &S)
Definition CGStmt.cpp:1731
void EmitLabelStmt(const LabelStmt &S)
Definition CGStmt.cpp:778
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void EmitOMPTileDirective(const OMPTileDirective &S)
void EmitDecl(const Decl &D, bool EvaluateConditionDecl=false)
EmitDecl - Emit a declaration.
Definition CGDecl.cpp:52
void EmitOMPAtomicDirective(const OMPAtomicDirective &S)
void EmitOpenACCSetConstruct(const OpenACCSetConstruct &S)
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1668
void EmitAttributedStmt(const AttributedStmt &S)
Definition CGStmt.cpp:788
void EmitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &S)
void EmitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &S)
void EmitOMPSectionDirective(const OMPSectionDirective &S)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
void EmitOMPForSimdDirective(const OMPForSimdDirective &S)
llvm::LLVMContext & getLLVMContext()
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S)
Definition CGObjC.cpp:1804
void EmitIndirectGotoStmt(const IndirectGotoStmt &S)
Definition CGStmt.cpp:857
void MaybeEmitDeferredVarDeclInit(const VarDecl *var)
Definition CGDecl.cpp:2074
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const
isObviouslyBranchWithoutCleanups - Return true if a branch to the specified destination obviously has...
void EmitSEHTryStmt(const SEHTryStmt &S)
void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S)
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
When instrumenting to collect profile data, the counts for some blocks such as switch cases need to n...
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitOMPForDirective(const OMPForDirective &S)
void EmitLabel(const LabelDecl *D)
EmitLabel - Emit the block for the given label.
Definition CGStmt.cpp:720
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:655
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
void EmitContinueStmt(const ContinueStmt &S)
Definition CGStmt.cpp:1768
This class organizes the cross-function state that is used while generating LLVM code.
llvm::Module & getModule() const
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
const llvm::DataLayout & getDataLayout() const
ASTContext & getContext() const
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
A saved depth on the scope stack.
bool encloses(stable_iterator I) const
Returns true if this scope encloses I.
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
LValue - This represents an lvalue reference.
Definition CGValue.h:182
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition CGValue.h:361
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:98
bool isAggregate() const
Definition CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:83
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:71
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition CGValue.h:78
virtual bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, llvm::Type *Ty) const
Target hook to decide whether an inline asm operand can be passed by value.
Definition TargetInfo.h:202
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1720
Stmt *const * const_body_iterator
Definition Stmt.h:1792
body_range body()
Definition Stmt.h:1783
SourceLocation getLBracLoc() const
Definition Stmt.h:1857
Stmt * getStmtExprResult()
Definition Stmt.h:1842
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition Expr.h:1082
ContinueStmt - This represents a continue.
Definition Stmt.h:3119
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1270
ValueDecl * getDecl()
Definition Expr.h:1338
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition Stmt.h:1611
decl_range decls()
Definition Stmt.h:1659
SourceLocation getBodyRBrace() const
getBodyRBrace - Gets the right brace of the body, if a body exists.
virtual bool hasBody() const
Returns true if this Decl represents a declaration for a body of code, such as a function or method d...
Definition DeclBase.h:1093
SourceLocation getLocation() const
Definition DeclBase.h:439
Stmt * getSubStmt()
Definition Stmt.h:2081
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
DoStmt - This represents a 'do/while' stmt.
Definition Stmt.h:2832
Stmt * getBody()
Definition Stmt.h:2857
Expr * getCond()
Definition Stmt.h:2850
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3112
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3081
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3665
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3160
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition Stmt.h:2888
Stmt * getInit()
Definition Stmt.h:2903
VarDecl * getConditionVariable() const
Retrieve the variable declared in this "for" statement, if any.
Definition Stmt.cpp:1082
Stmt * getBody()
Definition Stmt.h:2932
Expr * getInc()
Definition Stmt.h:2931
Expr * getCond()
Definition Stmt.h:2930
const Expr * getSubExpr() const
Definition Expr.h:1062
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition TypeBase.h:4450
CallingConv getCallConv() const
Definition TypeBase.h:4805
This represents a GCC inline-assembly statement extension.
Definition Stmt.h:3395
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
GotoStmt - This represents a direct goto.
Definition Stmt.h:2969
LabelDecl * getLabel() const
Definition Stmt.h:2982
IfStmt - This represents an if/then/else.
Definition Stmt.h:2259
Stmt * getThen()
Definition Stmt.h:2348
Stmt * getInit()
Definition Stmt.h:2409
Expr * getCond()
Definition Stmt.h:2336
bool isConstexpr() const
Definition Stmt.h:2452
bool isNegatedConsteval() const
Definition Stmt.h:2448
Stmt * getElse()
Definition Stmt.h:2357
bool isConsteval() const
Definition Stmt.h:2439
VarDecl * getConditionVariable()
Retrieve the variable declared in this "if" statement, if any.
Definition Stmt.cpp:1030
IndirectGotoStmt - This represents an indirect goto.
Definition Stmt.h:3008
LabelDecl * getConstantTarget()
getConstantTarget - Returns the fixed target of this indirect goto, if one exists.
Definition Stmt.cpp:1231
Represents the declaration of a label.
Definition Decl.h:524
LabelStmt * getStmt() const
Definition Decl.h:548
LabelStmt - Represents a label, which has a substatement.
Definition Stmt.h:2146
LabelDecl * getDecl() const
Definition Stmt.h:2164
bool isSideEntry() const
Definition Stmt.h:2193
Stmt * getSubStmt()
Definition Stmt.h:2168
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
bool assumeFunctionsAreConvergent() const
Base class for BreakStmt and ContinueStmt.
Definition Stmt.h:3057
Represents a point when we exit a loop.
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
If a crash happens while one of these objects are live, the message is printed out along with the spe...
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
QualType getCanonicalType() const
Definition TypeBase.h:8330
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
Represents a struct/union/class.
Definition Decl.h:4312
field_range fields() const
Definition Decl.h:4515
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4512
field_iterator field_begin() const
Definition Decl.cpp:5202
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition Stmt.h:3160
SourceLocation getBeginLoc() const
Definition Stmt.h:3212
const VarDecl * getNRVOCandidate() const
Retrieve the variable that might be used for the named return value optimization.
Definition Stmt.h:3196
Expr * getRetValue()
Definition Stmt.h:3187
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
This class handles loading and caching of source files into memory.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition Stmt.h:85
@ NoStmtClass
Definition Stmt.h:88
StmtClass getStmtClass() const
Definition Stmt.h:1472
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
Likelihood
The likelihood of a branch being taken.
Definition Stmt.h:1415
@ LH_Unlikely
Branch has the [[unlikely]] attribute.
Definition Stmt.h:1416
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition Stmt.h:1417
@ LH_Likely
Branch has the [[likely]] attribute.
Definition Stmt.h:1419
static const Attr * getLikelihoodAttr(const Stmt *S)
Definition Stmt.cpp:171
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:350
static Likelihood getLikelihood(ArrayRef< const Attr * > Attrs)
Definition Stmt.cpp:163
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1799
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Expr.h:1973
SourceLocation getLocationOfByte(unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, const TargetInfo &Target, unsigned *StartToken=nullptr, unsigned *StartTokenByteOffset=nullptr) const
getLocationOfByte - Return a source location that points to the specified byte of this string literal...
Definition Expr.cpp:1322
StringRef getString() const
Definition Expr.h:1867
const SwitchCase * getNextSwitchCase() const
Definition Stmt.h:1893
SwitchStmt - This represents a 'switch' stmt.
Definition Stmt.h:2509
Expr * getCond()
Definition Stmt.h:2572
Stmt * getBody()
Definition Stmt.h:2584
VarDecl * getConditionVariable()
Retrieve the variable declared in this "switch" statement, if any.
Definition Stmt.cpp:1148
Stmt * getInit()
Definition Stmt.h:2589
SwitchCase * getSwitchCaseList()
Definition Stmt.h:2640
Exposes information about the current target.
Definition TargetInfo.h:226
bool validateInputConstraint(MutableArrayRef< ConstraintInfo > OutputConstraints, ConstraintInfo &info) const
std::string simplifyConstraint(StringRef Constraint, SmallVectorImpl< ConstraintInfo > *OutCons=nullptr) const
StringRef getNormalizedGCCRegisterName(StringRef Name, bool ReturnCanonical=false) const
Returns the "normalized" GCC register name.
bool validateOutputConstraint(ConstraintInfo &Info) const
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
bool isVoidType() const
Definition TypeBase.h:8871
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9158
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
Represents a variable declaration or definition.
Definition Decl.h:926
bool isNRVOVariable() const
Determine whether this local variable can be used with the named return value optimization (NRVO).
Definition Decl.h:1512
WhileStmt - This represents a 'while' stmt.
Definition Stmt.h:2697
Expr * getCond()
Definition Stmt.h:2749
SourceLocation getWhileLoc() const
Definition Stmt.h:2802
SourceLocation getRParenLoc() const
Definition Stmt.h:2807
VarDecl * getConditionVariable()
Retrieve the variable declared in this "while" statement, if any.
Definition Stmt.cpp:1209
Stmt * getBody()
Definition Stmt.h:2761
Defines the clang::TargetInfo interface.
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
@ CPlusPlus11
CapturedRegionKind
The different kinds of captured statement.
@ SC_Register
Definition Specifiers.h:257
Expr * Cond
};
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
@ CC_SwiftAsync
Definition Specifiers.h:294
U cast(CodeGen::Address addr)
Definition Address.h:327
@ None
The alignment was not explicit in code.
Definition ASTContext.h:178
ActionResult< Expr * > ExprResult
Definition Ownership.h:249
unsigned long uint64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void setScopeDepth(EHScopeStack::stable_iterator depth)
EHScopeStack::stable_iterator getScopeDepth() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool hasMatchingInput() const
Return true if this output operand has a matching (tied) input operand.
std::optional< std::pair< unsigned, unsigned > > getOutputOperandBounds() const
bool hasTiedOperand() const
Return true if this input operand is a matching constraint that ties it to an output operand.