clang 23.0.0git
CGStmt.cpp
Go to the documentation of this file.
1//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Stmt nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGDebugInfo.h"
14#include "CGOpenMPRuntime.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "CodeGenPGO.h"
18#include "TargetInfo.h"
19#include "clang/AST/Attr.h"
20#include "clang/AST/Expr.h"
21#include "clang/AST/Stmt.h"
22#include "clang/AST/StmtSYCL.h"
29#include "llvm/ADT/ArrayRef.h"
30#include "llvm/ADT/DenseMap.h"
31#include "llvm/ADT/SmallSet.h"
32#include "llvm/ADT/StringExtras.h"
33#include "llvm/IR/Assumptions.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/InlineAsm.h"
36#include "llvm/IR/Intrinsics.h"
37#include "llvm/IR/MDBuilder.h"
38#include "llvm/Support/SaveAndRestore.h"
39#include <optional>
40
41using namespace clang;
42using namespace CodeGen;
43
44//===----------------------------------------------------------------------===//
45// Statement Emission
46//===----------------------------------------------------------------------===//
47
49 if (CGDebugInfo *DI = getDebugInfo()) {
51 Loc = S->getBeginLoc();
52 DI->EmitLocation(Builder, Loc);
53
54 LastStopPoint = Loc;
55 }
56}
57
59 assert(S && "Null statement?");
60 PGO->setCurrentStmt(S);
61
62 // These statements have their own debug info handling.
63 if (EmitSimpleStmt(S, Attrs))
64 return;
65
66 // Check if we are generating unreachable code.
67 if (!HaveInsertPoint()) {
68 // If so, and the statement doesn't contain a label, then we do not need to
69 // generate actual code. This is safe because (1) the current point is
70 // unreachable, so we don't need to execute the code, and (2) we've already
71 // handled the statements which update internal data structures (like the
72 // local variable map) which could be used by subsequent statements.
73 if (!ContainsLabel(S)) {
74 // Verify that any decl statements were handled as simple, they may be in
75 // scope of subsequent reachable statements.
76 assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
77 PGO->markStmtMaybeUsed(S);
78 return;
79 }
80
81 // Otherwise, make a new block to hold the code.
83 }
84
85 // Generate a stoppoint if we are emitting debug info.
87
88 // Ignore all OpenMP directives except for simd if OpenMP with Simd is
89 // enabled.
90 if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
91 if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
93 return;
94 }
95 }
96
97 switch (S->getStmtClass()) {
99 case Stmt::CXXCatchStmtClass:
100 case Stmt::SEHExceptStmtClass:
101 case Stmt::SEHFinallyStmtClass:
102 case Stmt::MSDependentExistsStmtClass:
103 case Stmt::UnresolvedSYCLKernelCallStmtClass:
104 llvm_unreachable("invalid statement class to emit generically");
105 case Stmt::NullStmtClass:
106 case Stmt::CompoundStmtClass:
107 case Stmt::DeclStmtClass:
108 case Stmt::LabelStmtClass:
109 case Stmt::AttributedStmtClass:
110 case Stmt::GotoStmtClass:
111 case Stmt::BreakStmtClass:
112 case Stmt::ContinueStmtClass:
113 case Stmt::DefaultStmtClass:
114 case Stmt::CaseStmtClass:
115 case Stmt::DeferStmtClass:
116 case Stmt::SEHLeaveStmtClass:
117 case Stmt::SYCLKernelCallStmtClass:
118 llvm_unreachable("should have emitted these statements as simple");
119
120#define STMT(Type, Base)
121#define ABSTRACT_STMT(Op)
122#define EXPR(Type, Base) \
123 case Stmt::Type##Class:
124#include "clang/AST/StmtNodes.inc"
125 {
126 // Remember the block we came in on.
127 llvm::BasicBlock *incoming = Builder.GetInsertBlock();
128 assert(incoming && "expression emission must have an insertion point");
129
131
132 llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
133 assert(outgoing && "expression emission cleared block!");
134
135 // The expression emitters assume (reasonably!) that the insertion
136 // point is always set. To maintain that, the call-emission code
137 // for noreturn functions has to enter a new block with no
138 // predecessors. We want to kill that block and mark the current
139 // insertion point unreachable in the common case of a call like
140 // "exit();". Since expression emission doesn't otherwise create
141 // blocks with no predecessors, we can just test for that.
142 // However, we must be careful not to do this to our incoming
143 // block, because *statement* emission does sometimes create
144 // reachable blocks which will have no predecessors until later in
145 // the function. This occurs with, e.g., labels that are not
146 // reachable by fallthrough.
147 if (incoming != outgoing && outgoing->use_empty()) {
148 outgoing->eraseFromParent();
149 Builder.ClearInsertionPoint();
150 }
151 break;
152 }
153
154 case Stmt::IndirectGotoStmtClass:
156
157 case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
158 case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
159 case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
160 case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;
161
162 case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
163
164 case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
165 case Stmt::GCCAsmStmtClass: // Intentional fall-through.
166 case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
167 case Stmt::CoroutineBodyStmtClass:
169 break;
170 case Stmt::CoreturnStmtClass:
172 break;
173 case Stmt::CapturedStmtClass: {
174 const CapturedStmt *CS = cast<CapturedStmt>(S);
176 }
177 break;
178 case Stmt::ObjCAtTryStmtClass:
180 break;
181 case Stmt::ObjCAtCatchStmtClass:
182 llvm_unreachable(
183 "@catch statements should be handled by EmitObjCAtTryStmt");
184 case Stmt::ObjCAtFinallyStmtClass:
185 llvm_unreachable(
186 "@finally statements should be handled by EmitObjCAtTryStmt");
187 case Stmt::ObjCAtThrowStmtClass:
189 break;
190 case Stmt::ObjCAtSynchronizedStmtClass:
192 break;
193 case Stmt::ObjCForCollectionStmtClass:
195 break;
196 case Stmt::ObjCAutoreleasePoolStmtClass:
198 break;
199
200 case Stmt::CXXTryStmtClass:
202 break;
203 case Stmt::CXXForRangeStmtClass:
205 break;
206 case Stmt::SEHTryStmtClass:
208 break;
209 case Stmt::OMPMetaDirectiveClass:
211 break;
212 case Stmt::OMPCanonicalLoopClass:
214 break;
215 case Stmt::OMPParallelDirectiveClass:
217 break;
218 case Stmt::OMPSimdDirectiveClass:
220 break;
221 case Stmt::OMPTileDirectiveClass:
223 break;
224 case Stmt::OMPStripeDirectiveClass:
226 break;
227 case Stmt::OMPUnrollDirectiveClass:
229 break;
230 case Stmt::OMPReverseDirectiveClass:
232 break;
233 case Stmt::OMPInterchangeDirectiveClass:
235 break;
236 case Stmt::OMPFuseDirectiveClass:
238 break;
239 case Stmt::OMPForDirectiveClass:
241 break;
242 case Stmt::OMPForSimdDirectiveClass:
244 break;
245 case Stmt::OMPSectionsDirectiveClass:
247 break;
248 case Stmt::OMPSectionDirectiveClass:
250 break;
251 case Stmt::OMPSingleDirectiveClass:
253 break;
254 case Stmt::OMPMasterDirectiveClass:
256 break;
257 case Stmt::OMPCriticalDirectiveClass:
259 break;
260 case Stmt::OMPParallelForDirectiveClass:
262 break;
263 case Stmt::OMPParallelForSimdDirectiveClass:
265 break;
266 case Stmt::OMPParallelMasterDirectiveClass:
268 break;
269 case Stmt::OMPParallelSectionsDirectiveClass:
271 break;
272 case Stmt::OMPTaskDirectiveClass:
274 break;
275 case Stmt::OMPTaskyieldDirectiveClass:
277 break;
278 case Stmt::OMPErrorDirectiveClass:
280 break;
281 case Stmt::OMPBarrierDirectiveClass:
283 break;
284 case Stmt::OMPTaskwaitDirectiveClass:
286 break;
287 case Stmt::OMPTaskgroupDirectiveClass:
289 break;
290 case Stmt::OMPFlushDirectiveClass:
292 break;
293 case Stmt::OMPDepobjDirectiveClass:
295 break;
296 case Stmt::OMPScanDirectiveClass:
298 break;
299 case Stmt::OMPOrderedDirectiveClass:
301 break;
302 case Stmt::OMPAtomicDirectiveClass:
304 break;
305 case Stmt::OMPTargetDirectiveClass:
307 break;
308 case Stmt::OMPTeamsDirectiveClass:
310 break;
311 case Stmt::OMPCancellationPointDirectiveClass:
313 break;
314 case Stmt::OMPCancelDirectiveClass:
316 break;
317 case Stmt::OMPTargetDataDirectiveClass:
319 break;
320 case Stmt::OMPTargetEnterDataDirectiveClass:
322 break;
323 case Stmt::OMPTargetExitDataDirectiveClass:
325 break;
326 case Stmt::OMPTargetParallelDirectiveClass:
328 break;
329 case Stmt::OMPTargetParallelForDirectiveClass:
331 break;
332 case Stmt::OMPTaskLoopDirectiveClass:
334 break;
335 case Stmt::OMPTaskLoopSimdDirectiveClass:
337 break;
338 case Stmt::OMPMasterTaskLoopDirectiveClass:
340 break;
341 case Stmt::OMPMaskedTaskLoopDirectiveClass:
343 break;
344 case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
347 break;
348 case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
351 break;
352 case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
355 break;
356 case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
359 break;
360 case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
363 break;
364 case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
367 break;
368 case Stmt::OMPDistributeDirectiveClass:
370 break;
371 case Stmt::OMPTargetUpdateDirectiveClass:
373 break;
374 case Stmt::OMPDistributeParallelForDirectiveClass:
377 break;
378 case Stmt::OMPDistributeParallelForSimdDirectiveClass:
381 break;
382 case Stmt::OMPDistributeSimdDirectiveClass:
384 break;
385 case Stmt::OMPTargetParallelForSimdDirectiveClass:
388 break;
389 case Stmt::OMPTargetSimdDirectiveClass:
391 break;
392 case Stmt::OMPTeamsDistributeDirectiveClass:
394 break;
395 case Stmt::OMPTeamsDistributeSimdDirectiveClass:
398 break;
399 case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
402 break;
403 case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
406 break;
407 case Stmt::OMPTargetTeamsDirectiveClass:
409 break;
410 case Stmt::OMPTargetTeamsDistributeDirectiveClass:
413 break;
414 case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
417 break;
418 case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
421 break;
422 case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
425 break;
426 case Stmt::OMPInteropDirectiveClass:
428 break;
429 case Stmt::OMPDispatchDirectiveClass:
430 CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
431 break;
432 case Stmt::OMPScopeDirectiveClass:
434 break;
435 case Stmt::OMPMaskedDirectiveClass:
437 break;
438 case Stmt::OMPGenericLoopDirectiveClass:
440 break;
441 case Stmt::OMPTeamsGenericLoopDirectiveClass:
443 break;
444 case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
447 break;
448 case Stmt::OMPParallelGenericLoopDirectiveClass:
451 break;
452 case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
455 break;
456 case Stmt::OMPParallelMaskedDirectiveClass:
458 break;
459 case Stmt::OMPAssumeDirectiveClass:
461 break;
462 case Stmt::OpenACCComputeConstructClass:
464 break;
465 case Stmt::OpenACCLoopConstructClass:
467 break;
468 case Stmt::OpenACCCombinedConstructClass:
470 break;
471 case Stmt::OpenACCDataConstructClass:
473 break;
474 case Stmt::OpenACCEnterDataConstructClass:
476 break;
477 case Stmt::OpenACCExitDataConstructClass:
479 break;
480 case Stmt::OpenACCHostDataConstructClass:
482 break;
483 case Stmt::OpenACCWaitConstructClass:
485 break;
486 case Stmt::OpenACCInitConstructClass:
488 break;
489 case Stmt::OpenACCShutdownConstructClass:
491 break;
492 case Stmt::OpenACCSetConstructClass:
494 break;
495 case Stmt::OpenACCUpdateConstructClass:
497 break;
498 case Stmt::OpenACCAtomicConstructClass:
500 break;
501 case Stmt::OpenACCCacheConstructClass:
503 break;
504 }
505}
506
509 switch (S->getStmtClass()) {
510 default:
511 return false;
512 case Stmt::NullStmtClass:
513 break;
514 case Stmt::CompoundStmtClass:
516 break;
517 case Stmt::DeclStmtClass:
519 break;
520 case Stmt::LabelStmtClass:
522 break;
523 case Stmt::AttributedStmtClass:
525 break;
526 case Stmt::GotoStmtClass:
528 break;
529 case Stmt::BreakStmtClass:
531 break;
532 case Stmt::ContinueStmtClass:
534 break;
535 case Stmt::DefaultStmtClass:
537 break;
538 case Stmt::CaseStmtClass:
539 EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
540 break;
541 case Stmt::DeferStmtClass:
543 break;
544 case Stmt::SEHLeaveStmtClass:
546 break;
547 case Stmt::SYCLKernelCallStmtClass:
549 break;
550 }
551 return true;
552}
553
554/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
555/// this captures the expression result of the last sub-statement and returns it
556/// (for use by the statement expression extension).
558 AggValueSlot AggSlot) {
559 PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
560 "LLVM IR generation of compound statement ('{}')");
561
562 // Keep track of the current cleanup stack depth, including debug scopes.
564
565 return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
566}
567
570 bool GetLast,
571 AggValueSlot AggSlot) {
572
574 E = S.body_end() - GetLast;
575 I != E; ++I)
576 EmitStmt(*I);
577
578 Address RetAlloca = Address::invalid();
579 if (GetLast) {
580 // We have to special case labels here. They are statements, but when put
581 // at the end of a statement expression, they yield the value of their
582 // subexpression. Handle this by walking through all labels we encounter,
583 // emitting them before we evaluate the subexpr.
584 // Similar issues arise for attributed statements.
585 const Stmt *LastStmt = S.body_back();
586 while (!isa<Expr>(LastStmt)) {
587 if (const auto *LS = dyn_cast<LabelStmt>(LastStmt)) {
588 EmitLabel(LS->getDecl());
589 LastStmt = LS->getSubStmt();
590 } else if (const auto *AS = dyn_cast<AttributedStmt>(LastStmt)) {
591 // FIXME: Update this if we ever have attributes that affect the
592 // semantics of an expression.
593 LastStmt = AS->getSubStmt();
594 } else {
595 llvm_unreachable("unknown value statement");
596 }
597 }
598
600
601 const Expr *E = cast<Expr>(LastStmt);
602 QualType ExprTy = E->getType();
603 if (hasAggregateEvaluationKind(ExprTy)) {
604 EmitAggExpr(E, AggSlot);
605 } else {
606 // We can't return an RValue here because there might be cleanups at
607 // the end of the StmtExpr. Because of that, we have to emit the result
608 // here into a temporary alloca.
609 RetAlloca = CreateMemTemp(ExprTy);
610 EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
611 /*IsInit*/ false);
612 }
613 }
614
615 return RetAlloca;
616}
617
619 llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
620
621 // If there is a cleanup stack, then we it isn't worth trying to
622 // simplify this block (we would need to remove it from the scope map
623 // and cleanup entry).
624 if (!EHStack.empty())
625 return;
626
627 // Can only simplify direct branches.
628 if (!BI || !BI->isUnconditional())
629 return;
630
631 // Can only simplify empty blocks.
632 if (BI->getIterator() != BB->begin())
633 return;
634
635 BB->replaceAllUsesWith(BI->getSuccessor(0));
636 BI->eraseFromParent();
637 BB->eraseFromParent();
638}
639
640void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
641 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
642
643 // Fall out of the current block (if necessary).
644 EmitBranch(BB);
645
646 if (IsFinished && BB->use_empty()) {
647 delete BB;
648 return;
649 }
650
651 // Place the block after the current block, if possible, or else at
652 // the end of the function.
653 if (CurBB && CurBB->getParent())
654 CurFn->insert(std::next(CurBB->getIterator()), BB);
655 else
656 CurFn->insert(CurFn->end(), BB);
657 Builder.SetInsertPoint(BB);
658}
659
660void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
661 // Emit a branch from the current block to the target one if this
662 // was a real block. If this was just a fall-through block after a
663 // terminator, don't emit it.
664 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
665
666 if (!CurBB || CurBB->getTerminator()) {
667 // If there is no insert point or the previous block is already
668 // terminated, don't touch it.
669 } else {
670 // Otherwise, create a fall-through branch.
671 Builder.CreateBr(Target);
672 }
673
674 Builder.ClearInsertionPoint();
675}
676
677void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
678 bool inserted = false;
679 for (llvm::User *u : block->users()) {
680 if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
681 CurFn->insert(std::next(insn->getParent()->getIterator()), block);
682 inserted = true;
683 break;
684 }
685 }
686
687 if (!inserted)
688 CurFn->insert(CurFn->end(), block);
689
690 Builder.SetInsertPoint(block);
691}
692
695 JumpDest &Dest = LabelMap[D];
696 if (Dest.isValid()) return Dest;
697
698 // Create, but don't insert, the new block.
699 Dest = JumpDest(createBasicBlock(D->getName()),
702 return Dest;
703}
704
706 // Add this label to the current lexical scope if we're within any
707 // normal cleanups. Jumps "in" to this label --- when permitted by
708 // the language --- may need to be routed around such cleanups.
709 if (EHStack.hasNormalCleanups() && CurLexicalScope)
710 CurLexicalScope->addLabel(D);
711
712 JumpDest &Dest = LabelMap[D];
713
714 // If we didn't need a forward reference to this label, just go
715 // ahead and create a destination at the current scope.
716 if (!Dest.isValid()) {
718
719 // Otherwise, we need to give this label a target depth and remove
720 // it from the branch-fixups list.
721 } else {
722 assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
723 Dest.setScopeDepth(EHStack.stable_begin());
725 }
726
727 EmitBlock(Dest.getBlock());
728
729 // Emit debug info for labels.
730 if (CGDebugInfo *DI = getDebugInfo()) {
731 if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
732 DI->setLocation(D->getLocation());
733 DI->EmitLabel(D, Builder);
734 }
735 }
736
738}
739
740/// Change the cleanup scope of the labels in this lexical scope to
741/// match the scope of the enclosing context.
743 assert(!Labels.empty());
744 EHScopeStack::stable_iterator innermostScope
745 = CGF.EHStack.getInnermostNormalCleanup();
746
747 // Change the scope depth of all the labels.
748 for (const LabelDecl *Label : Labels) {
749 assert(CGF.LabelMap.count(Label));
750 JumpDest &dest = CGF.LabelMap.find(Label)->second;
751 assert(dest.getScopeDepth().isValid());
752 assert(innermostScope.encloses(dest.getScopeDepth()));
753 dest.setScopeDepth(innermostScope);
754 }
755
756 // Reparent the labels if the new scope also has cleanups.
757 if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
758 ParentScope->Labels.append(Labels.begin(), Labels.end());
759 }
760}
761
762
764 EmitLabel(S.getDecl());
765
766 // IsEHa - emit eha.scope.begin if it's a side entry of a scope
767 if (getLangOpts().EHAsynch && S.isSideEntry())
769
770 EmitStmt(S.getSubStmt());
771}
772
774 bool nomerge = false;
775 bool noinline = false;
776 bool alwaysinline = false;
777 bool noconvergent = false;
778 HLSLControlFlowHintAttr::Spelling flattenOrBranch =
779 HLSLControlFlowHintAttr::SpellingNotCalculated;
780 const CallExpr *musttail = nullptr;
781 const AtomicAttr *AA = nullptr;
782
783 for (const auto *A : S.getAttrs()) {
784 switch (A->getKind()) {
785 default:
786 break;
787 case attr::NoMerge:
788 nomerge = true;
789 break;
790 case attr::NoInline:
791 noinline = true;
792 break;
793 case attr::AlwaysInline:
794 alwaysinline = true;
795 break;
796 case attr::NoConvergent:
797 noconvergent = true;
798 break;
799 case attr::MustTail: {
800 const Stmt *Sub = S.getSubStmt();
801 const ReturnStmt *R = cast<ReturnStmt>(Sub);
802 musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
803 } break;
804 case attr::CXXAssume: {
805 const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
806 if (getLangOpts().CXXAssumptions && Builder.GetInsertBlock() &&
807 !Assumption->HasSideEffects(getContext())) {
808 llvm::Value *AssumptionVal = EmitCheckedArgForAssume(Assumption);
809 Builder.CreateAssumption(AssumptionVal);
810 }
811 } break;
812 case attr::Atomic:
813 AA = cast<AtomicAttr>(A);
814 break;
815 case attr::HLSLControlFlowHint: {
816 flattenOrBranch = cast<HLSLControlFlowHintAttr>(A)->getSemanticSpelling();
817 } break;
818 }
819 }
820 SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
821 SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
822 SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
823 SaveAndRestore save_noconvergent(InNoConvergentAttributedStmt, noconvergent);
824 SaveAndRestore save_musttail(MustTailCall, musttail);
825 SaveAndRestore save_flattenOrBranch(HLSLControlFlowAttr, flattenOrBranch);
826 CGAtomicOptionsRAII AORAII(CGM, AA);
827 EmitStmt(S.getSubStmt(), S.getAttrs());
828}
829
831 // If this code is reachable then emit a stop point (if generating
832 // debug info). We have to do this ourselves because we are on the
833 // "simple" statement path.
834 if (HaveInsertPoint())
835 EmitStopPoint(&S);
836
839}
840
841
844 if (const LabelDecl *Target = S.getConstantTarget()) {
846 return;
847 }
848
849 // Ensure that we have an i8* for our PHI node.
850 llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
851 Int8PtrTy, "addr");
852 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
853
854 // Get the basic block for the indirect goto.
855 llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
856
857 // The first instruction in the block has to be the PHI for the switch dest,
858 // add an entry for this branch.
859 cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
860
861 EmitBranch(IndGotoBB);
862 if (CurBB && CurBB->getTerminator())
863 addInstToCurrentSourceAtom(CurBB->getTerminator(), nullptr);
864}
865
867 const Stmt *Else = S.getElse();
868
869 // The else branch of a consteval if statement is always the only branch that
870 // can be runtime evaluated.
871 if (S.isConsteval()) {
872 const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : Else;
873 if (Executed) {
874 RunCleanupsScope ExecutedScope(*this);
875 EmitStmt(Executed);
876 }
877 return;
878 }
879
880 // C99 6.8.4.1: The first substatement is executed if the expression compares
881 // unequal to 0. The condition must be a scalar type.
882 LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
883 ApplyDebugLocation DL(*this, S.getCond());
884
885 if (S.getInit())
886 EmitStmt(S.getInit());
887
888 if (S.getConditionVariable())
890
891 // If the condition constant folds and can be elided, try to avoid emitting
892 // the condition and the dead arm of the if/else.
893 bool CondConstant;
894 if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
895 S.isConstexpr())) {
896 // Figure out which block (then or else) is executed.
897 const Stmt *Executed = S.getThen();
898 const Stmt *Skipped = Else;
899 if (!CondConstant) // Condition false?
900 std::swap(Executed, Skipped);
901
902 // If the skipped block has no labels in it, just emit the executed block.
903 // This avoids emitting dead code and simplifies the CFG substantially.
904 if (S.isConstexpr() || !ContainsLabel(Skipped)) {
906 /*UseBoth=*/true);
907 if (Executed) {
909 RunCleanupsScope ExecutedScope(*this);
910 EmitStmt(Executed);
911 }
912 PGO->markStmtMaybeUsed(Skipped);
913 return;
914 }
915 }
916
917 auto HasSkip = hasSkipCounter(&S);
918
919 // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
920 // the conditional branch.
921 llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
922 llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
923 llvm::BasicBlock *ElseBlock =
924 (Else || HasSkip ? createBasicBlock("if.else") : ContBlock);
925 // Prefer the PGO based weights over the likelihood attribute.
926 // When the build isn't optimized the metadata isn't used, so don't generate
927 // it.
928 // Also, differentiate between disabled PGO and a never executed branch with
929 // PGO. Assuming PGO is in use:
930 // - we want to ignore the [[likely]] attribute if the branch is never
931 // executed,
932 // - assuming the profile is poor, preserving the attribute may still be
933 // beneficial.
934 // As an approximation, preserve the attribute only if both the branch and the
935 // parent context were not executed.
937 uint64_t ThenCount = getProfileCount(S.getThen());
938 if (!ThenCount && !getCurrentProfileCount() &&
939 CGM.getCodeGenOpts().OptimizationLevel)
940 LH = Stmt::getLikelihood(S.getThen(), Else);
941
942 // When measuring MC/DC, always fully evaluate the condition up front using
943 // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
944 // executing the body of the if.then or if.else. This is useful for when
945 // there is a 'return' within the body, but this is particularly beneficial
946 // when one if-stmt is nested within another if-stmt so that all of the MC/DC
947 // updates are kept linear and consistent.
948 if (!CGM.getCodeGenOpts().MCDCCoverage) {
949 EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH,
950 /*ConditionalOp=*/nullptr,
951 /*ConditionalDecl=*/S.getConditionVariable());
952 } else {
953 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
955 Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
956 }
957
958 // Emit the 'then' code.
959 EmitBlock(ThenBlock);
961 {
962 RunCleanupsScope ThenScope(*this);
963 EmitStmt(S.getThen());
964 }
965 EmitBranch(ContBlock);
966
967 // Emit the 'else' code if present.
968 if (Else) {
969 {
970 // There is no need to emit line number for an unconditional branch.
971 auto NL = ApplyDebugLocation::CreateEmpty(*this);
972 EmitBlock(ElseBlock);
973 }
974 // Add a counter to else block unless it has CounterExpr.
975 if (HasSkip)
977 {
978 RunCleanupsScope ElseScope(*this);
979 EmitStmt(Else);
980 }
981 {
982 // There is no need to emit line number for an unconditional branch.
983 auto NL = ApplyDebugLocation::CreateEmpty(*this);
984 EmitBranch(ContBlock);
985 }
986 } else if (HasSkip) {
987 EmitBlock(ElseBlock);
989 EmitBranch(ContBlock);
990 }
991
992 // Emit the continuation block for code after the if.
993 EmitBlock(ContBlock, true);
994}
995
996bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
997 bool HasEmptyBody) {
998 if (CGM.getCodeGenOpts().getFiniteLoops() ==
1000 return false;
1001
1002 // Now apply rules for plain C (see 6.8.5.6 in C11).
1003 // Loops with constant conditions do not have to make progress in any C
1004 // version.
1005 // As an extension, we consisider loops whose constant expression
1006 // can be constant-folded.
1008 bool CondIsConstInt =
1009 !ControllingExpression ||
1010 (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
1011 Result.Val.isInt());
1012
1013 bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
1014 Result.Val.getInt().getBoolValue());
1015
1016 // Loops with non-constant conditions must make progress in C11 and later.
1017 if (getLangOpts().C11 && !CondIsConstInt)
1018 return true;
1019
1020 // [C++26][intro.progress] (DR)
1021 // The implementation may assume that any thread will eventually do one of the
1022 // following:
1023 // [...]
1024 // - continue execution of a trivial infinite loop ([stmt.iter.general]).
1025 if (CGM.getCodeGenOpts().getFiniteLoops() ==
1028 if (HasEmptyBody && CondIsTrue) {
1029 CurFn->removeFnAttr(llvm::Attribute::MustProgress);
1030 return false;
1031 }
1032 return true;
1033 }
1034 return false;
1035}
1036
1037// [C++26][stmt.iter.general] (DR)
1038// A trivially empty iteration statement is an iteration statement matching one
1039// of the following forms:
1040// - while ( expression ) ;
1041// - while ( expression ) { }
1042// - do ; while ( expression ) ;
1043// - do { } while ( expression ) ;
1044// - for ( init-statement expression(opt); ) ;
1045// - for ( init-statement expression(opt); ) { }
1046template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
1047 if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
1048 if (S.getInc())
1049 return false;
1050 }
1051 const Stmt *Body = S.getBody();
1052 if (!Body || isa<NullStmt>(Body))
1053 return true;
1054 if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
1055 return Compound->body_empty();
1056 return false;
1057}
1058
1060 ArrayRef<const Attr *> WhileAttrs) {
1061 // Emit the header for the loop, which will also become
1062 // the continue target.
1063 JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
1064 EmitBlock(LoopHeader.getBlock());
1065
1066 if (CGM.shouldEmitConvergenceTokens())
1067 ConvergenceTokenStack.push_back(
1068 emitConvergenceLoopToken(LoopHeader.getBlock()));
1069
1070 // Create an exit block for when the condition fails, which will
1071 // also become the break target.
1073
1074 // Store the blocks to use for break and continue.
1075 BreakContinueStack.push_back(BreakContinue(S, LoopExit, LoopHeader));
1076
1077 // C++ [stmt.while]p2:
1078 // When the condition of a while statement is a declaration, the
1079 // scope of the variable that is declared extends from its point
1080 // of declaration (3.3.2) to the end of the while statement.
1081 // [...]
1082 // The object created in a condition is destroyed and created
1083 // with each iteration of the loop.
1084 RunCleanupsScope ConditionScope(*this);
1085
1086 if (S.getConditionVariable())
1088
1089 // Evaluate the conditional in the while header. C99 6.8.5.1: The
1090 // evaluation of the controlling expression takes place before each
1091 // execution of the loop body.
1092 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1093
1095
1096 // while(1) is common, avoid extra exit blocks. Be sure
1097 // to correctly handle break/continue though.
1098 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
1099 bool EmitBoolCondBranch = !C || !C->isOne();
1100 const SourceRange &R = S.getSourceRange();
1101 LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
1102 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
1105
1106 // As long as the condition is true, go to the loop body.
1107 llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
1108 if (EmitBoolCondBranch) {
1109 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1110 if (hasSkipCounter(&S) || ConditionScope.requiresCleanups())
1111 ExitBlock = createBasicBlock("while.exit");
1112 llvm::MDNode *Weights =
1113 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1114 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1115 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1116 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1117 auto *I = Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);
1118 // Key Instructions: Emit the condition and branch as separate source
1119 // location atoms otherwise we may omit a step onto the loop condition in
1120 // favour of the `while` keyword.
1121 // FIXME: We could have the branch as the backup location for the condition,
1122 // which would probably be a better experience. Explore this later.
1123 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
1124 addInstToNewSourceAtom(CondI, nullptr);
1125 addInstToNewSourceAtom(I, nullptr);
1126
1127 if (ExitBlock != LoopExit.getBlock()) {
1128 EmitBlock(ExitBlock);
1131 }
1132 } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
1133 CGM.getDiags().Report(A->getLocation(),
1134 diag::warn_attribute_has_no_effect_on_infinite_loop)
1135 << A << A->getRange();
1136 CGM.getDiags().Report(
1137 S.getWhileLoc(),
1138 diag::note_attribute_has_no_effect_on_infinite_loop_here)
1140 }
1141
1142 // Emit the loop body. We have to emit this in a cleanup scope
1143 // because it might be a singleton DeclStmt.
1144 {
1145 RunCleanupsScope BodyScope(*this);
1146 EmitBlock(LoopBody);
1148 EmitStmt(S.getBody());
1149 }
1150
1151 BreakContinueStack.pop_back();
1152
1153 // Immediately force cleanup.
1154 ConditionScope.ForceCleanup();
1155
1156 EmitStopPoint(&S);
1157 // Branch to the loop header again.
1158 EmitBranch(LoopHeader.getBlock());
1159
1160 LoopStack.pop();
1161
1162 // Emit the exit block.
1163 EmitBlock(LoopExit.getBlock(), true);
1164
1165 // The LoopHeader typically is just a branch if we skipped emitting
1166 // a branch, try to erase it.
1167 if (!EmitBoolCondBranch) {
1168 SimplifyForwardingBlocks(LoopHeader.getBlock());
1169 PGO->markStmtAsUsed(true, &S);
1170 }
1171
1172 if (CGM.shouldEmitConvergenceTokens())
1173 ConvergenceTokenStack.pop_back();
1174}
1175
1177 ArrayRef<const Attr *> DoAttrs) {
1179 JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
1180
1181 uint64_t ParentCount = getCurrentProfileCount();
1182
1183 // Store the blocks to use for break and continue.
1184 BreakContinueStack.push_back(BreakContinue(S, LoopExit, LoopCond));
1185
1186 // Emit the body of the loop.
1187 llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
1188
1189 EmitBlockWithFallThrough(LoopBody, &S);
1190
1191 if (CGM.shouldEmitConvergenceTokens())
1192 ConvergenceTokenStack.push_back(emitConvergenceLoopToken(LoopBody));
1193
1194 {
1195 RunCleanupsScope BodyScope(*this);
1196 EmitStmt(S.getBody());
1197 }
1198
1199 EmitBlock(LoopCond.getBlock());
1200
1201 // C99 6.8.5.2: "The evaluation of the controlling expression takes place
1202 // after each execution of the loop body."
1203
1204 // Evaluate the conditional in the while header.
1205 // C99 6.8.5p2/p4: The first substatement is executed if the expression
1206 // compares unequal to 0. The condition must be a scalar type.
1207 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1208
1209 BreakContinueStack.pop_back();
1210
1211 // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
1212 // to correctly handle break/continue though.
1213 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
1214 bool EmitBoolCondBranch = !C || !C->isZero();
1215
1216 const SourceRange &R = S.getSourceRange();
1217 LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
1221
1222 auto *LoopFalse = (hasSkipCounter(&S) ? createBasicBlock("do.loopfalse")
1223 : LoopExit.getBlock());
1224
1225 // As long as the condition is true, iterate the loop.
1226 if (EmitBoolCondBranch) {
1227 uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
1228 auto *I = Builder.CreateCondBr(
1229 BoolCondVal, LoopBody, LoopFalse,
1230 createProfileWeightsForLoop(S.getCond(), BackedgeCount));
1231
1232 // Key Instructions: Emit the condition and branch as separate source
1233 // location atoms otherwise we may omit a step onto the loop condition in
1234 // favour of the closing brace.
1235 // FIXME: We could have the branch as the backup location for the condition,
1236 // which would probably be a better experience (no jumping to the brace).
1237 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
1238 addInstToNewSourceAtom(CondI, nullptr);
1239 addInstToNewSourceAtom(I, nullptr);
1240 }
1241
1242 LoopStack.pop();
1243
1244 if (LoopFalse != LoopExit.getBlock()) {
1245 EmitBlock(LoopFalse);
1246 incrementProfileCounter(UseSkipPath, &S, /*UseBoth=*/true);
1247 }
1248
1249 // Emit the exit block.
1250 EmitBlock(LoopExit.getBlock());
1251
1252 // The DoCond block typically is just a branch if we skipped
1253 // emitting a branch, try to erase it.
1254 if (!EmitBoolCondBranch)
1256
1257 if (CGM.shouldEmitConvergenceTokens())
1258 ConvergenceTokenStack.pop_back();
1259}
1260
1262 ArrayRef<const Attr *> ForAttrs) {
1264
1265 std::optional<LexicalScope> ForScope;
1267 ForScope.emplace(*this, S.getSourceRange());
1268
1269 // Evaluate the first part before the loop.
1270 if (S.getInit())
1271 EmitStmt(S.getInit());
1272
1273 // Start the loop with a block that tests the condition.
1274 // If there's an increment, the continue scope will be overwritten
1275 // later.
1276 JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
1277 llvm::BasicBlock *CondBlock = CondDest.getBlock();
1278 EmitBlock(CondBlock);
1279
1280 if (CGM.shouldEmitConvergenceTokens())
1281 ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));
1282
1283 const SourceRange &R = S.getSourceRange();
1284 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
1288
1289 // Create a cleanup scope for the condition variable cleanups.
1290 LexicalScope ConditionScope(*this, S.getSourceRange());
1291
1292 // If the for loop doesn't have an increment we can just use the condition as
1293 // the continue block. Otherwise, if there is no condition variable, we can
1294 // form the continue block now. If there is a condition variable, we can't
1295 // form the continue block until after we've emitted the condition, because
1296 // the condition is in scope in the increment, but Sema's jump diagnostics
1297 // ensure that there are no continues from the condition variable that jump
1298 // to the loop increment.
1299 JumpDest Continue;
1300 if (!S.getInc())
1301 Continue = CondDest;
1302 else if (!S.getConditionVariable())
1303 Continue = getJumpDestInCurrentScope("for.inc");
1304 BreakContinueStack.push_back(BreakContinue(S, LoopExit, Continue));
1305
1306 if (S.getCond()) {
1307 // If the for statement has a condition scope, emit the local variable
1308 // declaration.
1309 if (S.getConditionVariable()) {
1311
1312 // We have entered the condition variable's scope, so we're now able to
1313 // jump to the continue block.
1314 Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
1315 BreakContinueStack.back().ContinueBlock = Continue;
1316 }
1317
1318 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1319 // If there are any cleanups between here and the loop-exit scope,
1320 // create a block to stage a loop exit along.
1321 if (hasSkipCounter(&S) || (ForScope && ForScope->requiresCleanups()))
1322 ExitBlock = createBasicBlock("for.cond.cleanup");
1323
1324 // As long as the condition is true, iterate the loop.
1325 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
1326
1327 // C99 6.8.5p2/p4: The first substatement is executed if the expression
1328 // compares unequal to 0. The condition must be a scalar type.
1329 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1330
1332
1333 llvm::MDNode *Weights =
1334 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1335 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1336 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1337 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1338
1339 auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
1340 // Key Instructions: Emit the condition and branch as separate atoms to
1341 // match existing loop stepping behaviour. FIXME: We could have the branch
1342 // as the backup location for the condition, which would probably be a
1343 // better experience (no jumping to the brace).
1344 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
1345 addInstToNewSourceAtom(CondI, nullptr);
1346 addInstToNewSourceAtom(I, nullptr);
1347
1348 if (ExitBlock != LoopExit.getBlock()) {
1349 EmitBlock(ExitBlock);
1352 }
1353
1354 EmitBlock(ForBody);
1355 } else {
1356 // Treat it as a non-zero constant. Don't even create a new block for the
1357 // body, just fall into it.
1358 PGO->markStmtAsUsed(true, &S);
1359 }
1360
1362
1363 {
1364 // Create a separate cleanup scope for the body, in case it is not
1365 // a compound statement.
1366 RunCleanupsScope BodyScope(*this);
1367 EmitStmt(S.getBody());
1368 }
1369
1370 // The last block in the loop's body (which unconditionally branches to the
1371 // `inc` block if there is one).
1372 auto *FinalBodyBB = Builder.GetInsertBlock();
1373
1374 // If there is an increment, emit it next.
1375 if (S.getInc()) {
1376 EmitBlock(Continue.getBlock());
1377 EmitStmt(S.getInc());
1378 }
1379
1380 BreakContinueStack.pop_back();
1381
1382 ConditionScope.ForceCleanup();
1383
1384 EmitStopPoint(&S);
1385 EmitBranch(CondBlock);
1386
1387 if (ForScope)
1388 ForScope->ForceCleanup();
1389
1390 LoopStack.pop();
1391
1392 // Emit the fall-through block.
1393 EmitBlock(LoopExit.getBlock(), true);
1394
1395 if (CGM.shouldEmitConvergenceTokens())
1396 ConvergenceTokenStack.pop_back();
1397
1398 if (FinalBodyBB) {
1399 // Key Instructions: We want the for closing brace to be step-able on to
1400 // match existing behaviour.
1401 addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
1402 }
1403}
1404
1405void
1407 ArrayRef<const Attr *> ForAttrs) {
1409
1410 LexicalScope ForScope(*this, S.getSourceRange());
1411
1412 // Evaluate the first pieces before the loop.
1413 if (S.getInit())
1414 EmitStmt(S.getInit());
1417 EmitStmt(S.getEndStmt());
1418
1419 // Start the loop with a block that tests the condition.
1420 // If there's an increment, the continue scope will be overwritten
1421 // later.
1422 llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
1423 EmitBlock(CondBlock);
1424
1425 if (CGM.shouldEmitConvergenceTokens())
1426 ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));
1427
1428 const SourceRange &R = S.getSourceRange();
1429 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
1432
1433 // If there are any cleanups between here and the loop-exit scope,
1434 // create a block to stage a loop exit along.
1435 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1436 if (hasSkipCounter(&S) || ForScope.requiresCleanups())
1437 ExitBlock = createBasicBlock("for.cond.cleanup");
1438
1439 // The loop body, consisting of the specified body and the loop variable.
1440 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
1441
1442 // The body is executed if the expression, contextually converted
1443 // to bool, is true.
1444 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1445 llvm::MDNode *Weights =
1446 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1447 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1448 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1449 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1450 auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
1451 // Key Instructions: Emit the condition and branch as separate atoms to
1452 // match existing loop stepping behaviour. FIXME: We could have the branch as
1453 // the backup location for the condition, which would probably be a better
1454 // experience.
1455 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
1456 addInstToNewSourceAtom(CondI, nullptr);
1457 addInstToNewSourceAtom(I, nullptr);
1458
1459 if (ExitBlock != LoopExit.getBlock()) {
1460 EmitBlock(ExitBlock);
1463 }
1464
1465 EmitBlock(ForBody);
1467
1468 // Create a block for the increment. In case of a 'continue', we jump there.
1469 JumpDest Continue = getJumpDestInCurrentScope("for.inc");
1470
1471 // Store the blocks to use for break and continue.
1472 BreakContinueStack.push_back(BreakContinue(S, LoopExit, Continue));
1473
1474 {
1475 // Create a separate cleanup scope for the loop variable and body.
1476 LexicalScope BodyScope(*this, S.getSourceRange());
1478 EmitStmt(S.getBody());
1479 }
1480 // The last block in the loop's body (which unconditionally branches to the
1481 // `inc` block if there is one).
1482 auto *FinalBodyBB = Builder.GetInsertBlock();
1483
1484 EmitStopPoint(&S);
1485 // If there is an increment, emit it next.
1486 EmitBlock(Continue.getBlock());
1487 EmitStmt(S.getInc());
1488
1489 BreakContinueStack.pop_back();
1490
1491 EmitBranch(CondBlock);
1492
1493 ForScope.ForceCleanup();
1494
1495 LoopStack.pop();
1496
1497 // Emit the fall-through block.
1498 EmitBlock(LoopExit.getBlock(), true);
1499
1500 if (CGM.shouldEmitConvergenceTokens())
1501 ConvergenceTokenStack.pop_back();
1502
1503 if (FinalBodyBB) {
1504 // We want the for closing brace to be step-able on to match existing
1505 // behaviour.
1506 addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
1507 }
1508}
1509
1510void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
1511 if (RV.isScalar()) {
1512 Builder.CreateStore(RV.getScalarVal(), ReturnValue);
1513 } else if (RV.isAggregate()) {
1514 LValue Dest = MakeAddrLValue(ReturnValue, Ty);
1517 } else {
1519 /*init*/ true);
1520 }
1522}
1523
1524namespace {
1525// RAII struct used to save and restore a return statment's result expression.
1526struct SaveRetExprRAII {
1527 SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
1528 : OldRetExpr(CGF.RetExpr), CGF(CGF) {
1529 CGF.RetExpr = RetExpr;
1530 }
1531 ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
1532 const Expr *OldRetExpr;
1533 CodeGenFunction &CGF;
1534};
1535} // namespace
1536
1537/// Determine if the given call uses the swiftasync calling convention.
1538static bool isSwiftAsyncCallee(const CallExpr *CE) {
1539 auto calleeQualType = CE->getCallee()->getType();
1540 const FunctionType *calleeType = nullptr;
1541 if (calleeQualType->isFunctionPointerType() ||
1542 calleeQualType->isFunctionReferenceType() ||
1543 calleeQualType->isBlockPointerType() ||
1544 calleeQualType->isMemberFunctionPointerType()) {
1545 calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
1546 } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
1547 calleeType = ty;
1548 } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
1549 if (auto methodDecl = CMCE->getMethodDecl()) {
1550 // getMethodDecl() doesn't handle member pointers at the moment.
1551 calleeType = methodDecl->getType()->castAs<FunctionType>();
1552 } else {
1553 return false;
1554 }
1555 } else {
1556 return false;
1557 }
1558 return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
1559}
1560
1561/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
1562/// if the function returns void, or may be missing one if the function returns
1563/// non-void. Fun stuff :).
1566 if (requiresReturnValueCheck()) {
1567 llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
1568 auto *SLocPtr =
1569 new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
1570 llvm::GlobalVariable::PrivateLinkage, SLoc);
1571 SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1572 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
1573 assert(ReturnLocation.isValid() && "No valid return location");
1574 Builder.CreateStore(SLocPtr, ReturnLocation);
1575 }
1576
1577 // Returning from an outlined SEH helper is UB, and we already warn on it.
1578 if (IsOutlinedSEHHelper) {
1579 Builder.CreateUnreachable();
1580 Builder.ClearInsertionPoint();
1581 }
1582
1583 // Emit the result value, even if unused, to evaluate the side effects.
1584 const Expr *RV = S.getRetValue();
1585
1586 // Record the result expression of the return statement. The recorded
1587 // expression is used to determine whether a block capture's lifetime should
1588 // end at the end of the full expression as opposed to the end of the scope
1589 // enclosing the block expression.
1590 //
1591 // This permits a small, easily-implemented exception to our over-conservative
1592 // rules about not jumping to statements following block literals with
1593 // non-trivial cleanups.
1594 SaveRetExprRAII SaveRetExpr(RV, *this);
1595
1596 RunCleanupsScope cleanupScope(*this);
1597 if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
1598 RV = EWC->getSubExpr();
1599
1600 // If we're in a swiftasynccall function, and the return expression is a
1601 // call to a swiftasynccall function, mark the call as the musttail call.
1602 std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
1603 if (RV && CurFnInfo &&
1604 CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
1605 if (auto CE = dyn_cast<CallExpr>(RV)) {
1606 if (isSwiftAsyncCallee(CE)) {
1607 SaveMustTail.emplace(MustTailCall, CE);
1608 }
1609 }
1610 }
1611
1612 // FIXME: Clean this up by using an LValue for ReturnTemp,
1613 // EmitStoreThroughLValue, and EmitAnyExpr.
1614 // Check if the NRVO candidate was not globalized in OpenMP mode.
1615 if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
1617 (!getLangOpts().OpenMP ||
1618 !CGM.getOpenMPRuntime()
1619 .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
1620 .isValid())) {
1621 // Apply the named return value optimization for this return statement,
1622 // which means doing nothing: the appropriate result has already been
1623 // constructed into the NRVO variable.
1624
1625 // If there is an NRVO flag for this variable, set it to 1 into indicate
1626 // that the cleanup code should not destroy the variable.
1627 if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
1628 Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
1629 } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
1630 // Make sure not to return anything, but evaluate the expression
1631 // for side effects.
1632 if (RV) {
1633 EmitAnyExpr(RV);
1634 }
1635 } else if (!RV) {
1636 // Do nothing (return value is left uninitialized)
1637 } else if (FnRetTy->isReferenceType()) {
1638 // If this function returns a reference, take the address of the expression
1639 // rather than the value.
1641 auto *I = Builder.CreateStore(Result.getScalarVal(), ReturnValue);
1642 addInstToCurrentSourceAtom(I, I->getValueOperand());
1643 } else {
1644 switch (getEvaluationKind(RV->getType())) {
1645 case TEK_Scalar: {
1646 llvm::Value *Ret = EmitScalarExpr(RV);
1647 if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
1649 /*isInit*/ true);
1650 } else {
1651 auto *I = Builder.CreateStore(Ret, ReturnValue);
1652 addInstToCurrentSourceAtom(I, I->getValueOperand());
1653 }
1654 break;
1655 }
1656 case TEK_Complex:
1658 /*isInit*/ true);
1659 break;
1660 case TEK_Aggregate:
1667 break;
1668 }
1669 }
1670
1671 ++NumReturnExprs;
1672 if (!RV || RV->isEvaluatable(getContext()))
1673 ++NumSimpleReturnExprs;
1674
1675 cleanupScope.ForceCleanup();
1677}
1678
1680 // As long as debug info is modeled with instructions, we have to ensure we
1681 // have a place to insert here and write the stop point here.
1682 if (HaveInsertPoint())
1683 EmitStopPoint(&S);
1684
1685 for (const auto *I : S.decls())
1686 EmitDecl(*I, /*EvaluateConditionDecl=*/true);
1687}
1688
1690 -> const BreakContinue * {
1691 if (!S.hasLabelTarget())
1692 return &BreakContinueStack.back();
1693
1694 const Stmt *LoopOrSwitch = S.getNamedLoopOrSwitch();
1695 assert(LoopOrSwitch && "break/continue target not set?");
1696 for (const BreakContinue &BC : llvm::reverse(BreakContinueStack))
1697 if (BC.LoopOrSwitch == LoopOrSwitch)
1698 return &BC;
1699
1700 llvm_unreachable("break/continue target not found");
1701}
1702
1704 assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
1705
1706 // If this code is reachable then emit a stop point (if generating
1707 // debug info). We have to do this ourselves because we are on the
1708 // "simple" statement path.
1709 if (HaveInsertPoint())
1710 EmitStopPoint(&S);
1711
1714}
1715
1717 assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1718
1719 // If this code is reachable then emit a stop point (if generating
1720 // debug info). We have to do this ourselves because we are on the
1721 // "simple" statement path.
1722 if (HaveInsertPoint())
1723 EmitStopPoint(&S);
1724
1727}
1728
1729/// EmitCaseStmtRange - If case statement range is not too big then
1730/// add multiple cases to switch instruction, one for each value within
1731/// the range. If range is too big then emit "if" condition check.
1733 ArrayRef<const Attr *> Attrs) {
1734 assert(S.getRHS() && "Expected RHS value in CaseStmt");
1735
1736 llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1737 llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1738
1739 // Emit the code for this case. We do this first to make sure it is
1740 // properly chained from our predecessor before generating the
1741 // switch machinery to enter this block.
1742 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1743 EmitBlockWithFallThrough(CaseDest, &S);
1744 EmitStmt(S.getSubStmt());
1745
1746 // If range is empty, do nothing.
1747 if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1748 return;
1749
1751 llvm::APInt Range = RHS - LHS;
1752 // FIXME: parameters such as this should not be hardcoded.
1753 if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1754 // Range is small enough to add multiple switch instruction cases.
1755 uint64_t Total = getProfileCount(&S);
1756 unsigned NCases = Range.getZExtValue() + 1;
1757 // We only have one region counter for the entire set of cases here, so we
1758 // need to divide the weights evenly between the generated cases, ensuring
1759 // that the total weight is preserved. E.g., a weight of 5 over three cases
1760 // will be distributed as weights of 2, 2, and 1.
1761 uint64_t Weight = Total / NCases, Rem = Total % NCases;
1762 for (unsigned I = 0; I != NCases; ++I) {
1763 if (SwitchWeights)
1764 SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1765 else if (SwitchLikelihood)
1766 SwitchLikelihood->push_back(LH);
1767
1768 if (Rem)
1769 Rem--;
1770 SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1771 ++LHS;
1772 }
1773 return;
1774 }
1775
1776 // The range is too big. Emit "if" condition into a new block,
1777 // making sure to save and restore the current insertion point.
1778 llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1779
1780 // Push this test onto the chain of range checks (which terminates
1781 // in the default basic block). The switch's default will be changed
1782 // to the top of this chain after switch emission is complete.
1783 llvm::BasicBlock *FalseDest = CaseRangeBlock;
1784 CaseRangeBlock = createBasicBlock("sw.caserange");
1785
1786 CurFn->insert(CurFn->end(), CaseRangeBlock);
1787 Builder.SetInsertPoint(CaseRangeBlock);
1788
1789 // Emit range check.
1790 llvm::Value *Diff =
1791 Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1792 llvm::Value *Cond =
1793 Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1794
1795 llvm::MDNode *Weights = nullptr;
1796 if (SwitchWeights) {
1797 uint64_t ThisCount = getProfileCount(&S);
1798 uint64_t DefaultCount = (*SwitchWeights)[0];
1799 Weights = createProfileWeights(ThisCount, DefaultCount);
1800
1801 // Since we're chaining the switch default through each large case range, we
1802 // need to update the weight for the default, ie, the first case, to include
1803 // this case.
1804 (*SwitchWeights)[0] += ThisCount;
1805 } else if (SwitchLikelihood)
1806 Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1807
1808 Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1809
1810 // Restore the appropriate insertion point.
1811 if (RestoreBB)
1812 Builder.SetInsertPoint(RestoreBB);
1813 else
1814 Builder.ClearInsertionPoint();
1815}
1816
1818 ArrayRef<const Attr *> Attrs) {
1819 // If there is no enclosing switch instance that we're aware of, then this
1820 // case statement and its block can be elided. This situation only happens
1821 // when we've constant-folded the switch, are emitting the constant case,
1822 // and part of the constant case includes another case statement. For
1823 // instance: switch (4) { case 4: do { case 5: } while (1); }
1824 if (!SwitchInsn) {
1825 EmitStmt(S.getSubStmt());
1826 return;
1827 }
1828
1829 // Handle case ranges.
1830 if (S.getRHS()) {
1831 EmitCaseStmtRange(S, Attrs);
1832 return;
1833 }
1834
1835 llvm::ConstantInt *CaseVal =
1837
1838 // Emit debuginfo for the case value if it is an enum value.
1839 const ConstantExpr *CE;
1840 if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1841 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1842 else
1843 CE = dyn_cast<ConstantExpr>(S.getLHS());
1844 if (CE) {
1845 if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1846 if (CGDebugInfo *Dbg = getDebugInfo())
1847 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1848 Dbg->EmitGlobalVariable(DE->getDecl(),
1849 APValue(llvm::APSInt(CaseVal->getValue())));
1850 }
1851
1852 if (SwitchLikelihood)
1853 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1854
1855 // If the body of the case is just a 'break', try to not emit an empty block.
1856 // If we're profiling or we're not optimizing, leave the block in for better
1857 // debug and coverage analysis.
1858 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1859 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1861 JumpDest Block = BreakContinueStack.back().BreakBlock;
1862
1863 // Only do this optimization if there are no cleanups that need emitting.
1865 if (SwitchWeights)
1866 SwitchWeights->push_back(getProfileCount(&S));
1867 SwitchInsn->addCase(CaseVal, Block.getBlock());
1868
1869 // If there was a fallthrough into this case, make sure to redirect it to
1870 // the end of the switch as well.
1871 if (Builder.GetInsertBlock()) {
1872 Builder.CreateBr(Block.getBlock());
1873 Builder.ClearInsertionPoint();
1874 }
1875 return;
1876 }
1877 }
1878
1879 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1880 EmitBlockWithFallThrough(CaseDest, &S);
1881 if (SwitchWeights)
1882 SwitchWeights->push_back(getProfileCount(&S));
1883 SwitchInsn->addCase(CaseVal, CaseDest);
1884
1885 // Recursively emitting the statement is acceptable, but is not wonderful for
1886 // code where we have many case statements nested together, i.e.:
1887 // case 1:
1888 // case 2:
1889 // case 3: etc.
1890 // Handling this recursively will create a new block for each case statement
1891 // that falls through to the next case which is IR intensive. It also causes
1892 // deep recursion which can run into stack depth limitations. Handle
1893 // sequential non-range case statements specially.
1894 //
1895 // TODO When the next case has a likelihood attribute the code returns to the
1896 // recursive algorithm. Maybe improve this case if it becomes common practice
1897 // to use a lot of attributes.
1898 const CaseStmt *CurCase = &S;
1899 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1900
1901 // Otherwise, iteratively add consecutive cases to this switch stmt.
1902 while (NextCase && NextCase->getRHS() == nullptr) {
1903 CurCase = NextCase;
1904 llvm::ConstantInt *CaseVal =
1905 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1906
1907 if (SwitchWeights)
1908 SwitchWeights->push_back(getProfileCount(NextCase));
1909 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1910 CaseDest = createBasicBlock("sw.bb");
1911 EmitBlockWithFallThrough(CaseDest, CurCase);
1912 }
1913 // Since this loop is only executed when the CaseStmt has no attributes
1914 // use a hard-coded value.
1915 if (SwitchLikelihood)
1916 SwitchLikelihood->push_back(Stmt::LH_None);
1917
1918 SwitchInsn->addCase(CaseVal, CaseDest);
1919 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1920 }
1921
1922 // Generate a stop point for debug info if the case statement is
1923 // followed by a default statement. A fallthrough case before a
1924 // default case gets its own branch target.
1925 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1926 EmitStopPoint(CurCase);
1927
1928 // Normal default recursion for non-cases.
1929 EmitStmt(CurCase->getSubStmt());
1930}
1931
1933 ArrayRef<const Attr *> Attrs) {
1934 // If there is no enclosing switch instance that we're aware of, then this
1935 // default statement can be elided. This situation only happens when we've
1936 // constant-folded the switch.
1937 if (!SwitchInsn) {
1938 EmitStmt(S.getSubStmt());
1939 return;
1940 }
1941
1942 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1943 assert(DefaultBlock->empty() &&
1944 "EmitDefaultStmt: Default block already defined?");
1945
1946 if (SwitchLikelihood)
1947 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1948
1949 EmitBlockWithFallThrough(DefaultBlock, &S);
1950
1951 EmitStmt(S.getSubStmt());
1952}
1953
1954namespace {
1955struct EmitDeferredStatement final : EHScopeStack::Cleanup {
1956 const DeferStmt &Stmt;
1957 EmitDeferredStatement(const DeferStmt *Stmt) : Stmt(*Stmt) {}
1958
1959 void Emit(CodeGenFunction &CGF, Flags) override {
1960 // Take care that any cleanups pushed by the body of a '_Defer' statement
1961 // don't clobber the current cleanup slot value.
1962 //
1963 // Assume we have a scope that pushes a cleanup; when that scope is exited,
1964 // we need to run that cleanup; this is accomplished by emitting the cleanup
1965 // into a separate block and then branching to that block at scope exit.
1966 //
1967 // Where this gets complicated is if we exit the scope in multiple different
1968 // ways; e.g. in a 'for' loop, we may exit the scope of its body by falling
1969 // off the end (in which case we need to run the cleanup and then branch to
1970 // the increment), or by 'break'ing out of the loop (in which case we need
1971 // to run the cleanup and then branch to the loop exit block); in both cases
1972 // we first branch to the cleanup block to run the cleanup, but the block we
1973 // need to jump to *after* running the cleanup is different.
1974 //
1975 // This is accomplished using a local integer variable called the 'cleanup
1976 // slot': before branching to the cleanup block, we store a value into that
1977 // slot. Then, in the cleanup block, after running the cleanup, we load the
1978 // value of that variable and 'switch' on it to branch to the appropriate
1979 // continuation block.
1980 //
1981 // The problem that arises once '_Defer' statements are involved is that the
1982 // body of a '_Defer' is an arbitrary statement which itself can create more
1983 // cleanups. This means we may end up overwriting the cleanup slot before we
1984 // ever have a chance to 'switch' on it, which means that once we *do* get
1985 // to the 'switch', we end up in whatever block the cleanup code happened to
1986 // pick as the default 'switch' exit label!
1987 //
1988 // That is, what is normally supposed to happen is something like:
1989 //
1990 // 1. Store 'X' to cleanup slot.
1991 // 2. Branch to cleanup block.
1992 // 3. Execute cleanup.
1993 // 4. Read value from cleanup slot.
1994 // 5. Branch to the block associated with 'X'.
1995 //
1996 // But if we encounter a _Defer' statement that contains a cleanup, then
1997 // what might instead happen is:
1998 //
1999 // 1. Store 'X' to cleanup slot.
2000 // 2. Branch to cleanup block.
2001 // 3. Execute cleanup; this ends up pushing another cleanup, so:
2002 // 3a. Store 'Y' to cleanup slot.
2003 // 3b. Run steps 2–5 recursively.
2004 // 4. Read value from cleanup slot, which is now 'Y' instead of 'X'.
2005 // 5. Branch to the block associated with 'Y'... which doesn't even
2006 // exist because the value 'Y' is only meaningful for the inner
2007 // cleanup. The result is we just branch 'somewhere random'.
2008 //
2009 // The rest of the cleanup code simply isn't prepared to handle this case
2010 // because most other cleanups can't push more cleanups, and thus, emitting
2011 // other cleanups generally cannot clobber the cleanup slot.
2012 //
2013 // To prevent this from happening, save the current cleanup slot value and
2014 // restore it after emitting the '_Defer' statement.
2015 llvm::Value *SavedCleanupDest = nullptr;
2016 if (CGF.NormalCleanupDest.isValid())
2017 SavedCleanupDest =
2018 CGF.Builder.CreateLoad(CGF.NormalCleanupDest, "cleanup.dest.saved");
2019
2020 CGF.EmitStmt(Stmt.getBody());
2021
2022 if (SavedCleanupDest && CGF.HaveInsertPoint())
2023 CGF.Builder.CreateStore(SavedCleanupDest, CGF.NormalCleanupDest);
2024
2025 // Cleanups must end with an insert point.
2026 CGF.EnsureInsertPoint();
2027 }
2028};
2029} // namespace
2030
// Lower a '_Defer' statement: register its body as a cleanup on the EH stack
// so it runs when the enclosing scope exits, on both the normal path and
// during exception unwinding (NormalAndEHCleanup). The body itself is emitted
// later by EmitDeferredStatement::Emit (above) when the cleanup is popped.
2032 EHStack.pushCleanup<EmitDeferredStatement>(NormalAndEHCleanup, &S);
2033 }
2034
2035 /// CollectStatementsForCase - Given the body of a 'switch' statement and a
2036 /// constant value that is being switched on, see if we can dead code eliminate
2037 /// the body of the switch to a simple series of statements to emit. Basically,
2038 /// on a switch (5) we want to find these statements:
2039 /// case 5:
2040 /// printf(...); <--
2041 /// ++i; <--
2042 /// break;
2043 ///
2044 /// and add them to the ResultStmts vector. If it is unsafe to do this
2045 /// transformation (for example, one of the elided statements contains a label
2046 /// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
2047 /// should include statements after it (e.g. the printf() line is a substmt of
2048 /// the case) then return CSFC_FallThrough. If we handled it and found a break
2049 /// statement, then return CSFC_Success.
2050 ///
2051 /// If Case is non-null, then we are looking for the specified case, checking
2052 /// that nothing we jump over contains labels. If Case is null, then we found
2053 /// the case and are looking for the break.
2054 ///
2055 /// If the recursive walk actually finds our Case, then we set FoundCase to
2056 /// true.
2057 ///
// NOTE(review): the first line of the function signature is not visible in
// this rendering; the parameter list continues below.
2060 const SwitchCase *Case,
2061 bool &FoundCase,
2062 SmallVectorImpl<const Stmt*> &ResultStmts) {
2063 // If this is a null statement, just succeed.
2064 if (!S)
2065 return Case ? CSFC_Success : CSFC_FallThrough;
2066
2067 // If this is the switchcase (case 4: or default) that we're looking for, then
2068 // we're in business. Just add the substatement.
2069 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
// Found the case we were searching for: switch to "collection" mode by
// recursing with a null Case.
2070 if (S == Case) {
2071 FoundCase = true;
2072 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
2073 ResultStmts);
2074 }
2075
2076 // Otherwise, this is some other case or default statement, just ignore it.
2077 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
2078 ResultStmts);
2079 }
2080
2081 // If we are in the live part of the code and we found our break statement,
2082 // return a success!
2083 if (!Case && isa<BreakStmt>(S))
2084 return CSFC_Success;
2085
2086 // If this is a switch statement, then it might contain the SwitchCase, the
2087 // break, or neither.
2088 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
2089 // Handle this as two cases: we might be looking for the SwitchCase (if so
2090 // the skipped statements must be skippable) or we might already have it.
2091 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
2092 bool StartedInLiveCode = FoundCase;
2093 unsigned StartSize = ResultStmts.size();
2094
2095 // If we've not found the case yet, scan through looking for it.
2096 if (Case) {
2097 // Keep track of whether we see a skipped declaration. The code could be
2098 // using the declaration even if it is skipped, so we can't optimize out
2099 // the decl if the kept statements might refer to it.
2100 bool HadSkippedDecl = false;
2101
2102 // If we're looking for the case, just see if we can skip each of the
2103 // substatements.
2104 for (; Case && I != E; ++I) {
2105 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
2106
2107 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
2108 case CSFC_Failure: return CSFC_Failure;
2109 case CSFC_Success:
2110 // A successful result means that either 1) that the statement doesn't
2111 // have the case and is skippable, or 2) does contain the case value
2112 // and also contains the break to exit the switch. In the later case,
2113 // we just verify the rest of the statements are elidable.
2114 if (FoundCase) {
2115 // If we found the case and skipped declarations, we can't do the
2116 // optimization.
2117 if (HadSkippedDecl)
2118 return CSFC_Failure;
2119
2120 for (++I; I != E; ++I)
2121 if (CodeGenFunction::ContainsLabel(*I, true))
2122 return CSFC_Failure;
2123 return CSFC_Success;
2124 }
2125 break;
2126 case CSFC_FallThrough:
2127 // If we have a fallthrough condition, then we must have found the
2128 // case started to include statements. Consider the rest of the
2129 // statements in the compound statement as candidates for inclusion.
2130 assert(FoundCase && "Didn't find case but returned fallthrough?");
2131 // We recursively found Case, so we're not looking for it anymore.
2132 Case = nullptr;
2133
2134 // If we found the case and skipped declarations, we can't do the
2135 // optimization.
2136 if (HadSkippedDecl)
2137 return CSFC_Failure;
2138 break;
2139 }
2140 }
2141
// Walked the whole compound without finding the case: the entire compound
// is skippable as a unit.
2142 if (!FoundCase)
2143 return CSFC_Success;
2144
2145 assert(!HadSkippedDecl && "fallthrough after skipping decl");
2146 }
2147
2148 // If we have statements in our range, then we know that the statements are
2149 // live and need to be added to the set of statements we're tracking.
2150 bool AnyDecls = false;
2151 for (; I != E; ++I) {
// NOTE(review): a line elided by this rendering updates AnyDecls here (it is
// read below but never visibly written inside this loop) — confirm against
// upstream.
2153
2154 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
2155 case CSFC_Failure: return CSFC_Failure;
2156 case CSFC_FallThrough:
2157 // A fallthrough result means that the statement was simple and just
2158 // included in ResultStmt, keep adding them afterwards.
2159 break;
2160 case CSFC_Success:
2161 // A successful result means that we found the break statement and
2162 // stopped statement inclusion. We just ensure that any leftover stmts
2163 // are skippable and return success ourselves.
2164 for (++I; I != E; ++I)
2165 if (CodeGenFunction::ContainsLabel(*I, true))
2166 return CSFC_Failure;
2167 return CSFC_Success;
2168 }
2169 }
2170
2171 // If we're about to fall out of a scope without hitting a 'break;', we
2172 // can't perform the optimization if there were any decls in that scope
2173 // (we'd lose their end-of-lifetime).
2174 if (AnyDecls) {
2175 // If the entire compound statement was live, there's one more thing we
2176 // can try before giving up: emit the whole thing as a single statement.
2177 // We can do that unless the statement contains a 'break;'.
2178 // FIXME: Such a break must be at the end of a construct within this one.
2179 // We could emit this by just ignoring the BreakStmts entirely.
2180 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
2181 ResultStmts.resize(StartSize);
2182 ResultStmts.push_back(S);
2183 } else {
2184 return CSFC_Failure;
2185 }
2186 }
2187
2188 return CSFC_FallThrough;
2189 }
2190
2191 // Okay, this is some other statement that we don't handle explicitly, like a
2192 // for statement or increment etc. If we are skipping over this statement,
2193 // just verify it doesn't have labels, which would make it invalid to elide.
2194 if (Case) {
2195 if (CodeGenFunction::ContainsLabel(S, true))
2196 return CSFC_Failure;
2197 return CSFC_Success;
2198 }
2199
2200 // Otherwise, we want to include this statement. Everything is cool with that
2201 // so long as it doesn't contain a break out of the switch we're in.
// NOTE(review): a line elided by this rendering appears to implement the
// break-containment check described in the comment above — confirm against
// upstream.
2203
2204 // Otherwise, everything is great. Include the statement and tell the caller
2205 // that we fall through and include the next statement as well.
2206 ResultStmts.push_back(S);
2207 return CSFC_FallThrough;
2208 }
2209
2210 /// FindCaseStatementsForValue - Find the case statement being jumped to and
2211 /// then invoke CollectStatementsForCase to find the list of statements to emit
2212 /// for a switch on constant. See the comment above CollectStatementsForCase
2213 /// for more details.
// NOTE(review): the first line of the function signature is not visible in
// this rendering; the parameter list continues below.
2215 const llvm::APSInt &ConstantCondValue,
2216 SmallVectorImpl<const Stmt*> &ResultStmts,
2217 ASTContext &C,
2218 const SwitchCase *&ResultCase) {
2219 // First step, find the switch case that is being branched to. We can do this
2220 // efficiently by scanning the SwitchCase list.
2221 const SwitchCase *Case = S.getSwitchCaseList();
2222 const DefaultStmt *DefaultCase = nullptr;
2223
2224 for (; Case; Case = Case->getNextSwitchCase()) {
2225 // It's either a default or case. Just remember the default statement in
2226 // case we're not jumping to any numbered cases.
2227 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
2228 DefaultCase = DS;
2229 continue;
2230 }
2231
2232 // Check to see if this case is the one we're looking for.
2233 const CaseStmt *CS = cast<CaseStmt>(Case);
// Case ranges (GNU 'case 1 ... 5:') are not supported by this fast path.
2234 // Don't handle case ranges yet.
2235 if (CS->getRHS()) return false;
2236
2237 // If we found our case, remember it as 'case'.
2238 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
2239 break;
2240 }
2241
2242 // If we didn't find a matching case, we use a default if it exists, or we
2243 // elide the whole switch body!
2244 if (!Case) {
2245 // It is safe to elide the body of the switch if it doesn't contain labels
2246 // etc. If it is safe, return successfully with an empty ResultStmts list.
2247 if (!DefaultCase)
// NOTE(review): the statement guarded by this 'if' is on a line elided by
// this rendering — confirm against upstream.
2249 Case = DefaultCase;
2250 }
2251
2252 // Ok, we know which case is being jumped to, try to collect all the
2253 // statements that follow it. This can fail for a variety of reasons. Also,
2254 // check to see that the recursive walk actually found our case statement.
2255 // Insane cases like this can fail to find it in the recursive walk since we
2256 // don't handle every stmt kind:
2257 // switch (4) {
2258 // while (1) {
2259 // case 4: ...
2260 bool FoundCase = false;
2261 ResultCase = Case;
2262 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
2263 ResultStmts) != CSFC_Failure &&
2264 FoundCase;
2265 }
2266
/// Map the likelihood annotations collected for a switch's branches onto
/// integer branch weights suitable for profile metadata. Returns std::nullopt
/// when there is at most one branch or no [[likely]]/[[unlikely]] attribute
/// is present at all.
2267 static std::optional<SmallVector<uint64_t, 16>>
// NOTE(review): the parameter-list line of the signature is not visible in
// this rendering ('Likelihoods' is the argument used below).
2269 // Are there enough branches to weight them?
2270 if (Likelihoods.size() <= 1)
2271 return std::nullopt;
2272
// First pass: count how many branches carry each likelihood kind.
2273 uint64_t NumUnlikely = 0;
2274 uint64_t NumNone = 0;
2275 uint64_t NumLikely = 0;
2276 for (const auto LH : Likelihoods) {
2277 switch (LH) {
2278 case Stmt::LH_Unlikely:
2279 ++NumUnlikely;
2280 break;
2281 case Stmt::LH_None:
2282 ++NumNone;
2283 break;
2284 case Stmt::LH_Likely:
2285 ++NumLikely;
2286 break;
2287 }
2288 }
2289
2290 // Is there a likelihood attribute used?
2291 if (NumUnlikely == 0 && NumLikely == 0)
2292 return std::nullopt;
2293
2294 // When multiple cases share the same code they can be combined during
2295 // optimization. In that case the weights of the branch will be the sum of
2296 // the individual weights. Make sure the combined sum of all neutral cases
2297 // doesn't exceed the value of a single likely attribute.
2298 // The additions both avoid divisions by 0 and make sure the weights of None
2299 // don't exceed the weight of Likely.
2300 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2301 const uint64_t None = Likely / (NumNone + 1);
2302 const uint64_t Unlikely = 0;
// NOTE(review): the declaration of 'Result' sits on a line elided by this
// rendering (it is used below but never visibly declared).
2304
// Second pass: translate each likelihood into its computed weight.
2305 Result.reserve(Likelihoods.size());
2306 for (const auto LH : Likelihoods) {
2307 switch (LH) {
2308 case Stmt::LH_Unlikely:
2309 Result.push_back(Unlikely);
2310 break;
2311 case Stmt::LH_None:
2312 Result.push_back(None);
2313 break;
2314 case Stmt::LH_Likely:
2315 Result.push_back(Likely);
2316 break;
2317 }
2318 }
2319
2320 return Result;
2321 }
2322
// NOTE(review): the function signature line is not visible in this rendering;
// the body below emits a 'switch' statement, constant-folding the condition
// when possible and otherwise building an llvm::SwitchInst with optional
// profile/likelihood weights and HLSL branch-hint metadata.
2324 // Handle nested switch statements.
2325 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2326 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2327 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2328 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2329
2330 // See if we can constant fold the condition of the switch and therefore only
2331 // emit the live case statement (if any) of the switch.
2332 llvm::APSInt ConstantCondValue;
2333 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
// NOTE(review): the declaration of 'CaseStmts' is on a line elided by this
// rendering (it is used below).
2335 const SwitchCase *Case = nullptr;
2336 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2337 getContext(), Case)) {
2338 if (Case)
// NOTE(review): the statement guarded by 'if (Case)' is on a line elided by
// this rendering — confirm against upstream.
2340 RunCleanupsScope ExecutedScope(*this);
2341
2342 if (S.getInit())
2343 EmitStmt(S.getInit());
2344
2345 // Emit the condition variable if needed inside the entire cleanup scope
2346 // used by this special case for constant folded switches.
2347 if (S.getConditionVariable())
2348 EmitDecl(*S.getConditionVariable(), /*EvaluateConditionDecl=*/true);
2349
2350 // At this point, we are no longer "within" a switch instance, so
2351 // we can temporarily enforce this to ensure that any embedded case
2352 // statements are not emitted.
2353 SwitchInsn = nullptr;
2354
2355 // Okay, we can dead code eliminate everything except this case. Emit the
2356 // specified series of statements and we're good.
2357 for (const Stmt *CaseStmt : CaseStmts)
// NOTE(review): the loop body (which, per the comment above, emits each
// collected statement) is on line(s) elided by this rendering.
2360 PGO->markStmtMaybeUsed(S.getBody());
2361
2362 // Now we want to restore the saved switch instance so that nested
2363 // switches continue to function properly
2364 SwitchInsn = SavedSwitchInsn;
2365
2366 return;
2367 }
2368 }
2369
2370 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2371
2372 RunCleanupsScope ConditionScope(*this);
2373
2374 if (S.getInit())
2375 EmitStmt(S.getInit());
2376
2377 if (S.getConditionVariable())
// NOTE(review): the statement guarded by 'if (S.getConditionVariable())' is
// on a line elided by this rendering.
2379 llvm::Value *CondV = EmitScalarExpr(S.getCond());
// NOTE(review): one further line is elided here in this rendering.
2381
2382 // Create basic block to hold stuff that comes after switch
2383 // statement. We also need to create a default block now so that
2384 // explicit case ranges tests can have a place to jump to on
2385 // failure.
2386 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2387 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
2388 addInstToNewSourceAtom(SwitchInsn, CondV);
2389
// Lower an HLSL '[branch]'/'[flatten]'-style control-flow hint, if present,
// as "hlsl.controlflow.hint" metadata on the switch.
2390 if (HLSLControlFlowAttr != HLSLControlFlowHintAttr::SpellingNotCalculated) {
2391 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2392 llvm::ConstantInt *BranchHintConstant =
// NOTE(review): the left-hand side of this conditional's comparison is on a
// line elided by this rendering.
2394 HLSLControlFlowHintAttr::Spelling::Microsoft_branch
2395 ? llvm::ConstantInt::get(CGM.Int32Ty, 1)
2396 : llvm::ConstantInt::get(CGM.Int32Ty, 2);
2397 llvm::Metadata *Vals[] = {MDHelper.createString("hlsl.controlflow.hint"),
2398 MDHelper.createConstant(BranchHintConstant)};
2399 SwitchInsn->setMetadata("hlsl.controlflow.hint",
2400 llvm::MDNode::get(CGM.getLLVMContext(), Vals));
2401 }
2402
// With real profile counts, pre-size the weight vector; otherwise (when
// optimizing) fall back to recording per-case [[likely]]/[[unlikely]] info.
2403 if (PGO->haveRegionCounts()) {
2404 // Walk the SwitchCase list to find how many there are.
2405 uint64_t DefaultCount = 0;
2406 unsigned NumCases = 0;
2407 for (const SwitchCase *Case = S.getSwitchCaseList();
2408 Case;
2409 Case = Case->getNextSwitchCase()) {
2410 if (isa<DefaultStmt>(Case))
2411 DefaultCount = getProfileCount(Case);
2412 NumCases += 1;
2413 }
2414 SwitchWeights = new SmallVector<uint64_t, 16>();
2415 SwitchWeights->reserve(NumCases);
2416 // The default needs to be first. We store the edge count, so we already
2417 // know the right weight.
2418 SwitchWeights->push_back(DefaultCount);
2419 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2420 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2421 // Initialize the default case.
2422 SwitchLikelihood->push_back(Stmt::LH_None);
2423 }
2424
2425 CaseRangeBlock = DefaultBlock;
2426
2427 // Clear the insertion point to indicate we are in unreachable code.
2428 Builder.ClearInsertionPoint();
2429
2430 // All break statements jump to NextBlock. If BreakContinueStack is non-empty
2431 // then reuse last ContinueBlock.
2432 JumpDest OuterContinue;
2433 if (!BreakContinueStack.empty())
2434 OuterContinue = BreakContinueStack.back().ContinueBlock;
2435
2436 BreakContinueStack.push_back(BreakContinue(S, SwitchExit, OuterContinue));
2437
2438 // Emit switch body.
2439 EmitStmt(S.getBody());
2440
2441 BreakContinueStack.pop_back();
2442
2443 // Update the default block in case explicit case range tests have
2444 // been chained on top.
2445 SwitchInsn->setDefaultDest(CaseRangeBlock);
2446
2447 // If a default was never emitted:
2448 if (!DefaultBlock->getParent()) {
2449 // If we have cleanups, emit the default block so that there's a
2450 // place to jump through the cleanups from.
2451 if (ConditionScope.requiresCleanups()) {
2452 EmitBlock(DefaultBlock);
2453
2454 // Otherwise, just forward the default block to the switch end.
2455 } else {
2456 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2457 delete DefaultBlock;
2458 }
2459 }
2460
2461 ConditionScope.ForceCleanup();
2462
2463 // Close the last case (or DefaultBlock).
2464 EmitBranch(SwitchExit.getBlock());
2465
2466 // Insert a False Counter if SwitchStmt doesn't have DefaultStmt.
2467 if (hasSkipCounter(S.getCond())) {
2468 auto *ImplicitDefaultBlock = createBasicBlock("sw.false");
2469 EmitBlock(ImplicitDefaultBlock);
// NOTE(review): one line is elided here in this rendering.
2471 Builder.CreateBr(SwitchInsn->getDefaultDest());
2472 SwitchInsn->setDefaultDest(ImplicitDefaultBlock);
2473 }
2474
2475 // Emit continuation.
2476 EmitBlock(SwitchExit.getBlock(), true);
// NOTE(review): one line is elided here in this rendering.
2478
2479 // If the switch has a condition wrapped by __builtin_unpredictable,
2480 // create metadata that specifies that the switch is unpredictable.
2481 // Don't bother if not optimizing because that metadata would not be used.
2482 auto *Call = dyn_cast<CallExpr>(S.getCond());
2483 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2484 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2485 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2486 llvm::MDBuilder MDHelper(getLLVMContext());
2487 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2488 MDHelper.createUnpredictable());
2489 }
2490 }
2491
// Attach the collected weights (profile counts take precedence over
// likelihood attributes) as !prof metadata, then free the side tables.
2492 if (SwitchWeights) {
2493 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2494 "switch weights do not match switch cases");
2495 // If there's only one jump destination there's no sense weighting it.
2496 if (SwitchWeights->size() > 1)
2497 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2498 createProfileWeights(*SwitchWeights));
2499 delete SwitchWeights;
2500 } else if (SwitchLikelihood) {
2501 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2502 "switch likelihoods do not match switch cases");
2503 std::optional<SmallVector<uint64_t, 16>> LHW =
2504 getLikelihoodWeights(*SwitchLikelihood);
2505 if (LHW) {
2506 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2507 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2508 createProfileWeights(*LHW));
2509 }
2510 delete SwitchLikelihood;
2511 }
// Restore the outer switch's state (supports nested switch statements).
2512 SwitchInsn = SavedSwitchInsn;
2513 SwitchWeights = SavedSwitchWeights;
2514 SwitchLikelihood = SavedSwitchLikelihood;
2515 CaseRangeBlock = SavedCRBlock;
2516 }
2517
// Lower an lvalue operand of an inline asm. Register-style constraints load
// the value (possibly reinterpreted as a same-width integer when the target
// allows scalarizing the operand); memory operands are passed by address and
// get a '*' (indirect) marker appended to the constraint string.
2518 std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2519 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2520 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2521 if (Info.allowsRegister() || !Info.allowsMemory()) {
// NOTE(review): the condition guarding this early return is on a line elided
// by this rendering (the 'return' below would otherwise make the following
// code unreachable) — confirm against upstream.
2523 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2524
2525 llvm::Type *Ty = ConvertType(InputType);
2526 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2527 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2528 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
// Reinterpret the operand as an integer of the same width so it can live in
// a general-purpose register.
2529 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2530
2531 return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
2532 nullptr};
2533 }
2534 }
2535
// Memory operand: pass the address and mark the constraint as indirect.
2536 Address Addr = InputValue.getAddress();
2537 ConstraintStr += '*';
2538 return {InputValue.getPointer(*this), Addr.getElementType()};
2539 }
// Lower a (possibly rvalue) input operand of an inline asm: immediate-only
// constraints are evaluated to integer constants, register-style operands are
// emitted as scalars, and everything else is routed through
// EmitAsmInputLValue as a memory operand.
2540 std::pair<llvm::Value *, llvm::Type *>
2541 CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2542 const Expr *InputExpr,
2543 std::string &ConstraintStr) {
2544 // If this can't be a register or memory, i.e., has to be a constant
2545 // (immediate or symbolic), try to emit it as such.
2546 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2547 if (Info.requiresImmediateConstant()) {
2548 Expr::EvalResult EVResult;
2549 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2550
2551 llvm::APSInt IntResult;
2552 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2553 getContext()))
2554 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2555 }
2556
2557 Expr::EvalResult Result;
2558 if (InputExpr->EvaluateAsInt(Result, getContext()))
2559 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2560 nullptr};
2561 }
2562
2563 if (Info.allowsRegister() || !Info.allowsMemory())
// NOTE(review): a line between this condition and the following 'return' is
// elided by this rendering — confirm against upstream.
2565 return {EmitScalarExpr(InputExpr), nullptr};
2566 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2567 return {EmitScalarExpr(InputExpr), nullptr};
// Memory operand: strip no-op casts, form an lvalue, and defer to the lvalue
// path (which appends the indirect '*' marker to the constraint).
2568 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2569 LValue Dest = EmitLValue(InputExpr);
2570 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2571 InputExpr->getExprLoc());
2572 }
2573
2574 /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2575 /// asm call instruction. The !srcloc MDNode contains a list of constant
2576 /// integers which are the source locations of the start of each line in the
2577 /// asm.
2578 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2579 CodeGenFunction &CGF) {
// NOTE(review): the declaration of 'Locs' is on a line elided by this
// rendering (it is used immediately below).
2581 // Add the location of the first line to the MDNode.
2582 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2583 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2584 StringRef StrVal = Str->getString();
2585 if (!StrVal.empty()) {
// NOTE(review): the declaration of 'SM' (used by getLocationOfByte below) is
// on a line elided by this rendering.
2587 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2588 unsigned StartToken = 0;
2589 unsigned ByteOffset = 0;
2590
2591 // Add the location of the start of each subsequent line of the asm to the
2592 // MDNode.
2593 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2594 if (StrVal[i] != '\n') continue;
2595 SourceLocation LineLoc = Str->getLocationOfByte(
2596 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2597 Locs.push_back(llvm::ConstantAsMetadata::get(
2598 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2599 }
2600 }
2601
2602 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2603 }
2604
2605static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2606 bool HasUnwindClobber, bool ReadOnly,
2607 bool ReadNone, bool NoMerge, bool NoConvergent,
2608 const AsmStmt &S,
2609 const std::vector<llvm::Type *> &ResultRegTypes,
2610 const std::vector<llvm::Type *> &ArgElemTypes,
2611 CodeGenFunction &CGF,
2612 std::vector<llvm::Value *> &RegResults) {
2613 if (!HasUnwindClobber)
2614 Result.addFnAttr(llvm::Attribute::NoUnwind);
2615
2616 if (NoMerge)
2617 Result.addFnAttr(llvm::Attribute::NoMerge);
2618 // Attach readnone and readonly attributes.
2619 if (!HasSideEffect) {
2620 if (ReadNone)
2621 Result.setDoesNotAccessMemory();
2622 else if (ReadOnly)
2623 Result.setOnlyReadsMemory();
2624 }
2625
2626 // Add elementtype attribute for indirect constraints.
2627 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2628 if (Pair.value()) {
2629 auto Attr = llvm::Attribute::get(
2630 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2631 Result.addParamAttr(Pair.index(), Attr);
2632 }
2633 }
2634
2635 // Slap the source location of the inline asm into a !srcloc metadata on the
2636 // call.
2637 const StringLiteral *SL;
2638 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S);
2639 gccAsmStmt &&
2640 (SL = dyn_cast<StringLiteral>(gccAsmStmt->getAsmStringExpr()))) {
2641 Result.setMetadata("srcloc", getAsmSrcLocInfo(SL, CGF));
2642 } else {
2643 // At least put the line number on MS inline asm blobs and GCC asm constexpr
2644 // strings.
2645 llvm::Constant *Loc =
2646 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2647 Result.setMetadata("srcloc",
2648 llvm::MDNode::get(CGF.getLLVMContext(),
2649 llvm::ConstantAsMetadata::get(Loc)));
2650 }
2651
2652 // Make inline-asm calls Key for the debug info feature Key Instructions.
2653 CGF.addInstToNewSourceAtom(&Result, nullptr);
2654
2655 if (!NoConvergent && CGF.getLangOpts().assumeFunctionsAreConvergent())
2656 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2657 // convergent (meaning, they may call an intrinsically convergent op, such
2658 // as bar.sync, and so can't have certain optimizations applied around
2659 // them) unless it's explicitly marked 'noconvergent'.
2660 Result.addFnAttr(llvm::Attribute::Convergent);
2661 // Extract all of the register value results from the asm.
2662 if (ResultRegTypes.size() == 1) {
2663 RegResults.push_back(&Result);
2664 } else {
2665 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2666 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2667 RegResults.push_back(Tmp);
2668 }
2669 }
2670}
2671
// NOTE(review): the function's name/parameter line is elided by this
// rendering; the body stores each raw asm result into its destination
// lvalue, emitting llvm.assume range hints for bounded outputs and applying
// type fix-ups (fptrunc / inttoptr / ptrtoint / zext-or-trunc / bitcast)
// when the IR result type differs from the destination type.
2672 static void
2674 const llvm::ArrayRef<llvm::Value *> RegResults,
2675 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2676 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2677 const llvm::ArrayRef<LValue> ResultRegDests,
2678 const llvm::ArrayRef<QualType> ResultRegQualTys,
2679 const llvm::BitVector &ResultTypeRequiresCast,
2680 const std::vector<std::optional<std::pair<unsigned, unsigned>>>
2681 &ResultBounds) {
2682 CGBuilderTy &Builder = CGF.Builder;
2683 CodeGenModule &CGM = CGF.CGM;
2684 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2685
2686 assert(RegResults.size() == ResultRegTypes.size());
2687 assert(RegResults.size() == ResultTruncRegTypes.size());
2688 assert(RegResults.size() == ResultRegDests.size());
2689 // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2690 // in which case its size may grow.
2691 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2692 assert(ResultBounds.size() <= ResultRegDests.size());
2693
2694 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2695 llvm::Value *Tmp = RegResults[i];
2696 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2697
// If the output operand declared a value range, tell the optimizer via
// llvm.assume that the result is unsigned-less-than the upper bound.
2698 if ((i < ResultBounds.size()) && ResultBounds[i].has_value()) {
2699 const auto [LowerBound, UpperBound] = ResultBounds[i].value();
2700 // FIXME: Support for nonzero lower bounds not yet implemented.
2701 assert(LowerBound == 0 && "Output operand lower bound is not zero.");
2702 llvm::Constant *UpperBoundConst =
2703 llvm::ConstantInt::get(Tmp->getType(), UpperBound);
2704 llvm::Value *IsBooleanValue =
2705 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, UpperBoundConst);
2706 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2707 Builder.CreateCall(FnAssume, IsBooleanValue);
2708 }
2709
2710 // If the result type of the LLVM IR asm doesn't match the result type of
2711 // the expression, do the conversion.
2712 if (ResultRegTypes[i] != TruncTy) {
2713
2714 // Truncate the integer result to the right size, note that TruncTy can be
2715 // a pointer.
2716 if (TruncTy->isFloatingPointTy())
2717 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2718 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2719 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2720 Tmp = Builder.CreateTrunc(
2721 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2722 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2723 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2724 uint64_t TmpSize =
2725 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2726 Tmp = Builder.CreatePtrToInt(
2727 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2728 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2729 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2730 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2731 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2732 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2733 }
2734 }
2735
2736 ApplyAtomGroup Grp(CGF.getDebugInfo());
2737 LValue Dest = ResultRegDests[i];
2738 // ResultTypeRequiresCast elements correspond to the first
2739 // ResultTypeRequiresCast.size() elements of RegResults.
2740 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2741 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2742 Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
// Targets that can scalarize the operand store it directly; otherwise the
// destination is reinterpreted as a same-width unsigned integer below.
2743 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2744 llvm::StoreInst *S = Builder.CreateStore(Tmp, A);
2745 CGF.addInstToCurrentSourceAtom(S, S->getValueOperand());
2746 continue;
2747 }
2748
2749 QualType Ty =
2750 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2751 if (Ty.isNull()) {
// No integer type of that width exists: diagnose and bail out rather than
// emit a malformed store.
2752 const Expr *OutExpr = S.getOutputExpr(i);
2753 CGM.getDiags().Report(OutExpr->getExprLoc(),
2754 diag::err_store_value_to_reg);
2755 return;
2756 }
2757 Dest = CGF.MakeAddrLValue(A, Ty);
2758 }
2759 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2760 }
2761 }
2762
// NOTE(review): the first line of the signature is elided by this rendering;
// the caller in EmitAsmStmt invokes this as EmitHipStdParUnsupportedAsm(this,
// S) when the asm's constraints are invalid for the target during HIPStdPar
// device compilation. It emits a call to a marker function carrying the asm
// text so the failure is reported/handled later instead of asserting here.
2764 const AsmStmt &S) {
2765 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2766
// Capture the asm text (GCC-style asm only; MS asm leaves this empty).
2767 std::string Asm;
2768 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2769 Asm = GCCAsm->getAsmString();
2770
2771 auto &Ctx = CGF->CGM.getLLVMContext();
2772
// Build (or reuse) 'void __ASM__hipstdpar_unsupported(<asm-string>)' and
// call it with the asm text as a constant-data-array argument.
2773 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2774 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2775 {StrTy->getType()}, false);
2776 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2777
2778 CGF->Builder.CreateCall(UBF, {StrTy});
2779 }
2780
2782 // Pop all cleanup blocks at the end of the asm statement.
2783 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2784
2785 // Assemble the final asm string.
2786 std::string AsmString = S.generateAsmString(getContext());
2787
2788 // Get all the output and input constraints together.
2789 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2790 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2791
2792 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2793 bool IsValidTargetAsm = true;
2794 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2795 StringRef Name;
2796 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2797 Name = GAS->getOutputName(i);
2799 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2800 if (IsHipStdPar && !IsValid)
2801 IsValidTargetAsm = false;
2802 else
2803 assert(IsValid && "Failed to parse output constraint");
2804 OutputConstraintInfos.push_back(Info);
2805 }
2806
2807 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2808 StringRef Name;
2809 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2810 Name = GAS->getInputName(i);
2812 bool IsValid =
2813 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2814 if (IsHipStdPar && !IsValid)
2815 IsValidTargetAsm = false;
2816 else
2817 assert(IsValid && "Failed to parse input constraint");
2818 InputConstraintInfos.push_back(Info);
2819 }
2820
2821 if (!IsValidTargetAsm)
2822 return EmitHipStdParUnsupportedAsm(this, S);
2823
2824 std::string Constraints;
2825
2826 std::vector<LValue> ResultRegDests;
2827 std::vector<QualType> ResultRegQualTys;
2828 std::vector<llvm::Type *> ResultRegTypes;
2829 std::vector<llvm::Type *> ResultTruncRegTypes;
2830 std::vector<llvm::Type *> ArgTypes;
2831 std::vector<llvm::Type *> ArgElemTypes;
2832 std::vector<llvm::Value*> Args;
2833 llvm::BitVector ResultTypeRequiresCast;
2834 std::vector<std::optional<std::pair<unsigned, unsigned>>> ResultBounds;
2835
2836 // Keep track of inout constraints.
2837 std::string InOutConstraints;
2838 std::vector<llvm::Value*> InOutArgs;
2839 std::vector<llvm::Type*> InOutArgTypes;
2840 std::vector<llvm::Type*> InOutArgElemTypes;
2841
2842 // Keep track of out constraints for tied input operand.
2843 std::vector<std::string> OutputConstraints;
2844
2845 // Keep track of defined physregs.
2846 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2847
2848 // An inline asm can be marked readonly if it meets the following conditions:
2849 // - it doesn't have any sideeffects
2850 // - it doesn't clobber memory
2851 // - it doesn't return a value by-reference
2852 // It can be marked readnone if it doesn't have any input memory constraints
2853 // in addition to meeting the conditions listed above.
2854 bool ReadOnly = true, ReadNone = true;
2855
2856 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2857 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2858
2859 // Simplify the output constraint.
2860 std::string OutputConstraint(S.getOutputConstraint(i));
2861 OutputConstraint = getTarget().simplifyConstraint(
2862 StringRef(OutputConstraint).substr(1), &OutputConstraintInfos);
2863
2864 const Expr *OutExpr = S.getOutputExpr(i);
2865 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2866
2867 std::string GCCReg;
2868 OutputConstraint = S.addVariableConstraints(
2869 OutputConstraint, *OutExpr, getTarget(), Info.earlyClobber(),
2870 [&](const Stmt *UnspStmt, StringRef Msg) {
2871 CGM.ErrorUnsupported(UnspStmt, Msg);
2872 },
2873 &GCCReg);
2874 // Give an error on multiple outputs to same physreg.
2875 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2876 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2877
2878 OutputConstraints.push_back(OutputConstraint);
2879 LValue Dest = EmitLValue(OutExpr);
2880 if (!Constraints.empty())
2881 Constraints += ',';
2882
2883 // If this is a register output, then make the inline asm return it
2884 // by-value. If this is a memory result, return the value by-reference.
2885 QualType QTy = OutExpr->getType();
2886 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2888 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2889
2890 Constraints += "=" + OutputConstraint;
2891 ResultRegQualTys.push_back(QTy);
2892 ResultRegDests.push_back(Dest);
2893
2894 ResultBounds.emplace_back(Info.getOutputOperandBounds());
2895
2896 llvm::Type *Ty = ConvertTypeForMem(QTy);
2897 const bool RequiresCast = Info.allowsRegister() &&
2899 Ty->isAggregateType());
2900
2901 ResultTruncRegTypes.push_back(Ty);
2902 ResultTypeRequiresCast.push_back(RequiresCast);
2903
2904 if (RequiresCast) {
2905 unsigned Size = getContext().getTypeSize(QTy);
2906 if (Size)
2907 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2908 else
2909 CGM.Error(OutExpr->getExprLoc(), "output size should not be zero");
2910 }
2911 ResultRegTypes.push_back(Ty);
2912 // If this output is tied to an input, and if the input is larger, then
2913 // we need to set the actual result type of the inline asm node to be the
2914 // same as the input type.
2915 if (Info.hasMatchingInput()) {
2916 unsigned InputNo;
2917 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2918 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2919 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2920 break;
2921 }
2922 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2923
2924 QualType InputTy = S.getInputExpr(InputNo)->getType();
2925 QualType OutputType = OutExpr->getType();
2926
2927 uint64_t InputSize = getContext().getTypeSize(InputTy);
2928 if (getContext().getTypeSize(OutputType) < InputSize) {
2929 // Form the asm to return the value as a larger integer or fp type.
2930 ResultRegTypes.back() = ConvertType(InputTy);
2931 }
2932 }
2933 if (llvm::Type* AdjTy =
2934 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2935 ResultRegTypes.back()))
2936 ResultRegTypes.back() = AdjTy;
2937 else {
2938 CGM.getDiags().Report(S.getAsmLoc(),
2939 diag::err_asm_invalid_type_in_input)
2940 << OutExpr->getType() << OutputConstraint;
2941 }
2942
2943 // Update largest vector width for any vector types.
2944 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2945 LargestVectorWidth =
2946 std::max((uint64_t)LargestVectorWidth,
2947 VT->getPrimitiveSizeInBits().getKnownMinValue());
2948 } else {
2949 Address DestAddr = Dest.getAddress();
2950 // Matrix types in memory are represented by arrays, but accessed through
2951 // vector pointers, with the alignment specified on the access operation.
2952 // For inline assembly, update pointer arguments to use vector pointers.
2953 // Otherwise there will be a mis-match if the matrix is also an
2954 // input-argument which is represented as vector.
2955 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2956 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2957
2958 ArgTypes.push_back(DestAddr.getType());
2959 ArgElemTypes.push_back(DestAddr.getElementType());
2960 Args.push_back(DestAddr.emitRawPointer(*this));
2961 Constraints += "=*";
2962 Constraints += OutputConstraint;
2963 ReadOnly = ReadNone = false;
2964 }
2965
2966 if (Info.isReadWrite()) {
2967 InOutConstraints += ',';
2968
2969 const Expr *InputExpr = S.getOutputExpr(i);
2970 llvm::Value *Arg;
2971 llvm::Type *ArgElemType;
2972 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2973 Info, Dest, InputExpr->getType(), InOutConstraints,
2974 InputExpr->getExprLoc());
2975
2976 if (llvm::Type* AdjTy =
2977 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2978 Arg->getType()))
2979 Arg = Builder.CreateBitCast(Arg, AdjTy);
2980
2981 // Update largest vector width for any vector types.
2982 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2983 LargestVectorWidth =
2984 std::max((uint64_t)LargestVectorWidth,
2985 VT->getPrimitiveSizeInBits().getKnownMinValue());
2986 // Only tie earlyclobber physregs.
2987 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2988 InOutConstraints += llvm::utostr(i);
2989 else
2990 InOutConstraints += OutputConstraint;
2991
2992 InOutArgTypes.push_back(Arg->getType());
2993 InOutArgElemTypes.push_back(ArgElemType);
2994 InOutArgs.push_back(Arg);
2995 }
2996 }
2997
2998 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2999 // to the return value slot. Only do this when returning in registers.
3000 if (isa<MSAsmStmt>(&S)) {
3001 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
3002 if (RetAI.isDirect() || RetAI.isExtend()) {
3003 // Make a fake lvalue for the return value slot.
3005 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
3006 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
3007 ResultRegDests, AsmString, S.getNumOutputs());
3008 SawAsmBlock = true;
3009 }
3010 }
3011
3012 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
3013 const Expr *InputExpr = S.getInputExpr(i);
3014
3015 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
3016
3017 if (Info.allowsMemory())
3018 ReadNone = false;
3019
3020 if (!Constraints.empty())
3021 Constraints += ',';
3022
3023 // Simplify the input constraint.
3024 std::string InputConstraint(S.getInputConstraint(i));
3025 InputConstraint =
3026 getTarget().simplifyConstraint(InputConstraint, &OutputConstraintInfos);
3027
3028 InputConstraint = S.addVariableConstraints(
3029 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
3030 getTarget(), false /* No EarlyClobber */,
3031 [&](const Stmt *UnspStmt, std::string_view Msg) {
3032 CGM.ErrorUnsupported(UnspStmt, Msg);
3033 });
3034
3035 std::string ReplaceConstraint (InputConstraint);
3036 llvm::Value *Arg;
3037 llvm::Type *ArgElemType;
3038 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
3039
3040 // If this input argument is tied to a larger output result, extend the
3041 // input to be the same size as the output. The LLVM backend wants to see
3042 // the input and output of a matching constraint be the same size. Note
3043 // that GCC does not define what the top bits are here. We use zext because
3044 // that is usually cheaper, but LLVM IR should really get an anyext someday.
3045 if (Info.hasTiedOperand()) {
3046 unsigned Output = Info.getTiedOperand();
3047 QualType OutputType = S.getOutputExpr(Output)->getType();
3048 QualType InputTy = InputExpr->getType();
3049
3050 if (getContext().getTypeSize(OutputType) >
3051 getContext().getTypeSize(InputTy)) {
3052 // Use ptrtoint as appropriate so that we can do our extension.
3053 if (isa<llvm::PointerType>(Arg->getType()))
3054 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
3055 llvm::Type *OutputTy = ConvertType(OutputType);
3056 if (isa<llvm::IntegerType>(OutputTy))
3057 Arg = Builder.CreateZExt(Arg, OutputTy);
3058 else if (isa<llvm::PointerType>(OutputTy))
3059 Arg = Builder.CreateZExt(Arg, IntPtrTy);
3060 else if (OutputTy->isFloatingPointTy())
3061 Arg = Builder.CreateFPExt(Arg, OutputTy);
3062 }
3063 // Deal with the tied operands' constraint code in adjustInlineAsmType.
3064 ReplaceConstraint = OutputConstraints[Output];
3065 }
3066 if (llvm::Type* AdjTy =
3067 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
3068 Arg->getType()))
3069 Arg = Builder.CreateBitCast(Arg, AdjTy);
3070 else
3071 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
3072 << InputExpr->getType() << InputConstraint;
3073
3074 // Update largest vector width for any vector types.
3075 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
3076 LargestVectorWidth =
3077 std::max((uint64_t)LargestVectorWidth,
3078 VT->getPrimitiveSizeInBits().getKnownMinValue());
3079
3080 ArgTypes.push_back(Arg->getType());
3081 ArgElemTypes.push_back(ArgElemType);
3082 Args.push_back(Arg);
3083 Constraints += InputConstraint;
3084 }
3085
3086 // Append the "input" part of inout constraints.
3087 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
3088 ArgTypes.push_back(InOutArgTypes[i]);
3089 ArgElemTypes.push_back(InOutArgElemTypes[i]);
3090 Args.push_back(InOutArgs[i]);
3091 }
3092 Constraints += InOutConstraints;
3093
3094 // Labels
3096 llvm::BasicBlock *Fallthrough = nullptr;
3097 bool IsGCCAsmGoto = false;
3098 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
3099 IsGCCAsmGoto = GS->isAsmGoto();
3100 if (IsGCCAsmGoto) {
3101 for (const auto *E : GS->labels()) {
3102 JumpDest Dest = getJumpDestForLabel(E->getLabel());
3103 Transfer.push_back(Dest.getBlock());
3104 if (!Constraints.empty())
3105 Constraints += ',';
3106 Constraints += "!i";
3107 }
3108 Fallthrough = createBasicBlock("asm.fallthrough");
3109 }
3110 }
3111
3112 bool HasUnwindClobber = false;
3113
3114 // Clobbers
3115 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
3116 std::string Clobber = S.getClobber(i);
3117
3118 if (Clobber == "memory")
3119 ReadOnly = ReadNone = false;
3120 else if (Clobber == "unwind") {
3121 HasUnwindClobber = true;
3122 continue;
3123 } else if (Clobber != "cc") {
3124 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
3125 if (CGM.getCodeGenOpts().StackClashProtector &&
3126 getTarget().isSPRegName(Clobber)) {
3127 CGM.getDiags().Report(S.getAsmLoc(),
3128 diag::warn_stack_clash_protection_inline_asm);
3129 }
3130 }
3131
3132 if (isa<MSAsmStmt>(&S)) {
3133 if (Clobber == "eax" || Clobber == "edx") {
3134 if (Constraints.find("=&A") != std::string::npos)
3135 continue;
3136 std::string::size_type position1 =
3137 Constraints.find("={" + Clobber + "}");
3138 if (position1 != std::string::npos) {
3139 Constraints.insert(position1 + 1, "&");
3140 continue;
3141 }
3142 std::string::size_type position2 = Constraints.find("=A");
3143 if (position2 != std::string::npos) {
3144 Constraints.insert(position2 + 1, "&");
3145 continue;
3146 }
3147 }
3148 }
3149 if (!Constraints.empty())
3150 Constraints += ',';
3151
3152 Constraints += "~{";
3153 Constraints += Clobber;
3154 Constraints += '}';
3155 }
3156
3157 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
3158 "unwind clobber can't be used with asm goto");
3159
3160 // Add machine specific clobbers
3161 std::string_view MachineClobbers = getTarget().getClobbers();
3162 if (!MachineClobbers.empty()) {
3163 if (!Constraints.empty())
3164 Constraints += ',';
3165 Constraints += MachineClobbers;
3166 }
3167
3168 llvm::Type *ResultType;
3169 if (ResultRegTypes.empty())
3170 ResultType = VoidTy;
3171 else if (ResultRegTypes.size() == 1)
3172 ResultType = ResultRegTypes[0];
3173 else
3174 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
3175
3176 llvm::FunctionType *FTy =
3177 llvm::FunctionType::get(ResultType, ArgTypes, false);
3178
3179 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3180
3181 llvm::InlineAsm::AsmDialect GnuAsmDialect =
3182 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
3183 ? llvm::InlineAsm::AD_ATT
3184 : llvm::InlineAsm::AD_Intel;
3185 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
3186 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3187
3188 llvm::InlineAsm *IA = llvm::InlineAsm::get(
3189 FTy, AsmString, Constraints, HasSideEffect,
3190 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
3191 std::vector<llvm::Value*> RegResults;
3192 llvm::CallBrInst *CBR;
3193 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3194 CBRRegResults;
3195 if (IsGCCAsmGoto) {
3196 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
3197 EmitBlock(Fallthrough);
3198 UpdateAsmCallInst(*CBR, HasSideEffect, /*HasUnwindClobber=*/false, ReadOnly,
3199 ReadNone, InNoMergeAttributedStmt,
3200 InNoConvergentAttributedStmt, S, ResultRegTypes,
3201 ArgElemTypes, *this, RegResults);
3202 // Because we are emitting code top to bottom, we don't have enough
3203 // information at this point to know precisely whether we have a critical
3204 // edge. If we have outputs, split all indirect destinations.
3205 if (!RegResults.empty()) {
3206 unsigned i = 0;
3207 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3208 llvm::Twine SynthName = Dest->getName() + ".split";
3209 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
3210 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3211 Builder.SetInsertPoint(SynthBB);
3212
3213 if (ResultRegTypes.size() == 1) {
3214 CBRRegResults[SynthBB].push_back(CBR);
3215 } else {
3216 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3217 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
3218 CBRRegResults[SynthBB].push_back(Tmp);
3219 }
3220 }
3221
3222 EmitBranch(Dest);
3223 EmitBlock(SynthBB);
3224 CBR->setIndirectDest(i++, SynthBB);
3225 }
3226 }
3227 } else if (HasUnwindClobber) {
3228 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
3229 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/true,
3230 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3231 InNoConvergentAttributedStmt, S, ResultRegTypes,
3232 ArgElemTypes, *this, RegResults);
3233 } else {
3234 llvm::CallInst *Result =
3235 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
3236 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/false,
3237 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3238 InNoConvergentAttributedStmt, S, ResultRegTypes,
3239 ArgElemTypes, *this, RegResults);
3240 }
3241
3242 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3243 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3244 ResultBounds);
3245
3246 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
3247 // different insertion point; one for each indirect destination and with
3248 // CBRRegResults rather than RegResults.
3249 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3250 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3251 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3252 Builder.SetInsertPoint(Succ, --(Succ->end()));
3253 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
3254 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3255 ResultTypeRequiresCast, ResultBounds);
3256 }
3257 }
3258}
3259
// InitCapturedStruct: materialize the capture frame for a CapturedStmt.
// Allocates a temporary record ("agg.captured") and fills one field per
// captured entity, walking the record's fields in lockstep with the
// statement's capture-initializer list. Returns an LValue for the record.
// NOTE(review): this rendering elides a few lines (the function signature,
// the declaration of RecordTy, and the head of the initializer for-loop);
// consult the full CGStmt.cpp for them.
3261 const RecordDecl *RD = S.getCapturedRecordDecl();
3263
3264 // Initialize the captured struct.
3265 LValue SlotLV =
3266 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
3267
// Walk record fields and capture initializers in parallel: field k of the
// record is initialized from capture-init expression k.
3268 RecordDecl::field_iterator CurField = RD->field_begin();
// (loop head elided in this rendering: I iterates from S.capture_init_begin())
3270 E = S.capture_init_end();
3271 I != E; ++I, ++CurField) {
3272 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
3273 if (CurField->hasCapturedVLAType()) {
// A captured VLA type stores its size expression's value, not an object.
3274 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
3275 } else {
3276 EmitInitializerForField(*CurField, LV, *I);
3277 }
3278 }
3279
3280 return SlotLV;
3281}
3282
3283/// Generate an outlined function for the body of a CapturedStmt, store any
3284/// captured variables into the captured struct, and call the outlined function.
// NOTE(review): the signature line is elided in this rendering; the function
// takes the CapturedStmt S and a CapturedRegionKind K.
3285llvm::Function *
// Build the capture record in the current (parent) function first.
3287 LValue CapStruct = InitCapturedStruct(S);
3288
3289 // Emit the CapturedDecl
// A fresh CodeGenFunction emits the outlined helper; the RAII object installs
// the new CGCapturedStmtInfo and restores the previous one on scope exit.
3290 CodeGenFunction CGF(CGM, true);
3291 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3292 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
// The RAII only restores the pointer; the heap-allocated info is freed here.
3293 delete CGF.CapturedStmtInfo;
3294
3295 // Emit call to the helper function.
// The helper receives the address of the capture record as its sole argument.
3296 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
3297
3298 return F;
3299}
3300
// GenerateCapturedStmtArgument: build the capture record for S and hand back
// its address, for callers that emit the call to the outlined function
// themselves instead of going through EmitCapturedStmt.
// NOTE(review): the signature line is elided in this rendering.
3302 LValue CapStruct = InitCapturedStruct(S);
3303 return CapStruct.getAddress();
3304}
3305
3306/// Creates the outlined function for a CapturedStmt.
// Emits a void helper function with internal linkage whose parameters are the
// CapturedDecl's parameters (the first being the context pointer to the
// capture record), generates its body from the captured statement, and wires
// up VLA sizes and the captured 'this' before doing so.
// NOTE(review): this rendering elides the signature line and three interior
// lines (the declaration of the Base lvalue over the context record, the
// EmitLoadOfLValue feeding ExprArg, and — presumably — the FinishFunction
// call after EmitBody; confirm against the full source).
3307llvm::Function *
3309 assert(CapturedStmtInfo &&
3310 "CapturedStmtInfo should be set when generating the captured function");
3311 const CapturedDecl *CD = S.getCapturedDecl();
3312 const RecordDecl *RD = S.getCapturedRecordDecl();
3313 SourceLocation Loc = S.getBeginLoc();
3314 assert(CD->hasBody() && "missing CapturedDecl body");
3315
3316 // Build the argument list.
3317 ASTContext &Ctx = CGM.getContext();
3318 FunctionArgList Args;
3319 Args.append(CD->param_begin(), CD->param_end());
3320
3321 // Create the function declaration.
// void(...) signature derived from the CapturedDecl's parameter list.
3322 const CGFunctionInfo &FuncInfo =
3323 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
3324 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
3325
// The helper is internal: it is only called from the enclosing function.
3326 llvm::Function *F =
3327 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3328 CapturedStmtInfo->getHelperName(), &CGM.getModule());
3329 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
3330 if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
3331 F->addFnAttr("sample-profile-suffix-elision-policy", "selected");
3332 if (CD->isNothrow())
3333 F->addFnAttr(llvm::Attribute::NoUnwind);
3334
3335 // Generate the function.
3336 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
3337 CD->getBody()->getBeginLoc());
3338 // Set the context parameter in CapturedStmtInfo.
// Load the context pointer argument so later field accesses can use it.
3339 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
3340 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
3341
3342 // Initialize variable-length arrays.
// (elided line declares Base, an lvalue over the capture record)
3344 CapturedStmtInfo->getContextValue(), Ctx.getCanonicalTagType(RD));
3345 for (auto *FD : RD->fields()) {
3346 if (FD->hasCapturedVLAType()) {
// Record each captured VLA size so VLA-typed locals size correctly here.
3347 auto *ExprArg =
3349 .getScalarVal();
3350 auto VAT = FD->getCapturedVLAType();
3351 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3352 }
3353 }
3354
3355 // If 'this' is captured, load it into CXXThisValue.
3356 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3357 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3358 LValue ThisLValue = EmitLValueForField(Base, FD);
3359 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3360 }
3361
// Attach PGO counters to the helper, then emit the captured body.
3362 PGO->assignRegionCounters(GlobalDecl(CD), F);
3363 CapturedStmtInfo->EmitBody(*this, CD->getBody());
3365
3366 return F;
3367}
3368
3369// Returns the first convergence entry/loop/anchor instruction found in |BB|.
3370// std::nullptr otherwise.
3371static llvm::ConvergenceControlInst *getConvergenceToken(llvm::BasicBlock *BB) {
3372 for (auto &I : *BB) {
3373 if (auto *CI = dyn_cast<llvm::ConvergenceControlInst>(&I))
3374 return CI;
3375 }
3376 return nullptr;
3377}
3378
3379llvm::CallBase *
3380CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input) {
3381 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3382 assert(ParentToken);
3383
3384 llvm::Value *bundleArgs[] = {ParentToken};
3385 llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
3386 auto *Output = llvm::CallBase::addOperandBundle(
3387 Input, llvm::LLVMContext::OB_convergencectrl, OB, Input->getIterator());
3388 Input->replaceAllUsesWith(Output);
3389 Input->eraseFromParent();
3390 return Output;
3391}
3392
3393llvm::ConvergenceControlInst *
3394CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB) {
3395 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3396 assert(ParentToken);
3397 return llvm::ConvergenceControlInst::CreateLoop(*BB, ParentToken);
3398}
3399
3400llvm::ConvergenceControlInst *
3401CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3402 llvm::BasicBlock *BB = &F->getEntryBlock();
3403 llvm::ConvergenceControlInst *Token = getConvergenceToken(BB);
3404 if (Token)
3405 return Token;
3406
3407 // Adding a convergence token requires the function to be marked as
3408 // convergent.
3409 F->setConvergent();
3410 return llvm::ConvergenceControlInst::CreateEntry(*BB);
3411}
#define V(N, I)
Defines enum values for all the target-independent builtin functions.
static bool FindCaseStatementsForValue(const SwitchStmt &S, const llvm::APSInt &ConstantCondValue, SmallVectorImpl< const Stmt * > &ResultStmts, ASTContext &C, const SwitchCase *&ResultCase)
FindCaseStatementsForValue - Find the case statement being jumped to and then invoke CollectStatement...
Definition CGStmt.cpp:2214
static llvm::ConvergenceControlInst * getConvergenceToken(llvm::BasicBlock *BB)
Definition CGStmt.cpp:3371
static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF, const AsmStmt &S)
Definition CGStmt.cpp:2763
static std::optional< SmallVector< uint64_t, 16 > > getLikelihoodWeights(ArrayRef< Stmt::Likelihood > Likelihoods)
Definition CGStmt.cpp:2268
static llvm::MDNode * getAsmSrcLocInfo(const StringLiteral *Str, CodeGenFunction &CGF)
getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline asm call instruction.
Definition CGStmt.cpp:2578
static bool isSwiftAsyncCallee(const CallExpr *CE)
Determine if the given call uses the swiftasync calling convention.
Definition CGStmt.cpp:1538
static CSFC_Result CollectStatementsForCase(const Stmt *S, const SwitchCase *Case, bool &FoundCase, SmallVectorImpl< const Stmt * > &ResultStmts)
Definition CGStmt.cpp:2059
static void EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S, const llvm::ArrayRef< llvm::Value * > RegResults, const llvm::ArrayRef< llvm::Type * > ResultRegTypes, const llvm::ArrayRef< llvm::Type * > ResultTruncRegTypes, const llvm::ArrayRef< LValue > ResultRegDests, const llvm::ArrayRef< QualType > ResultRegQualTys, const llvm::BitVector &ResultTypeRequiresCast, const std::vector< std::optional< std::pair< unsigned, unsigned > > > &ResultBounds)
Definition CGStmt.cpp:2673
static bool hasEmptyLoopBody(const LoopStmt &S)
Definition CGStmt.cpp:1046
CSFC_Result
CollectStatementsForCase - Given the body of a 'switch' statement and a constant value that is being ...
Definition CGStmt.cpp:2058
@ CSFC_Failure
Definition CGStmt.cpp:2058
@ CSFC_Success
Definition CGStmt.cpp:2058
@ CSFC_FallThrough
Definition CGStmt.cpp:2058
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect, bool HasUnwindClobber, bool ReadOnly, bool ReadNone, bool NoMerge, bool NoConvergent, const AsmStmt &S, const std::vector< llvm::Type * > &ResultRegTypes, const std::vector< llvm::Type * > &ArgElemTypes, CodeGenFunction &CGF, std::vector< llvm::Value * > &RegResults)
Definition CGStmt.cpp:2605
#define SM(sm)
Defines the PrettyStackTraceEntry class, which is used to make crashes give more contextual informati...
Defines the SourceManager interface.
This file defines SYCL AST classes used to represent calls to SYCL kernels.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition APValue.h:122
bool toIntegralConstant(APSInt &Result, QualType SrcTy, const ASTContext &Ctx) const
Try to convert this value to an integral constant.
Definition APValue.cpp:995
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
SourceManager & getSourceManager()
Definition ASTContext.h:858
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth,...
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CanQualType VoidTy
CanQualType getCanonicalTagType(const TagDecl *TD) const
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition Stmt.h:3269
std::string getInputConstraint(unsigned i) const
getInputConstraint - Return the specified input constraint.
Definition Stmt.cpp:515
bool isVolatile() const
Definition Stmt.h:3305
std::string getOutputConstraint(unsigned i) const
getOutputConstraint - Return the constraint string for the specified output operand.
Definition Stmt.cpp:499
SourceLocation getAsmLoc() const
Definition Stmt.h:3299
const Expr * getInputExpr(unsigned i) const
Definition Stmt.cpp:523
std::string addVariableConstraints(StringRef Constraint, const Expr &AsmExpr, const TargetInfo &Target, bool EarlyClobber, UnsupportedConstraintCallbackTy UnsupportedCB, std::string *GCCReg=nullptr) const
Look at AsmExpr and if it is a variable declared as using a particular register add that as a constra...
Definition Stmt.cpp:459
unsigned getNumClobbers() const
Definition Stmt.h:3360
const Expr * getOutputExpr(unsigned i) const
Definition Stmt.cpp:507
unsigned getNumOutputs() const
Definition Stmt.h:3328
std::string generateAsmString(const ASTContext &C) const
Assemble final IR asm string.
Definition Stmt.cpp:491
unsigned getNumInputs() const
Definition Stmt.h:3350
std::string getClobber(unsigned i) const
Definition Stmt.cpp:531
Attr - This represents one attribute.
Definition Attr.h:46
Represents an attribute applied to a statement.
Definition Stmt.h:2195
Stmt * getSubStmt()
Definition Stmt.h:2231
ArrayRef< const Attr * > getAttrs() const
Definition Stmt.h:2227
BreakStmt - This represents a break.
Definition Stmt.h:3127
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition StmtCXX.h:135
DeclStmt * getBeginStmt()
Definition StmtCXX.h:163
DeclStmt * getLoopVarStmt()
Definition StmtCXX.h:169
DeclStmt * getEndStmt()
Definition StmtCXX.h:166
DeclStmt * getRangeStmt()
Definition StmtCXX.h:162
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
Expr * getCallee()
Definition Expr.h:3093
Represents the body of a CapturedStmt, and serves as its DeclContext.
Definition Decl.h:4946
ImplicitParamDecl * getContextParam() const
Retrieve the parameter containing captured variables.
Definition Decl.h:5004
bool isNothrow() const
Definition Decl.cpp:5702
param_iterator param_end() const
Retrieve an iterator one past the last parameter decl.
Definition Decl.h:5021
param_iterator param_begin() const
Retrieve an iterator pointing to the first parameter decl.
Definition Decl.h:5019
Stmt * getBody() const override
getBody - If this Decl represents a declaration for a body of code, such as a function or method defi...
Definition Decl.cpp:5699
This captures a statement into a function.
Definition Stmt.h:3929
CapturedDecl * getCapturedDecl()
Retrieve the outlined function declaration.
Definition Stmt.cpp:1493
const RecordDecl * getCapturedRecordDecl() const
Retrieve the record declaration for captured variables.
Definition Stmt.h:4050
capture_init_iterator capture_init_begin()
Retrieve the first initialization argument.
Definition Stmt.h:4106
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.h:4124
capture_init_iterator capture_init_end()
Retrieve the iterator pointing one past the last initialization argument.
Definition Stmt.h:4116
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition Stmt.h:4093
CapturedRegionKind getCapturedRegionKind() const
Retrieve the captured region kind.
Definition Stmt.cpp:1508
CaseStmt - Represent a case statement.
Definition Stmt.h:1912
Stmt * getSubStmt()
Definition Stmt.h:2025
Expr * getLHS()
Definition Stmt.h:1995
Expr * getRHS()
Definition Stmt.h:2007
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
static Address invalid()
Definition Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
An aggregate value slot.
Definition CGValue.h:551
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition CGValue.h:634
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition CGBuilder.h:146
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:118
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition CGDebugInfo.h:59
CGFunctionInfo - Class to encapsulate the information about a function definition.
API for captured statement code generation.
RAII for correct setting/restoring of CapturedStmtInfo.
void rescopeLabels()
Change the cleanup scope of the labels in this lexical scope to match the scope of the enclosing cont...
Definition CGStmt.cpp:742
void ForceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
void ForceCleanup(std::initializer_list< llvm::Value ** > ValuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
bool requiresCleanups() const
Determine whether this scope requires any cleanups.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitOMPParallelMaskedTaskLoopDirective(const OMPParallelMaskedTaskLoopDirective &S)
void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S)
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S)
void EmitCXXTryStmt(const CXXTryStmt &S)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S)
Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
void EmitCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1406
void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S)
void EmitOMPScanDirective(const OMPScanDirective &S)
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
void EmitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &S)
LValue InitCapturedStruct(const CapturedStmt &S)
Definition CGStmt.cpp:3260
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
CGCapturedStmtInfo * CapturedStmtInfo
void EmitOMPDistributeDirective(const OMPDistributeDirective &S)
void EmitOMPParallelForDirective(const OMPParallelForDirective &S)
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
Definition CGCall.cpp:5196
void EmitOMPMasterDirective(const OMPMasterDirective &S)
void EmitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &S)
void EmitOpenACCInitConstruct(const OpenACCInitConstruct &S)
void EmitOMPFlushDirective(const OMPFlushDirective &S)
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S)
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
Definition CGStmt.cpp:694
void EmitCoreturnStmt(const CoreturnStmt &S)
void EmitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &S)
SmallVector< llvm::ConvergenceControlInst *, 4 > ConvergenceTokenStack
Stack to track the controlled convergence tokens.
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
Definition CGObjC.cpp:2129
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S)
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3973
bool EmitSimpleStmt(const Stmt *S, ArrayRef< const Attr * > Attrs)
EmitSimpleStmt - Try to emit a "simple" statement which does not necessarily require an insertion poi...
Definition CGStmt.cpp:507
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S)
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
const LangOptions & getLangOpts() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
Definition CGExpr.cpp:692
void EmitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &S)
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void EmitOpenACCWaitConstruct(const OpenACCWaitConstruct &S)
void EmitBlockAfterUses(llvm::BasicBlock *BB)
EmitBlockAfterUses - Emit the given block somewhere hopefully near its uses, and leave the insertion ...
Definition CGStmt.cpp:677
void SimplifyForwardingBlocks(llvm::BasicBlock *BB)
SimplifyForwardingBlocks - If the given basic block is only a branch to another basic block,...
Definition CGStmt.cpp:618
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
void EmitOMPScopeDirective(const OMPScopeDirective &S)
LValue MakeAddrLValueWithoutTBAA(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
bool hasSkipCounter(const Stmt *S) const
JumpDest ReturnBlock
ReturnBlock - Unified return block.
void EmitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &S)
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field, bool IsInBounds=true)
Definition CGExpr.cpp:5684
const TargetInfo & getTarget() const
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
Definition CGStmt.cpp:569
void EmitGotoStmt(const GotoStmt &S)
Definition CGStmt.cpp:830
void EmitOMPDepobjDirective(const OMPDepobjDirective &S)
void EmitOMPMetaDirective(const OMPMetaDirective &S)
void EmitOMPCriticalDirective(const OMPCriticalDirective &S)
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:251
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S)
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2472
void EmitOMPCancelDirective(const OMPCancelDirective &S)
const Expr * RetExpr
If a return statement is being visited, this holds the return statment's result expression.
void EmitOMPBarrierDirective(const OMPBarrierDirective &S)
void EmitOMPOrderedDirective(const OMPOrderedDirective &S)
void EmitForStmt(const ForStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1261
void EmitOMPSectionsDirective(const OMPSectionsDirective &S)
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S)
Definition CGObjC.cpp:3688
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitOMPInteropDirective(const OMPInteropDirective &S)
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S)
void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S)
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:232
void EmitWhileStmt(const WhileStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1059
void EmitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &S)
void EmitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &S)
Emit combined directive 'target parallel loop' as if its constituent constructs are 'target',...
void EmitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &S)
void ResolveBranchFixups(llvm::BasicBlock *Target)
void EmitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &S)
void EmitOMPMaskedDirective(const OMPMaskedDirective &S)
bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody)
Returns true if a loop must make progress, which means the mustprogress attribute can be added.
Definition CGStmt.cpp:996
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S)
void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S)
void EmitOMPReverseDirective(const OMPReverseDirective &S)
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S)
void EmitOpenACCDataConstruct(const OpenACCDataConstruct &S)
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
Definition CGExpr.cpp:5858
void EmitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &S)
void EmitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &S)
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
Definition CGObjC.cpp:2125
const TargetCodeGenInfo & getTargetHooks() const
void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S)
void EmitSEHLeaveStmt(const SEHLeaveStmt &S)
void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S)
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
Definition CGCall.cpp:5124
void EmitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &S)
void EmitCoroutineBody(const CoroutineBodyStmt &S)
void EmitOMPParallelDirective(const OMPParallelDirective &S)
void EmitOMPTaskDirective(const OMPTaskDirective &S)
void EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S)
void EmitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &S)
void EmitOMPAssumeDirective(const OMPAssumeDirective &S)
void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S)
void EmitStopPoint(const Stmt *S)
EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
Definition CGStmt.cpp:48
void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S)
void EmitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &S)
void EmitIfStmt(const IfStmt &S)
Definition CGStmt.cpp:866
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
void EmitDeferStmt(const DeferStmt &S)
Definition CGStmt.cpp:2031
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2713
void EmitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &S)
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S)
Definition CGObjC.cpp:2133
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition CGStmt.cpp:557
void EmitOpenACCCacheConstruct(const OpenACCCacheConstruct &S)
void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S)
void EmitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &S)
void EmitOMPFuseDirective(const OMPFuseDirective &S)
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init)
Definition CGClass.cpp:678
void EmitAsmStmt(const AsmStmt &S)
Definition CGStmt.cpp:2781
void EmitDefaultStmt(const DefaultStmt &S, ArrayRef< const Attr * > Attrs)
Definition CGStmt.cpp:1932
void EmitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &S)
void EmitSwitchStmt(const SwitchStmt &S)
Definition CGStmt.cpp:2323
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
Definition CGExpr.cpp:302
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:273
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs={})
EmitStmt - Emit the code for the statement.
Definition CGStmt.cpp:58
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S)
uint64_t getCurrentProfileCount()
Get the profiler's current count.
llvm::Type * ConvertTypeForMem(QualType T)
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S)
void EmitSYCLKernelCallStmt(const SYCLKernelCallStmt &S)
void EmitOMPTargetDirective(const OMPTargetDirective &S)
void EmitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &S)
llvm::Function * EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K)
Generate an outlined function for the body of a CapturedStmt, store any captured variables into the c...
Definition CGStmt.cpp:3286
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void EmitOMPTeamsDirective(const OMPTeamsDirective &S)
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D)
Emit simple code for OpenMP directives in Simd-only mode.
HLSLControlFlowHintAttr::Spelling HLSLControlFlowAttr
HLSL Branch attribute.
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitCaseStmt(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
Definition CGStmt.cpp:1817
void EmitOMPErrorDirective(const OMPErrorDirective &S)
void EmitBreakStmt(const BreakStmt &S)
Definition CGStmt.cpp:1703
void EmitOMPParallelMaskedTaskLoopSimdDirective(const OMPParallelMaskedTaskLoopSimdDirective &S)
void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S)
void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S)
void EmitDoStmt(const DoStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1176
void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S)
Address GenerateCapturedStmtArgument(const CapturedStmt &S)
Definition CGStmt.cpp:3301
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition CGStmt.cpp:660
void EmitOMPSimdDirective(const OMPSimdDirective &S)
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition CGExpr.cpp:189
void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S)
void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S)
RawAddress NormalCleanupDest
i32s containing the indexes of the cleanup destinations.
void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
const BreakContinue * GetDestForLoopControlStmt(const LoopControlStmt &S)
Definition CGStmt.cpp:1689
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
llvm::BasicBlock * GetIndirectGotoBlock()
void EmitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &S)
void EmitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &S)
void EmitOMPUnrollDirective(const OMPUnrollDirective &S)
void EmitOMPStripeDirective(const OMPStripeDirective &S)
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
static bool hasAggregateEvaluationKind(QualType T)
void EmitCaseStmtRange(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
EmitCaseStmtRange - If case statement range is not too big then add multiple cases to switch instruct...
Definition CGStmt.cpp:1732
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitOMPSingleDirective(const OMPSingleDirective &S)
void EmitReturnStmt(const ReturnStmt &S)
EmitReturnStmt - Note that due to GCC extensions, this can have an operand if the function returns vo...
Definition CGStmt.cpp:1564
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
llvm::Function * GenerateCapturedStmtFunction(const CapturedStmt &S)
Creates the outlined function for a CapturedStmt.
Definition CGStmt.cpp:3308
const CGFunctionInfo * CurFnInfo
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitDeclStmt(const DeclStmt &S)
Definition CGStmt.cpp:1679
void EmitLabelStmt(const LabelStmt &S)
Definition CGStmt.cpp:763
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void EmitOMPTileDirective(const OMPTileDirective &S)
void EmitDecl(const Decl &D, bool EvaluateConditionDecl=false)
EmitDecl - Emit a declaration.
Definition CGDecl.cpp:52
void EmitOMPAtomicDirective(const OMPAtomicDirective &S)
void EmitOpenACCSetConstruct(const OpenACCSetConstruct &S)
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1707
void EmitAttributedStmt(const AttributedStmt &S)
Definition CGStmt.cpp:773
void EmitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &S)
void EmitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &S)
void EmitOMPSectionDirective(const OMPSectionDirective &S)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
void EmitOMPForSimdDirective(const OMPForSimdDirective &S)
llvm::LLVMContext & getLLVMContext()
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S)
Definition CGObjC.cpp:1804
void EmitIndirectGotoStmt(const IndirectGotoStmt &S)
Definition CGStmt.cpp:842
void MaybeEmitDeferredVarDeclInit(const VarDecl *var)
Definition CGDecl.cpp:2089
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const
isObviouslyBranchWithoutCleanups - Return true if a branch to the specified destination obviously has...
void EmitSEHTryStmt(const SEHTryStmt &S)
void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S)
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
When instrumenting to collect profile data, the counts for some blocks such as switch cases need to n...
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitOMPForDirective(const OMPForDirective &S)
void EmitLabel(const LabelDecl *D)
EmitLabel - Emit the block for the given label.
Definition CGStmt.cpp:705
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:640
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
void EmitContinueStmt(const ContinueStmt &S)
Definition CGStmt.cpp:1716
This class organizes the cross-function state that is used while generating LLVM code.
llvm::Module & getModule() const
DiagnosticsEngine & getDiags() const
const LangOptions & getLangOpts() const
const llvm::DataLayout & getDataLayout() const
ASTContext & getContext() const
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
A saved depth on the scope stack.
bool encloses(stable_iterator I) const
Returns true if this scope encloses I.
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
LValue - This represents an lvalue references.
Definition CGValue.h:183
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition CGValue.h:373
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:99
bool isAggregate() const
Definition CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:84
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:72
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition CGValue.h:79
virtual bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, llvm::Type *Ty) const
Target hook to decide whether an inline asm operand can be passed by value.
Definition TargetInfo.h:204
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1732
Stmt *const * const_body_iterator
Definition Stmt.h:1804
body_iterator body_end()
Definition Stmt.h:1797
SourceLocation getLBracLoc() const
Definition Stmt.h:1849
body_iterator body_begin()
Definition Stmt.h:1796
Stmt * body_back()
Definition Stmt.h:1800
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition Expr.h:1085
ContinueStmt - This represents a continue.
Definition Stmt.h:3111
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition Stmt.h:1623
decl_range decls()
Definition Stmt.h:1671
SourceLocation getBodyRBrace() const
getBodyRBrace - Gets the right brace of the body, if a body exists.
virtual bool hasBody() const
Returns true if this Decl represents a declaration for a body of code, such as a function or method d...
Definition DeclBase.h:1093
SourceLocation getLocation() const
Definition DeclBase.h:439
Stmt * getSubStmt()
Definition Stmt.h:2073
DeferStmt - This represents a deferred statement.
Definition Stmt.h:3228
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
DoStmt - This represents a 'do/while' stmt.
Definition Stmt.h:2824
Stmt * getBody()
Definition Stmt.h:2849
Expr * getCond()
Definition Stmt.h:2842
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3117
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3670
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3160
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition Stmt.h:2880
Stmt * getInit()
Definition Stmt.h:2895
VarDecl * getConditionVariable() const
Retrieve the variable declared in this "for" statement, if any.
Definition Stmt.cpp:1120
Stmt * getBody()
Definition Stmt.h:2924
Expr * getInc()
Definition Stmt.h:2923
Expr * getCond()
Definition Stmt.h:2922
const Expr * getSubExpr() const
Definition Expr.h:1065
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition TypeBase.h:4511
CallingConv getCallConv() const
Definition TypeBase.h:4866
This represents a GCC inline-assembly statement extension.
Definition Stmt.h:3438
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
GotoStmt - This represents a direct goto.
Definition Stmt.h:2961
LabelDecl * getLabel() const
Definition Stmt.h:2974
IfStmt - This represents an if/then/else.
Definition Stmt.h:2251
Stmt * getThen()
Definition Stmt.h:2340
Stmt * getInit()
Definition Stmt.h:2401
Expr * getCond()
Definition Stmt.h:2328
bool isConstexpr() const
Definition Stmt.h:2444
bool isNegatedConsteval() const
Definition Stmt.h:2440
Stmt * getElse()
Definition Stmt.h:2349
bool isConsteval() const
Definition Stmt.h:2431
VarDecl * getConditionVariable()
Retrieve the variable declared in this "if" statement, if any.
Definition Stmt.cpp:1068
IndirectGotoStmt - This represents an indirect goto.
Definition Stmt.h:3000
LabelDecl * getConstantTarget()
getConstantTarget - Returns the fixed target of this indirect goto, if one exists.
Definition Stmt.cpp:1269
Represents the declaration of a label.
Definition Decl.h:524
LabelStmt * getStmt() const
Definition Decl.h:548
LabelStmt - Represents a label, which has a substatement.
Definition Stmt.h:2138
LabelDecl * getDecl() const
Definition Stmt.h:2156
bool isSideEntry() const
Definition Stmt.h:2185
Stmt * getSubStmt()
Definition Stmt.h:2160
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
bool assumeFunctionsAreConvergent() const
Base class for BreakStmt and ContinueStmt.
Definition Stmt.h:3049
Represents a point when we exit a loop.
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
If a crash happens while one of these objects are live, the message is printed out along with the spe...
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
QualType getCanonicalType() const
Definition TypeBase.h:8440
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
Represents a struct/union/class.
Definition Decl.h:4327
field_range fields() const
Definition Decl.h:4530
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4527
field_iterator field_begin() const
Definition Decl.cpp:5276
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition Stmt.h:3152
SourceLocation getBeginLoc() const
Definition Stmt.h:3204
const VarDecl * getNRVOCandidate() const
Retrieve the variable that might be used for the named return value optimization.
Definition Stmt.h:3188
Expr * getRetValue()
Definition Stmt.h:3179
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
This class handles loading and caching of source files into memory.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition Stmt.h:86
@ NoStmtClass
Definition Stmt.h:89
StmtClass getStmtClass() const
Definition Stmt.h:1485
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
Likelihood
The likelihood of a branch being taken.
Definition Stmt.h:1428
@ LH_Unlikely
Branch has the [[unlikely]] attribute.
Definition Stmt.h:1429
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition Stmt.h:1430
@ LH_Likely
Branch has the [[likely]] attribute.
Definition Stmt.h:1432
static const Attr * getLikelihoodAttr(const Stmt *S)
Definition Stmt.cpp:176
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:355
static Likelihood getLikelihood(ArrayRef< const Attr * > Attrs)
Definition Stmt.cpp:168
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1802
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Expr.h:1976
SourceLocation getLocationOfByte(unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, const TargetInfo &Target, unsigned *StartToken=nullptr, unsigned *StartTokenByteOffset=nullptr) const
getLocationOfByte - Return a source location that points to the specified byte of this string literal...
Definition Expr.cpp:1326
StringRef getString() const
Definition Expr.h:1870
const SwitchCase * getNextSwitchCase() const
Definition Stmt.h:1885
SwitchStmt - This represents a 'switch' stmt.
Definition Stmt.h:2501
Expr * getCond()
Definition Stmt.h:2564
Stmt * getBody()
Definition Stmt.h:2576
VarDecl * getConditionVariable()
Retrieve the variable declared in this "switch" statement, if any.
Definition Stmt.cpp:1186
Stmt * getInit()
Definition Stmt.h:2581
SwitchCase * getSwitchCaseList()
Definition Stmt.h:2632
bool validateInputConstraint(MutableArrayRef< ConstraintInfo > OutputConstraints, ConstraintInfo &info) const
std::string simplifyConstraint(StringRef Constraint, SmallVectorImpl< ConstraintInfo > *OutCons=nullptr) const
StringRef getNormalizedGCCRegisterName(StringRef Name, bool ReturnCanonical=false) const
Returns the "normalized" GCC register name.
bool validateOutputConstraint(ConstraintInfo &Info) const
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
bool isVoidType() const
Definition TypeBase.h:8991
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9285
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
bool isNRVOVariable() const
Determine whether this local variable can be used with the named return value optimization (NRVO).
Definition Decl.h:1512
WhileStmt - This represents a 'while' stmt.
Definition Stmt.h:2689
Expr * getCond()
Definition Stmt.h:2741
SourceLocation getWhileLoc() const
Definition Stmt.h:2794
SourceLocation getRParenLoc() const
Definition Stmt.h:2799
VarDecl * getConditionVariable()
Retrieve the variable declared in this "while" statement, if any.
Definition Stmt.cpp:1247
Stmt * getBody()
Definition Stmt.h:2753
Defines the clang::TargetInfo interface.
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
@ CPlusPlus11
CapturedRegionKind
The different kinds of captured statement.
Expr * Cond
};
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
@ CC_SwiftAsync
Definition Specifiers.h:294
U cast(CodeGen::Address addr)
Definition Address.h:327
@ None
The alignment was not explicit in code.
Definition ASTContext.h:179
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void setScopeDepth(EHScopeStack::stable_iterator depth)
EHScopeStack::stable_iterator getScopeDepth() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
bool hasMatchingInput() const
Return true if this output operand has a matching (tied) input operand.
std::optional< std::pair< unsigned, unsigned > > getOutputOperandBounds() const
bool hasTiedOperand() const
Return true if this input operand is a matching constraint that ties it to an output operand.