clang 23.0.0git
CGStmt.cpp
Go to the documentation of this file.
1//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Stmt nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGDebugInfo.h"
14#include "CGOpenMPRuntime.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "CodeGenPGO.h"
18#include "TargetInfo.h"
19#include "clang/AST/Attr.h"
20#include "clang/AST/Expr.h"
21#include "clang/AST/Stmt.h"
22#include "clang/AST/StmtSYCL.h"
29#include "llvm/ADT/ArrayRef.h"
30#include "llvm/ADT/DenseMap.h"
31#include "llvm/ADT/SmallSet.h"
32#include "llvm/ADT/StringExtras.h"
33#include "llvm/IR/Assumptions.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/InlineAsm.h"
36#include "llvm/IR/Intrinsics.h"
37#include "llvm/IR/MDBuilder.h"
38#include "llvm/Support/SaveAndRestore.h"
39#include <optional>
40
41using namespace clang;
42using namespace CodeGen;
43
44//===----------------------------------------------------------------------===//
45// Statement Emission
46//===----------------------------------------------------------------------===//
47
49 if (CGDebugInfo *DI = getDebugInfo()) {
51 Loc = S->getBeginLoc();
52 DI->EmitLocation(Builder, Loc);
53
54 LastStopPoint = Loc;
55 }
56}
57
59 assert(S && "Null statement?");
60 PGO->setCurrentStmt(S);
61
62 // These statements have their own debug info handling.
63 if (EmitSimpleStmt(S, Attrs))
64 return;
65
66 // Check if we are generating unreachable code.
67 if (!HaveInsertPoint()) {
68 // If so, and the statement doesn't contain a label, then we do not need to
69 // generate actual code. This is safe because (1) the current point is
70 // unreachable, so we don't need to execute the code, and (2) we've already
71 // handled the statements which update internal data structures (like the
72 // local variable map) which could be used by subsequent statements.
73 if (!ContainsLabel(S)) {
74 // Verify that any decl statements were handled as simple, they may be in
75 // scope of subsequent reachable statements.
76 assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
77 PGO->markStmtMaybeUsed(S);
78 return;
79 }
80
81 // Otherwise, make a new block to hold the code.
83 }
84
85 // Generate a stoppoint if we are emitting debug info.
87
88 // Ignore all OpenMP directives except for simd if OpenMP with Simd is
89 // enabled.
90 if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
91 if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
93 return;
94 }
95 }
96
97 switch (S->getStmtClass()) {
99 case Stmt::CXXCatchStmtClass:
100 case Stmt::SEHExceptStmtClass:
101 case Stmt::SEHFinallyStmtClass:
102 case Stmt::MSDependentExistsStmtClass:
103 case Stmt::UnresolvedSYCLKernelCallStmtClass:
104 llvm_unreachable("invalid statement class to emit generically");
105 case Stmt::NullStmtClass:
106 case Stmt::CompoundStmtClass:
107 case Stmt::DeclStmtClass:
108 case Stmt::LabelStmtClass:
109 case Stmt::AttributedStmtClass:
110 case Stmt::GotoStmtClass:
111 case Stmt::BreakStmtClass:
112 case Stmt::ContinueStmtClass:
113 case Stmt::DefaultStmtClass:
114 case Stmt::CaseStmtClass:
115 case Stmt::DeferStmtClass:
116 case Stmt::SEHLeaveStmtClass:
117 case Stmt::SYCLKernelCallStmtClass:
118 llvm_unreachable("should have emitted these statements as simple");
119
120#define STMT(Type, Base)
121#define ABSTRACT_STMT(Op)
122#define EXPR(Type, Base) \
123 case Stmt::Type##Class:
124#include "clang/AST/StmtNodes.inc"
125 {
126 // Remember the block we came in on.
127 llvm::BasicBlock *incoming = Builder.GetInsertBlock();
128 assert(incoming && "expression emission must have an insertion point");
129
131
132 llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
133 assert(outgoing && "expression emission cleared block!");
134
135 // The expression emitters assume (reasonably!) that the insertion
136 // point is always set. To maintain that, the call-emission code
137 // for noreturn functions has to enter a new block with no
138 // predecessors. We want to kill that block and mark the current
139 // insertion point unreachable in the common case of a call like
140 // "exit();". Since expression emission doesn't otherwise create
141 // blocks with no predecessors, we can just test for that.
142 // However, we must be careful not to do this to our incoming
143 // block, because *statement* emission does sometimes create
144 // reachable blocks which will have no predecessors until later in
145 // the function. This occurs with, e.g., labels that are not
146 // reachable by fallthrough.
147 if (incoming != outgoing && outgoing->use_empty()) {
148 outgoing->eraseFromParent();
149 Builder.ClearInsertionPoint();
150 }
151 break;
152 }
153
154 case Stmt::IndirectGotoStmtClass:
156
157 case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
158 case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
159 case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
160 case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;
161
162 case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
163
164 case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
165 case Stmt::GCCAsmStmtClass: // Intentional fall-through.
166 case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
167 case Stmt::CoroutineBodyStmtClass:
169 break;
170 case Stmt::CoreturnStmtClass:
172 break;
173 case Stmt::CapturedStmtClass: {
174 const CapturedStmt *CS = cast<CapturedStmt>(S);
176 }
177 break;
178 case Stmt::ObjCAtTryStmtClass:
180 break;
181 case Stmt::ObjCAtCatchStmtClass:
182 llvm_unreachable(
183 "@catch statements should be handled by EmitObjCAtTryStmt");
184 case Stmt::ObjCAtFinallyStmtClass:
185 llvm_unreachable(
186 "@finally statements should be handled by EmitObjCAtTryStmt");
187 case Stmt::ObjCAtThrowStmtClass:
189 break;
190 case Stmt::ObjCAtSynchronizedStmtClass:
192 break;
193 case Stmt::ObjCForCollectionStmtClass:
195 break;
196 case Stmt::ObjCAutoreleasePoolStmtClass:
198 break;
199
200 case Stmt::CXXTryStmtClass:
202 break;
203 case Stmt::CXXForRangeStmtClass:
205 break;
206 case Stmt::SEHTryStmtClass:
208 break;
209 case Stmt::OMPMetaDirectiveClass:
211 break;
212 case Stmt::OMPCanonicalLoopClass:
214 break;
215 case Stmt::OMPParallelDirectiveClass:
217 break;
218 case Stmt::OMPSimdDirectiveClass:
220 break;
221 case Stmt::OMPTileDirectiveClass:
223 break;
224 case Stmt::OMPStripeDirectiveClass:
226 break;
227 case Stmt::OMPUnrollDirectiveClass:
229 break;
230 case Stmt::OMPReverseDirectiveClass:
232 break;
233 case Stmt::OMPSplitDirectiveClass:
235 break;
236 case Stmt::OMPInterchangeDirectiveClass:
238 break;
239 case Stmt::OMPFuseDirectiveClass:
241 break;
242 case Stmt::OMPForDirectiveClass:
244 break;
245 case Stmt::OMPForSimdDirectiveClass:
247 break;
248 case Stmt::OMPSectionsDirectiveClass:
250 break;
251 case Stmt::OMPSectionDirectiveClass:
253 break;
254 case Stmt::OMPSingleDirectiveClass:
256 break;
257 case Stmt::OMPMasterDirectiveClass:
259 break;
260 case Stmt::OMPCriticalDirectiveClass:
262 break;
263 case Stmt::OMPParallelForDirectiveClass:
265 break;
266 case Stmt::OMPParallelForSimdDirectiveClass:
268 break;
269 case Stmt::OMPParallelMasterDirectiveClass:
271 break;
272 case Stmt::OMPParallelSectionsDirectiveClass:
274 break;
275 case Stmt::OMPTaskDirectiveClass:
277 break;
278 case Stmt::OMPTaskyieldDirectiveClass:
280 break;
281 case Stmt::OMPErrorDirectiveClass:
283 break;
284 case Stmt::OMPBarrierDirectiveClass:
286 break;
287 case Stmt::OMPTaskwaitDirectiveClass:
289 break;
290 case Stmt::OMPTaskgroupDirectiveClass:
292 break;
293 case Stmt::OMPFlushDirectiveClass:
295 break;
296 case Stmt::OMPDepobjDirectiveClass:
298 break;
299 case Stmt::OMPScanDirectiveClass:
301 break;
302 case Stmt::OMPOrderedDirectiveClass:
304 break;
305 case Stmt::OMPAtomicDirectiveClass:
307 break;
308 case Stmt::OMPTargetDirectiveClass:
310 break;
311 case Stmt::OMPTeamsDirectiveClass:
313 break;
314 case Stmt::OMPCancellationPointDirectiveClass:
316 break;
317 case Stmt::OMPCancelDirectiveClass:
319 break;
320 case Stmt::OMPTargetDataDirectiveClass:
322 break;
323 case Stmt::OMPTargetEnterDataDirectiveClass:
325 break;
326 case Stmt::OMPTargetExitDataDirectiveClass:
328 break;
329 case Stmt::OMPTargetParallelDirectiveClass:
331 break;
332 case Stmt::OMPTargetParallelForDirectiveClass:
334 break;
335 case Stmt::OMPTaskLoopDirectiveClass:
337 break;
338 case Stmt::OMPTaskLoopSimdDirectiveClass:
340 break;
341 case Stmt::OMPMasterTaskLoopDirectiveClass:
343 break;
344 case Stmt::OMPMaskedTaskLoopDirectiveClass:
346 break;
347 case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
350 break;
351 case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
354 break;
355 case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
358 break;
359 case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
362 break;
363 case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
366 break;
367 case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
370 break;
371 case Stmt::OMPDistributeDirectiveClass:
373 break;
374 case Stmt::OMPTargetUpdateDirectiveClass:
376 break;
377 case Stmt::OMPDistributeParallelForDirectiveClass:
380 break;
381 case Stmt::OMPDistributeParallelForSimdDirectiveClass:
384 break;
385 case Stmt::OMPDistributeSimdDirectiveClass:
387 break;
388 case Stmt::OMPTargetParallelForSimdDirectiveClass:
391 break;
392 case Stmt::OMPTargetSimdDirectiveClass:
394 break;
395 case Stmt::OMPTeamsDistributeDirectiveClass:
397 break;
398 case Stmt::OMPTeamsDistributeSimdDirectiveClass:
401 break;
402 case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
405 break;
406 case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
409 break;
410 case Stmt::OMPTargetTeamsDirectiveClass:
412 break;
413 case Stmt::OMPTargetTeamsDistributeDirectiveClass:
416 break;
417 case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
420 break;
421 case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
424 break;
425 case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
428 break;
429 case Stmt::OMPInteropDirectiveClass:
431 break;
432 case Stmt::OMPDispatchDirectiveClass:
433 CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
434 break;
435 case Stmt::OMPScopeDirectiveClass:
437 break;
438 case Stmt::OMPMaskedDirectiveClass:
440 break;
441 case Stmt::OMPGenericLoopDirectiveClass:
443 break;
444 case Stmt::OMPTeamsGenericLoopDirectiveClass:
446 break;
447 case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
450 break;
451 case Stmt::OMPParallelGenericLoopDirectiveClass:
454 break;
455 case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
458 break;
459 case Stmt::OMPParallelMaskedDirectiveClass:
461 break;
462 case Stmt::OMPAssumeDirectiveClass:
464 break;
465 case Stmt::OpenACCComputeConstructClass:
467 break;
468 case Stmt::OpenACCLoopConstructClass:
470 break;
471 case Stmt::OpenACCCombinedConstructClass:
473 break;
474 case Stmt::OpenACCDataConstructClass:
476 break;
477 case Stmt::OpenACCEnterDataConstructClass:
479 break;
480 case Stmt::OpenACCExitDataConstructClass:
482 break;
483 case Stmt::OpenACCHostDataConstructClass:
485 break;
486 case Stmt::OpenACCWaitConstructClass:
488 break;
489 case Stmt::OpenACCInitConstructClass:
491 break;
492 case Stmt::OpenACCShutdownConstructClass:
494 break;
495 case Stmt::OpenACCSetConstructClass:
497 break;
498 case Stmt::OpenACCUpdateConstructClass:
500 break;
501 case Stmt::OpenACCAtomicConstructClass:
503 break;
504 case Stmt::OpenACCCacheConstructClass:
506 break;
507 }
508}
509
512 switch (S->getStmtClass()) {
513 default:
514 return false;
515 case Stmt::NullStmtClass:
516 break;
517 case Stmt::CompoundStmtClass:
519 break;
520 case Stmt::DeclStmtClass:
522 break;
523 case Stmt::LabelStmtClass:
525 break;
526 case Stmt::AttributedStmtClass:
528 break;
529 case Stmt::GotoStmtClass:
531 break;
532 case Stmt::BreakStmtClass:
534 break;
535 case Stmt::ContinueStmtClass:
537 break;
538 case Stmt::DefaultStmtClass:
540 break;
541 case Stmt::CaseStmtClass:
542 EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
543 break;
544 case Stmt::DeferStmtClass:
546 break;
547 case Stmt::SEHLeaveStmtClass:
549 break;
550 case Stmt::SYCLKernelCallStmtClass:
552 break;
553 }
554 return true;
555}
556
557/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
558/// this captures the expression result of the last sub-statement and returns it
559/// (for use by the statement expression extension).
561 AggValueSlot AggSlot) {
562 PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
563 "LLVM IR generation of compound statement ('{}')");
564
565 // Keep track of the current cleanup stack depth, including debug scopes.
567
568 return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
569}
570
573 bool GetLast,
574 AggValueSlot AggSlot) {
575
577 E = S.body_end() - GetLast;
578 I != E; ++I)
579 EmitStmt(*I);
580
581 Address RetAlloca = Address::invalid();
582 if (GetLast) {
583 // We have to special case labels here. They are statements, but when put
584 // at the end of a statement expression, they yield the value of their
585 // subexpression. Handle this by walking through all labels we encounter,
586 // emitting them before we evaluate the subexpr.
587 // Similar issues arise for attributed statements.
588 const Stmt *LastStmt = S.body_back();
589 while (!isa<Expr>(LastStmt)) {
590 if (const auto *LS = dyn_cast<LabelStmt>(LastStmt)) {
591 EmitLabel(LS->getDecl());
592 LastStmt = LS->getSubStmt();
593 } else if (const auto *AS = dyn_cast<AttributedStmt>(LastStmt)) {
594 // FIXME: Update this if we ever have attributes that affect the
595 // semantics of an expression.
596 LastStmt = AS->getSubStmt();
597 } else {
598 llvm_unreachable("unknown value statement");
599 }
600 }
601
603
604 const Expr *E = cast<Expr>(LastStmt);
605 QualType ExprTy = E->getType();
606 if (hasAggregateEvaluationKind(ExprTy)) {
607 EmitAggExpr(E, AggSlot);
608 } else {
609 // We can't return an RValue here because there might be cleanups at
610 // the end of the StmtExpr. Because of that, we have to emit the result
611 // here into a temporary alloca.
612 RetAlloca = CreateMemTemp(ExprTy);
613 EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
614 /*IsInit*/ false);
615 }
616 }
617
618 return RetAlloca;
619}
620
622 llvm::UncondBrInst *BI = dyn_cast<llvm::UncondBrInst>(BB->getTerminator());
623
624 // If there is a cleanup stack, then we it isn't worth trying to
625 // simplify this block (we would need to remove it from the scope map
626 // and cleanup entry).
627 if (!EHStack.empty())
628 return;
629
630 // Can only simplify direct branches.
631 if (!BI)
632 return;
633
634 // Can only simplify empty blocks.
635 if (BI->getIterator() != BB->begin())
636 return;
637
638 BB->replaceAllUsesWith(BI->getSuccessor());
639 BI->eraseFromParent();
640 BB->eraseFromParent();
641}
642
643void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
644 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
645
646 // Fall out of the current block (if necessary).
647 EmitBranch(BB);
648
649 if (IsFinished && BB->use_empty()) {
650 delete BB;
651 return;
652 }
653
654 // Place the block after the current block, if possible, or else at
655 // the end of the function.
656 if (CurBB && CurBB->getParent())
657 CurFn->insert(std::next(CurBB->getIterator()), BB);
658 else
659 CurFn->insert(CurFn->end(), BB);
660 Builder.SetInsertPoint(BB);
661}
662
663void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
664 // Emit a branch from the current block to the target one if this
665 // was a real block. If this was just a fall-through block after a
666 // terminator, don't emit it.
667 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
668
669 if (!CurBB || CurBB->hasTerminator()) {
670 // If there is no insert point or the previous block is already
671 // terminated, don't touch it.
672 } else {
673 // Otherwise, create a fall-through branch.
674 Builder.CreateBr(Target);
675 }
676
677 Builder.ClearInsertionPoint();
678}
679
680void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
681 bool inserted = false;
682 for (llvm::User *u : block->users()) {
683 if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
684 CurFn->insert(std::next(insn->getParent()->getIterator()), block);
685 inserted = true;
686 break;
687 }
688 }
689
690 if (!inserted)
691 CurFn->insert(CurFn->end(), block);
692
693 Builder.SetInsertPoint(block);
694}
695
698 JumpDest &Dest = LabelMap[D];
699 if (Dest.isValid()) return Dest;
700
701 // Create, but don't insert, the new block.
702 Dest = JumpDest(createBasicBlock(D->getName()),
705 return Dest;
706}
707
709 // Add this label to the current lexical scope if we're within any
710 // normal cleanups. Jumps "in" to this label --- when permitted by
711 // the language --- may need to be routed around such cleanups.
712 if (EHStack.hasNormalCleanups() && CurLexicalScope)
713 CurLexicalScope->addLabel(D);
714
715 JumpDest &Dest = LabelMap[D];
716
717 // If we didn't need a forward reference to this label, just go
718 // ahead and create a destination at the current scope.
719 if (!Dest.isValid()) {
721
722 // Otherwise, we need to give this label a target depth and remove
723 // it from the branch-fixups list.
724 } else {
725 assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
726 Dest.setScopeDepth(EHStack.stable_begin());
728 }
729
730 EmitBlock(Dest.getBlock());
731
732 // Emit debug info for labels.
733 if (CGDebugInfo *DI = getDebugInfo()) {
734 if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
735 DI->setLocation(D->getLocation());
736 DI->EmitLabel(D, Builder);
737 }
738 }
739
741}
742
743/// Change the cleanup scope of the labels in this lexical scope to
744/// match the scope of the enclosing context.
746 assert(!Labels.empty());
747 EHScopeStack::stable_iterator innermostScope
748 = CGF.EHStack.getInnermostNormalCleanup();
749
750 // Change the scope depth of all the labels.
751 for (const LabelDecl *Label : Labels) {
752 assert(CGF.LabelMap.count(Label));
753 JumpDest &dest = CGF.LabelMap.find(Label)->second;
754 assert(dest.getScopeDepth().isValid());
755 assert(innermostScope.encloses(dest.getScopeDepth()));
756 dest.setScopeDepth(innermostScope);
757 }
758
759 // Reparent the labels if the new scope also has cleanups.
760 if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
761 ParentScope->Labels.append(Labels.begin(), Labels.end());
762 }
763}
764
765
767 EmitLabel(S.getDecl());
768
769 // IsEHa - emit eha.scope.begin if it's a side entry of a scope
770 if (getLangOpts().EHAsynch && S.isSideEntry())
772
773 EmitStmt(S.getSubStmt());
774}
775
777 bool nomerge = false;
778 bool noinline = false;
779 bool alwaysinline = false;
780 bool noconvergent = false;
781 HLSLControlFlowHintAttr::Spelling flattenOrBranch =
782 HLSLControlFlowHintAttr::SpellingNotCalculated;
783 const CallExpr *musttail = nullptr;
784 const AtomicAttr *AA = nullptr;
785
786 for (const auto *A : S.getAttrs()) {
787 switch (A->getKind()) {
788 default:
789 break;
790 case attr::NoMerge:
791 nomerge = true;
792 break;
793 case attr::NoInline:
794 noinline = true;
795 break;
796 case attr::AlwaysInline:
797 alwaysinline = true;
798 break;
799 case attr::NoConvergent:
800 noconvergent = true;
801 break;
802 case attr::MustTail: {
803 const Stmt *Sub = S.getSubStmt();
804 const ReturnStmt *R = cast<ReturnStmt>(Sub);
805 musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
806 } break;
807 case attr::CXXAssume: {
808 const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
809 if (getLangOpts().CXXAssumptions && Builder.GetInsertBlock() &&
810 !Assumption->HasSideEffects(getContext())) {
811 llvm::Value *AssumptionVal = EmitCheckedArgForAssume(Assumption);
812 Builder.CreateAssumption(AssumptionVal);
813 }
814 } break;
815 case attr::Atomic:
816 AA = cast<AtomicAttr>(A);
817 break;
818 case attr::HLSLControlFlowHint: {
819 flattenOrBranch = cast<HLSLControlFlowHintAttr>(A)->getSemanticSpelling();
820 } break;
821 }
822 }
823 SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
824 SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
825 SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
826 SaveAndRestore save_noconvergent(InNoConvergentAttributedStmt, noconvergent);
827 SaveAndRestore save_musttail(MustTailCall, musttail);
828 SaveAndRestore save_flattenOrBranch(HLSLControlFlowAttr, flattenOrBranch);
829 CGAtomicOptionsRAII AORAII(CGM, AA);
830 EmitStmt(S.getSubStmt(), S.getAttrs());
831}
832
834 // If this code is reachable then emit a stop point (if generating
835 // debug info). We have to do this ourselves because we are on the
836 // "simple" statement path.
837 if (HaveInsertPoint())
838 EmitStopPoint(&S);
839
842}
843
844
847 if (const LabelDecl *Target = S.getConstantTarget()) {
849 return;
850 }
851
852 // Ensure that we have an i8* for our PHI node.
853 llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
854 Int8PtrTy, "addr");
855 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
856
857 // Get the basic block for the indirect goto.
858 llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
859
860 // The first instruction in the block has to be the PHI for the switch dest,
861 // add an entry for this branch.
862 cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
863
864 EmitBranch(IndGotoBB);
865 if (CurBB && CurBB->hasTerminator())
866 addInstToCurrentSourceAtom(CurBB->getTerminator(), nullptr);
867}
868
870 const Stmt *Else = S.getElse();
871
872 // The else branch of a consteval if statement is always the only branch that
873 // can be runtime evaluated.
874 if (S.isConsteval()) {
875 const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : Else;
876 if (Executed) {
877 RunCleanupsScope ExecutedScope(*this);
878 EmitStmt(Executed);
879 }
880 return;
881 }
882
883 // C99 6.8.4.1: The first substatement is executed if the expression compares
884 // unequal to 0. The condition must be a scalar type.
885 LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
886 ApplyDebugLocation DL(*this, S.getCond());
887
888 if (S.getInit())
889 EmitStmt(S.getInit());
890
891 if (S.getConditionVariable())
893
894 // If the condition constant folds and can be elided, try to avoid emitting
895 // the condition and the dead arm of the if/else.
896 bool CondConstant;
897 if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
898 S.isConstexpr())) {
899 // Figure out which block (then or else) is executed.
900 const Stmt *Executed = S.getThen();
901 const Stmt *Skipped = Else;
902 if (!CondConstant) // Condition false?
903 std::swap(Executed, Skipped);
904
905 // If the skipped block has no labels in it, just emit the executed block.
906 // This avoids emitting dead code and simplifies the CFG substantially.
907 if (S.isConstexpr() || !ContainsLabel(Skipped)) {
909 /*UseBoth=*/true);
910 if (Executed) {
912 RunCleanupsScope ExecutedScope(*this);
913 EmitStmt(Executed);
914 }
915 PGO->markStmtMaybeUsed(Skipped);
916 return;
917 }
918 }
919
920 auto HasSkip = hasSkipCounter(&S);
921
922 // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
923 // the conditional branch.
924 llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
925 llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
926 llvm::BasicBlock *ElseBlock =
927 (Else || HasSkip ? createBasicBlock("if.else") : ContBlock);
928 // Prefer the PGO based weights over the likelihood attribute.
929 // When the build isn't optimized the metadata isn't used, so don't generate
930 // it.
931 // Also, differentiate between disabled PGO and a never executed branch with
932 // PGO. Assuming PGO is in use:
933 // - we want to ignore the [[likely]] attribute if the branch is never
934 // executed,
935 // - assuming the profile is poor, preserving the attribute may still be
936 // beneficial.
937 // As an approximation, preserve the attribute only if both the branch and the
938 // parent context were not executed.
940 uint64_t ThenCount = getProfileCount(S.getThen());
941 if (!ThenCount && !getCurrentProfileCount() &&
942 CGM.getCodeGenOpts().OptimizationLevel)
943 LH = Stmt::getLikelihood(S.getThen(), Else);
944
945 // When measuring MC/DC, always fully evaluate the condition up front using
946 // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
947 // executing the body of the if.then or if.else. This is useful for when
948 // there is a 'return' within the body, but this is particularly beneficial
949 // when one if-stmt is nested within another if-stmt so that all of the MC/DC
950 // updates are kept linear and consistent.
951 if (!CGM.getCodeGenOpts().MCDCCoverage) {
952 EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH,
953 /*ConditionalOp=*/nullptr,
954 /*ConditionalDecl=*/S.getConditionVariable());
955 } else {
956 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
958 Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
959 }
960
961 // Emit the 'then' code.
962 EmitBlock(ThenBlock);
964 {
965 RunCleanupsScope ThenScope(*this);
966 EmitStmt(S.getThen());
967 }
968 EmitBranch(ContBlock);
969
970 // Emit the 'else' code if present.
971 if (Else) {
972 {
973 // There is no need to emit line number for an unconditional branch.
974 auto NL = ApplyDebugLocation::CreateEmpty(*this);
975 EmitBlock(ElseBlock);
976 }
977 // Add a counter to else block unless it has CounterExpr.
978 if (HasSkip)
980 {
981 RunCleanupsScope ElseScope(*this);
982 EmitStmt(Else);
983 }
984 {
985 // There is no need to emit line number for an unconditional branch.
986 auto NL = ApplyDebugLocation::CreateEmpty(*this);
987 EmitBranch(ContBlock);
988 }
989 } else if (HasSkip) {
990 EmitBlock(ElseBlock);
992 EmitBranch(ContBlock);
993 }
994
995 // Emit the continuation block for code after the if.
996 EmitBlock(ContBlock, true);
997}
998
999bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
1000 bool HasEmptyBody) {
1001 if (CGM.getCodeGenOpts().getFiniteLoops() ==
1003 return false;
1004
1005 // Now apply rules for plain C (see 6.8.5.6 in C11).
1006 // Loops with constant conditions do not have to make progress in any C
1007 // version.
1008 // As an extension, we consisider loops whose constant expression
1009 // can be constant-folded.
1011 bool CondIsConstInt =
1012 !ControllingExpression ||
1013 (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
1014 Result.Val.isInt());
1015
1016 bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
1017 Result.Val.getInt().getBoolValue());
1018
1019 // Loops with non-constant conditions must make progress in C11 and later.
1020 if (getLangOpts().C11 && !CondIsConstInt)
1021 return true;
1022
1023 // [C++26][intro.progress] (DR)
1024 // The implementation may assume that any thread will eventually do one of the
1025 // following:
1026 // [...]
1027 // - continue execution of a trivial infinite loop ([stmt.iter.general]).
1028 if (CGM.getCodeGenOpts().getFiniteLoops() ==
1031 if (HasEmptyBody && CondIsTrue) {
1032 CurFn->removeFnAttr(llvm::Attribute::MustProgress);
1033 return false;
1034 }
1035 return true;
1036 }
1037 return false;
1038}
1039
1040// [C++26][stmt.iter.general] (DR)
1041// A trivially empty iteration statement is an iteration statement matching one
1042// of the following forms:
1043// - while ( expression ) ;
1044// - while ( expression ) { }
1045// - do ; while ( expression ) ;
1046// - do { } while ( expression ) ;
1047// - for ( init-statement expression(opt); ) ;
1048// - for ( init-statement expression(opt); ) { }
1049template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
1050 if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
1051 if (S.getInc())
1052 return false;
1053 }
1054 const Stmt *Body = S.getBody();
1055 if (!Body || isa<NullStmt>(Body))
1056 return true;
1057 if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
1058 return Compound->body_empty();
1059 return false;
1060}
1061
1063 ArrayRef<const Attr *> WhileAttrs) {
1064 // Emit the header for the loop, which will also become
1065 // the continue target.
1066 JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
1067 EmitBlock(LoopHeader.getBlock());
1068
1069 if (CGM.shouldEmitConvergenceTokens())
1070 ConvergenceTokenStack.push_back(
1071 emitConvergenceLoopToken(LoopHeader.getBlock()));
1072
1073 // Create an exit block for when the condition fails, which will
1074 // also become the break target.
1076
1077 // Store the blocks to use for break and continue.
1078 BreakContinueStack.push_back(BreakContinue(S, LoopExit, LoopHeader));
1079
1080 // C++ [stmt.while]p2:
1081 // When the condition of a while statement is a declaration, the
1082 // scope of the variable that is declared extends from its point
1083 // of declaration (3.3.2) to the end of the while statement.
1084 // [...]
1085 // The object created in a condition is destroyed and created
1086 // with each iteration of the loop.
1087 RunCleanupsScope ConditionScope(*this);
1088
1089 if (S.getConditionVariable())
1091
1092 // Evaluate the conditional in the while header. C99 6.8.5.1: The
1093 // evaluation of the controlling expression takes place before each
1094 // execution of the loop body.
1095 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1096
1098
1099 // while(1) is common, avoid extra exit blocks. Be sure
1100 // to correctly handle break/continue though.
1101 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
1102 bool EmitBoolCondBranch = !C || !C->isOne();
1103 const SourceRange &R = S.getSourceRange();
1104 LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
1105 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
1106 SourceLocToDebugLoc(R.getEnd()),
1108
1109 // As long as the condition is true, go to the loop body.
1110 llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
1111 if (EmitBoolCondBranch) {
1112 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1113 if (hasSkipCounter(&S) || ConditionScope.requiresCleanups())
1114 ExitBlock = createBasicBlock("while.exit");
1115 llvm::MDNode *Weights =
1116 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1117 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1118 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1119 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1120 auto *I = Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);
1121 // Key Instructions: Emit the condition and branch as separate source
1122 // location atoms otherwise we may omit a step onto the loop condition in
1123 // favour of the `while` keyword.
1124 // FIXME: We could have the branch as the backup location for the condition,
1125 // which would probably be a better experience. Explore this later.
1126 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
1127 addInstToNewSourceAtom(CondI, nullptr);
1128 addInstToNewSourceAtom(I, nullptr);
1129
1130 if (ExitBlock != LoopExit.getBlock()) {
1131 EmitBlock(ExitBlock);
1134 }
1135 } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
1136 CGM.getDiags().Report(A->getLocation(),
1137 diag::warn_attribute_has_no_effect_on_infinite_loop)
1138 << A << A->getRange();
1139 CGM.getDiags().Report(
1140 S.getWhileLoc(),
1141 diag::note_attribute_has_no_effect_on_infinite_loop_here)
1143 }
1144
1145 // Emit the loop body. We have to emit this in a cleanup scope
1146 // because it might be a singleton DeclStmt.
1147 {
1148 RunCleanupsScope BodyScope(*this);
1149 EmitBlock(LoopBody);
1151 EmitStmt(S.getBody());
1152 }
1153
1154 BreakContinueStack.pop_back();
1155
1156 // Immediately force cleanup.
1157 ConditionScope.ForceCleanup();
1158
1159 EmitStopPoint(&S);
1160 // Branch to the loop header again.
1161 EmitBranch(LoopHeader.getBlock());
1162
1163 LoopStack.pop();
1164
1165 // Emit the exit block.
1166 EmitBlock(LoopExit.getBlock(), true);
1167
1168 // The LoopHeader typically is just a branch if we skipped emitting
1169 // a branch, try to erase it.
1170 if (!EmitBoolCondBranch) {
1171 SimplifyForwardingBlocks(LoopHeader.getBlock());
1172 PGO->markStmtAsUsed(true, &S);
1173 }
1174
1175 if (CGM.shouldEmitConvergenceTokens())
1176 ConvergenceTokenStack.pop_back();
1177}
1178
// Emits a C/C++ do/while statement: the body runs first, then the condition
// is evaluated in a dedicated "do.cond" block with a conditional back-edge to
// the body. NOTE(review): the signature's first line is elided from this
// excerpt; the visible parameter carries the loop's attributes (DoAttrs).
 1180 ArrayRef<const Attr *> DoAttrs) {
 1182 JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
 1183
// Snapshot the entry count so the backedge weight can be derived below.
 1184 uint64_t ParentCount = getCurrentProfileCount();
 1185
 1186 // Store the blocks to use for break and continue.
 1187 BreakContinueStack.push_back(BreakContinue(S, LoopExit, LoopCond));
 1188
 1189 // Emit the body of the loop.
 1190 llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
 1191
 1192 EmitBlockWithFallThrough(LoopBody, &S);
 1193
 1194 if (CGM.shouldEmitConvergenceTokens())
 1195 ConvergenceTokenStack.push_back(emitConvergenceLoopToken(LoopBody));
 1196
// Separate cleanup scope: the body may be a singleton DeclStmt.
 1197 {
 1198 RunCleanupsScope BodyScope(*this);
 1199 EmitStmt(S.getBody());
 1200 }
 1201
 1202 EmitBlock(LoopCond.getBlock());
 1203
 1204 // C99 6.8.5.2: "The evaluation of the controlling expression takes place
 1205 // after each execution of the loop body."
 1206
 1207 // Evaluate the conditional in the while header.
 1208 // C99 6.8.5p2/p4: The first substatement is executed if the expression
 1209 // compares unequal to 0. The condition must be a scalar type.
 1210 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
 1211
 1212 BreakContinueStack.pop_back();
 1213
 1214 // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
 1215 // to correctly handle break/continue though.
 1216 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
 1217 bool EmitBoolCondBranch = !C || !C->isZero();
 1218
 1219 const SourceRange &R = S.getSourceRange();
// NOTE(review): the trailing argument of this LoopStack.push call (line 1223)
// is elided from this excerpt.
 1220 LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
 1221 SourceLocToDebugLoc(R.getBegin()),
 1222 SourceLocToDebugLoc(R.getEnd()),
 1224
// When a skip counter is needed for coverage, route the false edge through a
// dedicated block so the skip path can be counted; otherwise exit directly.
 1225 auto *LoopFalse = (hasSkipCounter(&S) ? createBasicBlock("do.loopfalse")
 1226 : LoopExit.getBlock());
 1227
 1228 // As long as the condition is true, iterate the loop.
 1229 if (EmitBoolCondBranch) {
 1230 uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
 1231 auto *I = Builder.CreateCondBr(
 1232 BoolCondVal, LoopBody, LoopFalse,
 1233 createProfileWeightsForLoop(S.getCond(), BackedgeCount));
 1234
 1235 // Key Instructions: Emit the condition and branch as separate source
 1236 // location atoms otherwise we may omit a step onto the loop condition in
 1237 // favour of the closing brace.
 1238 // FIXME: We could have the branch as the backup location for the condition,
 1239 // which would probably be a better experience (no jumping to the brace).
 1240 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
 1241 addInstToNewSourceAtom(CondI, nullptr);
 1242 addInstToNewSourceAtom(I, nullptr);
 1243 }
 1244
 1245 LoopStack.pop();
 1246
 1247 if (LoopFalse != LoopExit.getBlock()) {
 1248 EmitBlock(LoopFalse);
 1249 incrementProfileCounter(UseSkipPath, &S, /*UseBoth=*/true);
 1250 }
 1251
 1252 // Emit the exit block.
 1253 EmitBlock(LoopExit.getBlock());
 1254
 1255 // The DoCond block typically is just a branch if we skipped
 1256 // emitting a branch, try to erase it.
// NOTE(review): the then-statement of this 'if' (line 1258) is elided from
// this excerpt; upstream it simplifies the forwarding cond block.
 1257 if (!EmitBoolCondBranch)
 1259
 1260 if (CGM.shouldEmitConvergenceTokens())
 1261 ConvergenceTokenStack.pop_back();
 1262}
1263
// Emits a C/C++ 'for' statement: init, a "for.cond" header that re-tests the
// condition each iteration, the body in its own cleanup scope, an optional
// "for.inc" block, and a "for.end"-style exit. NOTE(review): the signature's
// first line is elided from this excerpt; ForAttrs carries loop attributes.
 1265 ArrayRef<const Attr *> ForAttrs) {
 1267
// Scope covering the whole statement, including the init-statement's
// declarations (e.g. 'for (int i = 0; ...)').
 1268 std::optional<LexicalScope> ForScope;
 1270 ForScope.emplace(*this, S.getSourceRange());
 1271
 1272 // Evaluate the first part before the loop.
 1273 if (S.getInit())
 1274 EmitStmt(S.getInit());
 1275
 1276 // Start the loop with a block that tests the condition.
 1277 // If there's an increment, the continue scope will be overwritten
 1278 // later.
 1279 JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
 1280 llvm::BasicBlock *CondBlock = CondDest.getBlock();
 1281 EmitBlock(CondBlock);
 1282
 1283 if (CGM.shouldEmitConvergenceTokens())
 1284 ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));
 1285
// NOTE(review): the trailing argument of this LoopStack.push call (line 1290)
// is elided from this excerpt.
 1286 const SourceRange &R = S.getSourceRange();
 1287 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
 1288 SourceLocToDebugLoc(R.getBegin()),
 1289 SourceLocToDebugLoc(R.getEnd()),
 1291
 1292 // Create a cleanup scope for the condition variable cleanups.
 1293 LexicalScope ConditionScope(*this, S.getSourceRange());
 1294
 1295 // If the for loop doesn't have an increment we can just use the condition as
 1296 // the continue block. Otherwise, if there is no condition variable, we can
 1297 // form the continue block now. If there is a condition variable, we can't
 1298 // form the continue block until after we've emitted the condition, because
 1299 // the condition is in scope in the increment, but Sema's jump diagnostics
 1300 // ensure that there are no continues from the condition variable that jump
 1301 // to the loop increment.
 1302 JumpDest Continue;
 1303 if (!S.getInc())
 1304 Continue = CondDest;
 1305 else if (!S.getConditionVariable())
 1306 Continue = getJumpDestInCurrentScope("for.inc");
 1307 BreakContinueStack.push_back(BreakContinue(S, LoopExit, Continue));
 1308
 1309 if (S.getCond()) {
 1310 // If the for statement has a condition scope, emit the local variable
 1311 // declaration.
// NOTE(review): the statement emitting the condition variable (line 1313) is
// elided from this excerpt.
 1312 if (S.getConditionVariable()) {
 1314
 1315 // We have entered the condition variable's scope, so we're now able to
 1316 // jump to the continue block.
 1317 Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
 1318 BreakContinueStack.back().ContinueBlock = Continue;
 1319 }
 1320
 1321 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
 1322 // If there are any cleanups between here and the loop-exit scope,
 1323 // create a block to stage a loop exit along.
 1324 if (hasSkipCounter(&S) || (ForScope && ForScope->requiresCleanups()))
 1325 ExitBlock = createBasicBlock("for.cond.cleanup");
 1326
 1327 // As long as the condition is true, iterate the loop.
 1328 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
 1329
 1330 // C99 6.8.5p2/p4: The first substatement is executed if the expression
 1331 // compares unequal to 0. The condition must be a scalar type.
 1332 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
 1333
 1335
// Prefer real profile weights; fall back to likelihood attributes lowered
// through llvm.expect when optimizing without profile data.
 1336 llvm::MDNode *Weights =
 1337 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
 1338 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
 1339 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
 1340 BoolCondVal, Stmt::getLikelihood(S.getBody()));
 1341
 1342 auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
 1343 // Key Instructions: Emit the condition and branch as separate atoms to
 1344 // match existing loop stepping behaviour. FIXME: We could have the branch
 1345 // as the backup location for the condition, which would probably be a
 1346 // better experience (no jumping to the brace).
 1347 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
 1348 addInstToNewSourceAtom(CondI, nullptr);
 1349 addInstToNewSourceAtom(I, nullptr);
 1350
// NOTE(review): the staged-exit block's interior (lines 1353-1354) is elided
// from this excerpt.
 1351 if (ExitBlock != LoopExit.getBlock()) {
 1352 EmitBlock(ExitBlock);
 1355 }
 1356
 1357 EmitBlock(ForBody);
 1358 } else {
 1359 // Treat it as a non-zero constant. Don't even create a new block for the
 1360 // body, just fall into it.
 1361 PGO->markStmtAsUsed(true, &S);
 1362 }
 1363
 1365
 1366 {
 1367 // Create a separate cleanup scope for the body, in case it is not
 1368 // a compound statement.
 1369 RunCleanupsScope BodyScope(*this);
 1370 EmitStmt(S.getBody());
 1371 }
 1372
 1373 // The last block in the loop's body (which unconditionally branches to the
 1374 // `inc` block if there is one).
 1375 auto *FinalBodyBB = Builder.GetInsertBlock();
 1376
 1377 // If there is an increment, emit it next.
 1378 if (S.getInc()) {
 1379 EmitBlock(Continue.getBlock());
 1380 EmitStmt(S.getInc());
 1381 }
 1382
 1383 BreakContinueStack.pop_back();
 1384
 1385 ConditionScope.ForceCleanup();
 1386
 1387 EmitStopPoint(&S);
 1388 EmitBranch(CondBlock);
 1389
 1390 if (ForScope)
 1391 ForScope->ForceCleanup();
 1392
 1393 LoopStack.pop();
 1394
 1395 // Emit the fall-through block.
 1396 EmitBlock(LoopExit.getBlock(), true);
 1397
 1398 if (CGM.shouldEmitConvergenceTokens())
 1399 ConvergenceTokenStack.pop_back();
 1400
 1401 if (FinalBodyBB) {
 1402 // Key Instructions: We want the for closing brace to be step-able on to
 1403 // match existing behaviour.
 1404 addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
 1405 }
 1406}
1407
// Emits a C++ range-based 'for' statement. By this point Sema has rewritten
// it into begin/end statements, a condition, an increment, and a loop
// variable; this function lowers that form: init + end, a "for.cond" header,
// the body with its loop variable in a lexical scope, "for.inc", and the
// exit. NOTE(review): parts of the signature (lines 1409/1411) are elided
// from this excerpt.
 1408void
 1410 ArrayRef<const Attr *> ForAttrs) {
 1412
 1413 LexicalScope ForScope(*this, S.getSourceRange());
 1415 // Evaluate the first pieces before the loop.
// NOTE(review): the statement emitting the range/begin piece (around line
// 1418-1419) is elided from this excerpt; only the end-statement emission is
// visible.
 1416 if (S.getInit())
 1417 EmitStmt(S.getInit());
 1420 EmitStmt(S.getEndStmt());
 1421
 1422 // Start the loop with a block that tests the condition.
 1423 // If there's an increment, the continue scope will be overwritten
 1424 // later.
 1425 llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
 1426 EmitBlock(CondBlock);
 1427
 1428 if (CGM.shouldEmitConvergenceTokens())
 1429 ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));
 1430
 1431 const SourceRange &R = S.getSourceRange();
 1432 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
 1433 SourceLocToDebugLoc(R.getBegin()),
 1434 SourceLocToDebugLoc(R.getEnd()));
 1435
 1436 // If there are any cleanups between here and the loop-exit scope,
 1437 // create a block to stage a loop exit along.
 1438 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
 1439 if (hasSkipCounter(&S) || ForScope.requiresCleanups())
 1440 ExitBlock = createBasicBlock("for.cond.cleanup");
 1441
 1442 // The loop body, consisting of the specified body and the loop variable.
 1443 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
 1444
 1445 // The body is executed if the expression, contextually converted
 1446 // to bool, is true.
 1447 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
// Prefer profile-derived weights; otherwise lower likelihood attributes via
// llvm.expect when optimizing.
 1448 llvm::MDNode *Weights =
 1449 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
 1450 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
 1451 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
 1452 BoolCondVal, Stmt::getLikelihood(S.getBody()));
 1453 auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
 1454 // Key Instructions: Emit the condition and branch as separate atoms to
 1455 // match existing loop stepping behaviour. FIXME: We could have the branch as
 1456 // the backup location for the condition, which would probably be a better
 1457 // experience.
 1458 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
 1459 addInstToNewSourceAtom(CondI, nullptr);
 1460 addInstToNewSourceAtom(I, nullptr);
 1461
// NOTE(review): the staged-exit block's interior (lines 1464-1465) is elided
// from this excerpt.
 1462 if (ExitBlock != LoopExit.getBlock()) {
 1463 EmitBlock(ExitBlock);
 1466 }
 1467
 1468 EmitBlock(ForBody);
 1470
 1471 // Create a block for the increment. In case of a 'continue', we jump there.
 1472 JumpDest Continue = getJumpDestInCurrentScope("for.inc");
 1473
 1474 // Store the blocks to use for break and continue.
 1475 BreakContinueStack.push_back(BreakContinue(S, LoopExit, Continue));
 1476
// NOTE(review): the loop-variable emission inside this scope (line 1480) is
// elided from this excerpt.
 1477 {
 1478 // Create a separate cleanup scope for the loop variable and body.
 1479 LexicalScope BodyScope(*this, S.getSourceRange());
 1481 EmitStmt(S.getBody());
 1482 }
 1483 // The last block in the loop's body (which unconditionally branches to the
 1484 // `inc` block if there is one).
 1485 auto *FinalBodyBB = Builder.GetInsertBlock();
 1486
 1487 EmitStopPoint(&S);
 1488 // If there is an increment, emit it next.
 1489 EmitBlock(Continue.getBlock());
 1490 EmitStmt(S.getInc());
 1491
 1492 BreakContinueStack.pop_back();
 1493
 1494 EmitBranch(CondBlock);
 1495
 1496 ForScope.ForceCleanup();
 1497
 1498 LoopStack.pop();
 1499
 1500 // Emit the fall-through block.
 1501 EmitBlock(LoopExit.getBlock(), true);
 1502
 1503 if (CGM.shouldEmitConvergenceTokens())
 1504 ConvergenceTokenStack.pop_back();
 1505
 1506 if (FinalBodyBB) {
 1507 // We want the for closing brace to be step-able on to match existing
 1508 // behaviour.
 1509 addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
 1510 }
 1511}
1512
// Stores an already-evaluated RValue of type Ty into the function's return
// slot (ReturnValue), dispatching on the value kind: scalars get a direct
// store, aggregates copy into an LValue over the slot, and the remaining
// (complex) case is handled in the else branch. NOTE(review): several
// interior lines (1518-1519, 1521, 1524) are elided from this excerpt.
 1513void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
 1514 if (RV.isScalar()) {
 1515 Builder.CreateStore(RV.getScalarVal(), ReturnValue);
 1516 } else if (RV.isAggregate()) {
 1517 LValue Dest = MakeAddrLValue(ReturnValue, Ty);
 1520 } else {
 1522 /*init*/ true);
 1523 }
 1525}
1526
1527namespace {
1528// RAII struct used to save and restore a return statment's result expression.
1529struct SaveRetExprRAII {
1530 SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
1531 : OldRetExpr(CGF.RetExpr), CGF(CGF) {
1532 CGF.RetExpr = RetExpr;
1533 }
1534 ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
1535 const Expr *OldRetExpr;
1536 CodeGenFunction &CGF;
1537};
1538} // namespace
1539
1540/// Determine if the given call uses the swiftasync calling convention.
1541static bool isSwiftAsyncCallee(const CallExpr *CE) {
1542 auto calleeQualType = CE->getCallee()->getType();
1543 const FunctionType *calleeType = nullptr;
1544 if (calleeQualType->isFunctionPointerType() ||
1545 calleeQualType->isFunctionReferenceType() ||
1546 calleeQualType->isBlockPointerType() ||
1547 calleeQualType->isMemberFunctionPointerType()) {
1548 calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
1549 } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
1550 calleeType = ty;
1551 } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
1552 if (auto methodDecl = CMCE->getMethodDecl()) {
1553 // getMethodDecl() doesn't handle member pointers at the moment.
1554 calleeType = methodDecl->getType()->castAs<FunctionType>();
1555 } else {
1556 return false;
1557 }
1558 } else {
1559 return false;
1560 }
1561 return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
1562}
1563
 1564/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
 1565/// if the function returns void, or may be missing one if the function returns
 1566/// non-void. Fun stuff :).
// NOTE(review): the function signature (lines 1567-1568) is elided from this
// excerpt; the body below operates on the ReturnStmt 'S'.
// For -fsanitize=return-value checking, record the source location of this
// return so the runtime can report where an uninitialized return came from.
 1569 if (requiresReturnValueCheck()) {
 1570 llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
 1571 auto *SLocPtr =
 1572 new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
 1573 llvm::GlobalVariable::PrivateLinkage, SLoc);
 1574 SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
 1575 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
 1576 assert(ReturnLocation.isValid() && "No valid return location");
 1577 Builder.CreateStore(SLocPtr, ReturnLocation);
 1578 }
 1579
 1580 // Returning from an outlined SEH helper is UB, and we already warn on it.
 1581 if (IsOutlinedSEHHelper) {
 1582 Builder.CreateUnreachable();
 1583 Builder.ClearInsertionPoint();
 1584 }
 1585
 1586 // Emit the result value, even if unused, to evaluate the side effects.
 1587 const Expr *RV = S.getRetValue();
 1588
 1589 // Record the result expression of the return statement. The recorded
 1590 // expression is used to determine whether a block capture's lifetime should
 1591 // end at the end of the full expression as opposed to the end of the scope
 1592 // enclosing the block expression.
 1593 //
 1594 // This permits a small, easily-implemented exception to our over-conservative
 1595 // rules about not jumping to statements following block literals with
 1596 // non-trivial cleanups.
 1597 SaveRetExprRAII SaveRetExpr(RV, *this);
 1598
 1599 RunCleanupsScope cleanupScope(*this);
 1600 if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
 1601 RV = EWC->getSubExpr();
 1602
 1603 // If we're in a swiftasynccall function, and the return expression is a
 1604 // call to a swiftasynccall function, mark the call as the musttail call.
 1605 std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
 1606 if (RV && CurFnInfo &&
 1607 CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
 1608 if (auto CE = dyn_cast<CallExpr>(RV)) {
 1609 if (isSwiftAsyncCallee(CE)) {
 1610 SaveMustTail.emplace(MustTailCall, CE);
 1611 }
 1612 }
 1613 }
 1614
 1615 // FIXME: Clean this up by using an LValue for ReturnTemp,
 1616 // EmitStoreThroughLValue, and EmitAnyExpr.
 1617 // Check if the NRVO candidate was not globalized in OpenMP mode.
// NOTE(review): one condition of this 'if' (line 1619) is elided from this
// excerpt.
 1618 if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
 1620 (!getLangOpts().OpenMP ||
 1621 !CGM.getOpenMPRuntime()
 1622 .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
 1623 .isValid())) {
 1624 // Apply the named return value optimization for this return statement,
 1625 // which means doing nothing: the appropriate result has already been
 1626 // constructed into the NRVO variable.
 1627
 1628 // If there is an NRVO flag for this variable, set it to 1 into indicate
 1629 // that the cleanup code should not destroy the variable.
 1630 if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
 1631 Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
 1632 } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
 1633 // Make sure not to return anything, but evaluate the expression
 1634 // for side effects.
 1635 if (RV) {
 1636 EmitAnyExpr(RV);
 1637 }
 1638 } else if (!RV) {
 1639 // Do nothing (return value is left uninitialized)
 1640 } else if (FnRetTy->isReferenceType()) {
 1641 // If this function returns a reference, take the address of the expression
 1642 // rather than the value.
// NOTE(review): the computation of 'Result' (line 1643) is elided from this
// excerpt.
 1644 auto *I = Builder.CreateStore(Result.getScalarVal(), ReturnValue);
 1645 addInstToCurrentSourceAtom(I, I->getValueOperand());
 1646 } else {
 1647 switch (getEvaluationKind(RV->getType())) {
 1648 case TEK_Scalar: {
 1649 llvm::Value *Ret = EmitScalarExpr(RV);
 1650 if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
 1652 /*isInit*/ true);
 1653 } else {
 1654 auto *I = Builder.CreateStore(Ret, ReturnValue);
 1655 addInstToCurrentSourceAtom(I, I->getValueOperand());
 1656 }
 1657 break;
 1658 }
// NOTE(review): the complex and aggregate emission calls (lines 1660 and
// 1664-1669) are elided from this excerpt.
 1659 case TEK_Complex:
 1661 /*isInit*/ true);
 1662 break;
 1663 case TEK_Aggregate:
 1670 break;
 1671 }
 1672 }
 1673
// Statistics used elsewhere to report how many returns were "simple".
 1674 ++NumReturnExprs;
 1675 if (!RV || RV->isEvaluatable(getContext()))
 1676 ++NumSimpleReturnExprs;
 1677
 1678 cleanupScope.ForceCleanup();
 1680}
1681
// Emits a declaration statement by emitting each of its declarations in
// order. NOTE(review): the function signature (line 1682) is elided from this
// excerpt.
 1683 // As long as debug info is modeled with instructions, we have to ensure we
 1684 // have a place to insert here and write the stop point here.
 1685 if (HaveInsertPoint())
 1686 EmitStopPoint(&S)
 1687
 1688 for (const auto *I : S.decls())
 1689 EmitDecl(*I, /*EvaluateConditionDecl=*/true);
 1690}
1691
// Resolves which BreakContinue record a break/continue statement targets:
// the innermost enclosing construct by default, or — for a labeled
// break/continue — the record whose loop/switch statement matches the named
// target. NOTE(review): the signature's first line (1692) is elided from this
// excerpt; the trailing return type shows it yields a const BreakContinue*.
 1693 -> const BreakContinue * {
 1694 if (!S.hasLabelTarget())
 1695 return &BreakContinueStack.back();
 1696
// Labeled form: scan from the innermost record outward for the named target.
 1697 const Stmt *LoopOrSwitch = S.getNamedLoopOrSwitch();
 1698 assert(LoopOrSwitch && "break/continue target not set?");
 1699 for (const BreakContinue &BC : llvm::reverse(BreakContinueStack))
 1700 if (BC.LoopOrSwitch == LoopOrSwitch)
 1701 return &BC;
 1702
// Sema guarantees a labeled break/continue binds to an enclosing construct.
 1703 llvm_unreachable("break/continue target not found");
 1704}
1705
// Emits a 'break' statement. NOTE(review): the signature (line 1706) and the
// final branch to the break destination (lines 1715-1716) are elided from
// this excerpt.
 1707 assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
 1708
 1709 // If this code is reachable then emit a stop point (if generating
 1710 // debug info). We have to do this ourselves because we are on the
 1711 // "simple" statement path.
 1712 if (HaveInsertPoint())
 1713 EmitStopPoint(&S);
 1714
 1717}
1718
// Emits a 'continue' statement. NOTE(review): the signature (line 1719) and
// the final branch to the continue destination (lines 1728-1729) are elided
// from this excerpt.
 1720 assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
 1721
 1722 // If this code is reachable then emit a stop point (if generating
 1723 // debug info). We have to do this ourselves because we are on the
 1724 // "simple" statement path.
 1725 if (HaveInsertPoint())
 1726 EmitStopPoint(&S);
 1727
 1730}
1731
 1732/// EmitCaseStmtRange - If case statement range is not too big then
 1733/// add multiple cases to switch instruction, one for each value within
 1734/// the range. If range is too big then emit "if" condition check.
// NOTE(review): the signature's first line (1735) is elided from this
// excerpt.
 1736 ArrayRef<const Attr *> Attrs) {
 1737 assert(S.getRHS() && "Expected RHS value in CaseStmt");
 1738
 1739 llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
 1740 llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
 1741
 1742 // Emit the code for this case. We do this first to make sure it is
 1743 // properly chained from our predecessor before generating the
 1744 // switch machinery to enter this block.
 1745 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
 1746 EmitBlockWithFallThrough(CaseDest, &S);
 1747 EmitStmt(S.getSubStmt());
 1748
 1749 // If range is empty, do nothing.
 1750 if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
 1751 return;
 1752
// NOTE(review): the computation of 'LH' (line 1753) is elided from this
// excerpt; it is used below for likelihood bookkeeping.
 1754 llvm::APInt Range = RHS - LHS;
 1755 // FIXME: parameters such as this should not be hardcoded.
 1756 if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
 1757 // Range is small enough to add multiple switch instruction cases.
 1758 uint64_t Total = getProfileCount(&S);
 1759 unsigned NCases = Range.getZExtValue() + 1;
 1760 // We only have one region counter for the entire set of cases here, so we
 1761 // need to divide the weights evenly between the generated cases, ensuring
 1762 // that the total weight is preserved. E.g., a weight of 5 over three cases
 1763 // will be distributed as weights of 2, 2, and 1.
 1764 uint64_t Weight = Total / NCases, Rem = Total % NCases;
 1765 for (unsigned I = 0; I != NCases; ++I) {
 1766 if (SwitchWeights)
 1767 SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
 1768 else if (SwitchLikelihood)
 1769 SwitchLikelihood->push_back(LH);
 1770
 1771 if (Rem)
 1772 Rem--;
 1773 SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
 1774 ++LHS;
 1775 }
 1776 return;
 1777 }
 1778
 1779 // The range is too big. Emit "if" condition into a new block,
 1780 // making sure to save and restore the current insertion point.
 1781 llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
 1782
 1783 // Push this test onto the chain of range checks (which terminates
 1784 // in the default basic block). The switch's default will be changed
 1785 // to the top of this chain after switch emission is complete.
 1786 llvm::BasicBlock *FalseDest = CaseRangeBlock;
 1787 CaseRangeBlock = createBasicBlock("sw.caserange");
 1788
 1789 CurFn->insert(CurFn->end(), CaseRangeBlock);
 1790 Builder.SetInsertPoint(CaseRangeBlock);
 1791
 1792 // Emit range check.
// Unsigned compare of (cond - LHS) <= (RHS - LHS) tests membership in
// [LHS, RHS] with a single branch, regardless of signedness.
 1793 llvm::Value *Diff =
 1794 Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
 1795 llvm::Value *Cond =
 1796 Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
 1797
 1798 llvm::MDNode *Weights = nullptr;
 1799 if (SwitchWeights) {
 1800 uint64_t ThisCount = getProfileCount(&S);
 1801 uint64_t DefaultCount = (*SwitchWeights)[0];
 1802 Weights = createProfileWeights(ThisCount, DefaultCount);
 1803
 1804 // Since we're chaining the switch default through each large case range, we
 1805 // need to update the weight for the default, ie, the first case, to include
 1806 // this case.
 1807 (*SwitchWeights)[0] += ThisCount;
 1808 } else if (SwitchLikelihood)
 1809 Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
 1810
 1811 Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
 1812
 1813 // Restore the appropriate insertion point.
 1814 if (RestoreBB)
 1815 Builder.SetInsertPoint(RestoreBB);
 1816 else
 1817 Builder.ClearInsertionPoint();
 1818}
1819
// Emits a 'case' label inside a switch: registers the case value(s) on the
// current SwitchInsn and emits the sub-statement, with special handling for
// constant-folded switches, case ranges, break-only bodies, and long chains
// of consecutive case labels. NOTE(review): the signature's first line (1820)
// is elided from this excerpt.
 1821 ArrayRef<const Attr *> Attrs) {
 1822 // If there is no enclosing switch instance that we're aware of, then this
 1823 // case statement and its block can be elided. This situation only happens
 1824 // when we've constant-folded the switch, are emitting the constant case,
 1825 // and part of the constant case includes another case statement. For
 1826 // instance: switch (4) { case 4: do { case 5: } while (1); }
 1827 if (!SwitchInsn) {
 1828 EmitStmt(S.getSubStmt());
 1829 return;
 1830 }
 1831
 1832 // Handle case ranges.
 1833 if (S.getRHS()) {
 1834 EmitCaseStmtRange(S, Attrs);
 1835 return;
 1836 }
 1837
// NOTE(review): the initializer of 'CaseVal' (line 1839) is elided from this
// excerpt; below it holds the case's constant value.
 1838 llvm::ConstantInt *CaseVal =
 1840
 1841 // Emit debuginfo for the case value if it is an enum value.
 1842 const ConstantExpr *CE;
 1843 if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
 1844 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
 1845 else
 1846 CE = dyn_cast<ConstantExpr>(S.getLHS());
 1847 if (CE) {
 1848 if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
 1849 if (CGDebugInfo *Dbg = getDebugInfo())
 1850 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
 1851 Dbg->EmitGlobalVariable(DE->getDecl(),
 1852 APValue(llvm::APSInt(CaseVal->getValue())));
 1853 }
 1854
 1855 if (SwitchLikelihood)
 1856 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
 1857
 1858 // If the body of the case is just a 'break', try to not emit an empty block.
 1859 // If we're profiling or we're not optimizing, leave the block in for better
 1860 // debug and coverage analysis.
// NOTE(review): one condition of this 'if' (line 1863) and the cleanup check
// guarding the inner block (line 1867) are elided from this excerpt.
 1861 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
 1862 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
 1864 JumpDest Block = BreakContinueStack.back().BreakBlock;
 1865
 1866 // Only do this optimization if there are no cleanups that need emitting.
 1868 if (SwitchWeights)
 1869 SwitchWeights->push_back(getProfileCount(&S));
 1870 SwitchInsn->addCase(CaseVal, Block.getBlock());
 1871
 1872 // If there was a fallthrough into this case, make sure to redirect it to
 1873 // the end of the switch as well.
 1874 if (Builder.GetInsertBlock()) {
 1875 Builder.CreateBr(Block.getBlock());
 1876 Builder.ClearInsertionPoint();
 1877 }
 1878 return;
 1879 }
 1880 }
 1881
 1882 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
 1883 EmitBlockWithFallThrough(CaseDest, &S);
 1884 if (SwitchWeights)
 1885 SwitchWeights->push_back(getProfileCount(&S));
 1886 SwitchInsn->addCase(CaseVal, CaseDest);
 1887
 1888 // Recursively emitting the statement is acceptable, but is not wonderful for
 1889 // code where we have many case statements nested together, i.e.:
 1890 // case 1:
 1891 // case 2:
 1892 // case 3: etc.
 1893 // Handling this recursively will create a new block for each case statement
 1894 // that falls through to the next case which is IR intensive. It also causes
 1895 // deep recursion which can run into stack depth limitations. Handle
 1896 // sequential non-range case statements specially.
 1897 //
 1898 // TODO When the next case has a likelihood attribute the code returns to the
 1899 // recursive algorithm. Maybe improve this case if it becomes common practice
 1900 // to use a lot of attributes.
 1901 const CaseStmt *CurCase = &S;
 1902 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
 1903
 1904 // Otherwise, iteratively add consecutive cases to this switch stmt.
 1905 while (NextCase && NextCase->getRHS() == nullptr) {
 1906 CurCase = NextCase;
 1907 llvm::ConstantInt *CaseVal =
 1908 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
 1909
 1910 if (SwitchWeights)
 1911 SwitchWeights->push_back(getProfileCount(NextCase));
 1912 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
 1913 CaseDest = createBasicBlock("sw.bb");
 1914 EmitBlockWithFallThrough(CaseDest, CurCase);
 1915 }
 1916 // Since this loop is only executed when the CaseStmt has no attributes
 1917 // use a hard-coded value.
 1918 if (SwitchLikelihood)
 1919 SwitchLikelihood->push_back(Stmt::LH_None);
 1920
 1921 SwitchInsn->addCase(CaseVal, CaseDest);
 1922 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
 1923 }
 1924
 1925 // Generate a stop point for debug info if the case statement is
 1926 // followed by a default statement. A fallthrough case before a
 1927 // default case gets its own branch target.
 1928 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
 1929 EmitStopPoint(CurCase);
 1930
 1931 // Normal default recursion for non-cases.
 1932 EmitStmt(CurCase->getSubStmt());
 1933}
1934
// Emits a 'default' label inside a switch by filling in the switch's default
// destination block and emitting the sub-statement. NOTE(review): the
// signature's first line (1935) is elided from this excerpt.
 1936 ArrayRef<const Attr *> Attrs) {
 1937 // If there is no enclosing switch instance that we're aware of, then this
 1938 // default statement can be elided. This situation only happens when we've
 1939 // constant-folded the switch.
 1940 if (!SwitchInsn) {
 1941 EmitStmt(S.getSubStmt());
 1942 return;
 1943 }
 1944
 1945 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
 1946 assert(DefaultBlock->empty() &&
 1947 "EmitDefaultStmt: Default block already defined?");
 1948
// Index 0 of the likelihood vector is reserved for the default destination.
 1949 if (SwitchLikelihood)
 1950 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
 1951
 1952 EmitBlockWithFallThrough(DefaultBlock, &S);
 1953
 1954 EmitStmt(S.getSubStmt());
 1955}
1956
 1957namespace {
// EHScopeStack cleanup that emits the body of a '_Defer' statement when the
// enclosing scope exits (both on normal and exceptional paths).
 1958struct EmitDeferredStatement final : EHScopeStack::Cleanup {
// The deferred statement whose body is emitted at cleanup time.
 1959 const DeferStmt &Stmt;
 1960 EmitDeferredStatement(const DeferStmt *Stmt) : Stmt(*Stmt) {}
 1961
 1962 void Emit(CodeGenFunction &CGF, Flags) override {
 1963 // Take care that any cleanups pushed by the body of a '_Defer' statement
 1964 // don't clobber the current cleanup slot value.
 1965 //
 1966 // Assume we have a scope that pushes a cleanup; when that scope is exited,
 1967 // we need to run that cleanup; this is accomplished by emitting the cleanup
 1968 // into a separate block and then branching to that block at scope exit.
 1969 //
 1970 // Where this gets complicated is if we exit the scope in multiple different
 1971 // ways; e.g. in a 'for' loop, we may exit the scope of its body by falling
 1972 // off the end (in which case we need to run the cleanup and then branch to
 1973 // the increment), or by 'break'ing out of the loop (in which case we need
 1974 // to run the cleanup and then branch to the loop exit block); in both cases
 1975 // we first branch to the cleanup block to run the cleanup, but the block we
 1976 // need to jump to *after* running the cleanup is different.
 1977 //
 1978 // This is accomplished using a local integer variable called the 'cleanup
 1979 // slot': before branching to the cleanup block, we store a value into that
 1980 // slot. Then, in the cleanup block, after running the cleanup, we load the
 1981 // value of that variable and 'switch' on it to branch to the appropriate
 1982 // continuation block.
 1983 //
 1984 // The problem that arises once '_Defer' statements are involved is that the
 1985 // body of a '_Defer' is an arbitrary statement which itself can create more
 1986 // cleanups. This means we may end up overwriting the cleanup slot before we
 1987 // ever have a chance to 'switch' on it, which means that once we *do* get
 1988 // to the 'switch', we end up in whatever block the cleanup code happened to
 1989 // pick as the default 'switch' exit label!
 1990 //
 1991 // That is, what is normally supposed to happen is something like:
 1992 //
 1993 // 1. Store 'X' to cleanup slot.
 1994 // 2. Branch to cleanup block.
 1995 // 3. Execute cleanup.
 1996 // 4. Read value from cleanup slot.
 1997 // 5. Branch to the block associated with 'X'.
 1998 //
 1999 // But if we encounter a _Defer' statement that contains a cleanup, then
 2000 // what might instead happen is:
 2001 //
 2002 // 1. Store 'X' to cleanup slot.
 2003 // 2. Branch to cleanup block.
 2004 // 3. Execute cleanup; this ends up pushing another cleanup, so:
 2005 // 3a. Store 'Y' to cleanup slot.
 2006 // 3b. Run steps 2–5 recursively.
 2007 // 4. Read value from cleanup slot, which is now 'Y' instead of 'X'.
 2008 // 5. Branch to the block associated with 'Y'... which doesn't even
 2009 // exist because the value 'Y' is only meaningful for the inner
 2010 // cleanup. The result is we just branch 'somewhere random'.
 2011 //
 2012 // The rest of the cleanup code simply isn't prepared to handle this case
 2013 // because most other cleanups can't push more cleanups, and thus, emitting
 2014 // other cleanups generally cannot clobber the cleanup slot.
 2015 //
 2016 // To prevent this from happening, save the current cleanup slot value and
 2017 // restore it after emitting the '_Defer' statement.
 2018 llvm::Value *SavedCleanupDest = nullptr;
 2019 if (CGF.NormalCleanupDest.isValid())
 2020 SavedCleanupDest =
 2021 CGF.Builder.CreateLoad(CGF.NormalCleanupDest, "cleanup.dest.saved");
 2022
 2023 CGF.EmitStmt(Stmt.getBody());
 2024
 2025 if (SavedCleanupDest && CGF.HaveInsertPoint())
 2026 CGF.Builder.CreateStore(SavedCleanupDest, CGF.NormalCleanupDest);
 2027
 2028 // Cleanups must end with an insert point.
 2029 CGF.EnsureInsertPoint();
 2030 }
 2031};
 2032} // namespace
2033
// CodeGenFunction::EmitDeferStmt — lower a '_Defer' statement by pushing its
// body as a cleanup (EmitDeferredStatement, defined above) on the EH stack,
// so the deferred statement runs on both normal and exceptional scope exit.
// NOTE(review): the function's signature line is missing from this extract
// (extraction gap) — presumably "void CodeGenFunction::EmitDeferStmt(const
// DeferStmt &S) {"; confirm against upstream.
2035 EHStack.pushCleanup<EmitDeferredStatement>(NormalAndEHCleanup, &S);
2036}
2037
2038/// CollectStatementsForCase - Given the body of a 'switch' statement and a
2039/// constant value that is being switched on, see if we can dead code eliminate
2040/// the body of the switch to a simple series of statements to emit. Basically,
2041/// on a switch (5) we want to find these statements:
2042/// case 5:
2043/// printf(...); <--
2044/// ++i; <--
2045/// break;
2046///
2047/// and add them to the ResultStmts vector. If it is unsafe to do this
2048/// transformation (for example, one of the elided statements contains a label
2049/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
2050/// should include statements after it (e.g. the printf() line is a substmt of
2051/// the case) then return CSFC_FallThrough. If we handled it and found a break
2052/// statement, then return CSFC_Success.
2053///
2054/// If Case is non-null, then we are looking for the specified case, checking
2055/// that nothing we jump over contains labels. If Case is null, then we found
2056/// the case and are looking for the break.
2057///
2058/// If the recursive walk actually finds our Case, then we set FoundCase to
2059/// true.
2060///
// NOTE(review): the declaration line of this helper ("static CSFC_Result
// CollectStatementsForCase(const Stmt *S, ...") was lost in extraction; the
// parameter-continuation lines below belong to it.
2063 const SwitchCase *Case,
2064 bool &FoundCase,
2065 SmallVectorImpl<const Stmt*> &ResultStmts) {
2066 // If this is a null statement, just succeed.
2067 if (!S)
2068 return Case ? CSFC_Success : CSFC_FallThrough;
2069
2070 // If this is the switchcase (case 4: or default) that we're looking for, then
2071 // we're in business. Just add the substatement.
2072 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
2073 if (S == Case) {
2074 FoundCase = true;
// Recurse with a null Case: from here on we are collecting live statements
// and looking for the terminating 'break'.
2075 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
2076 ResultStmts);
2077 }
2078
2079 // Otherwise, this is some other case or default statement, just ignore it.
2080 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
2081 ResultStmts);
2082 }
2083
2084 // If we are in the live part of the code and we found our break statement,
2085 // return a success!
2086 if (!Case && isa<BreakStmt>(S))
2087 return CSFC_Success;
2088
2089 // If this is a switch statement, then it might contain the SwitchCase, the
2090 // break, or neither.
2091 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
2092 // Handle this as two cases: we might be looking for the SwitchCase (if so
2093 // the skipped statements must be skippable) or we might already have it.
2094 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
2095 bool StartedInLiveCode = FoundCase;
2096 unsigned StartSize = ResultStmts.size();
2097
2098 // If we've not found the case yet, scan through looking for it.
2099 if (Case) {
2100 // Keep track of whether we see a skipped declaration. The code could be
2101 // using the declaration even if it is skipped, so we can't optimize out
2102 // the decl if the kept statements might refer to it.
2103 bool HadSkippedDecl = false;
2104
2105 // If we're looking for the case, just see if we can skip each of the
2106 // substatements.
2107 for (; Case && I != E; ++I) {
2108 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
2109
2110 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
2111 case CSFC_Failure: return CSFC_Failure;
2112 case CSFC_Success:
2113 // A successful result means that either 1) that the statement doesn't
2114 // have the case and is skippable, or 2) does contain the case value
2115 // and also contains the break to exit the switch. In the latter case,
2116 // we just verify the rest of the statements are elidable.
2117 if (FoundCase) {
2118 // If we found the case and skipped declarations, we can't do the
2119 // optimization.
2120 if (HadSkippedDecl)
2121 return CSFC_Failure;
2122
2123 for (++I; I != E; ++I)
2124 if (CodeGenFunction::ContainsLabel(*I, true))
2125 return CSFC_Failure;
2126 return CSFC_Success;
2127 }
2128 break;
2129 case CSFC_FallThrough:
2130 // If we have a fallthrough condition, then we must have found the
2131 // case started to include statements. Consider the rest of the
2132 // statements in the compound statement as candidates for inclusion.
2133 assert(FoundCase && "Didn't find case but returned fallthrough?");
2134 // We recursively found Case, so we're not looking for it anymore.
2135 Case = nullptr;
2136
2137 // If we found the case and skipped declarations, we can't do the
2138 // optimization.
2139 if (HadSkippedDecl)
2140 return CSFC_Failure;
2141 break;
2142 }
2143 }
2144
2145 if (!FoundCase)
2146 return CSFC_Success;
2147
2148 assert(!HadSkippedDecl && "fallthrough after skipping decl");
2149 }
2150
2151 // If we have statements in our range, then we know that the statements are
2152 // live and need to be added to the set of statements we're tracking.
2153 bool AnyDecls = false;
2154 for (; I != E; ++I) {
// NOTE(review): a statement is missing here (extraction gap) — it is
// expected to OR CodeGenFunction::mightAddDeclToScope(*I) into 'AnyDecls';
// confirm against upstream.
2156
2157 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
2158 case CSFC_Failure: return CSFC_Failure;
2159 case CSFC_FallThrough:
2160 // A fallthrough result means that the statement was simple and just
2161 // included in ResultStmt, keep adding them afterwards.
2162 break;
2163 case CSFC_Success:
2164 // A successful result means that we found the break statement and
2165 // stopped statement inclusion. We just ensure that any leftover stmts
2166 // are skippable and return success ourselves.
2167 for (++I; I != E; ++I)
2168 if (CodeGenFunction::ContainsLabel(*I, true))
2169 return CSFC_Failure;
2170 return CSFC_Success;
2171 }
2172 }
2173
2174 // If we're about to fall out of a scope without hitting a 'break;', we
2175 // can't perform the optimization if there were any decls in that scope
2176 // (we'd lose their end-of-lifetime).
2177 if (AnyDecls) {
2178 // If the entire compound statement was live, there's one more thing we
2179 // can try before giving up: emit the whole thing as a single statement.
2180 // We can do that unless the statement contains a 'break;'.
2181 // FIXME: Such a break must be at the end of a construct within this one.
2182 // We could emit this by just ignoring the BreakStmts entirely.
2183 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
2184 ResultStmts.resize(StartSize);
2185 ResultStmts.push_back(S);
2186 } else {
2187 return CSFC_Failure;
2188 }
2189 }
2190
2191 return CSFC_FallThrough;
2192 }
2193
2194 // Okay, this is some other statement that we don't handle explicitly, like a
2195 // for statement or increment etc. If we are skipping over this statement,
2196 // just verify it doesn't have labels, which would make it invalid to elide.
2197 if (Case) {
2198 if (CodeGenFunction::ContainsLabel(S, true))
2199 return CSFC_Failure;
2200 return CSFC_Success;
2201 }
2202
2203 // Otherwise, we want to include this statement. Everything is cool with that
2204 // so long as it doesn't contain a break out of the switch we're in.
// NOTE(review): a guard is missing here (extraction gap) — per the comment
// above, it should fail (return CSFC_Failure) when
// CodeGenFunction::containsBreak(S) holds; confirm against upstream.
2206
2207 // Otherwise, everything is great. Include the statement and tell the caller
2208 // that we fall through and include the next statement as well.
2209 ResultStmts.push_back(S);
2210 return CSFC_FallThrough;
2211}
2212
2213/// FindCaseStatementsForValue - Find the case statement being jumped to and
2214/// then invoke CollectStatementsForCase to find the list of statements to emit
2215/// for a switch on constant. See the comment above CollectStatementsForCase
2216/// for more details.
// NOTE(review): the declaration line ("static bool
// FindCaseStatementsForValue(const SwitchStmt &S,") is missing from this
// extract; the parameter-continuation lines below belong to it.
2218 const llvm::APSInt &ConstantCondValue,
2219 SmallVectorImpl<const Stmt*> &ResultStmts,
2220 ASTContext &C,
2221 const SwitchCase *&ResultCase) {
2222 // First step, find the switch case that is being branched to. We can do this
2223 // efficiently by scanning the SwitchCase list.
2224 const SwitchCase *Case = S.getSwitchCaseList();
2225 const DefaultStmt *DefaultCase = nullptr;
2226
2227 for (; Case; Case = Case->getNextSwitchCase()) {
2228 // It's either a default or case. Just remember the default statement in
2229 // case we're not jumping to any numbered cases.
2230 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
2231 DefaultCase = DS;
2232 continue;
2233 }
2234
2235 // Check to see if this case is the one we're looking for.
2236 const CaseStmt *CS = cast<CaseStmt>(Case);
2237 // Don't handle case ranges yet.
2238 if (CS->getRHS()) return false;
2239
2240 // If we found our case, remember it as 'case'.
2241 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
2242 break;
2243 }
2244
2245 // If we didn't find a matching case, we use a default if it exists, or we
2246 // elide the whole switch body!
2247 if (!Case) {
2248 // It is safe to elide the body of the switch if it doesn't contain labels
2249 // etc. If it is safe, return successfully with an empty ResultStmts list.
2250 if (!DefaultCase)
// NOTE(review): the 'then' statement of this 'if' is missing here
// (extraction gap) — per the comment above it returns success/failure based
// on a label-containment check of the switch body; confirm upstream.
2252 Case = DefaultCase;
2253 }
2254
2255 // Ok, we know which case is being jumped to, try to collect all the
2256 // statements that follow it. This can fail for a variety of reasons. Also,
2257 // check to see that the recursive walk actually found our case statement.
2258 // Insane cases like this can fail to find it in the recursive walk since we
2259 // don't handle every stmt kind:
2260 // switch (4) {
2261 // while (1) {
2262 // case 4: ...
2263 bool FoundCase = false;
2264 ResultCase = Case;
2265 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
2266 ResultStmts) != CSFC_Failure &&
2267 FoundCase;
2268}
2269
// getLikelihoodWeights — translate the per-branch [[likely]]/[[unlikely]]
// annotations collected for a 'switch' into branch weights suitable for
// !prof metadata. Returns std::nullopt when weighting is pointless (fewer
// than two branches, or no likelihood attribute present at all).
2270static std::optional<SmallVector<uint64_t, 16>>
// NOTE(review): the second line of the declaration (naming the function and
// its ArrayRef<Stmt::Likelihood> parameter, presumably 'Likelihoods') is
// missing from this extract (extraction gap).
2272 // Are there enough branches to weight them?
2273 if (Likelihoods.size() <= 1)
2274 return std::nullopt;
2275
2276 uint64_t NumUnlikely = 0;
2277 uint64_t NumNone = 0;
2278 uint64_t NumLikely = 0;
2279 for (const auto LH : Likelihoods) {
2280 switch (LH) {
2281 case Stmt::LH_Unlikely:
2282 ++NumUnlikely;
2283 break;
2284 case Stmt::LH_None:
2285 ++NumNone;
2286 break;
2287 case Stmt::LH_Likely:
2288 ++NumLikely;
2289 break;
2290 }
2291 }
2292
2293 // Is there a likelihood attribute used?
2294 if (NumUnlikely == 0 && NumLikely == 0)
2295 return std::nullopt;
2296
2297 // When multiple cases share the same code they can be combined during
2298 // optimization. In that case the weights of the branch will be the sum of
2299 // the individual weights. Make sure the combined sum of all neutral cases
2300 // doesn't exceed the value of a single likely attribute.
2301 // The additions both avoid divisions by 0 and make sure the weights of None
2302 // don't exceed the weight of Likely.
2303 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2304 const uint64_t None = Likely / (NumNone + 1);
2305 const uint64_t Unlikely = 0;
2306
// NOTE(review): the declaration of 'Result' (a SmallVector<uint64_t, 16>,
// matching the return type above) is missing here (extraction gap).
2308 Result.reserve(Likelihoods.size());
2309 for (const auto LH : Likelihoods) {
2310 switch (LH) {
2311 case Stmt::LH_Unlikely:
2312 Result.push_back(Unlikely);
2313 break;
2314 case Stmt::LH_None:
2315 Result.push_back(None);
2316 break;
2317 case Stmt::LH_Likely:
2318 Result.push_back(Likely);
2319 break;
2320 }
2321 }
2322
2323 return Result;
2324}
2325
// CodeGenFunction::EmitSwitchStmt — emit LLVM IR for a 'switch' statement:
// constant-fold to a single live case when possible, otherwise build a
// llvm::SwitchInst, emit the body, wire up the default/exit blocks, and
// attach profile or likelihood branch-weight metadata.
// NOTE(review): the function's signature line is missing from this extract
// (extraction gap).
2327 // Handle nested switch statements.
2328 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2329 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2330 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2331 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2332
2333 // See if we can constant fold the condition of the switch and therefore only
2334 // emit the live case statement (if any) of the switch.
2335 llvm::APSInt ConstantCondValue;
2336 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
// NOTE(review): the declaration of 'CaseStmts' (the statement list filled in
// by FindCaseStatementsForValue below) is missing here (extraction gap).
2338 const SwitchCase *Case = nullptr;
2339 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2340 getContext(), Case)) {
2341 if (Case)
// NOTE(review): the 'then' statement of the 'if (Case)' above is missing
// (extraction gap) — likely a profile-counter increment for the case.
2343 RunCleanupsScope ExecutedScope(*this);
2344
2345 if (S.getInit())
2346 EmitStmt(S.getInit());
2347
2348 // Emit the condition variable if needed inside the entire cleanup scope
2349 // used by this special case for constant folded switches.
2350 if (S.getConditionVariable())
2351 EmitDecl(*S.getConditionVariable(), /*EvaluateConditionDecl=*/true);
2352
2353 // At this point, we are no longer "within" a switch instance, so
2354 // we can temporarily enforce this to ensure that any embedded case
2355 // statements are not emitted.
2356 SwitchInsn = nullptr;
2357
2358 // Okay, we can dead code eliminate everything except this case. Emit the
2359 // specified series of statements and we're good.
2360 for (const Stmt *CaseStmt : CaseStmts)
// NOTE(review): the loop body (expected: EmitStmt(CaseStmt);) and one
// following statement are missing here (extraction gap).
2363 PGO->markStmtMaybeUsed(S.getBody());
2364
2365 // Now we want to restore the saved switch instance so that nested
2366 // switches continue to function properly
2367 SwitchInsn = SavedSwitchInsn;
2368
2369 return;
2370 }
2371 }
2372
2373 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2374
2375 RunCleanupsScope ConditionScope(*this);
2376
2377 if (S.getInit())
2378 EmitStmt(S.getInit());
2379
2380 if (S.getConditionVariable())
// NOTE(review): the 'then' statement of the 'if' above (emitting the
// condition variable declaration) and a following line are missing here
// (extraction gap).
2382 llvm::Value *CondV = EmitScalarExpr(S.getCond());
2384
2385 // Create basic block to hold stuff that comes after switch
2386 // statement. We also need to create a default block now so that
2387 // explicit case ranges tests can have a place to jump to on
2388 // failure.
2389 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2390 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
2391 addInstToNewSourceAtom(SwitchInsn, CondV);
2392
2393 if (HLSLControlFlowAttr != HLSLControlFlowHintAttr::SpellingNotCalculated) {
2394 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2395 llvm::ConstantInt *BranchHintConstant =
// NOTE(review): the condition line of this conditional expression (comparing
// HLSLControlFlowAttr against the spelling below) is missing here
// (extraction gap).
2397 HLSLControlFlowHintAttr::Spelling::Microsoft_branch
2398 ? llvm::ConstantInt::get(CGM.Int32Ty, 1)
2399 : llvm::ConstantInt::get(CGM.Int32Ty, 2);
2400 llvm::Metadata *Vals[] = {MDHelper.createString("hlsl.controlflow.hint"),
2401 MDHelper.createConstant(BranchHintConstant)};
2402 SwitchInsn->setMetadata("hlsl.controlflow.hint",
2403 llvm::MDNode::get(CGM.getLLVMContext(), Vals));
2404 }
2405
2406 if (PGO->haveRegionCounts()) {
2407 // Walk the SwitchCase list to find how many there are.
2408 uint64_t DefaultCount = 0;
2409 unsigned NumCases = 0;
2410 for (const SwitchCase *Case = S.getSwitchCaseList();
2411 Case;
2412 Case = Case->getNextSwitchCase()) {
2413 if (isa<DefaultStmt>(Case))
2414 DefaultCount = getProfileCount(Case);
2415 NumCases += 1;
2416 }
2417 SwitchWeights = new SmallVector<uint64_t, 16>();
2418 SwitchWeights->reserve(NumCases);
2419 // The default needs to be first. We store the edge count, so we already
2420 // know the right weight.
2421 SwitchWeights->push_back(DefaultCount);
2422 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2423 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2424 // Initialize the default case.
2425 SwitchLikelihood->push_back(Stmt::LH_None);
2426 }
2427
2428 CaseRangeBlock = DefaultBlock;
2429
2430 // Clear the insertion point to indicate we are in unreachable code.
2431 Builder.ClearInsertionPoint();
2432
2433 // All break statements jump to NextBlock. If BreakContinueStack is non-empty
2434 // then reuse last ContinueBlock.
2435 JumpDest OuterContinue;
2436 if (!BreakContinueStack.empty())
2437 OuterContinue = BreakContinueStack.back().ContinueBlock;
2438
2439 BreakContinueStack.push_back(BreakContinue(S, SwitchExit, OuterContinue));
2440
2441 // Emit switch body.
2442 EmitStmt(S.getBody());
2443
2444 BreakContinueStack.pop_back();
2445
2446 // Update the default block in case explicit case range tests have
2447 // been chained on top.
2448 SwitchInsn->setDefaultDest(CaseRangeBlock);
2449
2450 // If a default was never emitted:
2451 if (!DefaultBlock->getParent()) {
2452 // If we have cleanups, emit the default block so that there's a
2453 // place to jump through the cleanups from.
2454 if (ConditionScope.requiresCleanups()) {
2455 EmitBlock(DefaultBlock);
2456
2457 // Otherwise, just forward the default block to the switch end.
2458 } else {
2459 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2460 delete DefaultBlock;
2461 }
2462 }
2463
2464 ConditionScope.ForceCleanup();
2465
2466 // Close the last case (or DefaultBlock).
2467 EmitBranch(SwitchExit.getBlock());
2468
2469 // Insert a False Counter if SwitchStmt doesn't have DefaultStmt.
2470 if (hasSkipCounter(S.getCond())) {
2471 auto *ImplicitDefaultBlock = createBasicBlock("sw.false");
2472 EmitBlock(ImplicitDefaultBlock);
// NOTE(review): a statement is missing here (extraction gap) — likely a
// skip-counter increment for the implicit default path.
2474 Builder.CreateBr(SwitchInsn->getDefaultDest());
2475 SwitchInsn->setDefaultDest(ImplicitDefaultBlock);
2476 }
2477
2478 // Emit continuation.
2479 EmitBlock(SwitchExit.getBlock(), true);
// NOTE(review): a statement is missing here (extraction gap) — likely a
// profile-counter increment for the switch exit.
2481
2482 // If the switch has a condition wrapped by __builtin_unpredictable,
2483 // create metadata that specifies that the switch is unpredictable.
2484 // Don't bother if not optimizing because that metadata would not be used.
2485 auto *Call = dyn_cast<CallExpr>(S.getCond());
2486 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2487 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2488 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2489 llvm::MDBuilder MDHelper(getLLVMContext());
2490 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2491 MDHelper.createUnpredictable());
2492 }
2493 }
2494
2495 if (SwitchWeights) {
2496 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2497 "switch weights do not match switch cases");
2498 // If there's only one jump destination there's no sense weighting it.
2499 if (SwitchWeights->size() > 1)
2500 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2501 createProfileWeights(*SwitchWeights));
2502 delete SwitchWeights;
2503 } else if (SwitchLikelihood) {
2504 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2505 "switch likelihoods do not match switch cases");
2506 std::optional<SmallVector<uint64_t, 16>> LHW =
2507 getLikelihoodWeights(*SwitchLikelihood);
2508 if (LHW) {
2509 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2510 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2511 createProfileWeights(*LHW));
2512 }
2513 delete SwitchLikelihood;
2514 }
// Restore the state saved at entry so nested switches keep working.
2515 SwitchInsn = SavedSwitchInsn;
2516 SwitchWeights = SavedSwitchWeights;
2517 SwitchLikelihood = SavedSwitchLikelihood;
2518 CaseRangeBlock = SavedCRBlock;
2519}
2520
// EmitAsmInputLValue — lower an lvalue inline-asm operand either to a loaded
// value (register-ish constraints) or to an indirect memory operand, in which
// case '*' is appended to ConstraintStr and the element type is returned.
2521std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2522 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2523 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2524 if (Info.allowsRegister() || !Info.allowsMemory()) {
// NOTE(review): a guard line is missing here (extraction gap) — the scalar
// load below presumably applies only when InputType has scalar evaluation
// kind; confirm against upstream.
2526 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2527
2528 llvm::Type *Ty = ConvertType(InputType);
2529 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2530 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2531 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
// Small power-of-two-sized (or target-scalarizable) aggregates are loaded
// through an integer of the same bit width so they can live in a register.
2532 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2533
2534 return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
2535 nullptr};
2536 }
2537 }
2538
// Fall back to passing the operand indirectly, by address.
2539 Address Addr = InputValue.getAddress();
2540 ConstraintStr += '*';
2541 return {InputValue.getPointer(*this), Addr.getElementType()};
2542}
// EmitAsmInput — lower an inline-asm input expression: immediate constants
// when the constraint demands one, scalar rvalues for register constraints,
// otherwise an lvalue/memory operand via EmitAsmInputLValue.
2543std::pair<llvm::Value *, llvm::Type *>
2544CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2545 const Expr *InputExpr,
2546 std::string &ConstraintStr) {
2547 // If this can't be a register or memory, i.e., has to be a constant
2548 // (immediate or symbolic), try to emit it as such.
2549 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2550 if (Info.requiresImmediateConstant()) {
2551 Expr::EvalResult EVResult;
2552 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2553
2554 llvm::APSInt IntResult;
2555 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2556 getContext()))
2557 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2558 }
2559
2560 Expr::EvalResult Result;
2561 if (InputExpr->EvaluateAsInt(Result, getContext()))
2562 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2563 nullptr};
2564 }
2565
2566 if (Info.allowsRegister() || !Info.allowsMemory())
// NOTE(review): a guard line is missing here (extraction gap) — the scalar
// emission below presumably applies only when the expression has scalar
// evaluation kind; confirm against upstream.
2568 return {EmitScalarExpr(InputExpr), nullptr};
2569 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2570 return {EmitScalarExpr(InputExpr), nullptr};
// Otherwise lower it as an lvalue (possibly indirect memory) operand.
2571 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2572 LValue Dest = EmitLValue(InputExpr);
2573 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2574 InputExpr->getExprLoc());
2575}
2576
2577/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2578/// asm call instruction. The !srcloc MDNode contains a list of constant
2579/// integers which are the source locations of the start of each line in the
2580/// asm.
2581static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2582 CodeGenFunction &CGF) {
// NOTE(review): the declaration of 'Locs' (the metadata vector populated
// below) is missing here (extraction gap).
2584 // Add the location of the first line to the MDNode.
2585 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2586 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2587 StringRef StrVal = Str->getString();
2588 if (!StrVal.empty()) {
// NOTE(review): the declaration of 'SM' (the SourceManager used by
// getLocationOfByte below) is missing here (extraction gap).
2590 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2591 unsigned StartToken = 0;
2592 unsigned ByteOffset = 0;
2593
2594 // Add the location of the start of each subsequent line of the asm to the
2595 // MDNode.
2596 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2597 if (StrVal[i] != '\n') continue;
2598 SourceLocation LineLoc = Str->getLocationOfByte(
2599 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2600 Locs.push_back(llvm::ConstantAsMetadata::get(
2601 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2602 }
2603 }
2604
2605 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2606}
2607
// UpdateAsmCallInst — decorate the just-emitted inline-asm call with function
// attributes (nounwind/nomerge/memory-effects/convergent), per-parameter
// elementtype attributes for indirect operands, and !srcloc metadata; then
// unpack the call's results into RegResults (one value, or extractvalues
// from the returned struct when there are multiple outputs).
2608static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2609 bool HasUnwindClobber, bool ReadOnly,
2610 bool ReadNone, bool NoMerge, bool NoConvergent,
2611 const AsmStmt &S,
2612 const std::vector<llvm::Type *> &ResultRegTypes,
2613 const std::vector<llvm::Type *> &ArgElemTypes,
2614 CodeGenFunction &CGF,
2615 std::vector<llvm::Value *> &RegResults) {
// An "unwind" clobber means the asm may unwind, so NoUnwind must be omitted.
2616 if (!HasUnwindClobber)
2617 Result.addFnAttr(llvm::Attribute::NoUnwind);
2619 if (NoMerge)
2620 Result.addFnAttr(llvm::Attribute::NoMerge);
2621 // Attach readnone and readonly attributes.
2622 if (!HasSideEffect) {
2623 if (ReadNone)
2624 Result.setDoesNotAccessMemory();
2625 else if (ReadOnly)
2626 Result.setOnlyReadsMemory();
2627 }
2628
2629 // Add elementtype attribute for indirect constraints.
2630 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2631 if (Pair.value()) {
2632 auto Attr = llvm::Attribute::get(
2633 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2634 Result.addParamAttr(Pair.index(), Attr);
2635 }
2636 }
2637
2638 // Slap the source location of the inline asm into a !srcloc metadata on the
2639 // call.
2640 const StringLiteral *SL;
2641 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S);
2642 gccAsmStmt &&
2643 (SL = dyn_cast<StringLiteral>(gccAsmStmt->getAsmStringExpr()))) {
2644 Result.setMetadata("srcloc", getAsmSrcLocInfo(SL, CGF));
2645 } else {
2646 // At least put the line number on MS inline asm blobs and GCC asm constexpr
2647 // strings.
2648 llvm::Constant *Loc =
2649 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2650 Result.setMetadata("srcloc",
2651 llvm::MDNode::get(CGF.getLLVMContext(),
2652 llvm::ConstantAsMetadata::get(Loc)));
2653 }
2654
2655 // Make inline-asm calls Key for the debug info feature Key Instructions.
2656 CGF.addInstToNewSourceAtom(&Result, nullptr);
2657
2658 if (!NoConvergent && CGF.getLangOpts().assumeFunctionsAreConvergent())
2659 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2660 // convergent (meaning, they may call an intrinsically convergent op, such
2661 // as bar.sync, and so can't have certain optimizations applied around
2662 // them) unless it's explicitly marked 'noconvergent'.
2663 Result.addFnAttr(llvm::Attribute::Convergent);
2664 // Extract all of the register value results from the asm.
2665 if (ResultRegTypes.size() == 1) {
// Single output: the call itself is the value.
2666 RegResults.push_back(&Result);
2667 } else {
// Multiple outputs: the call returns a struct; pull each field out.
2668 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2669 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2670 RegResults.push_back(Tmp);
2671 }
2672 }
2673}
2674
// EmitAsmStores — store the raw register results of an inline-asm call back
// into their destination lvalues, applying range assumptions declared by the
// constraint, and converting between the IR result type and the expression
// type (fp-trunc, int<->ptr, zext/trunc, vector bitcast) where they differ.
2675static void
// NOTE(review): the first line of this function's signature (its name and
// leading CodeGenFunction/AsmStmt parameters) is missing from this extract
// (extraction gap); the parameter list below belongs to it.
2677 const llvm::ArrayRef<llvm::Value *> RegResults,
2678 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2679 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2680 const llvm::ArrayRef<LValue> ResultRegDests,
2681 const llvm::ArrayRef<QualType> ResultRegQualTys,
2682 const llvm::BitVector &ResultTypeRequiresCast,
2683 const std::vector<std::optional<std::pair<unsigned, unsigned>>>
2684 &ResultBounds) {
2685 CGBuilderTy &Builder = CGF.Builder;
2686 CodeGenModule &CGM = CGF.CGM;
2687 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2688
2689 assert(RegResults.size() == ResultRegTypes.size());
2690 assert(RegResults.size() == ResultTruncRegTypes.size());
2691 assert(RegResults.size() == ResultRegDests.size());
2692 // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2693 // in which case its size may grow.
2694 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2695 assert(ResultBounds.size() <= ResultRegDests.size());
2696
2697 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2698 llvm::Value *Tmp = RegResults[i];
2699 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2700
// If the constraint declared value bounds for this output, emit an
// llvm.assume that the result lies below the upper bound.
2701 if ((i < ResultBounds.size()) && ResultBounds[i].has_value()) {
2702 const auto [LowerBound, UpperBound] = ResultBounds[i].value();
2703 // FIXME: Support for nonzero lower bounds not yet implemented.
2704 assert(LowerBound == 0 && "Output operand lower bound is not zero.");
2705 llvm::Constant *UpperBoundConst =
2706 llvm::ConstantInt::get(Tmp->getType(), UpperBound);
2707 llvm::Value *IsBooleanValue =
2708 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, UpperBoundConst);
2709 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2710 Builder.CreateCall(FnAssume, IsBooleanValue);
2711 }
2712
2713 // If the result type of the LLVM IR asm doesn't match the result type of
2714 // the expression, do the conversion.
2715 if (ResultRegTypes[i] != TruncTy) {
2716
2717 // Truncate the integer result to the right size, note that TruncTy can be
2718 // a pointer.
2719 if (TruncTy->isFloatingPointTy())
2720 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2721 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2722 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2723 Tmp = Builder.CreateTrunc(
2724 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2725 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2726 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2727 uint64_t TmpSize =
2728 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2729 Tmp = Builder.CreatePtrToInt(
2730 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2731 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2732 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2733 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2734 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2735 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2736 }
2737 }
2738
2739 ApplyAtomGroup Grp(CGF.getDebugInfo());
2740 LValue Dest = ResultRegDests[i];
2741 // ResultTypeRequiresCast elements correspond to the first
2742 // ResultTypeRequiresCast.size() elements of RegResults.
2743 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2744 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2745 Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
2746 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2747 llvm::StoreInst *S = Builder.CreateStore(Tmp, A);
2748 CGF.addInstToCurrentSourceAtom(S, S->getValueOperand());
2749 continue;
2750 }
2751
2752 QualType Ty =
2753 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2754 if (Ty.isNull()) {
// No integer type of that width exists: diagnose and bail out.
2755 const Expr *OutExpr = S.getOutputExpr(i);
2756 CGM.getDiags().Report(OutExpr->getExprLoc(),
2757 diag::err_store_value_to_reg);
2758 return;
2759 }
2760 Dest = CGF.MakeAddrLValue(A, Ty);
2761 }
2762 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2763 }
2764}
2765
// EmitHipStdParUnsupportedAsm — for HIPStdPar device compilation, replace an
// inline-asm statement whose constraints the target cannot validate with a
// call to a marker function carrying the asm text, deferring the error.
// NOTE(review): the declaration line of this helper ("static void
// EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,") is missing from this
// extract (extraction gap).
2767 const AsmStmt &S) {
2768 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2769
2770 std::string Asm;
2771 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2772 Asm = GCCAsm->getAsmString();
2773
2774 auto &Ctx = CGF->CGM.getLLVMContext();
2775
// Pass the original asm text as a constant-array argument so the failure is
// diagnosable later.
2776 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2777 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2778 {StrTy->getType()}, false);
2779 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2780
2781 CGF->Builder.CreateCall(UBF, {StrTy});
2782}
2783
2785 // Pop all cleanup blocks at the end of the asm statement.
2786 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2787
2788 // Assemble the final asm string.
2789 std::string AsmString = S.generateAsmString(getContext());
2790
2791 // Get all the output and input constraints together.
2792 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2793 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2794
2795 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2796 bool IsValidTargetAsm = true;
2797 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2798 StringRef Name;
2799 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2800 Name = GAS->getOutputName(i);
2802 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2803 if (IsHipStdPar && !IsValid)
2804 IsValidTargetAsm = false;
2805 else
2806 assert(IsValid && "Failed to parse output constraint");
2807 OutputConstraintInfos.push_back(Info);
2808 }
2809
2810 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2811 StringRef Name;
2812 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2813 Name = GAS->getInputName(i);
2815 bool IsValid =
2816 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2817 if (IsHipStdPar && !IsValid)
2818 IsValidTargetAsm = false;
2819 else
2820 assert(IsValid && "Failed to parse input constraint");
2821 InputConstraintInfos.push_back(Info);
2822 }
2823
2824 if (!IsValidTargetAsm)
2825 return EmitHipStdParUnsupportedAsm(this, S);
2826
2827 std::string Constraints;
2828
2829 std::vector<LValue> ResultRegDests;
2830 std::vector<QualType> ResultRegQualTys;
2831 std::vector<llvm::Type *> ResultRegTypes;
2832 std::vector<llvm::Type *> ResultTruncRegTypes;
2833 std::vector<llvm::Type *> ArgTypes;
2834 std::vector<llvm::Type *> ArgElemTypes;
2835 std::vector<llvm::Value*> Args;
2836 llvm::BitVector ResultTypeRequiresCast;
2837 std::vector<std::optional<std::pair<unsigned, unsigned>>> ResultBounds;
2838
2839 // Keep track of inout constraints.
2840 std::string InOutConstraints;
2841 std::vector<llvm::Value*> InOutArgs;
2842 std::vector<llvm::Type*> InOutArgTypes;
2843 std::vector<llvm::Type*> InOutArgElemTypes;
2844
2845 // Keep track of out constraints for tied input operand.
2846 std::vector<std::string> OutputConstraints;
2847
2848 // Keep track of defined physregs.
2849 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2850
2851 // An inline asm can be marked readonly if it meets the following conditions:
2852 // - it doesn't have any sideeffects
2853 // - it doesn't clobber memory
2854 // - it doesn't return a value by-reference
2855 // It can be marked readnone if it doesn't have any input memory constraints
2856 // in addition to meeting the conditions listed above.
2857 bool ReadOnly = true, ReadNone = true;
2858
2859 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2860 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2861
2862 // Simplify the output constraint.
2863 std::string OutputConstraint(S.getOutputConstraint(i));
2864 OutputConstraint = getTarget().simplifyConstraint(
2865 StringRef(OutputConstraint).substr(1), &OutputConstraintInfos);
2866
2867 const Expr *OutExpr = S.getOutputExpr(i);
2868 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2869
2870 std::string GCCReg;
2871 OutputConstraint = S.addVariableConstraints(
2872 OutputConstraint, *OutExpr, getTarget(), Info.earlyClobber(),
2873 [&](const Stmt *UnspStmt, StringRef Msg) {
2874 CGM.ErrorUnsupported(UnspStmt, Msg);
2875 },
2876 &GCCReg);
2877 // Give an error on multiple outputs to same physreg.
2878 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2879 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2880
2881 OutputConstraints.push_back(OutputConstraint);
2882 LValue Dest = EmitLValue(OutExpr);
2883 if (!Constraints.empty())
2884 Constraints += ',';
2885
2886 // If this is a register output, then make the inline asm return it
2887 // by-value. If this is a memory result, return the value by-reference.
2888 QualType QTy = OutExpr->getType();
2889 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2891 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2892
2893 Constraints += "=" + OutputConstraint;
2894 ResultRegQualTys.push_back(QTy);
2895 ResultRegDests.push_back(Dest);
2896
2897 ResultBounds.emplace_back(Info.getOutputOperandBounds());
2898
2899 llvm::Type *Ty = ConvertTypeForMem(QTy);
2900 const bool RequiresCast = Info.allowsRegister() &&
2902 Ty->isAggregateType());
2903
2904 ResultTruncRegTypes.push_back(Ty);
2905 ResultTypeRequiresCast.push_back(RequiresCast);
2906
2907 if (RequiresCast) {
2908 unsigned Size = getContext().getTypeSize(QTy);
2909 if (Size)
2910 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2911 else
2912 CGM.Error(OutExpr->getExprLoc(), "output size should not be zero");
2913 }
2914 ResultRegTypes.push_back(Ty);
2915 // If this output is tied to an input, and if the input is larger, then
2916 // we need to set the actual result type of the inline asm node to be the
2917 // same as the input type.
2918 if (Info.hasMatchingInput()) {
2919 unsigned InputNo;
2920 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2921 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2922 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2923 break;
2924 }
2925 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2926
2927 QualType InputTy = S.getInputExpr(InputNo)->getType();
2928 QualType OutputType = OutExpr->getType();
2929
2930 uint64_t InputSize = getContext().getTypeSize(InputTy);
2931 if (getContext().getTypeSize(OutputType) < InputSize) {
2932 // Form the asm to return the value as a larger integer or fp type.
2933 ResultRegTypes.back() = ConvertType(InputTy);
2934 }
2935 }
2936 if (llvm::Type* AdjTy =
2937 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2938 ResultRegTypes.back()))
2939 ResultRegTypes.back() = AdjTy;
2940 else {
2941 CGM.getDiags().Report(S.getAsmLoc(),
2942 diag::err_asm_invalid_type_in_input)
2943 << OutExpr->getType() << OutputConstraint;
2944 }
2945
2946 // Update largest vector width for any vector types.
2947 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2948 LargestVectorWidth =
2949 std::max((uint64_t)LargestVectorWidth,
2950 VT->getPrimitiveSizeInBits().getKnownMinValue());
2951 } else {
2952 Address DestAddr = Dest.getAddress();
2953 // Matrix types in memory are represented by arrays, but accessed through
2954 // vector pointers, with the alignment specified on the access operation.
2955 // For inline assembly, update pointer arguments to use vector pointers.
2956 // Otherwise there will be a mis-match if the matrix is also an
2957 // input-argument which is represented as vector.
2958 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2959 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2960
2961 ArgTypes.push_back(DestAddr.getType());
2962 ArgElemTypes.push_back(DestAddr.getElementType());
2963 Args.push_back(DestAddr.emitRawPointer(*this));
2964 Constraints += "=*";
2965 Constraints += OutputConstraint;
2966 ReadOnly = ReadNone = false;
2967 }
2968
2969 if (Info.isReadWrite()) {
2970 InOutConstraints += ',';
2971
2972 const Expr *InputExpr = S.getOutputExpr(i);
2973 llvm::Value *Arg;
2974 llvm::Type *ArgElemType;
2975 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2976 Info, Dest, InputExpr->getType(), InOutConstraints,
2977 InputExpr->getExprLoc());
2978
2979 if (llvm::Type* AdjTy =
2980 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2981 Arg->getType()))
2982 Arg = Builder.CreateBitCast(Arg, AdjTy);
2983
2984 // Update largest vector width for any vector types.
2985 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2986 LargestVectorWidth =
2987 std::max((uint64_t)LargestVectorWidth,
2988 VT->getPrimitiveSizeInBits().getKnownMinValue());
2989 // Only tie earlyclobber physregs.
2990 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2991 InOutConstraints += llvm::utostr(i);
2992 else
2993 InOutConstraints += OutputConstraint;
2994
2995 InOutArgTypes.push_back(Arg->getType());
2996 InOutArgElemTypes.push_back(ArgElemType);
2997 InOutArgs.push_back(Arg);
2998 }
2999 }
3000
3001 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
3002 // to the return value slot. Only do this when returning in registers.
3003 if (isa<MSAsmStmt>(&S)) {
3004 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
3005 if (RetAI.isDirect() || RetAI.isExtend()) {
3006 // Make a fake lvalue for the return value slot.
3008 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
3009 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
3010 ResultRegDests, AsmString, S.getNumOutputs());
3011 SawAsmBlock = true;
3012 }
3013 }
3014
3015 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
3016 const Expr *InputExpr = S.getInputExpr(i);
3017
3018 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
3019
3020 if (Info.allowsMemory())
3021 ReadNone = false;
3022
3023 if (!Constraints.empty())
3024 Constraints += ',';
3025
3026 // Simplify the input constraint.
3027 std::string InputConstraint(S.getInputConstraint(i));
3028 InputConstraint =
3029 getTarget().simplifyConstraint(InputConstraint, &OutputConstraintInfos);
3030
3031 InputConstraint = S.addVariableConstraints(
3032 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
3033 getTarget(), false /* No EarlyClobber */,
3034 [&](const Stmt *UnspStmt, std::string_view Msg) {
3035 CGM.ErrorUnsupported(UnspStmt, Msg);
3036 });
3037
3038 std::string ReplaceConstraint (InputConstraint);
3039 llvm::Value *Arg;
3040 llvm::Type *ArgElemType;
3041 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
3042
3043 // If this input argument is tied to a larger output result, extend the
3044 // input to be the same size as the output. The LLVM backend wants to see
3045 // the input and output of a matching constraint be the same size. Note
3046 // that GCC does not define what the top bits are here. We use zext because
3047 // that is usually cheaper, but LLVM IR should really get an anyext someday.
3048 if (Info.hasTiedOperand()) {
3049 unsigned Output = Info.getTiedOperand();
3050 QualType OutputType = S.getOutputExpr(Output)->getType();
3051 QualType InputTy = InputExpr->getType();
3052
3053 if (getContext().getTypeSize(OutputType) >
3054 getContext().getTypeSize(InputTy)) {
3055 // Use ptrtoint as appropriate so that we can do our extension.
3056 if (isa<llvm::PointerType>(Arg->getType()))
3057 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
3058 llvm::Type *OutputTy = ConvertType(OutputType);
3059 if (isa<llvm::IntegerType>(OutputTy))
3060 Arg = Builder.CreateZExt(Arg, OutputTy);
3061 else if (isa<llvm::PointerType>(OutputTy))
3062 Arg = Builder.CreateZExt(Arg, IntPtrTy);
3063 else if (OutputTy->isFloatingPointTy())
3064 Arg = Builder.CreateFPExt(Arg, OutputTy);
3065 }
3066 // Deal with the tied operands' constraint code in adjustInlineAsmType.
3067 ReplaceConstraint = OutputConstraints[Output];
3068 }
3069 if (llvm::Type* AdjTy =
3070 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
3071 Arg->getType()))
3072 Arg = Builder.CreateBitCast(Arg, AdjTy);
3073 else
3074 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
3075 << InputExpr->getType() << InputConstraint;
3076
3077 // Update largest vector width for any vector types.
3078 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
3079 LargestVectorWidth =
3080 std::max((uint64_t)LargestVectorWidth,
3081 VT->getPrimitiveSizeInBits().getKnownMinValue());
3082
3083 ArgTypes.push_back(Arg->getType());
3084 ArgElemTypes.push_back(ArgElemType);
3085 Args.push_back(Arg);
3086 Constraints += InputConstraint;
3087 }
3088
3089 // Append the "input" part of inout constraints.
3090 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
3091 ArgTypes.push_back(InOutArgTypes[i]);
3092 ArgElemTypes.push_back(InOutArgElemTypes[i]);
3093 Args.push_back(InOutArgs[i]);
3094 }
3095 Constraints += InOutConstraints;
3096
3097 // Labels
3099 llvm::BasicBlock *Fallthrough = nullptr;
3100 bool IsGCCAsmGoto = false;
3101 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
3102 IsGCCAsmGoto = GS->isAsmGoto();
3103 if (IsGCCAsmGoto) {
3104 for (const auto *E : GS->labels()) {
3105 JumpDest Dest = getJumpDestForLabel(E->getLabel());
3106 Transfer.push_back(Dest.getBlock());
3107 if (!Constraints.empty())
3108 Constraints += ',';
3109 Constraints += "!i";
3110 }
3111 Fallthrough = createBasicBlock("asm.fallthrough");
3112 }
3113 }
3114
3115 bool HasUnwindClobber = false;
3116
3117 // Clobbers
3118 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
3119 std::string Clobber = S.getClobber(i);
3120
3121 if (Clobber == "memory")
3122 ReadOnly = ReadNone = false;
3123 else if (Clobber == "unwind") {
3124 HasUnwindClobber = true;
3125 continue;
3126 } else if (Clobber != "cc") {
3127 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
3128 if (CGM.getCodeGenOpts().StackClashProtector &&
3129 getTarget().isSPRegName(Clobber)) {
3130 CGM.getDiags().Report(S.getAsmLoc(),
3131 diag::warn_stack_clash_protection_inline_asm);
3132 }
3133 }
3134
3135 if (isa<MSAsmStmt>(&S)) {
3136 if (Clobber == "eax" || Clobber == "edx") {
3137 if (Constraints.find("=&A") != std::string::npos)
3138 continue;
3139 std::string::size_type position1 =
3140 Constraints.find("={" + Clobber + "}");
3141 if (position1 != std::string::npos) {
3142 Constraints.insert(position1 + 1, "&");
3143 continue;
3144 }
3145 std::string::size_type position2 = Constraints.find("=A");
3146 if (position2 != std::string::npos) {
3147 Constraints.insert(position2 + 1, "&");
3148 continue;
3149 }
3150 }
3151 }
3152 if (!Constraints.empty())
3153 Constraints += ',';
3154
3155 Constraints += "~{";
3156 Constraints += Clobber;
3157 Constraints += '}';
3158 }
3159
3160 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
3161 "unwind clobber can't be used with asm goto");
3162
3163 // Add machine specific clobbers
3164 std::string_view MachineClobbers = getTarget().getClobbers();
3165 if (!MachineClobbers.empty()) {
3166 if (!Constraints.empty())
3167 Constraints += ',';
3168 Constraints += MachineClobbers;
3169 }
3170
3171 llvm::Type *ResultType;
3172 if (ResultRegTypes.empty())
3173 ResultType = VoidTy;
3174 else if (ResultRegTypes.size() == 1)
3175 ResultType = ResultRegTypes[0];
3176 else
3177 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
3178
3179 llvm::FunctionType *FTy =
3180 llvm::FunctionType::get(ResultType, ArgTypes, false);
3181
3182 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3183
3184 llvm::InlineAsm::AsmDialect GnuAsmDialect =
3185 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
3186 ? llvm::InlineAsm::AD_ATT
3187 : llvm::InlineAsm::AD_Intel;
3188 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
3189 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3190
3191 llvm::InlineAsm *IA = llvm::InlineAsm::get(
3192 FTy, AsmString, Constraints, HasSideEffect,
3193 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
3194 std::vector<llvm::Value*> RegResults;
3195 llvm::CallBrInst *CBR;
3196 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3197 CBRRegResults;
3198 if (IsGCCAsmGoto) {
3199 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
3200 EmitBlock(Fallthrough);
3201 UpdateAsmCallInst(*CBR, HasSideEffect, /*HasUnwindClobber=*/false, ReadOnly,
3202 ReadNone, InNoMergeAttributedStmt,
3203 InNoConvergentAttributedStmt, S, ResultRegTypes,
3204 ArgElemTypes, *this, RegResults);
3205 // Because we are emitting code top to bottom, we don't have enough
3206 // information at this point to know precisely whether we have a critical
3207 // edge. If we have outputs, split all indirect destinations.
3208 if (!RegResults.empty()) {
3209 unsigned i = 0;
3210 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3211 llvm::Twine SynthName = Dest->getName() + ".split";
3212 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
3213 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3214 Builder.SetInsertPoint(SynthBB);
3215
3216 if (ResultRegTypes.size() == 1) {
3217 CBRRegResults[SynthBB].push_back(CBR);
3218 } else {
3219 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3220 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
3221 CBRRegResults[SynthBB].push_back(Tmp);
3222 }
3223 }
3224
3225 EmitBranch(Dest);
3226 EmitBlock(SynthBB);
3227 CBR->setIndirectDest(i++, SynthBB);
3228 }
3229 }
3230 } else if (HasUnwindClobber) {
3231 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
3232 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/true,
3233 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3234 InNoConvergentAttributedStmt, S, ResultRegTypes,
3235 ArgElemTypes, *this, RegResults);
3236 } else {
3237 llvm::CallInst *Result =
3238 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
3239 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/false,
3240 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3241 InNoConvergentAttributedStmt, S, ResultRegTypes,
3242 ArgElemTypes, *this, RegResults);
3243 }
3244
3245 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3246 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3247 ResultBounds);
3248
3249 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
3250 // different insertion point; one for each indirect destination and with
3251 // CBRRegResults rather than RegResults.
3252 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3253 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3254 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3255 Builder.SetInsertPoint(Succ, --(Succ->end()));
3256 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
3257 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3258 ResultTypeRequiresCast, ResultBounds);
3259 }
3260 }
3261}
3262
// Build the "captured struct" temporary for a CapturedStmt: allocate a
// temporary of the captured record type, then initialize one field per
// capture. A field that captures a VLA type gets its size expression
// recorded via EmitLambdaVLACapture; every other field is initialized from
// the corresponding capture-init expression. Returns an LValue for the
// filled temporary.
// NOTE(review): this rendering is missing a few source lines (the function
// header, the declaration of RecordTy, and the first line of the
// capture-init iterator for-header); the visible code is kept unchanged.
const RecordDecl *RD = S.getCapturedRecordDecl();

// Initialize the captured struct.
LValue SlotLV =
    MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);

// Walk the record's fields in lockstep with the capture initializers.
RecordDecl::field_iterator CurField = RD->field_begin();
    E = S.capture_init_end();
    I != E; ++I, ++CurField) {
  LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
  if (CurField->hasCapturedVLAType()) {
    // Captured VLA type: capture the array's size expression, not a value.
    EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
  } else {
    EmitInitializerForField(*CurField, LV, *I);
  }
}

return SlotLV;
}
3285
/// Generate an outlined function for the body of a CapturedStmt, store any
/// captured variables into the captured struct, and call the outlined function.
/// Returns the outlined helper function.
/// NOTE(review): the function header line is missing from this rendering;
/// the visible body is kept unchanged.
llvm::Function *
LValue CapStruct = InitCapturedStruct(S);

// Emit the CapturedDecl. A fresh CodeGenFunction is used so the outlined
// helper gets its own per-function emission state.
CodeGenFunction CGF(CGM, true);
CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
delete CGF.CapturedStmtInfo;

// Emit call to the helper function, passing the captured struct by pointer.
EmitCallOrInvoke(F, CapStruct.getPointer(*this));

return F;
}
3303
// Materialize the captured struct for a CapturedStmt and return its address,
// for callers that need only the outlined function's argument rather than
// the outlined call itself.
// NOTE(review): the function header line is missing from this rendering;
// the visible body is kept unchanged.
LValue CapStruct = InitCapturedStruct(S);
return CapStruct.getAddress();
}
3308
/// Creates the outlined function for a CapturedStmt.
/// The helper takes the CapturedDecl's parameters (including the context
/// parameter that points at the captured struct), returns void, and is
/// given internal linkage. Before emitting the body, captured VLA size
/// expressions and a captured 'this' (if any) are re-established from the
/// captured struct.
/// NOTE(review): several source lines are missing from this rendering (the
/// function header, the declaration of the 'Base' LValue, the field-load
/// feeding ExprArg, and the closing FinishFunction call); the visible code
/// is kept unchanged.
llvm::Function *
  assert(CapturedStmtInfo &&
         "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  const RecordDecl *RD = S.getCapturedRecordDecl();
  SourceLocation Loc = S.getBeginLoc();
  assert(CD->hasBody() && "missing CapturedDecl body");

  // Build the argument list.
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList Args;
  Args.append(CD->param_begin(), CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  // The helper is internal: it is only called from the enclosing function.
  llvm::Function *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             CapturedStmtInfo->getHelperName(), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
    F->addFnAttr("sample-profile-suffix-elision-policy", "selected");
  if (CD->isNothrow())
    F->addFnAttr(llvm::Attribute::NoUnwind);

  // Generate the function.
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
                CD->getBody()->getBeginLoc());
  // Set the context parameter in CapturedStmtInfo.
  Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));

  // Initialize variable-length arrays: each captured VLA field holds the
  // evaluated size expression, which is loaded back into VLASizeMap.
      CapturedStmtInfo->getContextValue(), Ctx.getCanonicalTagType(RD));
  for (auto *FD : RD->fields()) {
    if (FD->hasCapturedVLAType()) {
      auto *ExprArg =
              .getScalarVal();
      auto VAT = FD->getCapturedVLAType();
      VLASizeMap[VAT->getSizeExpr()] = ExprArg;
    }
  }

  // If 'this' is captured, load it into CXXThisValue.
  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
    FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
    LValue ThisLValue = EmitLValueForField(Base, FD);
    CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
  }

  PGO->assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());

  return F;
}
3371
3372// Returns the first convergence entry/loop/anchor instruction found in |BB|.
3373// std::nullptr otherwise.
3374static llvm::ConvergenceControlInst *getConvergenceToken(llvm::BasicBlock *BB) {
3375 for (auto &I : *BB) {
3376 if (auto *CI = dyn_cast<llvm::ConvergenceControlInst>(&I))
3377 return CI;
3378 }
3379 return nullptr;
3380}
3381
3382llvm::CallBase *
3383CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input) {
3384 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3385 assert(ParentToken);
3386
3387 llvm::Value *bundleArgs[] = {ParentToken};
3388 llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
3389 auto *Output = llvm::CallBase::addOperandBundle(
3390 Input, llvm::LLVMContext::OB_convergencectrl, OB, Input->getIterator());
3391 Input->replaceAllUsesWith(Output);
3392 Input->eraseFromParent();
3393 return Output;
3394}
3395
3396llvm::ConvergenceControlInst *
3397CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB) {
3398 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3399 assert(ParentToken);
3400 return llvm::ConvergenceControlInst::CreateLoop(*BB, ParentToken);
3401}
3402
3403llvm::ConvergenceControlInst *
3404CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3405 llvm::BasicBlock *BB = &F->getEntryBlock();
3406 llvm::ConvergenceControlInst *Token = getConvergenceToken(BB);
3407 if (Token)
3408 return Token;
3409
3410 // Adding a convergence token requires the function to be marked as
3411 // convergent.
3412 F->setConvergent();
3413 return llvm::ConvergenceControlInst::CreateEntry(*BB);
3414}
#define V(N, I)
Defines enum values for all the target-independent builtin functions.
static bool FindCaseStatementsForValue(const SwitchStmt &S, const llvm::APSInt &ConstantCondValue, SmallVectorImpl< const Stmt * > &ResultStmts, ASTContext &C, const SwitchCase *&ResultCase)
FindCaseStatementsForValue - Find the case statement being jumped to and then invoke CollectStatement...
Definition CGStmt.cpp:2217
static llvm::ConvergenceControlInst * getConvergenceToken(llvm::BasicBlock *BB)
Definition CGStmt.cpp:3374
static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF, const AsmStmt &S)
Definition CGStmt.cpp:2766
static std::optional< SmallVector< uint64_t, 16 > > getLikelihoodWeights(ArrayRef< Stmt::Likelihood > Likelihoods)
Definition CGStmt.cpp:2271
static llvm::MDNode * getAsmSrcLocInfo(const StringLiteral *Str, CodeGenFunction &CGF)
getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline asm call instruction.
Definition CGStmt.cpp:2581
static bool isSwiftAsyncCallee(const CallExpr *CE)
Determine if the given call uses the swiftasync calling convention.
Definition CGStmt.cpp:1541
static CSFC_Result CollectStatementsForCase(const Stmt *S, const SwitchCase *Case, bool &FoundCase, SmallVectorImpl< const Stmt * > &ResultStmts)
Definition CGStmt.cpp:2062
static void EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S, const llvm::ArrayRef< llvm::Value * > RegResults, const llvm::ArrayRef< llvm::Type * > ResultRegTypes, const llvm::ArrayRef< llvm::Type * > ResultTruncRegTypes, const llvm::ArrayRef< LValue > ResultRegDests, const llvm::ArrayRef< QualType > ResultRegQualTys, const llvm::BitVector &ResultTypeRequiresCast, const std::vector< std::optional< std::pair< unsigned, unsigned > > > &ResultBounds)
Definition CGStmt.cpp:2676
static bool hasEmptyLoopBody(const LoopStmt &S)
Definition CGStmt.cpp:1049
CSFC_Result
CollectStatementsForCase - Given the body of a 'switch' statement and a constant value that is being ...
Definition CGStmt.cpp:2061
@ CSFC_Failure
Definition CGStmt.cpp:2061
@ CSFC_Success
Definition CGStmt.cpp:2061
@ CSFC_FallThrough
Definition CGStmt.cpp:2061
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect, bool HasUnwindClobber, bool ReadOnly, bool ReadNone, bool NoMerge, bool NoConvergent, const AsmStmt &S, const std::vector< llvm::Type * > &ResultRegTypes, const std::vector< llvm::Type * > &ArgElemTypes, CodeGenFunction &CGF, std::vector< llvm::Value * > &RegResults)
Definition CGStmt.cpp:2608
#define SM(sm)
Defines the PrettyStackTraceEntry class, which is used to make crashes give more contextual informati...
Defines the SourceManager interface.
This file defines SYCL AST classes used to represent calls to SYCL kernels.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition APValue.h:122
bool toIntegralConstant(APSInt &Result, QualType SrcTy, const ASTContext &Ctx) const
Try to convert this value to an integral constant.
Definition APValue.cpp:995
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
SourceManager & getSourceManager()
Definition ASTContext.h:859
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth,...
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CanQualType VoidTy
CanQualType getCanonicalTagType(const TagDecl *TD) const
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition Stmt.h:3283
std::string getInputConstraint(unsigned i) const
getInputConstraint - Return the specified input constraint.
Definition Stmt.cpp:515
bool isVolatile() const
Definition Stmt.h:3319
std::string getOutputConstraint(unsigned i) const
getOutputConstraint - Return the constraint string for the specified output operand.
Definition Stmt.cpp:499
SourceLocation getAsmLoc() const
Definition Stmt.h:3313
const Expr * getInputExpr(unsigned i) const
Definition Stmt.cpp:523
std::string addVariableConstraints(StringRef Constraint, const Expr &AsmExpr, const TargetInfo &Target, bool EarlyClobber, UnsupportedConstraintCallbackTy UnsupportedCB, std::string *GCCReg=nullptr) const
Look at AsmExpr and if it is a variable declared as using a particular register add that as a constra...
Definition Stmt.cpp:459
unsigned getNumClobbers() const
Definition Stmt.h:3374
const Expr * getOutputExpr(unsigned i) const
Definition Stmt.cpp:507
unsigned getNumOutputs() const
Definition Stmt.h:3342
std::string generateAsmString(const ASTContext &C) const
Assemble final IR asm string.
Definition Stmt.cpp:491
unsigned getNumInputs() const
Definition Stmt.h:3364
std::string getClobber(unsigned i) const
Definition Stmt.cpp:531
Attr - This represents one attribute.
Definition Attr.h:46
Represents an attribute applied to a statement.
Definition Stmt.h:2209
Stmt * getSubStmt()
Definition Stmt.h:2245
ArrayRef< const Attr * > getAttrs() const
Definition Stmt.h:2241
BreakStmt - This represents a break.
Definition Stmt.h:3141
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition StmtCXX.h:135
DeclStmt * getBeginStmt()
Definition StmtCXX.h:163
DeclStmt * getLoopVarStmt()
Definition StmtCXX.h:169
DeclStmt * getEndStmt()
Definition StmtCXX.h:166
DeclStmt * getRangeStmt()
Definition StmtCXX.h:162
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
Expr * getCallee()
Definition Expr.h:3093
Represents the body of a CapturedStmt, and serves as its DeclContext.
Definition Decl.h:4961
ImplicitParamDecl * getContextParam() const
Retrieve the parameter containing captured variables.
Definition Decl.h:5019
bool isNothrow() const
Definition Decl.cpp:5703
param_iterator param_end() const
Retrieve an iterator one past the last parameter decl.
Definition Decl.h:5036
param_iterator param_begin() const
Retrieve an iterator pointing to the first parameter decl.
Definition Decl.h:5034
Stmt * getBody() const override
getBody - If this Decl represents a declaration for a body of code, such as a function or method defi...
Definition Decl.cpp:5700
This captures a statement into a function.
Definition Stmt.h:3943
CapturedDecl * getCapturedDecl()
Retrieve the outlined function declaration.
Definition Stmt.cpp:1493
const RecordDecl * getCapturedRecordDecl() const
Retrieve the record declaration for captured variables.
Definition Stmt.h:4064
capture_init_iterator capture_init_begin()
Retrieve the first initialization argument.
Definition Stmt.h:4120
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.h:4138
capture_init_iterator capture_init_end()
Retrieve the iterator pointing one past the last initialization argument.
Definition Stmt.h:4130
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition Stmt.h:4107
CapturedRegionKind getCapturedRegionKind() const
Retrieve the captured region kind.
Definition Stmt.cpp:1508
CaseStmt - Represent a case statement.
Definition Stmt.h:1926
Stmt * getSubStmt()
Definition Stmt.h:2039
Expr * getLHS()
Definition Stmt.h:2009
Expr * getRHS()
Definition Stmt.h:2021
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
static Address invalid()
Definition Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
An aggregate value slot.
Definition CGValue.h:551
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition CGValue.h:634
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition CGBuilder.h:146
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:118
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition CGDebugInfo.h:59
CGFunctionInfo - Class to encapsulate the information about a function definition.
API for captured statement code generation.
RAII for correct setting/restoring of CapturedStmtInfo.
void rescopeLabels()
Change the cleanup scope of the labels in this lexical scope to match the scope of the enclosing cont...
Definition CGStmt.cpp:745
void ForceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
void ForceCleanup(std::initializer_list< llvm::Value ** > ValuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
bool requiresCleanups() const
Determine whether this scope requires any cleanups.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitOMPParallelMaskedTaskLoopDirective(const OMPParallelMaskedTaskLoopDirective &S)
void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S)
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S)
void EmitCXXTryStmt(const CXXTryStmt &S)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S)
Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
void EmitCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1409
void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S)
void EmitOMPScanDirective(const OMPScanDirective &S)
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
void EmitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &S)
LValue InitCapturedStruct(const CapturedStmt &S)
Definition CGStmt.cpp:3263
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
CGCapturedStmtInfo * CapturedStmtInfo
void EmitOMPDistributeDirective(const OMPDistributeDirective &S)
void EmitOMPParallelForDirective(const OMPParallelForDirective &S)
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
Definition CGCall.cpp:5213
void EmitOMPMasterDirective(const OMPMasterDirective &S)
void EmitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &S)
void EmitOpenACCInitConstruct(const OpenACCInitConstruct &S)
void EmitOMPFlushDirective(const OMPFlushDirective &S)
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S)
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
Definition CGStmt.cpp:697
void EmitCoreturnStmt(const CoreturnStmt &S)
void EmitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &S)
SmallVector< llvm::ConvergenceControlInst *, 4 > ConvergenceTokenStack
Stack to track the controlled convergence tokens.
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
Definition CGObjC.cpp:2156
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S)
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:4001
bool EmitSimpleStmt(const Stmt *S, ArrayRef< const Attr * > Attrs)
EmitSimpleStmt - Try to emit a "simple" statement which does not necessarily require an insertion poi...
Definition CGStmt.cpp:510
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S)
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
const LangOptions & getLangOpts() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
Definition CGExpr.cpp:692
void EmitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &S)
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void EmitOpenACCWaitConstruct(const OpenACCWaitConstruct &S)
void EmitBlockAfterUses(llvm::BasicBlock *BB)
EmitBlockAfterUses - Emit the given block somewhere hopefully near its uses, and leave the insertion ...
Definition CGStmt.cpp:680
void SimplifyForwardingBlocks(llvm::BasicBlock *BB)
SimplifyForwardingBlocks - If the given basic block is only a branch to another basic block,...
Definition CGStmt.cpp:621
void EmitOMPSplitDirective(const OMPSplitDirective &S)
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
void EmitOMPScopeDirective(const OMPScopeDirective &S)
LValue MakeAddrLValueWithoutTBAA(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
bool hasSkipCounter(const Stmt *S) const
JumpDest ReturnBlock
ReturnBlock - Unified return block.
void EmitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &S)
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field, bool IsInBounds=true)
Definition CGExpr.cpp:5723
const TargetInfo & getTarget() const
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
Definition CGStmt.cpp:572
void EmitGotoStmt(const GotoStmt &S)
Definition CGStmt.cpp:833
void EmitOMPDepobjDirective(const OMPDepobjDirective &S)
void EmitOMPMetaDirective(const OMPMetaDirective &S)
void EmitOMPCriticalDirective(const OMPCriticalDirective &S)
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:251
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S)
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2497
void EmitOMPCancelDirective(const OMPCancelDirective &S)
const Expr * RetExpr
If a return statement is being visited, this holds the return statment's result expression.
void EmitOMPBarrierDirective(const OMPBarrierDirective &S)
void EmitOMPOrderedDirective(const OMPOrderedDirective &S)
void EmitForStmt(const ForStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1264
void EmitOMPSectionsDirective(const OMPSectionsDirective &S)
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S)
Definition CGObjC.cpp:3715
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitOMPInteropDirective(const OMPInteropDirective &S)
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S)
void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S)
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:232
void EmitWhileStmt(const WhileStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1062
void EmitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &S)
void EmitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &S)
Emit combined directive 'target parallel loop' as if its constituent constructs are 'target',...
void EmitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &S)
void ResolveBranchFixups(llvm::BasicBlock *Target)
void EmitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &S)
void EmitOMPMaskedDirective(const OMPMaskedDirective &S)
bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody)
Returns true if a loop must make progress, which means the mustprogress attribute can be added.
Definition CGStmt.cpp:999
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S)
void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S)
void EmitOMPReverseDirective(const OMPReverseDirective &S)
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S)
void EmitOpenACCDataConstruct(const OpenACCDataConstruct &S)
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
Definition CGExpr.cpp:5897
void EmitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &S)
void EmitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &S)
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
Definition CGObjC.cpp:2152
const TargetCodeGenInfo & getTargetHooks() const
void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S)
void EmitSEHLeaveStmt(const SEHLeaveStmt &S)
void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S)
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
Definition CGCall.cpp:5141
void EmitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &S)
void EmitCoroutineBody(const CoroutineBodyStmt &S)
void EmitOMPParallelDirective(const OMPParallelDirective &S)
void EmitOMPTaskDirective(const OMPTaskDirective &S)
void EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S)
void EmitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &S)
void EmitOMPAssumeDirective(const OMPAssumeDirective &S)
void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S)
void EmitStopPoint(const Stmt *S)
EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
Definition CGStmt.cpp:48
void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S)
void EmitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &S)
void EmitIfStmt(const IfStmt &S)
Definition CGStmt.cpp:869
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
void EmitDeferStmt(const DeferStmt &S)
Definition CGStmt.cpp:2034
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2738
void EmitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &S)
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S)
Definition CGObjC.cpp:2160
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition CGStmt.cpp:560
void EmitOpenACCCacheConstruct(const OpenACCCacheConstruct &S)
void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S)
void EmitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &S)
void EmitOMPFuseDirective(const OMPFuseDirective &S)
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init)
Definition CGClass.cpp:652
void EmitAsmStmt(const AsmStmt &S)
Definition CGStmt.cpp:2784
void EmitDefaultStmt(const DefaultStmt &S, ArrayRef< const Attr * > Attrs)
Definition CGStmt.cpp:1935
void EmitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &S)
void EmitSwitchStmt(const SwitchStmt &S)
Definition CGStmt.cpp:2326
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
Definition CGExpr.cpp:302
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:273
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs={})
EmitStmt - Emit the code for the statement.
Definition CGStmt.cpp:58
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S)
uint64_t getCurrentProfileCount()
Get the profiler's current count.
llvm::Type * ConvertTypeForMem(QualType T)
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S)
void EmitSYCLKernelCallStmt(const SYCLKernelCallStmt &S)
void EmitOMPTargetDirective(const OMPTargetDirective &S)
void EmitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &S)
llvm::Function * EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K)
Generate an outlined function for the body of a CapturedStmt, store any captured variables into the c...
Definition CGStmt.cpp:3289
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void EmitOMPTeamsDirective(const OMPTeamsDirective &S)
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D)
Emit simple code for OpenMP directives in Simd-only mode.
HLSLControlFlowHintAttr::Spelling HLSLControlFlowAttr
HLSL Branch attribute.
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitCaseStmt(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
Definition CGStmt.cpp:1820
void EmitOMPErrorDirective(const OMPErrorDirective &S)
void EmitBreakStmt(const BreakStmt &S)
Definition CGStmt.cpp:1706
void EmitOMPParallelMaskedTaskLoopSimdDirective(const OMPParallelMaskedTaskLoopSimdDirective &S)
void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S)
void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S)
void EmitDoStmt(const DoStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1179
void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S)
Address GenerateCapturedStmtArgument(const CapturedStmt &S)
Definition CGStmt.cpp:3304
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition CGStmt.cpp:663
void EmitOMPSimdDirective(const OMPSimdDirective &S)
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition CGExpr.cpp:189
void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S)
void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S)
RawAddress NormalCleanupDest
i32s containing the indexes of the cleanup destinations.
void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
const BreakContinue * GetDestForLoopControlStmt(const LoopControlStmt &S)
Definition CGStmt.cpp:1692
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
llvm::BasicBlock * GetIndirectGotoBlock()
void EmitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &S)
void EmitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &S)
void EmitOMPUnrollDirective(const OMPUnrollDirective &S)
void EmitOMPStripeDirective(const OMPStripeDirective &S)
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
static bool hasAggregateEvaluationKind(QualType T)
void EmitCaseStmtRange(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
EmitCaseStmtRange - If case statement range is not too big then add multiple cases to switch instruct...
Definition CGStmt.cpp:1735
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitOMPSingleDirective(const OMPSingleDirective &S)
void EmitReturnStmt(const ReturnStmt &S)
EmitReturnStmt - Note that due to GCC extensions, this can have an operand if the function returns vo...
Definition CGStmt.cpp:1567
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
llvm::Function * GenerateCapturedStmtFunction(const CapturedStmt &S)
Creates the outlined function for a CapturedStmt.
Definition CGStmt.cpp:3311
const CGFunctionInfo * CurFnInfo
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitDeclStmt(const DeclStmt &S)
Definition CGStmt.cpp:1682
void EmitLabelStmt(const LabelStmt &S)
Definition CGStmt.cpp:766
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void EmitOMPTileDirective(const OMPTileDirective &S)
void EmitDecl(const Decl &D, bool EvaluateConditionDecl=false)
EmitDecl - Emit a declaration.
Definition CGDecl.cpp:52
void EmitOMPAtomicDirective(const OMPAtomicDirective &S)
void EmitOpenACCSetConstruct(const OpenACCSetConstruct &S)
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1707
void EmitAttributedStmt(const AttributedStmt &S)
Definition CGStmt.cpp:776
void EmitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &S)
void EmitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &S)
void EmitOMPSectionDirective(const OMPSectionDirective &S)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
void EmitOMPForSimdDirective(const OMPForSimdDirective &S)
llvm::LLVMContext & getLLVMContext()
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S)
Definition CGObjC.cpp:1831
void EmitIndirectGotoStmt(const IndirectGotoStmt &S)
Definition CGStmt.cpp:845
void MaybeEmitDeferredVarDeclInit(const VarDecl *var)
Definition CGDecl.cpp:2089
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const
isObviouslyBranchWithoutCleanups - Return true if a branch to the specified destination obviously has...
void EmitSEHTryStmt(const SEHTryStmt &S)
void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S)
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
When instrumenting to collect profile data, the counts for some blocks such as switch cases need to n...
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitOMPForDirective(const OMPForDirective &S)
void EmitLabel(const LabelDecl *D)
EmitLabel - Emit the block for the given label.
Definition CGStmt.cpp:708
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:643
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
void EmitContinueStmt(const ContinueStmt &S)
Definition CGStmt.cpp:1719
This class organizes the cross-function state that is used while generating LLVM code.
llvm::Module & getModule() const
DiagnosticsEngine & getDiags() const
const LangOptions & getLangOpts() const
const llvm::DataLayout & getDataLayout() const
ASTContext & getContext() const
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
A saved depth on the scope stack.
bool encloses(stable_iterator I) const
Returns true if this scope encloses I.
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
LValue - This represents an lvalue references.
Definition CGValue.h:183
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition CGValue.h:373
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:99
bool isAggregate() const
Definition CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:84
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:72
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition CGValue.h:79
virtual bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, llvm::Type *Ty) const
Target hook to decide whether an inline asm operand can be passed by value.
Definition TargetInfo.h:204
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1746
Stmt *const * const_body_iterator
Definition Stmt.h:1818
body_iterator body_end()
Definition Stmt.h:1811
SourceLocation getLBracLoc() const
Definition Stmt.h:1863
body_iterator body_begin()
Definition Stmt.h:1810
Stmt * body_back()
Definition Stmt.h:1814
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition Expr.h:1085
ContinueStmt - This represents a continue.
Definition Stmt.h:3125
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition Stmt.h:1637
decl_range decls()
Definition Stmt.h:1685
SourceLocation getBodyRBrace() const
getBodyRBrace - Gets the right brace of the body, if a body exists.
virtual bool hasBody() const
Returns true if this Decl represents a declaration for a body of code, such as a function or method d...
Definition DeclBase.h:1106
SourceLocation getLocation() const
Definition DeclBase.h:447
Stmt * getSubStmt()
Definition Stmt.h:2087
DeferStmt - This represents a deferred statement.
Definition Stmt.h:3242
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
DoStmt - This represents a 'do/while' stmt.
Definition Stmt.h:2838
Stmt * getBody()
Definition Stmt.h:2863
Expr * getCond()
Definition Stmt.h:2856
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3117
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3688
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3175
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition Stmt.h:2894
Stmt * getInit()
Definition Stmt.h:2909
VarDecl * getConditionVariable() const
Retrieve the variable declared in this "for" statement, if any.
Definition Stmt.cpp:1120
Stmt * getBody()
Definition Stmt.h:2938
Expr * getInc()
Definition Stmt.h:2937
Expr * getCond()
Definition Stmt.h:2936
const Expr * getSubExpr() const
Definition Expr.h:1065
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition TypeBase.h:4553
CallingConv getCallConv() const
Definition TypeBase.h:4908
This represents a GCC inline-assembly statement extension.
Definition Stmt.h:3452
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
GotoStmt - This represents a direct goto.
Definition Stmt.h:2975
LabelDecl * getLabel() const
Definition Stmt.h:2988
IfStmt - This represents an if/then/else.
Definition Stmt.h:2265
Stmt * getThen()
Definition Stmt.h:2354
Stmt * getInit()
Definition Stmt.h:2415
Expr * getCond()
Definition Stmt.h:2342
bool isConstexpr() const
Definition Stmt.h:2458
bool isNegatedConsteval() const
Definition Stmt.h:2454
Stmt * getElse()
Definition Stmt.h:2363
bool isConsteval() const
Definition Stmt.h:2445
VarDecl * getConditionVariable()
Retrieve the variable declared in this "if" statement, if any.
Definition Stmt.cpp:1068
IndirectGotoStmt - This represents an indirect goto.
Definition Stmt.h:3014
LabelDecl * getConstantTarget()
getConstantTarget - Returns the fixed target of this indirect goto, if one exists.
Definition Stmt.cpp:1269
Represents the declaration of a label.
Definition Decl.h:524
LabelStmt * getStmt() const
Definition Decl.h:548
LabelStmt - Represents a label, which has a substatement.
Definition Stmt.h:2152
LabelDecl * getDecl() const
Definition Stmt.h:2170
bool isSideEntry() const
Definition Stmt.h:2199
Stmt * getSubStmt()
Definition Stmt.h:2174
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
bool assumeFunctionsAreConvergent() const
Base class for BreakStmt and ContinueStmt.
Definition Stmt.h:3063
Represents a point when we exit a loop.
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
If a crash happens while one of these objects are live, the message is printed out along with the spe...
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
QualType getCanonicalType() const
Definition TypeBase.h:8483
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
Represents a struct/union/class.
Definition Decl.h:4342
field_range fields() const
Definition Decl.h:4545
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4542
field_iterator field_begin() const
Definition Decl.cpp:5277
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition Stmt.h:3166
SourceLocation getBeginLoc() const
Definition Stmt.h:3218
const VarDecl * getNRVOCandidate() const
Retrieve the variable that might be used for the named return value optimization.
Definition Stmt.h:3202
Expr * getRetValue()
Definition Stmt.h:3193
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
This class handles loading and caching of source files into memory.
A trivial tuple used to represent a source range.
Stmt - This represents one statement.
Definition Stmt.h:86
@ NoStmtClass
Definition Stmt.h:89
StmtClass getStmtClass() const
Definition Stmt.h:1499
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
Likelihood
The likelihood of a branch being taken.
Definition Stmt.h:1442
@ LH_Unlikely
Branch has the [[unlikely]] attribute.
Definition Stmt.h:1443
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition Stmt.h:1444
@ LH_Likely
Branch has the [[likely]] attribute.
Definition Stmt.h:1446
static const Attr * getLikelihoodAttr(const Stmt *S)
Definition Stmt.cpp:176
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:355
static Likelihood getLikelihood(ArrayRef< const Attr * > Attrs)
Definition Stmt.cpp:168
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1802
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Expr.h:1976
SourceLocation getLocationOfByte(unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, const TargetInfo &Target, unsigned *StartToken=nullptr, unsigned *StartTokenByteOffset=nullptr) const
getLocationOfByte - Return a source location that points to the specified byte of this string literal...
Definition Expr.cpp:1326
StringRef getString() const
Definition Expr.h:1870
const SwitchCase * getNextSwitchCase() const
Definition Stmt.h:1899
SwitchStmt - This represents a 'switch' stmt.
Definition Stmt.h:2515
Expr * getCond()
Definition Stmt.h:2578
Stmt * getBody()
Definition Stmt.h:2590
VarDecl * getConditionVariable()
Retrieve the variable declared in this "switch" statement, if any.
Definition Stmt.cpp:1186
Stmt * getInit()
Definition Stmt.h:2595
SwitchCase * getSwitchCaseList()
Definition Stmt.h:2646
bool validateInputConstraint(MutableArrayRef< ConstraintInfo > OutputConstraints, ConstraintInfo &info) const
std::string simplifyConstraint(StringRef Constraint, SmallVectorImpl< ConstraintInfo > *OutCons=nullptr) const
StringRef getNormalizedGCCRegisterName(StringRef Name, bool ReturnCanonical=false) const
Returns the "normalized" GCC register name.
bool validateOutputConstraint(ConstraintInfo &Info) const
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
bool isVoidType() const
Definition TypeBase.h:9034
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9328
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:754
bool isNRVOVariable() const
Determine whether this local variable can be used with the named return value optimization (NRVO).
Definition Decl.h:1527
WhileStmt - This represents a 'while' stmt.
Definition Stmt.h:2703
Expr * getCond()
Definition Stmt.h:2755
SourceLocation getWhileLoc() const
Definition Stmt.h:2808
SourceLocation getRParenLoc() const
Definition Stmt.h:2813
VarDecl * getConditionVariable()
Retrieve the variable declared in this "while" statement, if any.
Definition Stmt.cpp:1247
Stmt * getBody()
Definition Stmt.h:2767
Defines the clang::TargetInfo interface.
@ Address
A pointer to a ValueDecl.
Definition Primitives.h:28
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
@ CPlusPlus11
CapturedRegionKind
The different kinds of captured statement.
Expr * Cond
};
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
@ CC_SwiftAsync
Definition Specifiers.h:295
U cast(CodeGen::Address addr)
Definition Address.h:327
@ None
The alignment was not explicit in code.
Definition ASTContext.h:179
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void setScopeDepth(EHScopeStack::stable_iterator depth)
EHScopeStack::stable_iterator getScopeDepth() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
bool hasMatchingInput() const
Return true if this output operand has a matching (tied) input operand.
std::optional< std::pair< unsigned, unsigned > > getOutputOperandBounds() const
bool hasTiedOperand() const
Return true if this input operand is a matching constraint that ties it to an output operand.