//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
12
13#include "CGDebugInfo.h"
14#include "CGOpenMPRuntime.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "CodeGenPGO.h"
18#include "TargetInfo.h"
19#include "clang/AST/Attr.h"
20#include "clang/AST/Expr.h"
21#include "clang/AST/Stmt.h"
28#include "llvm/ADT/ArrayRef.h"
29#include "llvm/ADT/DenseMap.h"
30#include "llvm/ADT/SmallSet.h"
31#include "llvm/ADT/StringExtras.h"
32#include "llvm/IR/Assumptions.h"
33#include "llvm/IR/DataLayout.h"
34#include "llvm/IR/InlineAsm.h"
35#include "llvm/IR/Intrinsics.h"
36#include "llvm/IR/MDBuilder.h"
37#include "llvm/Support/SaveAndRestore.h"
38#include <optional>
39
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO->setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple; they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      PGO->markStmtMaybeUsed(S);
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stop point if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::DeferStmtClass:
  case Stmt::SEHLeaveStmtClass:
  case Stmt::SYCLKernelCallStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set. To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors. We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();". Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function. This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:    EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:    EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:   EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S));     break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S));     break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass:  EmitAsmStmt(cast<AsmStmt>(*S));           break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPStripeDirectiveClass:
    EmitOMPStripeDirective(cast<OMPStripeDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPReverseDirectiveClass:
    EmitOMPReverseDirective(cast<OMPReverseDirective>(*S));
    break;
  case Stmt::OMPInterchangeDirectiveClass:
    EmitOMPInterchangeDirective(cast<OMPInterchangeDirective>(*S));
    break;
  case Stmt::OMPFuseDirectiveClass:
    EmitOMPFuseDirective(cast<OMPFuseDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPErrorDirectiveClass:
    EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
    EmitOMPMaskedTaskLoopDirective(cast<OMPMaskedTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
    EmitOMPMaskedTaskLoopSimdDirective(
        cast<OMPMaskedTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
    EmitOMPParallelMaskedTaskLoopDirective(
        cast<OMPParallelMaskedTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
    EmitOMPParallelMaskedTaskLoopSimdDirective(
        cast<OMPParallelMaskedTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
    break;
  case Stmt::OMPDispatchDirectiveClass:
    CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
    break;
  case Stmt::OMPScopeDirectiveClass:
    EmitOMPScopeDirective(cast<OMPScopeDirective>(*S));
    break;
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
    EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
    EmitOMPTargetTeamsGenericLoopDirective(
        cast<OMPTargetTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelGenericLoopDirectiveClass:
    EmitOMPParallelGenericLoopDirective(
        cast<OMPParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
    EmitOMPTargetParallelGenericLoopDirective(
        cast<OMPTargetParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedDirectiveClass:
    EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
    break;
  case Stmt::OMPAssumeDirectiveClass:
    EmitOMPAssumeDirective(cast<OMPAssumeDirective>(*S));
    break;
  case Stmt::OpenACCComputeConstructClass:
    EmitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*S));
    break;
  case Stmt::OpenACCLoopConstructClass:
    EmitOpenACCLoopConstruct(cast<OpenACCLoopConstruct>(*S));
    break;
  case Stmt::OpenACCCombinedConstructClass:
    EmitOpenACCCombinedConstruct(cast<OpenACCCombinedConstruct>(*S));
    break;
  case Stmt::OpenACCDataConstructClass:
    EmitOpenACCDataConstruct(cast<OpenACCDataConstruct>(*S));
    break;
  case Stmt::OpenACCEnterDataConstructClass:
    EmitOpenACCEnterDataConstruct(cast<OpenACCEnterDataConstruct>(*S));
    break;
  case Stmt::OpenACCExitDataConstructClass:
    EmitOpenACCExitDataConstruct(cast<OpenACCExitDataConstruct>(*S));
    break;
  case Stmt::OpenACCHostDataConstructClass:
    EmitOpenACCHostDataConstruct(cast<OpenACCHostDataConstruct>(*S));
    break;
  case Stmt::OpenACCWaitConstructClass:
    EmitOpenACCWaitConstruct(cast<OpenACCWaitConstruct>(*S));
    break;
  case Stmt::OpenACCInitConstructClass:
    EmitOpenACCInitConstruct(cast<OpenACCInitConstruct>(*S));
    break;
  case Stmt::OpenACCShutdownConstructClass:
    EmitOpenACCShutdownConstruct(cast<OpenACCShutdownConstruct>(*S));
    break;
  case Stmt::OpenACCSetConstructClass:
    EmitOpenACCSetConstruct(cast<OpenACCSetConstruct>(*S));
    break;
  case Stmt::OpenACCUpdateConstructClass:
    EmitOpenACCUpdateConstruct(cast<OpenACCUpdateConstruct>(*S));
    break;
  case Stmt::OpenACCAtomicConstructClass:
    EmitOpenACCAtomicConstruct(cast<OpenACCAtomicConstruct>(*S));
    break;
  case Stmt::OpenACCCacheConstructClass:
    EmitOpenACCCacheConstruct(cast<OpenACCCacheConstruct>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::DeferStmtClass:
    EmitDeferStmt(cast<DeferStmt>(*S));
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  case Stmt::SYCLKernelCallStmtClass:
    // SYCL kernel call statements are generated as wrappers around the body
    // of functions declared with the sycl_kernel_entry_point attribute. Such
    // functions are used to specify how a SYCL kernel (a function object) is
    // to be invoked; the SYCL kernel call statement contains a transformed
    // variation of the function body and is used to generate a SYCL kernel
    // caller function, a function that serves as the device-side entry point
    // used to execute the SYCL kernel. The sycl_kernel_entry_point attributed
    // function is invoked by host code in order to trigger emission of the
    // device-side SYCL kernel caller function and to generate metadata needed
    // by SYCL run-time library implementations; the function is otherwise
    // intended to have no effect. As such, the function body is not evaluated
    // as part of the invocation during host compilation (and the function
    // should not be called or emitted during device compilation); the SYCL
    // kernel call statement is thus handled as a null statement for the
    // purpose of code generation.
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
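/// For example, in the GNU statement expression
///   int y = ({ int x = f(); x + 1; });
/// the trailing sub-statement 'x + 1' provides the value of the whole
/// expression, and GetLast asks this function to capture it.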
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
                                S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  for (CompoundStmt::const_body_iterator I = S.body_begin(),
                                         E = S.body_end() - GetLast;
       I != E; ++I)
    EmitStmt(*I);

  Address RetAlloca = Address::invalid();
  if (GetLast) {
    // We have to special case labels here. They are statements, but when put
    // at the end of a statement expression, they yield the value of their
    // subexpression. Handle this by walking through all labels we encounter,
    // emitting them before we evaluate the subexpr.
    // Similar issues arise for attributed statements.
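    // For example, '({ f(); done: 42; })' wraps its result expression in a
    // LabelStmt; the label 'done' is emitted first, and the trailing '42'
    // then provides the value of the statement expression.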
    const Stmt *LastStmt = S.body_back();
    while (!isa<Expr>(LastStmt)) {
      if (const auto *LS = dyn_cast<LabelStmt>(LastStmt)) {
        EmitLabel(LS->getDecl());
        LastStmt = LS->getSubStmt();
      } else if (const auto *AS = dyn_cast<AttributedStmt>(LastStmt)) {
        // FIXME: Update this if we ever have attributes that affect the
        // semantics of an expression.
        LastStmt = AS->getSubStmt();
      } else {
        llvm_unreachable("unknown value statement");
      }
    }

    EnsureInsertPoint();

    const Expr *E = cast<Expr>(LastStmt);
    QualType ExprTy = E->getType();
    if (hasAggregateEvaluationKind(ExprTy)) {
      EmitAggExpr(E, AggSlot);
    } else {
      // We can't return an RValue here because there might be cleanups at
      // the end of the StmtExpr. Because of that, we have to emit the result
      // here into a temporary alloca.
      RetAlloca = CreateMemTemp(ExprTy);
      EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                       /*IsInit*/ false);
    }
  }

  return RetAlloca;
}

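/// Try to erase a basic block that does nothing but branch unconditionally to
/// its successor, rewriting any uses of the block to point at that successor.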
void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->insert(std::next(CurBB->getIterator()), BB);
  else
    CurFn->insert(CurFn->end(), BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block. If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->insert(std::next(insn->getParent()->getIterator()), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->insert(CurFn->end(), block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups. Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (const LabelDecl *Label : Labels) {
    assert(CGF.LabelMap.count(Label));
    JumpDest &dest = CGF.LabelMap.find(Label)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}

void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  bool noinline = false;
  bool alwaysinline = false;
  bool noconvergent = false;
  HLSLControlFlowHintAttr::Spelling flattenOrBranch =
      HLSLControlFlowHintAttr::SpellingNotCalculated;
  const CallExpr *musttail = nullptr;
  const AtomicAttr *AA = nullptr;

  for (const auto *A : S.getAttrs()) {
    switch (A->getKind()) {
    default:
      break;
    case attr::NoMerge:
      nomerge = true;
      break;
    case attr::NoInline:
      noinline = true;
      break;
    case attr::AlwaysInline:
      alwaysinline = true;
      break;
    case attr::NoConvergent:
      noconvergent = true;
      break;
    case attr::MustTail: {
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
    } break;
    case attr::CXXAssume: {
      const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
      if (getLangOpts().CXXAssumptions && Builder.GetInsertBlock() &&
          !Assumption->HasSideEffects(getContext())) {
        llvm::Value *AssumptionVal = EmitCheckedArgForAssume(Assumption);
        Builder.CreateAssumption(AssumptionVal);
      }
    } break;
    case attr::Atomic:
      AA = cast<AtomicAttr>(A);
      break;
    case attr::HLSLControlFlowHint: {
      flattenOrBranch = cast<HLSLControlFlowHintAttr>(A)->getSemanticSpelling();
    } break;
    }
  }
  SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
  SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
  SaveAndRestore save_noconvergent(InNoConvergentAttributedStmt, noconvergent);
  SaveAndRestore save_musttail(MustTailCall, musttail);
  SaveAndRestore save_flattenOrBranch(HLSLControlFlowAttr, flattenOrBranch);
  CGAtomicOptionsRAII AORAII(CGM, AA);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  ApplyAtomGroup Grp(getDebugInfo());
  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}

void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
  if (CurBB && CurBB->getTerminator())
    addInstToCurrentSourceAtom(CurBB->getTerminator(), nullptr);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  const Stmt *Else = S.getElse();

  // The else branch of a consteval if statement is always the only branch that
  // can be runtime evaluated.
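  // For example, in 'if consteval { f(); } else { g(); }' only the call to
  // g() can ever be emitted: the consteval arm exists solely for constant
  // evaluation, so no code is generated for it here.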
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : Else;
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0. The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
  ApplyDebugLocation DL(*this, S.getCond());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = Else;
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      incrementProfileCounter(!CondConstant, &S,
                              /*UseBoth=*/true);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      PGO->markStmtMaybeUsed(Skipped);
      return;
    }
  }

  auto HasSkip = hasSkipCounter(&S);

  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock =
      (Else || HasSkip ? createBasicBlock("if.else") : ContBlock);
  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  // Also, differentiate between disabled PGO and a never executed branch with
  // PGO. Assuming PGO is in use:
  // - we want to ignore the [[likely]] attribute if the branch is never
  //   executed,
  // - assuming the profile is poor, preserving the attribute may still be
  //   beneficial.
  // As an approximation, preserve the attribute only if both the branch and
  // the parent context were not executed.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t ThenCount = getProfileCount(S.getThen());
  if (!ThenCount && !getCurrentProfileCount() &&
      CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), Else);

  // When measuring MC/DC, always fully evaluate the condition up front using
  // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
  // executing the body of the if.then or if.else. This is useful for when
  // there is a 'return' within the body, but this is particularly beneficial
  // when one if-stmt is nested within another if-stmt so that all of the MC/DC
  // updates are kept linear and consistent.
  if (!CGM.getCodeGenOpts().MCDCCoverage) {
    EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH,
                         /*ConditionalOp=*/nullptr,
                         /*ConditionalDecl=*/S.getConditionVariable());
  } else {
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
  }

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (Else) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    // Add a counter to else block unless it has CounterExpr.
    if (HasSkip)
      incrementProfileCounter(UseSkipPath, &S);
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  } else if (HasSkip) {
    EmitBlock(ElseBlock);
    incrementProfileCounter(UseSkipPath, &S);
    EmitBranch(ContBlock);
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}

bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
                                              bool HasEmptyBody) {
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
      CodeGenOptions::FiniteLoopsKind::Never)
    return false;

  // Now apply rules for plain C (see 6.8.5.6 in C11).
  // Loops with constant conditions do not have to make progress in any C
  // version.
  // As an extension, we consider loops whose condition can be
  // constant-folded.
  Expr::EvalResult Result;
  bool CondIsConstInt =
      !ControllingExpression ||
      (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
       Result.Val.isInt());

  bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
                                       Result.Val.getInt().getBoolValue());

  // Loops with non-constant conditions must make progress in C11 and later.
  if (getLangOpts().C11 && !CondIsConstInt)
    return true;

  // [C++26][intro.progress] (DR)
  // The implementation may assume that any thread will eventually do one of
  // the following:
  // [...]
  // - continue execution of a trivial infinite loop ([stmt.iter.general]).
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
          CodeGenOptions::FiniteLoopsKind::Always ||
      getLangOpts().CPlusPlus11) {
    if (HasEmptyBody && CondIsTrue) {
      CurFn->removeFnAttr(llvm::Attribute::MustProgress);
      return false;
    }
    return true;
  }
  return false;
}

// [C++26][stmt.iter.general] (DR)
// A trivially empty iteration statement is an iteration statement matching one
// of the following forms:
//  - while ( expression ) ;
//  - while ( expression ) { }
//  - do ; while ( expression ) ;
//  - do { } while ( expression ) ;
//  - for ( init-statement expression(opt); ) ;
//  - for ( init-statement expression(opt); ) { }
template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
  if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
    if (S.getInc())
      return false;
  }
  const Stmt *Body = S.getBody();
  if (!Body || isa<NullStmt>(Body))
    return true;
  if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
    return Compound->body_empty();
  return false;
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(LoopHeader.getBlock()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(S, LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
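  // For example (with an illustrative type), in 'while (Widget w = make())'
  // the variable 'w' is constructed before and destroyed after every
  // evaluation of the condition, which is why the condition gets its own
  // cleanup scope below.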
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header. C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (hasSkipCounter(&S) || ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    auto *I = Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);
    // Key Instructions: Emit the condition and branch as separate source
    // location atoms otherwise we may omit a step onto the loop condition in
    // favour of the `while` keyword.
    // FIXME: We could have the branch as the backup location for the
    // condition, which would probably be a better experience. Explore this
    // later.
    if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
      addInstToNewSourceAtom(CondI, nullptr);
    addInstToNewSourceAtom(I, nullptr);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      incrementProfileCounter(UseSkipPath, &S);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body. We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch) {
    SimplifyForwardingBlocks(LoopHeader.getBlock());
    PGO->markStmtAsUsed(true, &S);
  }

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(S, LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(LoopBody));

  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the do-while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  auto *LoopFalse = (hasSkipCounter(&S) ? createBasicBlock("do.loopfalse")
                                        : LoopExit.getBlock());

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    auto *I = Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopFalse,
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));

    // Key Instructions: Emit the condition and branch as separate source
    // location atoms otherwise we may omit a step onto the loop condition in
    // favour of the closing brace.
    // FIXME: We could have the branch as the backup location for the
    // condition, which would probably be a better experience (no jumping to
    // the brace).
    if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
      addInstToNewSourceAtom(CondI, nullptr);
    addInstToNewSourceAtom(I, nullptr);
  }

  LoopStack.pop();

  if (LoopFalse != LoopExit.getBlock()) {
    EmitBlock(LoopFalse);
    incrementProfileCounter(UseSkipPath, &S, /*UseBoth=*/true);
  }

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  std::optional<LexicalScope> ForScope;
  if (S.getInit())
    ForScope.emplace(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block. Otherwise, if there is no condition variable, we can
  // form the continue block now. If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(S, LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (hasSkipCounter(&S) || (ForScope && ForScope->requiresCleanups()))
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0. The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
    // Key Instructions: Emit the condition and branch as separate atoms to
    // match existing loop stepping behaviour. FIXME: We could have the branch
    // as the backup location for the condition, which would probably be a
    // better experience (no jumping to the brace).
    if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
      addInstToNewSourceAtom(CondI, nullptr);
    addInstToNewSourceAtom(I, nullptr);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      incrementProfileCounter(UseSkipPath, &S);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant. Don't even create a new block for the
    // body, just fall into it.
    PGO->markStmtAsUsed(true, &S);
  }

  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // The last block in the loop's body (which unconditionally branches to the
  // `inc` block if there is one).
  auto *FinalBodyBB = Builder.GetInsertBlock();

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  if (ForScope)
    ForScope->ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();

  if (FinalBodyBB) {
    // Key Instructions: We want the for closing brace to be step-able on to
    // match existing behaviour.
    addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
  }
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (hasSkipCounter(&S) || ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
  // Key Instructions: Emit the condition and branch as separate atoms to
  // match existing loop stepping behaviour. FIXME: We could have the branch as
  // the backup location for the condition, which would probably be a better
  // experience.
  if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
    addInstToNewSourceAtom(CondI, nullptr);
  addInstToNewSourceAtom(I, nullptr);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    incrementProfileCounter(UseSkipPath, &S);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(S, LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }
  // The last block in the loop's body (which unconditionally branches to the
  // `inc` block if there is one).
  auto *FinalBodyBB = Builder.GetInsertBlock();

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();

  if (FinalBodyBB) {
    // We want the for closing brace to be step-able on to match existing
    // behaviour.
    addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
  }
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// Determine if the given call uses the swiftasync calling convention.
static bool isSwiftAsyncCallee(const CallExpr *CE) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return false;
    }
  } else {
    return false;
  }
  return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
}

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
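/// For example, GCC accepts 'void f() { return g(); }' where g() also returns
/// void, and 'int f() { return; }' with the operand missing.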
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(SLocPtr, ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();

  // If we're in a swiftasynccall function, and the return expression is a
  // call to a swiftasynccall function, mark the call as the musttail call.
  std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
  if (RV && CurFnInfo &&
      CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
    if (auto CE = dyn_cast<CallExpr>(RV)) {
      if (isSwiftAsyncCallee(CE)) {
        SaveMustTail.emplace(MustTailCall, CE);
      }
    }
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized).
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    auto *I = Builder.CreateStore(Result.getScalarVal(), ReturnValue);
    addInstToCurrentSourceAtom(I, I->getValueOperand());
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar: {
      llvm::Value *Ret = EmitScalarExpr(RV);
      if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
        EmitStoreOfScalar(Ret, MakeAddrLValue(ReturnValue, RV->getType()),
                          /*isInit*/ true);
      } else {
        auto *I = Builder.CreateStore(Ret, ReturnValue);
        addInstToCurrentSourceAtom(I, I->getValueOperand());
      }
      break;
    }
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I, /*EvaluateConditionDecl=*/true);
}

template <typename StmtTy>
auto CodeGenFunction::getBreakContinueTarget(const StmtTy &S)
    -> const BreakContinue * {
  if (!S.hasLabelTarget())
    return &BreakContinueStack.back();

  const Stmt *LoopOrSwitch = S.getNamedLoopOrSwitch();
  assert(LoopOrSwitch && "break/continue target not set?");
  for (const BreakContinue &BC : llvm::reverse(BreakContinueStack))
    if (BC.LoopOrSwitch == LoopOrSwitch)
      return &BC;

  llvm_unreachable("break/continue target not found");
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  ApplyAtomGroup Grp(getDebugInfo());
  EmitBranchThroughCleanup(getBreakContinueTarget(S)->BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  ApplyAtomGroup Grp(getDebugInfo());
  EmitBranchThroughCleanup(getBreakContinueTarget(S)->ContinueBlock);
}

/// EmitCaseStmtRange - If the case statement range is not too big, then add
/// multiple cases to the switch instruction, one for each value within
/// the range. If the range is too big, then emit an "if" condition check.
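/// For example, the GNU range extension 'case 1 ... 3:' may become three
/// individual switch cases, while 'case 1 ... 1000:' is lowered to a
/// subtract-and-compare guard that branches into the case body.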
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
                                        ArrayRef<const Attr *> Attrs) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      else if (SwitchLikelihood)
        SwitchLikelihood->push_back(LH);

      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->insert(CurFn->end(), CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, i.e., the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  } else if (SwitchLikelihood)
    Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);

  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}

1830 ArrayRef<const Attr *> Attrs) {
1831 // If there is no enclosing switch instance that we're aware of, then this
1832 // case statement and its block can be elided. This situation only happens
1833 // when we've constant-folded the switch, are emitting the constant case,
1834 // and part of the constant case includes another case statement. For
1835 // instance: switch (4) { case 4: do { case 5: } while (1); }
1836 if (!SwitchInsn) {
1837 EmitStmt(S.getSubStmt());
1838 return;
1839 }
1840
1841 // Handle case ranges.
1842 if (S.getRHS()) {
1843 EmitCaseStmtRange(S, Attrs);
1844 return;
1845 }
1846
1847 llvm::ConstantInt *CaseVal =
1848 Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1849
1850 // Emit debuginfo for the case value if it is an enum value.
1851 const ConstantExpr *CE;
1852 if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1853 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1854 else
1855 CE = dyn_cast<ConstantExpr>(S.getLHS());
1856 if (CE) {
1857 if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1858 if (CGDebugInfo *Dbg = getDebugInfo())
1859 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1860 Dbg->EmitGlobalVariable(DE->getDecl(),
1861 APValue(llvm::APSInt(CaseVal->getValue())));
1862 }
1863
1864 if (SwitchLikelihood)
1865 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1866
1867 // If the body of the case is just a 'break', try to not emit an empty block.
1868 // If we're profiling or we're not optimizing, leave the block in for better
1869 // debug and coverage analysis.
1870 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1871 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1872 isa<BreakStmt>(S.getSubStmt())) {
1873 JumpDest Block = BreakContinueStack.back().BreakBlock;
1874
1875 // Only do this optimization if there are no cleanups that need emitting.
1876 if (isObviouslyBranchWithoutCleanups(Block)) {
1877 if (SwitchWeights)
1878 SwitchWeights->push_back(getProfileCount(&S));
1879 SwitchInsn->addCase(CaseVal, Block.getBlock());
1880
1881 // If there was a fallthrough into this case, make sure to redirect it to
1882 // the end of the switch as well.
1883 if (Builder.GetInsertBlock()) {
1884 Builder.CreateBr(Block.getBlock());
1885 Builder.ClearInsertionPoint();
1886 }
1887 return;
1888 }
1889 }
1890
1891 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1892 EmitBlockWithFallThrough(CaseDest, &S);
1893 if (SwitchWeights)
1894 SwitchWeights->push_back(getProfileCount(&S));
1895 SwitchInsn->addCase(CaseVal, CaseDest);
1896
1897 // Recursively emitting the statement is acceptable, but is not wonderful for
1898 // code where we have many case statements nested together, i.e.:
1899 // case 1:
1900 // case 2:
1901 // case 3: etc.
1902 // Handling this recursively will create a new block for each case statement
1902 // that falls through to the next case, which is IR-intensive. It also causes
1904 // deep recursion which can run into stack depth limitations. Handle
1905 // sequential non-range case statements specially.
1906 //
1907 // TODO: When the next case has a likelihood attribute, the code returns to
1908 // the recursive algorithm. Maybe improve this case if it becomes common
1909 // practice to use a lot of attributes.
1910 const CaseStmt *CurCase = &S;
1911 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1912
1913 // Otherwise, iteratively add consecutive cases to this switch stmt.
1914 while (NextCase && NextCase->getRHS() == nullptr) {
1915 CurCase = NextCase;
1916 llvm::ConstantInt *CaseVal =
1917 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1918
1919 if (SwitchWeights)
1920 SwitchWeights->push_back(getProfileCount(NextCase));
1921 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1922 CaseDest = createBasicBlock("sw.bb");
1923 EmitBlockWithFallThrough(CaseDest, CurCase);
1924 }
1925 // Since this loop is only executed when the CaseStmt has no attributes
1926 // use a hard-coded value.
1927 if (SwitchLikelihood)
1928 SwitchLikelihood->push_back(Stmt::LH_None);
1929
1930 SwitchInsn->addCase(CaseVal, CaseDest);
1931 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1932 }
1933
1934 // Generate a stop point for debug info if the case statement is
1935 // followed by a default statement. A fallthrough case before a
1936 // default case gets its own branch target.
1937 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1938 EmitStopPoint(CurCase);
1939
1940 // Normal default recursion for non-cases.
1941 EmitStmt(CurCase->getSubStmt());
1942}
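// For illustration: with the iterative path above,
//
//   switch (x) {
//   case 1:        // these three labels share one "sw.bb" block that is
//   case 2:        // registered for the case values 1, 2 and 3, avoiding
//   case 3:        // per-label recursion and empty fallthrough blocks
//     f(); break;  // (unless Clang PGO instrumentation needs distinct
//   }              // counter blocks, as handled in the loop above).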
1943
1944 void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1945 ArrayRef<const Attr *> Attrs) {
1946 // If there is no enclosing switch instance that we're aware of, then this
1947 // default statement can be elided. This situation only happens when we've
1948 // constant-folded the switch.
1949 if (!SwitchInsn) {
1950 EmitStmt(S.getSubStmt());
1951 return;
1952 }
1953
1954 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1955 assert(DefaultBlock->empty() &&
1956 "EmitDefaultStmt: Default block already defined?");
1957
1958 if (SwitchLikelihood)
1959 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1960
1961 EmitBlockWithFallThrough(DefaultBlock, &S);
1962
1963 EmitStmt(S.getSubStmt());
1964}
1965
1966namespace {
1967struct EmitDeferredStatement final : EHScopeStack::Cleanup {
1968 const DeferStmt &Stmt;
1969 EmitDeferredStatement(const DeferStmt *Stmt) : Stmt(*Stmt) {}
1970
1971 void Emit(CodeGenFunction &CGF, Flags) override {
1972 // Take care that any cleanups pushed by the body of a '_Defer' statement
1973 // don't clobber the current cleanup slot value.
1974 //
1975 // Assume we have a scope that pushes a cleanup; when that scope is exited,
1976 // we need to run that cleanup; this is accomplished by emitting the cleanup
1977 // into a separate block and then branching to that block at scope exit.
1978 //
1979 // Where this gets complicated is if we exit the scope in multiple different
1980 // ways; e.g. in a 'for' loop, we may exit the scope of its body by falling
1981 // off the end (in which case we need to run the cleanup and then branch to
1982 // the increment), or by 'break'ing out of the loop (in which case we need
1983 // to run the cleanup and then branch to the loop exit block); in both cases
1984 // we first branch to the cleanup block to run the cleanup, but the block we
1985 // need to jump to *after* running the cleanup is different.
1986 //
1987 // This is accomplished using a local integer variable called the 'cleanup
1988 // slot': before branching to the cleanup block, we store a value into that
1989 // slot. Then, in the cleanup block, after running the cleanup, we load the
1990 // value of that variable and 'switch' on it to branch to the appropriate
1991 // continuation block.
1992 //
1993 // The problem that arises once '_Defer' statements are involved is that the
1994 // body of a '_Defer' is an arbitrary statement which itself can create more
1995 // cleanups. This means we may end up overwriting the cleanup slot before we
1996 // ever have a chance to 'switch' on it, which means that once we *do* get
1997 // to the 'switch', we end up in whatever block the cleanup code happened to
1998 // pick as the default 'switch' exit label!
1999 //
2000 // That is, what is normally supposed to happen is something like:
2001 //
2002 // 1. Store 'X' to cleanup slot.
2003 // 2. Branch to cleanup block.
2004 // 3. Execute cleanup.
2005 // 4. Read value from cleanup slot.
2006 // 5. Branch to the block associated with 'X'.
2007 //
2008 // But if we encounter a _Defer' statement that contains a cleanup, then
2009 // what might instead happen is:
2010 //
2011 // 1. Store 'X' to cleanup slot.
2012 // 2. Branch to cleanup block.
2013 // 3. Execute cleanup; this ends up pushing another cleanup, so:
2014 // 3a. Store 'Y' to cleanup slot.
2015 // 3b. Run steps 2–5 recursively.
2016 // 4. Read value from cleanup slot, which is now 'Y' instead of 'X'.
2017 // 5. Branch to the block associated with 'Y'... which doesn't even
2018 // exist because the value 'Y' is only meaningful for the inner
2019 // cleanup. The result is we just branch 'somewhere random'.
2020 //
2021 // The rest of the cleanup code simply isn't prepared to handle this case
2022 // because most other cleanups can't push more cleanups, and thus, emitting
2023 // other cleanups generally cannot clobber the cleanup slot.
2024 //
2025 // To prevent this from happening, save the current cleanup slot value and
2026 // restore it after emitting the '_Defer' statement.
2027 llvm::Value *SavedCleanupDest = nullptr;
2028 if (CGF.NormalCleanupDest.isValid())
2029 SavedCleanupDest =
2030 CGF.Builder.CreateLoad(CGF.NormalCleanupDest, "cleanup.dest.saved");
2031
2032 CGF.EmitStmt(Stmt.getBody());
2033
2034 if (SavedCleanupDest && CGF.HaveInsertPoint())
2035 CGF.Builder.CreateStore(SavedCleanupDest, CGF.NormalCleanupDest);
2036
2037 // Cleanups must end with an insert point.
2038 CGF.EnsureInsertPoint();
2039 }
2040};
2041} // namespace
2042
2043 void CodeGenFunction::EmitDeferStmt(const DeferStmt &S) {
2044 EHStack.pushCleanup<EmitDeferredStatement>(NormalAndEHCleanup, &S);
2045}
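// A sketch of the hazard EmitDeferredStatement::Emit guards against
// (hypothetical code; assumes the '_Defer' spelling used above and a
// hypothetical 'done' cleanup function to force a nested cleanup):
//
//   void f(void) {
//     _Defer {
//       __attribute__((cleanup(done))) int guard = 0; // pushes cleanup B
//       g();
//     } // emitting this defer (cleanup A) also emits B; without the
//   }   // save/restore, B's dispatch would clobber A's cleanup slot.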
2046
2047/// CollectStatementsForCase - Given the body of a 'switch' statement and a
2048/// constant value that is being switched on, see if we can dead code eliminate
2049/// the body of the switch to a simple series of statements to emit. Basically,
2050/// on a switch (5) we want to find these statements:
2051/// case 5:
2052/// printf(...); <--
2053/// ++i; <--
2054/// break;
2055///
2056/// and add them to the ResultStmts vector. If it is unsafe to do this
2057/// transformation (for example, one of the elided statements contains a label
2058/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
2059/// should include statements after it (e.g. the printf() line is a substmt of
2060/// the case) then return CSFC_FallThrough. If we handled it and found a break
2061/// statement, then return CSFC_Success.
2062///
2063/// If Case is non-null, then we are looking for the specified case, checking
2064/// that nothing we jump over contains labels. If Case is null, then we found
2065/// the case and are looking for the break.
2066///
2067/// If the recursive walk actually finds our Case, then we set FoundCase to
2068/// true.
2069///
2070 enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
2071 static CSFC_Result CollectStatementsForCase(const Stmt *S,
2072 const SwitchCase *Case,
2073 bool &FoundCase,
2074 SmallVectorImpl<const Stmt*> &ResultStmts) {
2075 // If this is a null statement, just succeed.
2076 if (!S)
2077 return Case ? CSFC_Success : CSFC_FallThrough;
2078
2079 // If this is the switchcase (case 4: or default) that we're looking for, then
2080 // we're in business. Just add the substatement.
2081 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
2082 if (S == Case) {
2083 FoundCase = true;
2084 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
2085 ResultStmts);
2086 }
2087
2088 // Otherwise, this is some other case or default statement, just ignore it.
2089 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
2090 ResultStmts);
2091 }
2092
2093 // If we are in the live part of the code and we found our break statement,
2094 // return a success!
2095 if (!Case && isa<BreakStmt>(S))
2096 return CSFC_Success;
2097
2098 // If this is a compound statement, then it might contain the SwitchCase,
2099 // the break, or neither.
2100 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
2101 // Handle this as two cases: we might be looking for the SwitchCase (if so
2102 // the skipped statements must be skippable) or we might already have it.
2103 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
2104 bool StartedInLiveCode = FoundCase;
2105 unsigned StartSize = ResultStmts.size();
2106
2107 // If we've not found the case yet, scan through looking for it.
2108 if (Case) {
2109 // Keep track of whether we see a skipped declaration. The code could be
2110 // using the declaration even if it is skipped, so we can't optimize out
2111 // the decl if the kept statements might refer to it.
2112 bool HadSkippedDecl = false;
2113
2114 // If we're looking for the case, just see if we can skip each of the
2115 // substatements.
2116 for (; Case && I != E; ++I) {
2117 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
2118
2119 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
2120 case CSFC_Failure: return CSFC_Failure;
2121 case CSFC_Success:
2122 // A successful result means that either 1) the statement doesn't
2123 // contain the case and is skippable, or 2) it does contain the case
2124 // value and also the break that exits the switch. In the latter case,
2125 // we just verify the rest of the statements are elidable.
2126 if (FoundCase) {
2127 // If we found the case and skipped declarations, we can't do the
2128 // optimization.
2129 if (HadSkippedDecl)
2130 return CSFC_Failure;
2131
2132 for (++I; I != E; ++I)
2133 if (CodeGenFunction::ContainsLabel(*I, true))
2134 return CSFC_Failure;
2135 return CSFC_Success;
2136 }
2137 break;
2138 case CSFC_FallThrough:
2139 // If we have a fallthrough condition, then we must have found the
2140 // case and started to include statements. Consider the rest of the
2141 // statements in the compound statement as candidates for inclusion.
2142 assert(FoundCase && "Didn't find case but returned fallthrough?");
2143 // We recursively found Case, so we're not looking for it anymore.
2144 Case = nullptr;
2145
2146 // If we found the case and skipped declarations, we can't do the
2147 // optimization.
2148 if (HadSkippedDecl)
2149 return CSFC_Failure;
2150 break;
2151 }
2152 }
2153
2154 if (!FoundCase)
2155 return CSFC_Success;
2156
2157 assert(!HadSkippedDecl && "fallthrough after skipping decl");
2158 }
2159
2160 // If we have statements in our range, then we know that the statements are
2161 // live and need to be added to the set of statements we're tracking.
2162 bool AnyDecls = false;
2163 for (; I != E; ++I) {
2164 AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
2165
2166 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
2167 case CSFC_Failure: return CSFC_Failure;
2168 case CSFC_FallThrough:
2169 // A fallthrough result means that the statement was simple and just
2170 // included in ResultStmt, keep adding them afterwards.
2171 break;
2172 case CSFC_Success:
2173 // A successful result means that we found the break statement and
2174 // stopped statement inclusion. We just ensure that any leftover stmts
2175 // are skippable and return success ourselves.
2176 for (++I; I != E; ++I)
2177 if (CodeGenFunction::ContainsLabel(*I, true))
2178 return CSFC_Failure;
2179 return CSFC_Success;
2180 }
2181 }
2182
2183 // If we're about to fall out of a scope without hitting a 'break;', we
2184 // can't perform the optimization if there were any decls in that scope
2185 // (we'd lose their end-of-lifetime).
2186 if (AnyDecls) {
2187 // If the entire compound statement was live, there's one more thing we
2188 // can try before giving up: emit the whole thing as a single statement.
2189 // We can do that unless the statement contains a 'break;'.
2190 // FIXME: Such a break must be at the end of a construct within this one.
2191 // We could emit this by just ignoring the BreakStmts entirely.
2192 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
2193 ResultStmts.resize(StartSize);
2194 ResultStmts.push_back(S);
2195 } else {
2196 return CSFC_Failure;
2197 }
2198 }
2199
2200 return CSFC_FallThrough;
2201 }
2202
2203 // Okay, this is some other statement that we don't handle explicitly, like a
2204 // for statement or increment etc. If we are skipping over this statement,
2205 // just verify it doesn't have labels, which would make it invalid to elide.
2206 if (Case) {
2207 if (CodeGenFunction::ContainsLabel(S, true))
2208 return CSFC_Failure;
2209 return CSFC_Success;
2210 }
2211
2212 // Otherwise, we want to include this statement. Everything is cool with that
2213 // so long as it doesn't contain a break out of the switch we're in.
2214 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
2215
2216 // Otherwise, everything is great. Include the statement and tell the caller
2217 // that we fall through and include the next statement as well.
2218 ResultStmts.push_back(S);
2219 return CSFC_FallThrough;
2220}
2221
2222/// FindCaseStatementsForValue - Find the case statement being jumped to and
2223/// then invoke CollectStatementsForCase to find the list of statements to emit
2224/// for a switch on constant. See the comment above CollectStatementsForCase
2225/// for more details.
2226 static bool FindCaseStatementsForValue(const SwitchStmt &S,
2227 const llvm::APSInt &ConstantCondValue,
2228 SmallVectorImpl<const Stmt*> &ResultStmts,
2229 ASTContext &C,
2230 const SwitchCase *&ResultCase) {
2231 // First step, find the switch case that is being branched to. We can do this
2232 // efficiently by scanning the SwitchCase list.
2233 const SwitchCase *Case = S.getSwitchCaseList();
2234 const DefaultStmt *DefaultCase = nullptr;
2235
2236 for (; Case; Case = Case->getNextSwitchCase()) {
2237 // It's either a default or case. Just remember the default statement in
2238 // case we're not jumping to any numbered cases.
2239 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
2240 DefaultCase = DS;
2241 continue;
2242 }
2243
2244 // Check to see if this case is the one we're looking for.
2245 const CaseStmt *CS = cast<CaseStmt>(Case);
2246 // Don't handle case ranges yet.
2247 if (CS->getRHS()) return false;
2248
2249 // If we found our case, remember it as 'case'.
2250 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
2251 break;
2252 }
2253
2254 // If we didn't find a matching case, we use a default if it exists, or we
2255 // elide the whole switch body!
2256 if (!Case) {
2257 // It is safe to elide the body of the switch if it doesn't contain labels
2258 // etc. If it is safe, return successfully with an empty ResultStmts list.
2259 if (!DefaultCase)
2260 return !CodeGenFunction::ContainsLabel(&S);
2261 Case = DefaultCase;
2262 }
2263
2264 // Ok, we know which case is being jumped to, try to collect all the
2265 // statements that follow it. This can fail for a variety of reasons. Also,
2266 // check to see that the recursive walk actually found our case statement.
2267 // Insane cases like this can fail to find it in the recursive walk since we
2268 // don't handle every stmt kind:
2269 // switch (4) {
2270 // while (1) {
2271 // case 4: ...
2272 bool FoundCase = false;
2273 ResultCase = Case;
2274 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
2275 ResultStmts) != CSFC_Failure &&
2276 FoundCase;
2277}
2278
2279static std::optional<SmallVector<uint64_t, 16>>
2280 getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
2281 // Are there enough branches to weight them?
2282 if (Likelihoods.size() <= 1)
2283 return std::nullopt;
2284
2285 uint64_t NumUnlikely = 0;
2286 uint64_t NumNone = 0;
2287 uint64_t NumLikely = 0;
2288 for (const auto LH : Likelihoods) {
2289 switch (LH) {
2290 case Stmt::LH_Unlikely:
2291 ++NumUnlikely;
2292 break;
2293 case Stmt::LH_None:
2294 ++NumNone;
2295 break;
2296 case Stmt::LH_Likely:
2297 ++NumLikely;
2298 break;
2299 }
2300 }
2301
2302 // Is there a likelihood attribute used?
2303 if (NumUnlikely == 0 && NumLikely == 0)
2304 return std::nullopt;
2305
2306 // When multiple cases share the same code they can be combined during
2307 // optimization. In that case the weights of the branch will be the sum of
2308 // the individual weights. Make sure the combined sum of all neutral cases
2309 // doesn't exceed the value of a single likely attribute.
2310 // The additions both avoid divisions by 0 and make sure the weights of None
2311 // don't exceed the weight of Likely.
2312 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2313 const uint64_t None = Likely / (NumNone + 1);
2314 const uint64_t Unlikely = 0;
2315
2316 SmallVector<uint64_t, 16> Result;
2317 Result.reserve(Likelihoods.size());
2318 for (const auto LH : Likelihoods) {
2319 switch (LH) {
2320 case Stmt::LH_Unlikely:
2321 Result.push_back(Unlikely);
2322 break;
2323 case Stmt::LH_None:
2324 Result.push_back(None);
2325 break;
2326 case Stmt::LH_Likely:
2327 Result.push_back(Likely);
2328 break;
2329 }
2330 }
2331
2332 return Result;
2333}
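// Worked example for the weights above: one [[likely]], two plain and one
// [[unlikely]] case give
//   Likely   = INT32_MAX / (1 + 2) = 715827882
//   None     = Likely / (2 + 1)    = 238609294
//   Unlikely = 0
// so even if both plain cases are merged during optimization, their
// combined weight (477218588) stays below a single Likely weight.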
2334
2335 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
2336 // Handle nested switch statements.
2337 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2338 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2339 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2340 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2341
2342 // See if we can constant fold the condition of the switch and therefore only
2343 // emit the live case statement (if any) of the switch.
2344 llvm::APSInt ConstantCondValue;
2345 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
2346 SmallVector<const Stmt *, 4> CaseStmts;
2347 const SwitchCase *Case = nullptr;
2348 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2349 getContext(), Case)) {
2350 if (Case)
2351 incrementProfileCounter(Case);
2352 RunCleanupsScope ExecutedScope(*this);
2353
2354 if (S.getInit())
2355 EmitStmt(S.getInit());
2356
2357 // Emit the condition variable if needed inside the entire cleanup scope
2358 // used by this special case for constant folded switches.
2359 if (S.getConditionVariable())
2360 EmitDecl(*S.getConditionVariable(), /*EvaluateConditionDecl=*/true);
2361
2362 // At this point, we are no longer "within" a switch instance, so
2363 // we temporarily clear SwitchInsn to ensure that any embedded case
2364 // statements are not emitted.
2365 SwitchInsn = nullptr;
2366
2367 // Okay, we can dead code eliminate everything except this case. Emit the
2368 // specified series of statements and we're good.
2369 for (const Stmt *CaseStmt : CaseStmts)
2370 EmitStmt(CaseStmt);
2371 incrementProfileCounter(&S);
2372 PGO->markStmtMaybeUsed(S.getBody());
2373
2374 // Now we want to restore the saved switch instance so that nested
2375 // switches continue to function properly.
2376 SwitchInsn = SavedSwitchInsn;
2377
2378 return;
2379 }
2380 }
2381
2382 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2383
2384 RunCleanupsScope ConditionScope(*this);
2385
2386 if (S.getInit())
2387 EmitStmt(S.getInit());
2388
2389 if (S.getConditionVariable())
2390 EmitDecl(*S.getConditionVariable(), /*EvaluateConditionDecl=*/true);
2391 llvm::Value *CondV = EmitScalarExpr(S.getCond());
2393
2394 // Create basic block to hold stuff that comes after switch
2395 // statement. We also need to create a default block now so that
2396 // explicit case ranges tests can have a place to jump to on
2397 // failure.
2398 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2399 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
2400 addInstToNewSourceAtom(SwitchInsn, CondV);
2401
2402 if (HLSLControlFlowAttr != HLSLControlFlowHintAttr::SpellingNotCalculated) {
2403 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2404 llvm::ConstantInt *BranchHintConstant =
2405 HLSLControlFlowAttr ==
2406 HLSLControlFlowHintAttr::Spelling::Microsoft_branch
2407 ? llvm::ConstantInt::get(CGM.Int32Ty, 1)
2408 : llvm::ConstantInt::get(CGM.Int32Ty, 2);
2409 llvm::Metadata *Vals[] = {MDHelper.createString("hlsl.controlflow.hint"),
2410 MDHelper.createConstant(BranchHintConstant)};
2411 SwitchInsn->setMetadata("hlsl.controlflow.hint",
2412 llvm::MDNode::get(CGM.getLLVMContext(), Vals));
2413 }
2414
2415 if (PGO->haveRegionCounts()) {
2416 // Walk the SwitchCase list to find how many there are.
2417 uint64_t DefaultCount = 0;
2418 unsigned NumCases = 0;
2419 for (const SwitchCase *Case = S.getSwitchCaseList();
2420 Case;
2421 Case = Case->getNextSwitchCase()) {
2422 if (isa<DefaultStmt>(Case))
2423 DefaultCount = getProfileCount(Case);
2424 NumCases += 1;
2425 }
2426 SwitchWeights = new SmallVector<uint64_t, 16>();
2427 SwitchWeights->reserve(NumCases);
2428 // The default needs to be first. We store the edge count, so we already
2429 // know the right weight.
2430 SwitchWeights->push_back(DefaultCount);
2431 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2432 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2433 // Initialize the default case.
2434 SwitchLikelihood->push_back(Stmt::LH_None);
2435 }
2436
2437 CaseRangeBlock = DefaultBlock;
2438
2439 // Clear the insertion point to indicate we are in unreachable code.
2440 Builder.ClearInsertionPoint();
2441
2442 // All break statements jump to NextBlock. If BreakContinueStack is non-empty
2443 // then reuse last ContinueBlock.
2444 JumpDest OuterContinue;
2445 if (!BreakContinueStack.empty())
2446 OuterContinue = BreakContinueStack.back().ContinueBlock;
2447
2448 BreakContinueStack.push_back(BreakContinue(S, SwitchExit, OuterContinue));
2449
2450 // Emit switch body.
2451 EmitStmt(S.getBody());
2452
2453 BreakContinueStack.pop_back();
2454
2455 // Update the default block in case explicit case range tests have
2456 // been chained on top.
2457 SwitchInsn->setDefaultDest(CaseRangeBlock);
2458
2459 // If a default was never emitted:
2460 if (!DefaultBlock->getParent()) {
2461 // If we have cleanups, emit the default block so that there's a
2462 // place to jump through the cleanups from.
2463 if (ConditionScope.requiresCleanups()) {
2464 EmitBlock(DefaultBlock);
2465
2466 // Otherwise, just forward the default block to the switch end.
2467 } else {
2468 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2469 delete DefaultBlock;
2470 }
2471 }
2472
2473 ConditionScope.ForceCleanup();
2474
2475 // Close the last case (or DefaultBlock).
2476 EmitBranch(SwitchExit.getBlock());
2477
2478 // Insert a False Counter if SwitchStmt doesn't have DefaultStmt.
2479 if (hasSkipCounter(S.getCond())) {
2480 auto *ImplicitDefaultBlock = createBasicBlock("sw.false");
2481 EmitBlock(ImplicitDefaultBlock);
2483 Builder.CreateBr(SwitchInsn->getDefaultDest());
2484 SwitchInsn->setDefaultDest(ImplicitDefaultBlock);
2485 }
2486
2487 // Emit continuation.
2488 EmitBlock(SwitchExit.getBlock(), true);
2489 incrementProfileCounter(&S);
2490
2491 // If the switch has a condition wrapped by __builtin_unpredictable,
2492 // create metadata that specifies that the switch is unpredictable.
2493 // Don't bother if not optimizing because that metadata would not be used.
2494 auto *Call = dyn_cast<CallExpr>(S.getCond());
2495 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2496 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2497 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2498 llvm::MDBuilder MDHelper(getLLVMContext());
2499 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2500 MDHelper.createUnpredictable());
2501 }
2502 }
2503
2504 if (SwitchWeights) {
2505 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2506 "switch weights do not match switch cases");
2507 // If there's only one jump destination there's no sense weighting it.
2508 if (SwitchWeights->size() > 1)
2509 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2510 createProfileWeights(*SwitchWeights));
2511 delete SwitchWeights;
2512 } else if (SwitchLikelihood) {
2513 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2514 "switch likelihoods do not match switch cases");
2515 std::optional<SmallVector<uint64_t, 16>> LHW =
2516 getLikelihoodWeights(*SwitchLikelihood);
2517 if (LHW) {
2518 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2519 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2520 createProfileWeights(*LHW));
2521 }
2522 delete SwitchLikelihood;
2523 }
2524 SwitchInsn = SavedSwitchInsn;
2525 SwitchWeights = SavedSwitchWeights;
2526 SwitchLikelihood = SavedSwitchLikelihood;
2527 CaseRangeBlock = SavedCRBlock;
2528}
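// For illustration: with optimization enabled,
//
//   switch (__builtin_unpredictable(x)) { ... }
//
// takes the path above that attaches !unpredictable metadata to the emitted
// 'switch' instruction, telling later passes the value is hard to predict.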
2529
2530std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2531 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2532 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2533 if (Info.allowsRegister() || !Info.allowsMemory()) {
2534 if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2535 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2536
2537 llvm::Type *Ty = ConvertType(InputType);
2538 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2539 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2540 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2541 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2542
2543 return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
2544 nullptr};
2545 }
2546 }
2547
2548 Address Addr = InputValue.getAddress();
2549 ConstraintStr += '*';
2550 return {InputValue.getPointer(*this), Addr.getElementType()};
2551}
2552std::pair<llvm::Value *, llvm::Type *>
2553CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2554 const Expr *InputExpr,
2555 std::string &ConstraintStr) {
2556 // If this can't be a register or memory, i.e., has to be a constant
2557 // (immediate or symbolic), try to emit it as such.
2558 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2559 if (Info.requiresImmediateConstant()) {
2560 Expr::EvalResult EVResult;
2561 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2562
2563 llvm::APSInt IntResult;
2564 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2565 getContext()))
2566 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2567 }
2568
2569 Expr::EvalResult Result;
2570 if (InputExpr->EvaluateAsInt(Result, getContext()))
2571 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2572 nullptr};
2573 }
2574
2575 if (Info.allowsRegister() || !Info.allowsMemory())
2576 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2577 return {EmitScalarExpr(InputExpr), nullptr};
2578 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2579 return {EmitScalarExpr(InputExpr), nullptr};
2580 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2581 LValue Dest = EmitLValue(InputExpr);
2582 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2583 InputExpr->getExprLoc());
2584}
2585
2586/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2587/// asm call instruction. The !srcloc MDNode contains a list of constant
2588/// integers which are the source locations of the start of each line in the
2589/// asm.
2590static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2591 CodeGenFunction &CGF) {
2592 SmallVector<llvm::Metadata *, 8> Locs;
2593 // Add the location of the first line to the MDNode.
2594 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2595 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2596 StringRef StrVal = Str->getString();
2597 if (!StrVal.empty()) {
2598 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2599 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2600 unsigned StartToken = 0;
2601 unsigned ByteOffset = 0;
2602
2603 // Add the location of the start of each subsequent line of the asm to the
2604 // MDNode.
2605 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2606 if (StrVal[i] != '\n') continue;
2607 SourceLocation LineLoc = Str->getLocationOfByte(
2608 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2609 Locs.push_back(llvm::ConstantAsMetadata::get(
2610 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2611 }
2612 }
2613
2614 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2615}
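// For illustration: a two-line asm string such as
//
//   asm("movl %%eax, %%ebx\n\t"
//       "movl %%ebx, %%ecx" ::: "ebx", "ecx");
//
// yields a !srcloc node with two entries: the raw source location of the
// string's start, plus the location of the first byte after the '\n'.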
2616
2617static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2618 bool HasUnwindClobber, bool ReadOnly,
2619 bool ReadNone, bool NoMerge, bool NoConvergent,
2620 const AsmStmt &S,
2621 const std::vector<llvm::Type *> &ResultRegTypes,
2622 const std::vector<llvm::Type *> &ArgElemTypes,
2623 CodeGenFunction &CGF,
2624 std::vector<llvm::Value *> &RegResults) {
2625 if (!HasUnwindClobber)
2626 Result.addFnAttr(llvm::Attribute::NoUnwind);
2627
2628 if (NoMerge)
2629 Result.addFnAttr(llvm::Attribute::NoMerge);
2630 // Attach readnone and readonly attributes.
2631 if (!HasSideEffect) {
2632 if (ReadNone)
2633 Result.setDoesNotAccessMemory();
2634 else if (ReadOnly)
2635 Result.setOnlyReadsMemory();
2636 }
2637
2638 // Add elementtype attribute for indirect constraints.
2639 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2640 if (Pair.value()) {
2641 auto Attr = llvm::Attribute::get(
2642 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2643 Result.addParamAttr(Pair.index(), Attr);
2644 }
2645 }
2646
2647 // Slap the source location of the inline asm into a !srcloc metadata on the
2648 // call.
2649 const StringLiteral *SL;
2650 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S);
2651 gccAsmStmt &&
2652 (SL = dyn_cast<StringLiteral>(gccAsmStmt->getAsmStringExpr()))) {
2653 Result.setMetadata("srcloc", getAsmSrcLocInfo(SL, CGF));
2654 } else {
2655 // At least put the line number on MS inline asm blobs and GCC asm constexpr
2656 // strings.
2657 llvm::Constant *Loc =
2658 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2659 Result.setMetadata("srcloc",
2660 llvm::MDNode::get(CGF.getLLVMContext(),
2661 llvm::ConstantAsMetadata::get(Loc)));
2662 }
2663
2664 // Make inline-asm calls Key for the debug info feature Key Instructions.
2665 CGF.addInstToNewSourceAtom(&Result, nullptr);
2666
2667 if (!NoConvergent && CGF.getLangOpts().assumeFunctionsAreConvergent())
2668 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2669 // convergent (meaning, they may call an intrinsically convergent op, such
2670 // as bar.sync, and so can't have certain optimizations applied around
2671 // them) unless it's explicitly marked 'noconvergent'.
2672 Result.addFnAttr(llvm::Attribute::Convergent);
2673 // Extract all of the register value results from the asm.
2674 if (ResultRegTypes.size() == 1) {
2675 RegResults.push_back(&Result);
2676 } else {
2677 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2678 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2679 RegResults.push_back(Tmp);
2680 }
2681 }
2682}
2683
2684static void
2685 EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
2686 const llvm::ArrayRef<llvm::Value *> RegResults,
2687 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2688 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2689 const llvm::ArrayRef<LValue> ResultRegDests,
2690 const llvm::ArrayRef<QualType> ResultRegQualTys,
2691 const llvm::BitVector &ResultTypeRequiresCast,
2692 const std::vector<std::optional<std::pair<unsigned, unsigned>>>
2693 &ResultBounds) {
2694 CGBuilderTy &Builder = CGF.Builder;
2695 CodeGenModule &CGM = CGF.CGM;
2696 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2697
2698 assert(RegResults.size() == ResultRegTypes.size());
2699 assert(RegResults.size() == ResultTruncRegTypes.size());
2700 assert(RegResults.size() == ResultRegDests.size());
2701 // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
2702 // in which case its size may grow.
2703 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2704 assert(ResultBounds.size() <= ResultRegDests.size());
2705
2706 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2707 llvm::Value *Tmp = RegResults[i];
2708 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2709
2710 if ((i < ResultBounds.size()) && ResultBounds[i].has_value()) {
2711 const auto [LowerBound, UpperBound] = ResultBounds[i].value();
2712 // FIXME: Support for nonzero lower bounds not yet implemented.
2713 assert(LowerBound == 0 && "Output operand lower bound is not zero.");
2714 llvm::Constant *UpperBoundConst =
2715 llvm::ConstantInt::get(Tmp->getType(), UpperBound);
2716 llvm::Value *IsBooleanValue =
2717 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, UpperBoundConst);
2718 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2719 Builder.CreateCall(FnAssume, IsBooleanValue);
2720 }
2721
2722 // If the result type of the LLVM IR asm doesn't match the result type of
2723 // the expression, do the conversion.
2724 if (ResultRegTypes[i] != TruncTy) {
2725
2726 // Truncate the integer result to the right size; note that TruncTy can be
2727 // a pointer.
2728 if (TruncTy->isFloatingPointTy())
2729 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2730 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2731 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2732 Tmp = Builder.CreateTrunc(
2733 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2734 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2735 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2736 uint64_t TmpSize =
2737 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2738 Tmp = Builder.CreatePtrToInt(
2739 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2740 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2741 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2742 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2743 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2744 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2745 }
2746 }
2747
2748 ApplyAtomGroup Grp(CGF.getDebugInfo());
2749 LValue Dest = ResultRegDests[i];
2750 // ResultTypeRequiresCast elements correspond to the first
2751 // ResultTypeRequiresCast.size() elements of RegResults.
2752 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2753 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2754 Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
2755 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2756 llvm::StoreInst *S = Builder.CreateStore(Tmp, A);
2757 CGF.addInstToCurrentSourceAtom(S, S->getValueOperand());
2758 continue;
2759 }
2760
2761 QualType Ty =
2762 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2763 if (Ty.isNull()) {
2764 const Expr *OutExpr = S.getOutputExpr(i);
2765 CGM.getDiags().Report(OutExpr->getExprLoc(),
2766 diag::err_store_value_to_reg);
2767 return;
2768 }
2769 Dest = CGF.MakeAddrLValue(A, Ty);
2770 }
2771 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2772 }
2773}
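// For illustration of the bounds handling above: an output operand whose
// constraint reports bounds [0, 2) -- e.g. one known to produce only 0 or
// 1 -- has its raw result guarded before the store by the equivalent of
//
//   %inbounds = icmp ult iN %tmp, 2
//   call void @llvm.assume(i1 %inbounds)
//
// letting later passes treat the stored value as a boolean.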
2774
2775 static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
2776 const AsmStmt &S) {
2777 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2778
2779 std::string Asm;
2780 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2781 Asm = GCCAsm->getAsmString();
2782
2783 auto &Ctx = CGF->CGM.getLLVMContext();
2784
2785 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2786 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2787 {StrTy->getType()}, false);
2788 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2789
2790 CGF->Builder.CreateCall(UBF, {StrTy});
2791}
2792
2793 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2794 // Pop all cleanup blocks at the end of the asm statement.
2795 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2796
2797 // Assemble the final asm string.
2798 std::string AsmString = S.generateAsmString(getContext());
2799
2800 // Get all the output and input constraints together.
2801 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2802 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2803
2804 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2805 bool IsValidTargetAsm = true;
2806 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2807 StringRef Name;
2808 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2809 Name = GAS->getOutputName(i);
2810 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2811 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2812 if (IsHipStdPar && !IsValid)
2813 IsValidTargetAsm = false;
2814 else
2815 assert(IsValid && "Failed to parse output constraint");
2816 OutputConstraintInfos.push_back(Info);
2817 }
2818
2819 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2820 StringRef Name;
2821 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2822 Name = GAS->getInputName(i);
2823 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2824 bool IsValid =
2825 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2826 if (IsHipStdPar && !IsValid)
2827 IsValidTargetAsm = false;
2828 else
2829 assert(IsValid && "Failed to parse input constraint");
2830 InputConstraintInfos.push_back(Info);
2831 }
2832
2833 if (!IsValidTargetAsm)
2834 return EmitHipStdParUnsupportedAsm(this, S);
2835
2836 std::string Constraints;
2837
2838 std::vector<LValue> ResultRegDests;
2839 std::vector<QualType> ResultRegQualTys;
2840 std::vector<llvm::Type *> ResultRegTypes;
2841 std::vector<llvm::Type *> ResultTruncRegTypes;
2842 std::vector<llvm::Type *> ArgTypes;
2843 std::vector<llvm::Type *> ArgElemTypes;
2844 std::vector<llvm::Value*> Args;
2845 llvm::BitVector ResultTypeRequiresCast;
2846 std::vector<std::optional<std::pair<unsigned, unsigned>>> ResultBounds;
2847
2848 // Keep track of inout constraints.
2849 std::string InOutConstraints;
2850 std::vector<llvm::Value*> InOutArgs;
2851 std::vector<llvm::Type*> InOutArgTypes;
2852 std::vector<llvm::Type*> InOutArgElemTypes;
2853
2854 // Keep track of out constraints for tied input operand.
2855 std::vector<std::string> OutputConstraints;
2856
2857 // Keep track of defined physregs.
2858 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2859
2860 // An inline asm can be marked readonly if it meets the following conditions:
2861 // - it doesn't have any sideeffects
2862 // - it doesn't clobber memory
2863 // - it doesn't return a value by-reference
2864 // It can be marked readnone if it doesn't have any input memory constraints
2865 // in addition to meeting the conditions listed above.
2866 bool ReadOnly = true, ReadNone = true;
2867
2868 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2869 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2870
2871 // Simplify the output constraint.
2872 std::string OutputConstraint(S.getOutputConstraint(i));
2873 OutputConstraint = getTarget().simplifyConstraint(
2874 StringRef(OutputConstraint).substr(1), &OutputConstraintInfos);
2875
2876 const Expr *OutExpr = S.getOutputExpr(i);
2877 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2878
2879 std::string GCCReg;
2880 OutputConstraint = S.addVariableConstraints(
2881 OutputConstraint, *OutExpr, getTarget(), Info.earlyClobber(),
2882 [&](const Stmt *UnspStmt, StringRef Msg) {
2883 CGM.ErrorUnsupported(UnspStmt, Msg);
2884 },
2885 &GCCReg);
2886 // Give an error on multiple outputs to same physreg.
2887 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2888 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2889
2890 OutputConstraints.push_back(OutputConstraint);
2891 LValue Dest = EmitLValue(OutExpr);
2892 if (!Constraints.empty())
2893 Constraints += ',';
2894
2895 // If this is a register output, then make the inline asm return it
2896 // by-value. If this is a memory result, return the value by-reference.
2897 QualType QTy = OutExpr->getType();
2898 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2899 hasAggregateEvaluationKind(QTy);
2900 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2901
2902 Constraints += "=" + OutputConstraint;
2903 ResultRegQualTys.push_back(QTy);
2904 ResultRegDests.push_back(Dest);
2905
2906 ResultBounds.emplace_back(Info.getOutputOperandBounds());
2907
2908 llvm::Type *Ty = ConvertTypeForMem(QTy);
2909 const bool RequiresCast = Info.allowsRegister() &&
2910 (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2911 Ty->isAggregateType());
2912
2913 ResultTruncRegTypes.push_back(Ty);
2914 ResultTypeRequiresCast.push_back(RequiresCast);
2915
2916 if (RequiresCast) {
2917 unsigned Size = getContext().getTypeSize(QTy);
2918 if (Size)
2919 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2920 else
2921 CGM.Error(OutExpr->getExprLoc(), "output size should not be zero");
2922 }
2923 ResultRegTypes.push_back(Ty);
2924 // If this output is tied to an input, and if the input is larger, then
2925 // we need to set the actual result type of the inline asm node to be the
2926 // same as the input type.
2927 if (Info.hasMatchingInput()) {
2928 unsigned InputNo;
2929 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2930 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2931 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2932 break;
2933 }
2934 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2935
2936 QualType InputTy = S.getInputExpr(InputNo)->getType();
2937 QualType OutputType = OutExpr->getType();
2938
2939 uint64_t InputSize = getContext().getTypeSize(InputTy);
2940 if (getContext().getTypeSize(OutputType) < InputSize) {
2941 // Form the asm to return the value as a larger integer or fp type.
2942 ResultRegTypes.back() = ConvertType(InputTy);
2943 }
2944 }
2945 if (llvm::Type* AdjTy =
2946 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2947 ResultRegTypes.back()))
2948 ResultRegTypes.back() = AdjTy;
2949 else {
2950 CGM.getDiags().Report(S.getAsmLoc(),
2951 diag::err_asm_invalid_type_in_input)
2952 << OutExpr->getType() << OutputConstraint;
2953 }
2954
2955 // Update largest vector width for any vector types.
2956 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2957 LargestVectorWidth =
2958 std::max((uint64_t)LargestVectorWidth,
2959 VT->getPrimitiveSizeInBits().getKnownMinValue());
2960 } else {
2961 Address DestAddr = Dest.getAddress();
2962 // Matrix types in memory are represented by arrays, but accessed through
2963 // vector pointers, with the alignment specified on the access operation.
2964 // For inline assembly, update pointer arguments to use vector pointers.
2965 // Otherwise there will be a mis-match if the matrix is also an
2966 // input-argument which is represented as vector.
2967 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2968 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2969
2970 ArgTypes.push_back(DestAddr.getType());
2971 ArgElemTypes.push_back(DestAddr.getElementType());
2972 Args.push_back(DestAddr.emitRawPointer(*this));
2973 Constraints += "=*";
2974 Constraints += OutputConstraint;
2975 ReadOnly = ReadNone = false;
2976 }
2977
2978 if (Info.isReadWrite()) {
2979 InOutConstraints += ',';
2980
2981 const Expr *InputExpr = S.getOutputExpr(i);
2982 llvm::Value *Arg;
2983 llvm::Type *ArgElemType;
2984 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2985 Info, Dest, InputExpr->getType(), InOutConstraints,
2986 InputExpr->getExprLoc());
2987
2988 if (llvm::Type* AdjTy =
2989 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2990 Arg->getType()))
2991 Arg = Builder.CreateBitCast(Arg, AdjTy);
2992
2993 // Update largest vector width for any vector types.
2994 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2995 LargestVectorWidth =
2996 std::max((uint64_t)LargestVectorWidth,
2997 VT->getPrimitiveSizeInBits().getKnownMinValue());
2998 // Only tie earlyclobber physregs.
2999 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
3000 InOutConstraints += llvm::utostr(i);
3001 else
3002 InOutConstraints += OutputConstraint;
3003
3004 InOutArgTypes.push_back(Arg->getType());
3005 InOutArgElemTypes.push_back(ArgElemType);
3006 InOutArgs.push_back(Arg);
3007 }
3008 }
3009
3010 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
3011 // to the return value slot. Only do this when returning in registers.
3012 if (isa<MSAsmStmt>(&S)) {
3013 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
3014 if (RetAI.isDirect() || RetAI.isExtend()) {
3015 // Make a fake lvalue for the return value slot.
3016 LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
3017 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
3018 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
3019 ResultRegDests, AsmString, S.getNumOutputs());
3020 SawAsmBlock = true;
3021 }
3022 }
3023
3024 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
3025 const Expr *InputExpr = S.getInputExpr(i);
3026
3027 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
3028
3029 if (Info.allowsMemory())
3030 ReadNone = false;
3031
3032 if (!Constraints.empty())
3033 Constraints += ',';
3034
3035 // Simplify the input constraint.
3036 std::string InputConstraint(S.getInputConstraint(i));
3037 InputConstraint =
3038 getTarget().simplifyConstraint(InputConstraint, &OutputConstraintInfos);
3039
3040 InputConstraint = S.addVariableConstraints(
3041 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
3042 getTarget(), false /* No EarlyClobber */,
3043 [&](const Stmt *UnspStmt, std::string_view Msg) {
3044 CGM.ErrorUnsupported(UnspStmt, Msg);
3045 });
3046
3047 std::string ReplaceConstraint(InputConstraint);
3048 llvm::Value *Arg;
3049 llvm::Type *ArgElemType;
3050 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
3051
3052 // If this input argument is tied to a larger output result, extend the
3053 // input to be the same size as the output. The LLVM backend wants to see
3054 // the input and output of a matching constraint be the same size. Note
3055 // that GCC does not define what the top bits are here. We use zext because
3056 // that is usually cheaper, but LLVM IR should really get an anyext someday.
3057 if (Info.hasTiedOperand()) {
3058 unsigned Output = Info.getTiedOperand();
3059 QualType OutputType = S.getOutputExpr(Output)->getType();
3060 QualType InputTy = InputExpr->getType();
3061
3062 if (getContext().getTypeSize(OutputType) >
3063 getContext().getTypeSize(InputTy)) {
3064 // Use ptrtoint as appropriate so that we can do our extension.
3065 if (isa<llvm::PointerType>(Arg->getType()))
3066 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
3067 llvm::Type *OutputTy = ConvertType(OutputType);
3068 if (isa<llvm::IntegerType>(OutputTy))
3069 Arg = Builder.CreateZExt(Arg, OutputTy);
3070 else if (isa<llvm::PointerType>(OutputTy))
3071 Arg = Builder.CreateZExt(Arg, IntPtrTy);
3072 else if (OutputTy->isFloatingPointTy())
3073 Arg = Builder.CreateFPExt(Arg, OutputTy);
3074 }
3075 // Deal with the tied operands' constraint code in adjustInlineAsmType.
3076 ReplaceConstraint = OutputConstraints[Output];
3077 }
3078 if (llvm::Type* AdjTy =
3079 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
3080 Arg->getType()))
3081 Arg = Builder.CreateBitCast(Arg, AdjTy);
3082 else
3083 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
3084 << InputExpr->getType() << InputConstraint;
3085
3086 // Update largest vector width for any vector types.
3087 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
3088 LargestVectorWidth =
3089 std::max((uint64_t)LargestVectorWidth,
3090 VT->getPrimitiveSizeInBits().getKnownMinValue());
3091
3092 ArgTypes.push_back(Arg->getType());
3093 ArgElemTypes.push_back(ArgElemType);
3094 Args.push_back(Arg);
3095 Constraints += InputConstraint;
3096 }
3097
3098 // Append the "input" part of inout constraints.
3099 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
3100 ArgTypes.push_back(InOutArgTypes[i]);
3101 ArgElemTypes.push_back(InOutArgElemTypes[i]);
3102 Args.push_back(InOutArgs[i]);
3103 }
3104 Constraints += InOutConstraints;
3105
3106 // Labels
3107 SmallVector<llvm::BasicBlock *, 16> Transfer;
3108 llvm::BasicBlock *Fallthrough = nullptr;
3109 bool IsGCCAsmGoto = false;
3110 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
3111 IsGCCAsmGoto = GS->isAsmGoto();
3112 if (IsGCCAsmGoto) {
3113 for (const auto *E : GS->labels()) {
3114 JumpDest Dest = getJumpDestForLabel(E->getLabel());
3115 Transfer.push_back(Dest.getBlock());
3116 if (!Constraints.empty())
3117 Constraints += ',';
3118 Constraints += "!i";
3119 }
3120 Fallthrough = createBasicBlock("asm.fallthrough");
3121 }
3122 }
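// For illustration: an asm goto such as
//
//   asm goto("..." : : : : carry, overflow);
//
// makes the loop above record the two label blocks in Transfer, append one
// "!i" constraint per label, and create an "asm.fallthrough" block for the
// non-branching path.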
3123
3124 bool HasUnwindClobber = false;
3125
3126 // Clobbers
3127 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
3128 std::string Clobber = S.getClobber(i);
3129
3130 if (Clobber == "memory")
3131 ReadOnly = ReadNone = false;
3132 else if (Clobber == "unwind") {
3133 HasUnwindClobber = true;
3134 continue;
3135 } else if (Clobber != "cc") {
3136 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
3137 if (CGM.getCodeGenOpts().StackClashProtector &&
3138 getTarget().isSPRegName(Clobber)) {
3139 CGM.getDiags().Report(S.getAsmLoc(),
3140 diag::warn_stack_clash_protection_inline_asm);
3141 }
3142 }
3143
3144 if (isa<MSAsmStmt>(&S)) {
3145 if (Clobber == "eax" || Clobber == "edx") {
3146 if (Constraints.find("=&A") != std::string::npos)
3147 continue;
3148 std::string::size_type position1 =
3149 Constraints.find("={" + Clobber + "}");
3150 if (position1 != std::string::npos) {
3151 Constraints.insert(position1 + 1, "&");
3152 continue;
3153 }
3154 std::string::size_type position2 = Constraints.find("=A");
3155 if (position2 != std::string::npos) {
3156 Constraints.insert(position2 + 1, "&");
3157 continue;
3158 }
3159 }
3160 }
3161 if (!Constraints.empty())
3162 Constraints += ',';
3163
3164 Constraints += "~{";
3165 Constraints += Clobber;
3166 Constraints += '}';
3167 }
3168
3169 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
3170 "unwind clobber can't be used with asm goto");
3171
3172 // Add machine specific clobbers
3173 std::string_view MachineClobbers = getTarget().getClobbers();
3174 if (!MachineClobbers.empty()) {
3175 if (!Constraints.empty())
3176 Constraints += ',';
3177 Constraints += MachineClobbers;
3178 }
3179
3180 llvm::Type *ResultType;
3181 if (ResultRegTypes.empty())
3182 ResultType = VoidTy;
3183 else if (ResultRegTypes.size() == 1)
3184 ResultType = ResultRegTypes[0];
3185 else
3186 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
3187
3188 llvm::FunctionType *FTy =
3189 llvm::FunctionType::get(ResultType, ArgTypes, false);
3190
3191 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3192
3193 llvm::InlineAsm::AsmDialect GnuAsmDialect =
3194 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
3195 ? llvm::InlineAsm::AD_ATT
3196 : llvm::InlineAsm::AD_Intel;
3197 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
3198 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3199
3200 llvm::InlineAsm *IA = llvm::InlineAsm::get(
3201 FTy, AsmString, Constraints, HasSideEffect,
3202 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
3203 std::vector<llvm::Value*> RegResults;
3204 llvm::CallBrInst *CBR;
3205 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3206 CBRRegResults;
3207 if (IsGCCAsmGoto) {
3208 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
3209 EmitBlock(Fallthrough);
3210 UpdateAsmCallInst(*CBR, HasSideEffect, /*HasUnwindClobber=*/false, ReadOnly,
3211 ReadNone, InNoMergeAttributedStmt,
3212 InNoConvergentAttributedStmt, S, ResultRegTypes,
3213 ArgElemTypes, *this, RegResults);
3214 // Because we are emitting code top to bottom, we don't have enough
3215 // information at this point to know precisely whether we have a critical
3216 // edge. If we have outputs, split all indirect destinations.
3217 if (!RegResults.empty()) {
3218 unsigned i = 0;
3219 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3220 llvm::Twine SynthName = Dest->getName() + ".split";
3221 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
3222 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3223 Builder.SetInsertPoint(SynthBB);
3224
3225 if (ResultRegTypes.size() == 1) {
3226 CBRRegResults[SynthBB].push_back(CBR);
3227 } else {
3228 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3229 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
3230 CBRRegResults[SynthBB].push_back(Tmp);
3231 }
3232 }
3233
3234 EmitBranch(Dest);
3235 EmitBlock(SynthBB);
3236 CBR->setIndirectDest(i++, SynthBB);
3237 }
3238 }
3239 } else if (HasUnwindClobber) {
3240 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
3241 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/true,
3242 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3243 InNoConvergentAttributedStmt, S, ResultRegTypes,
3244 ArgElemTypes, *this, RegResults);
3245 } else {
3246 llvm::CallInst *Result =
3247 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
3248 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/false,
3249 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3250 InNoConvergentAttributedStmt, S, ResultRegTypes,
3251 ArgElemTypes, *this, RegResults);
3252 }
3253
3254 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3255 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3256 ResultBounds);
3257
3258 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
3259 // different insertion point; one for each indirect destination and with
3260 // CBRRegResults rather than RegResults.
3261 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3262 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3263 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3264 Builder.SetInsertPoint(Succ, --(Succ->end()));
3265 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
3266 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3267 ResultTypeRequiresCast, ResultBounds);
3268 }
3269 }
3270}
3271
3272 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
3273 const RecordDecl *RD = S.getCapturedRecordDecl();
3274 QualType RecordTy = getContext().getCanonicalTagType(RD);
3275
3276 // Initialize the captured struct.
3277 LValue SlotLV =
3278 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
3279
3280 RecordDecl::field_iterator CurField = RD->field_begin();
3281 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
3282 E = S.capture_init_end();
3283 I != E; ++I, ++CurField) {
3284 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
3285 if (CurField->hasCapturedVLAType()) {
3286 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
3287 } else {
3288 EmitInitializerForField(*CurField, LV, *I);
3289 }
3290 }
3291
3292 return SlotLV;
3293}
3294
3295/// Generate an outlined function for the body of a CapturedStmt, store any
3296/// captured variables into the captured struct, and call the outlined function.
3297llvm::Function *
3298 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
3299 LValue CapStruct = InitCapturedStruct(S);
3300
3301 // Emit the CapturedDecl
3302 CodeGenFunction CGF(CGM, true);
3303 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3304 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3305 delete CGF.CapturedStmtInfo;
3306
3307 // Emit call to the helper function.
3308 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
3309
3310 return F;
3311}
3312
3313 Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
3314 LValue CapStruct = InitCapturedStruct(S);
3315 return CapStruct.getAddress();
3316}
3317
3318/// Creates the outlined function for a CapturedStmt.
3319llvm::Function *
3320 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
3321 assert(CapturedStmtInfo &&
3322 "CapturedStmtInfo should be set when generating the captured function");
3323 const CapturedDecl *CD = S.getCapturedDecl();
3324 const RecordDecl *RD = S.getCapturedRecordDecl();
3325 SourceLocation Loc = S.getBeginLoc();
3326 assert(CD->hasBody() && "missing CapturedDecl body");
3327
3328 // Build the argument list.
3329 ASTContext &Ctx = CGM.getContext();
3330 FunctionArgList Args;
3331 Args.append(CD->param_begin(), CD->param_end());
3332
3333 // Create the function declaration.
3334 const CGFunctionInfo &FuncInfo =
3335 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
3336 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
3337
3338 llvm::Function *F =
3339 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3340 CapturedStmtInfo->getHelperName(), &CGM.getModule());
3341 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
3342 if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
3343 F->addFnAttr("sample-profile-suffix-elision-policy", "selected");
3344 if (CD->isNothrow())
3345 F->addFnAttr(llvm::Attribute::NoUnwind);
3346
3347 // Generate the function.
3348 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
3349 CD->getBody()->getBeginLoc());
3350 // Set the context parameter in CapturedStmtInfo.
3351 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
3352 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
3353
3354 // Initialize variable-length arrays.
3355 LValue Base = MakeNaturalAlignRawAddrLValue(
3356 CapturedStmtInfo->getContextValue(), Ctx.getCanonicalTagType(RD));
3357 for (auto *FD : RD->fields()) {
3358 if (FD->hasCapturedVLAType()) {
3359 auto *ExprArg =
3360 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
3361 .getScalarVal();
3362 auto VAT = FD->getCapturedVLAType();
3363 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3364 }
3365 }
3366
3367 // If 'this' is captured, load it into CXXThisValue.
3368 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3369 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3370 LValue ThisLValue = EmitLValueForField(Base, FD);
3371 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3372 }
3373
3374 PGO->assignRegionCounters(GlobalDecl(CD), F);
3375 CapturedStmtInfo->EmitBody(*this, CD->getBody());
3376 FinishFunction(CD->getBodyRBrace());
3377
3378 return F;
3379}
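// A minimal sketch of the outlined function's shape (names illustrative):
//
//   define internal void @__captured_stmt(ptr %context) {
//     ; load captured VLA bounds and 'this' from %context, then emit the
//     ; CapturedStmt body
//   }
//
// The context parameter is the address of the "agg.captured" temporary
// built by InitCapturedStruct.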
3380
3381 // Returns the first convergence entry/loop/anchor instruction found in |BB|,
3382 // or nullptr if there is none.
3383static llvm::ConvergenceControlInst *getConvergenceToken(llvm::BasicBlock *BB) {
3384 for (auto &I : *BB) {
3385 if (auto *CI = dyn_cast<llvm::ConvergenceControlInst>(&I))
3386 return CI;
3387 }
3388 return nullptr;
3389}
3390
3391llvm::CallBase *
3392CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input) {
3393 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3394 assert(ParentToken);
3395
3396 llvm::Value *bundleArgs[] = {ParentToken};
3397 llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
3398 auto *Output = llvm::CallBase::addOperandBundle(
3399 Input, llvm::LLVMContext::OB_convergencectrl, OB, Input->getIterator());
3400 Input->replaceAllUsesWith(Output);
3401 Input->eraseFromParent();
3402 return Output;
3403}
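// A minimal sketch of the rewrite (token name illustrative):
//
//   %call = call void @f()
//
// becomes
//
//   %call = call void @f() [ "convergencectrl"(token %parent.tok) ]
//
// and all uses of the original instruction are moved to the new one.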
3404
3405llvm::ConvergenceControlInst *
3406CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB) {
3407 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3408 assert(ParentToken);
3409 return llvm::ConvergenceControlInst::CreateLoop(*BB, ParentToken);
3410}
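// A minimal sketch of the token this emits at the top of a loop header
// (names illustrative):
//
//   loop.header:
//     %loop.tok = call token @llvm.experimental.convergence.loop()
//                     [ "convergencectrl"(token %parent.tok) ]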
3411
3412llvm::ConvergenceControlInst *
3413CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3414 llvm::BasicBlock *BB = &F->getEntryBlock();
3415 llvm::ConvergenceControlInst *Token = getConvergenceToken(BB);
3416 if (Token)
3417 return Token;
3418
3419 // Adding a convergence token requires the function to be marked as
3420 // convergent.
3421 F->setConvergent();
3422 return llvm::ConvergenceControlInst::CreateEntry(*BB);
3423}
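// A minimal sketch of the entry token this places in the entry block
// (names illustrative):
//
//   define void @f() convergent {
//   entry:
//     %entry.tok = call token @llvm.experimental.convergence.entry()
//     ...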