//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO->setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      PGO->markStmtMaybeUsed(S);
      return;
    }
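
    // Illustrative example (not from this file): in
    //   return; x = 1; lbl: x = 2;
    // the assignment after the 'return' is skipped entirely, but 'lbl:'
    // forces a fresh block below, since a later 'goto lbl;' may reach it.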

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::DeferStmtClass:
  case Stmt::SEHLeaveStmtClass:
  case Stmt::SYCLKernelCallStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set.  To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors.  We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();".  Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function.  This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
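    // Illustrative example (not from this file): a full-expression such as
    //   exit(0);
    // leaves exactly such an empty, predecessor-less block behind, and it
    // is erased again right here.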
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:      EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass:   EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:      EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:     EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass:  EmitReturnStmt(cast<ReturnStmt>(*S));      break;

  case Stmt::SwitchStmtClass:  EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  case Stmt::GCCAsmStmtClass:  // Intentional fall-through.
  case Stmt::MSAsmStmtClass:   EmitAsmStmt(cast<AsmStmt>(*S));            break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPStripeDirectiveClass:
    EmitOMPStripeDirective(cast<OMPStripeDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPReverseDirectiveClass:
    EmitOMPReverseDirective(cast<OMPReverseDirective>(*S));
    break;
  case Stmt::OMPInterchangeDirectiveClass:
    EmitOMPInterchangeDirective(cast<OMPInterchangeDirective>(*S));
    break;
  case Stmt::OMPFuseDirectiveClass:
    EmitOMPFuseDirective(cast<OMPFuseDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPErrorDirectiveClass:
    EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
    EmitOMPMaskedTaskLoopDirective(cast<OMPMaskedTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
    EmitOMPMaskedTaskLoopSimdDirective(
        cast<OMPMaskedTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
    EmitOMPParallelMaskedTaskLoopDirective(
        cast<OMPParallelMaskedTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
    EmitOMPParallelMaskedTaskLoopSimdDirective(
        cast<OMPParallelMaskedTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
    break;
  case Stmt::OMPDispatchDirectiveClass:
    CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
    break;
  case Stmt::OMPScopeDirectiveClass:
    EmitOMPScopeDirective(cast<OMPScopeDirective>(*S));
    break;
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
    EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
    EmitOMPTargetTeamsGenericLoopDirective(
        cast<OMPTargetTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelGenericLoopDirectiveClass:
    EmitOMPParallelGenericLoopDirective(
        cast<OMPParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
    EmitOMPTargetParallelGenericLoopDirective(
        cast<OMPTargetParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedDirectiveClass:
    EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
    break;
  case Stmt::OMPAssumeDirectiveClass:
    EmitOMPAssumeDirective(cast<OMPAssumeDirective>(*S));
    break;
  case Stmt::OpenACCComputeConstructClass:
    EmitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*S));
    break;
  case Stmt::OpenACCLoopConstructClass:
    EmitOpenACCLoopConstruct(cast<OpenACCLoopConstruct>(*S));
    break;
  case Stmt::OpenACCCombinedConstructClass:
    EmitOpenACCCombinedConstruct(cast<OpenACCCombinedConstruct>(*S));
    break;
  case Stmt::OpenACCDataConstructClass:
    EmitOpenACCDataConstruct(cast<OpenACCDataConstruct>(*S));
    break;
  case Stmt::OpenACCEnterDataConstructClass:
    EmitOpenACCEnterDataConstruct(cast<OpenACCEnterDataConstruct>(*S));
    break;
  case Stmt::OpenACCExitDataConstructClass:
    EmitOpenACCExitDataConstruct(cast<OpenACCExitDataConstruct>(*S));
    break;
  case Stmt::OpenACCHostDataConstructClass:
    EmitOpenACCHostDataConstruct(cast<OpenACCHostDataConstruct>(*S));
    break;
  case Stmt::OpenACCWaitConstructClass:
    EmitOpenACCWaitConstruct(cast<OpenACCWaitConstruct>(*S));
    break;
  case Stmt::OpenACCInitConstructClass:
    EmitOpenACCInitConstruct(cast<OpenACCInitConstruct>(*S));
    break;
  case Stmt::OpenACCShutdownConstructClass:
    EmitOpenACCShutdownConstruct(cast<OpenACCShutdownConstruct>(*S));
    break;
  case Stmt::OpenACCSetConstructClass:
    EmitOpenACCSetConstruct(cast<OpenACCSetConstruct>(*S));
    break;
  case Stmt::OpenACCUpdateConstructClass:
    EmitOpenACCUpdateConstruct(cast<OpenACCUpdateConstruct>(*S));
    break;
  case Stmt::OpenACCAtomicConstructClass:
    EmitOpenACCAtomicConstruct(cast<OpenACCAtomicConstruct>(*S));
    break;
  case Stmt::OpenACCCacheConstructClass:
    EmitOpenACCCacheConstruct(cast<OpenACCCacheConstruct>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::DeferStmtClass:
    EmitDeferStmt(cast<DeferStmt>(*S));
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  case Stmt::SYCLKernelCallStmtClass:
    // SYCL kernel call statements are generated as wrappers around the body
    // of functions declared with the sycl_kernel_entry_point attribute. Such
    // functions are used to specify how a SYCL kernel (a function object) is
    // to be invoked; the SYCL kernel call statement contains a transformed
    // variation of the function body and is used to generate a SYCL kernel
    // caller function: a function that serves as the device side entry point
    // used to execute the SYCL kernel. The sycl_kernel_entry_point attributed
    // function is invoked by host code in order to trigger emission of the
    // device side SYCL kernel caller function and to generate metadata needed
    // by SYCL run-time library implementations; the function is otherwise
    // intended to have no effect. As such, the function body is not evaluated
    // as part of the invocation during host compilation (and the function
    // should not be called or emitted during device compilation); the SYCL
    // kernel call statement is thus handled as a null statement for the
    // purpose of code generation.
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is
/// true, this captures the expression result of the last sub-statement and
/// returns it (for use by the statement expression extension).
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  for (CompoundStmt::const_body_iterator I = S.body_begin(),
                                         E = S.body_end() - GetLast;
       I != E; ++I)
    EmitStmt(*I);

  Address RetAlloca = Address::invalid();
  if (GetLast) {
    // We have to special case labels here.  They are statements, but when put
    // at the end of a statement expression, they yield the value of their
    // subexpression.  Handle this by walking through all labels we encounter,
    // emitting them before we evaluate the subexpr.
    // Similar issues arise for attributed statements.
    const Stmt *LastStmt = S.body_back();
    while (!isa<Expr>(LastStmt)) {
      if (const auto *LS = dyn_cast<LabelStmt>(LastStmt)) {
        EmitLabel(LS->getDecl());
        LastStmt = LS->getSubStmt();
      } else if (const auto *AS = dyn_cast<AttributedStmt>(LastStmt)) {
        // FIXME: Update this if we ever have attributes that affect the
        // semantics of an expression.
        LastStmt = AS->getSubStmt();
      } else {
        llvm_unreachable("unknown value statement");
      }
    }
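
    // Illustrative example (not from this file): in the GNU statement
    // expression
    //   int x = ({ foo(); done: 42; });
    // the label 'done:' wraps the value-yielding expression, so it is
    // peeled off (and emitted) above before the final expression is
    // evaluated.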

    EnsureInsertPoint();

    const Expr *E = cast<Expr>(LastStmt);
    QualType ExprTy = E->getType();
    if (hasAggregateEvaluationKind(ExprTy)) {
      EmitAggExpr(E, AggSlot);
    } else {
      // We can't return an RValue here because there might be cleanups at
      // the end of the StmtExpr.  Because of that, we have to emit the result
      // here into a temporary alloca.
      RetAlloca = CreateMemTemp(ExprTy);
      EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                       /*IsInit*/ false);
    }
  }

  return RetAlloca;
}

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->insert(std::next(CurBB->getIterator()), BB);
  else
    CurFn->insert(CurFn->end(), BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block.  If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->insert(std::next(insn->getParent()->getIterator()), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->insert(CurFn->end(), block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (const LabelDecl *Label : Labels) {
    assert(CGF.LabelMap.count(Label));
    JumpDest &dest = CGF.LabelMap.find(Label)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}

void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  bool noinline = false;
  bool alwaysinline = false;
  bool noconvergent = false;
  HLSLControlFlowHintAttr::Spelling flattenOrBranch =
      HLSLControlFlowHintAttr::SpellingNotCalculated;
  const CallExpr *musttail = nullptr;
  const AtomicAttr *AA = nullptr;

  for (const auto *A : S.getAttrs()) {
    switch (A->getKind()) {
    default:
      break;
    case attr::NoMerge:
      nomerge = true;
      break;
    case attr::NoInline:
      noinline = true;
      break;
    case attr::AlwaysInline:
      alwaysinline = true;
      break;
    case attr::NoConvergent:
      noconvergent = true;
      break;
    case attr::MustTail: {
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
    } break;
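    // Illustrative example (not from this file):
    //   [[clang::musttail]] return f(x);
    // Sema guarantees the sub-statement is a return of a call, so the
    // casts above cannot fail; the call is recorded here and later
    // emitted as a tail call.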
    case attr::CXXAssume: {
      const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
      if (getLangOpts().CXXAssumptions && Builder.GetInsertBlock() &&
          !Assumption->HasSideEffects(getContext())) {
        llvm::Value *AssumptionVal = EmitCheckedArgForAssume(Assumption);
        Builder.CreateAssumption(AssumptionVal);
      }
    } break;
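    // Illustrative example (not from this file):
    //   [[assume(n > 0)]];
    // lowers to a call to 'llvm.assume' on the evaluated condition when
    // the expression has no side effects.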
    case attr::Atomic:
      AA = cast<AtomicAttr>(A);
      break;
    case attr::HLSLControlFlowHint: {
      flattenOrBranch = cast<HLSLControlFlowHintAttr>(A)->getSemanticSpelling();
    } break;
    }
  }
  SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
  SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
  SaveAndRestore save_noconvergent(InNoConvergentAttributedStmt, noconvergent);
  SaveAndRestore save_musttail(MustTailCall, musttail);
  SaveAndRestore save_flattenOrBranch(HLSLControlFlowAttr, flattenOrBranch);
  CGAtomicOptionsRAII AORAII(CGM, AA);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  ApplyAtomGroup Grp(getDebugInfo());
  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}

void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  ApplyAtomGroup Grp(getDebugInfo());
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
  if (CurBB && CurBB->getTerminator())
    addInstToCurrentSourceAtom(CurBB->getTerminator(), nullptr);
}
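
// Illustrative example (not from this file): with the GNU computed-goto
// extension,
//   void *tgt = &&done; goto *tgt; done:;
// each 'goto *tgt' casts the target address to i8* and adds it as an
// incoming value to the PHI node in the shared indirect-goto block above.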

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  const Stmt *Else = S.getElse();

  // The else branch of a consteval if statement is always the only branch
  // that can be runtime evaluated.
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : Else;
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }
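
  // Illustrative example (not from this file): for
  //   if consteval { /* constant evaluation only */ } else { /* runtime */ }
  // only the else branch can ever execute at runtime, so it is the only
  // branch emitted above.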

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
  ApplyDebugLocation DL(*this, S.getCond());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = Else;
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      PGO->markStmtMaybeUsed(Skipped);
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just
  // emit the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (Else)
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  // Also, differentiate between disabled PGO and a never executed branch with
  // PGO. Assuming PGO is in use:
  // - we want to ignore the [[likely]] attribute if the branch is never
  //   executed,
  // - assuming the profile is poor, preserving the attribute may still be
  //   beneficial.
  // As an approximation, preserve the attribute only if both the branch and
  // the parent context were not executed.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t ThenCount = getProfileCount(S.getThen());
  if (!ThenCount && !getCurrentProfileCount() &&
      CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), Else);

  // When measuring MC/DC, always fully evaluate the condition up front using
  // EvaluateExprAsBool() so that the test vector bitmap can be updated prior
  // to executing the body of the if.then or if.else. This is useful for when
  // there is a 'return' within the body, but this is particularly beneficial
  // when one if-stmt is nested within another if-stmt so that all of the
  // MC/DC updates are kept linear and consistent.
  if (!CGM.getCodeGenOpts().MCDCCoverage) {
    EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH,
                         /*ConditionalOp=*/nullptr,
                         /*ConditionalDecl=*/S.getConditionVariable());
  } else {
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
  }

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getThen());
  else
    incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (Else) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    // When single byte coverage mode is enabled, add a counter to else block.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(Else);
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);
}

bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
                                              bool HasEmptyBody) {
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
      CodeGenOptions::FiniteLoopsKind::Never)
    return false;

  // Now apply rules for plain C (see 6.8.5.6 in C11).
  // Loops with constant conditions do not have to make progress in any C
  // version.
  // As an extension, we consider loops whose constant expression
  // can be constant-folded.
  Expr::EvalResult Result;
  bool CondIsConstInt =
      !ControllingExpression ||
      (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
       Result.Val.isInt());

  bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
                                       Result.Val.getInt().getBoolValue());

  // Loops with non-constant conditions must make progress in C11 and later.
  if (getLangOpts().C11 && !CondIsConstInt)
    return true;

  // [C++26][intro.progress] (DR)
  // The implementation may assume that any thread will eventually do one of
  // the following:
  // [...]
  // - continue execution of a trivial infinite loop ([stmt.iter.general]).
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
          CodeGenOptions::FiniteLoopsKind::Always ||
      getLangOpts().CPlusPlus11) {
    if (HasEmptyBody && CondIsTrue) {
      CurFn->removeFnAttr(llvm::Attribute::MustProgress);
      return false;
    }
    return true;
  }
  return false;
}
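
// Illustrative example (not from this file): under the rules above,
//   while (1) { }
// is a trivial infinite loop, so 'mustprogress' is dropped from the
// enclosing function, whereas a loop with a non-constant condition in C11
// or later may be assumed by the optimizer to terminate.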

// [C++26][stmt.iter.general] (DR)
// A trivially empty iteration statement is an iteration statement matching
// one of the following forms:
//  - while ( expression ) ;
//  - while ( expression ) { }
//  - do ; while ( expression ) ;
//  - do { } while ( expression ) ;
//  - for ( init-statement expression(opt); ) ;
//  - for ( init-statement expression(opt); ) { }
template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
  if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
    if (S.getInc())
      return false;
  }
  const Stmt *Body = S.getBody();
  if (!Body || isa<NullStmt>(Body))
    return true;
  if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
    return Compound->body_empty();
  return false;
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(LoopHeader.getBlock()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(S, LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // When single byte coverage mode is enabled, add a counter to loop
  // condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    auto *I = Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);
    // Key Instructions: Emit the condition and branch as separate source
    // location atoms otherwise we may omit a step onto the loop condition in
    // favour of the `while` keyword.
    // FIXME: We could have the branch as the backup location for the
    // condition, which would probably be a better experience. Explore this
    // later.
    if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
      addInstToNewSourceAtom(CondI, nullptr);
    addInstToNewSourceAtom(I, nullptr);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    // When single byte coverage mode is enabled, add a counter to the body.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getBody());
    else
      incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(S, LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  if (llvm::EnableSingleByteCoverage)
    EmitBlockWithFallThrough(LoopBody, S.getBody());
  else
    EmitBlockWithFallThrough(LoopBody, &S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(LoopBody));

  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());
  // When single byte coverage mode is enabled, add a counter to loop
  // condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    auto *I = Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));

    // Key Instructions: Emit the condition and branch as separate source
    // location atoms otherwise we may omit a step onto the loop condition in
    // favour of the closing brace.
    // FIXME: We could have the branch as the backup location for the
    // condition, which would probably be a better experience (no jumping to
    // the brace).
    if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
      addInstToNewSourceAtom(CondI, nullptr);
    addInstToNewSourceAtom(I, nullptr);
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  std::optional<LexicalScope> ForScope;
  ForScope.emplace(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block.  Otherwise, if there is no condition variable, we can
  // form the continue block now.  If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(S, LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    // When single byte coverage mode is enabled, add a counter to loop
    // condition.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getCond());

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope && ForScope->requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
    // Key Instructions: Emit the condition and branch as separate atoms to
    // match existing loop stepping behaviour. FIXME: We could have the branch
    // as the backup location for the condition, which would probably be a
    // better experience (no jumping to the brace).
    if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
      addInstToNewSourceAtom(CondI, nullptr);
    addInstToNewSourceAtom(I, nullptr);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }

  // When single byte coverage mode is enabled, add a counter to the body.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);
  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // The last block in the loop's body (which unconditionally branches to the
  // `inc` block if there is one).
  auto *FinalBodyBB = Builder.GetInsertBlock();

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  if (ForScope)
    ForScope->ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();

  if (FinalBodyBB) {
    // Key Instructions: We want the for closing brace to be step-able on to
    // match existing behaviour.
    addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
  }
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
  // Key Instructions: Emit the condition and branch as separate atoms to
  // match existing loop stepping behaviour. FIXME: We could have the branch
  // as the backup location for the condition, which would probably be a
  // better experience.
  if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
    addInstToNewSourceAtom(CondI, nullptr);
  addInstToNewSourceAtom(I, nullptr);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(S, LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }
  // The last block in the loop's body (which unconditionally branches to the
  // `inc` block if there is one).
  auto *FinalBodyBB = Builder.GetInsertBlock();

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();

  if (FinalBodyBB) {
    // We want the for closing brace to be step-able on to match existing
    // behaviour.
    addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
  }
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// Determine if the given call uses the swiftasync calling convention.
static bool isSwiftAsyncCallee(const CallExpr *CE) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return false;
    }
  } else {
    return false;
  }
  return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
}
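
// Illustrative example (not from this file): in
//   __attribute__((swiftasynccall)) void f(void *ctx) { return g(ctx); }
// where g is also swiftasynccall, EmitReturnStmt below marks the call to g
// as the musttail call.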

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function
/// returns non-void.  Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(SLocPtr, ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our
  // over-conservative rules about not jumping to statements following block
  // literals with non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();

  // If we're in a swiftasynccall function, and the return expression is a
  // call to a swiftasynccall function, mark the call as the musttail call.
  std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
  if (RV && CurFnInfo &&
      CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
    if (auto CE = dyn_cast<CallExpr>(RV)) {
      if (isSwiftAsyncCallee(CE)) {
        SaveMustTail.emplace(MustTailCall, CE);
      }
    }
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the
    // expression rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    auto *I = Builder.CreateStore(Result.getScalarVal(), ReturnValue);
    addInstToCurrentSourceAtom(I, I->getValueOperand());
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar: {
      llvm::Value *Ret = EmitScalarExpr(RV);
      if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
        EmitStoreOfScalar(Ret, MakeAddrLValue(ReturnValue, RV->getType()),
                          /*isInit*/ true);
      } else {
        auto *I = Builder.CreateStore(Ret, ReturnValue);
        addInstToCurrentSourceAtom(I, I->getValueOperand());
      }
      break;
    }
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I, /*EvaluateConditionDecl=*/true);
}

template <typename StmtType>
auto CodeGenFunction::getBreakContinueTarget(const StmtType &S)
    -> const BreakContinue * {
  if (!S.hasLabelTarget())
    return &BreakContinueStack.back();

  const Stmt *LoopOrSwitch = S.getNamedLoopOrSwitch();
  assert(LoopOrSwitch && "break/continue target not set?");
  for (const BreakContinue &BC : llvm::reverse(BreakContinueStack))
    if (BC.LoopOrSwitch == LoopOrSwitch)
      return &BC;

  llvm_unreachable("break/continue target not found");
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  ApplyAtomGroup Grp(getDebugInfo());
  EmitBranchThroughCleanup(getBreakContinueTarget(S)->BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  ApplyAtomGroup Grp(getDebugInfo());
  EmitBranchThroughCleanup(getBreakContinueTarget(S)->ContinueBlock);
}

/// EmitCaseStmtRange - If case statement range is not too big then
/// add multiple cases to switch instruction, one for each value within
/// the range. If range is too big then emit "if" condition check.
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
                                        ArrayRef<const Attr *> Attrs) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      else if (SwitchLikelihood)
        SwitchLikelihood->push_back(LH);

      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }
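
  // Illustrative example (not from this file): the GNU extension
  //   case 10 ... 13:
  // expands to four discrete switch cases above, while a wide range such as
  //   case 0 ... 999:
  // falls through to the subtract-and-compare chain emitted below.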

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->insert(CurFn->end(), CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, i.e., the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  } else if (SwitchLikelihood)
    Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);

  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}
1869
1870void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1871 ArrayRef<const Attr *> Attrs) {
1872 // If there is no enclosing switch instance that we're aware of, then this
1873 // case statement and its block can be elided. This situation only happens
1874 // when we've constant-folded the switch, are emitting the constant case,
1875 // and part of the constant case includes another case statement. For
1876 // instance: switch (4) { case 4: do { case 5: } while (1); }
1877 if (!SwitchInsn) {
1878 EmitStmt(S.getSubStmt());
1879 return;
1880 }
1881
1882 // Handle case ranges.
1883 if (S.getRHS()) {
1884 EmitCaseStmtRange(S, Attrs);
1885 return;
1886 }
1887
1888 llvm::ConstantInt *CaseVal =
1889 Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1890
1891 // Emit debug info for the case value if it is an enum value.
1892 const ConstantExpr *CE;
1893 if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1894 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1895 else
1896 CE = dyn_cast<ConstantExpr>(S.getLHS());
1897 if (CE) {
1898 if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1899 if (CGDebugInfo *Dbg = getDebugInfo())
1900 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1901 Dbg->EmitGlobalVariable(DE->getDecl(),
1902 APValue(llvm::APSInt(CaseVal->getValue())));
1903 }
1904
1905 if (SwitchLikelihood)
1906 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1907
1908 // If the body of the case is just a 'break', try not to emit an empty block.
1909 // If we're profiling or we're not optimizing, leave the block in for better
1910 // debug and coverage analysis.
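// E.g. (illustrative, editorial): for 'case 42: break;' the case can point
// straight at the switch exit block instead of materializing an empty sw.bb.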
1911 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1912 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1913 isa<BreakStmt>(S.getSubStmt())) {
1914 JumpDest Block = BreakContinueStack.back().BreakBlock;
1915
1916 // Only do this optimization if there are no cleanups that need emitting.
1917 if (isObviouslyBranchWithoutCleanups(Block)) {
1918 if (SwitchWeights)
1919 SwitchWeights->push_back(getProfileCount(&S));
1920 SwitchInsn->addCase(CaseVal, Block.getBlock());
1921
1922 // If there was a fallthrough into this case, make sure to redirect it to
1923 // the end of the switch as well.
1924 if (Builder.GetInsertBlock()) {
1925 Builder.CreateBr(Block.getBlock());
1926 Builder.ClearInsertionPoint();
1927 }
1928 return;
1929 }
1930 }
1931
1932 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1933 EmitBlockWithFallThrough(CaseDest, &S);
1934 if (SwitchWeights)
1935 SwitchWeights->push_back(getProfileCount(&S));
1936 SwitchInsn->addCase(CaseVal, CaseDest);
1937
1938 // Recursively emitting the statement is acceptable, but is not wonderful for
1939 // code where we have many case statements nested together, i.e.:
1940 // case 1:
1941 // case 2:
1942 // case 3: etc.
1943 // Handling this recursively will create a new block for each case statement
1944 // that falls through to the next case, which is IR-intensive. It also
1945 // causes deep recursion, which can run into stack depth limitations. Handle
1946 // sequential non-range case statements specially.
1947 //
1948 // TODO: When the next case has a likelihood attribute, the code falls back
1949 // to the recursive algorithm. Maybe improve this case if it becomes common
1950 // practice to use a lot of attributes.
1951 const CaseStmt *CurCase = &S;
1952 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1953
1954 // Otherwise, iteratively add consecutive cases to this switch stmt.
1955 while (NextCase && NextCase->getRHS() == nullptr) {
1956 CurCase = NextCase;
1957 llvm::ConstantInt *CaseVal =
1958 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1959
1960 if (SwitchWeights)
1961 SwitchWeights->push_back(getProfileCount(NextCase));
1962 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1963 CaseDest = createBasicBlock("sw.bb");
1964 EmitBlockWithFallThrough(CaseDest, CurCase);
1965 }
1966 // Since this loop is only executed when the CaseStmt has no attributes
1967 // use a hard-coded value.
1968 if (SwitchLikelihood)
1969 SwitchLikelihood->push_back(Stmt::LH_None);
1970
1971 SwitchInsn->addCase(CaseVal, CaseDest);
1972 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1973 }
1974
1975 // Generate a stop point for debug info if the case statement is
1976 // followed by a default statement. A fallthrough case before a
1977 // default case gets its own branch target.
1978 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1979 EmitStopPoint(CurCase);
1980
1981 // Normal default recursion for non-cases.
1982 EmitStmt(CurCase->getSubStmt());
1983}
1984
1985void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1986 ArrayRef<const Attr *> Attrs) {
1987 // If there is no enclosing switch instance that we're aware of, then this
1988 // default statement can be elided. This situation only happens when we've
1989 // constant-folded the switch.
1990 if (!SwitchInsn) {
1991 EmitStmt(S.getSubStmt());
1992 return;
1993 }
1994
1995 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1996 assert(DefaultBlock->empty() &&
1997 "EmitDefaultStmt: Default block already defined?");
1998
1999 if (SwitchLikelihood)
2000 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
2001
2002 EmitBlockWithFallThrough(DefaultBlock, &S);
2003
2004 EmitStmt(S.getSubStmt());
2005}
2006
2007namespace {
2008struct EmitDeferredStatement final : EHScopeStack::Cleanup {
2009 const DeferStmt &Stmt;
2010 EmitDeferredStatement(const DeferStmt *Stmt) : Stmt(*Stmt) {}
2011
2012 void Emit(CodeGenFunction &CGF, Flags) override {
2013 // Take care that any cleanups pushed by the body of a '_Defer' statement
2014 // don't clobber the current cleanup slot value.
2015 //
2016 // Assume we have a scope that pushes a cleanup; when that scope is exited,
2017 // we need to run that cleanup; this is accomplished by emitting the cleanup
2018 // into a separate block and then branching to that block at scope exit.
2019 //
2020 // Where this gets complicated is if we exit the scope in multiple different
2021 // ways; e.g. in a 'for' loop, we may exit the scope of its body by falling
2022 // off the end (in which case we need to run the cleanup and then branch to
2023 // the increment), or by 'break'ing out of the loop (in which case we need
2024 // to run the cleanup and then branch to the loop exit block); in both cases
2025 // we first branch to the cleanup block to run the cleanup, but the block we
2026 // need to jump to *after* running the cleanup is different.
2027 //
2028 // This is accomplished using a local integer variable called the 'cleanup
2029 // slot': before branching to the cleanup block, we store a value into that
2030 // slot. Then, in the cleanup block, after running the cleanup, we load the
2031 // value of that variable and 'switch' on it to branch to the appropriate
2032 // continuation block.
2033 //
2034 // The problem that arises once '_Defer' statements are involved is that the
2035 // body of a '_Defer' is an arbitrary statement which itself can create more
2036 // cleanups. This means we may end up overwriting the cleanup slot before we
2037 // ever have a chance to 'switch' on it, which means that once we *do* get
2038 // to the 'switch', we end up in whatever block the cleanup code happened to
2039 // pick as the default 'switch' exit label!
2040 //
2041 // That is, what is normally supposed to happen is something like:
2042 //
2043 // 1. Store 'X' to cleanup slot.
2044 // 2. Branch to cleanup block.
2045 // 3. Execute cleanup.
2046 // 4. Read value from cleanup slot.
2047 // 5. Branch to the block associated with 'X'.
2048 //
2049 // But if we encounter a _Defer' statement that contains a cleanup, then
2050 // what might instead happen is:
2051 //
2052 // 1. Store 'X' to cleanup slot.
2053 // 2. Branch to cleanup block.
2054 // 3. Execute cleanup; this ends up pushing another cleanup, so:
2055 // 3a. Store 'Y' to cleanup slot.
2056 // 3b. Run steps 2–5 recursively.
2057 // 4. Read value from cleanup slot, which is now 'Y' instead of 'X'.
2058 // 5. Branch to the block associated with 'Y'... which doesn't even
2059 // exist because the value 'Y' is only meaningful for the inner
2060 // cleanup. The result is we just branch 'somewhere random'.
2061 //
2062 // The rest of the cleanup code simply isn't prepared to handle this case
2063 // because most other cleanups can't push more cleanups, and thus, emitting
2064 // other cleanups generally cannot clobber the cleanup slot.
2065 //
2066 // To prevent this from happening, save the current cleanup slot value and
2067 // restore it after emitting the '_Defer' statement.
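    // For illustration (hypothetical C input, editorial; the 'cleanup'
    // attribute is one way a defer body can itself push a cleanup):
    //
    //   while (cond) {
    //     _Defer {
    //       __attribute__((cleanup(fini))) int tmp = init();
    //       use(&tmp);
    //     }
    //     if (done) break;   // the slot already holds the 'break' destination
    //   }                    // when the defer body's own cleanup is emitted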
2068 llvm::Value *SavedCleanupDest = nullptr;
2069 if (CGF.NormalCleanupDest.isValid())
2070 SavedCleanupDest =
2071 CGF.Builder.CreateLoad(CGF.NormalCleanupDest, "cleanup.dest.saved");
2072
2073 CGF.EmitStmt(Stmt.getBody());
2074
2075 if (SavedCleanupDest && CGF.HaveInsertPoint())
2076 CGF.Builder.CreateStore(SavedCleanupDest, CGF.NormalCleanupDest);
2077
2078 // Cleanups must end with an insert point.
2079 CGF.EnsureInsertPoint();
2080 }
2081};
2082} // namespace
2083
2084void CodeGenFunction::EmitDeferStmt(const DeferStmt &S) {
2085 EHStack.pushCleanup<EmitDeferredStatement>(NormalAndEHCleanup, &S);
2086}
2087
2088/// CollectStatementsForCase - Given the body of a 'switch' statement and a
2089/// constant value that is being switched on, see if we can dead code eliminate
2090/// the body of the switch to a simple series of statements to emit. Basically,
2091/// on a switch (5) we want to find these statements:
2092/// case 5:
2093/// printf(...); <--
2094/// ++i; <--
2095/// break;
2096///
2097/// and add them to the ResultStmts vector. If it is unsafe to do this
2098/// transformation (for example, one of the elided statements contains a label
2099/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
2100/// should include statements after it (e.g. the printf() line is a substmt of
2101/// the case) then return CSFC_FallThrough. If we handled it and found a break
2102/// statement, then return CSFC_Success.
2103///
2104/// If Case is non-null, then we are looking for the specified case, checking
2105/// that nothing we jump over contains labels. If Case is null, then we found
2106/// the case and are looking for the break.
2107///
2108/// If the recursive walk actually finds our Case, then we set FoundCase to
2109/// true.
2110///
2111enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
2112static CSFC_Result CollectStatementsForCase(const Stmt *S,
2113 const SwitchCase *Case,
2114 bool &FoundCase,
2115 SmallVectorImpl<const Stmt*> &ResultStmts) {
2116 // If this is a null statement, just succeed.
2117 if (!S)
2118 return Case ? CSFC_Success : CSFC_FallThrough;
2119
2120 // If this is the switchcase (case 4: or default) that we're looking for, then
2121 // we're in business. Just add the substatement.
2122 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
2123 if (S == Case) {
2124 FoundCase = true;
2125 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
2126 ResultStmts);
2127 }
2128
2129 // Otherwise, this is some other case or default statement, just ignore it.
2130 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
2131 ResultStmts);
2132 }
2133
2134 // If we are in the live part of the code and we found our break statement,
2135 // return a success!
2136 if (!Case && isa<BreakStmt>(S))
2137 return CSFC_Success;
2138
2139 // If this is a switch statement, then it might contain the SwitchCase, the
2140 // break, or neither.
2141 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
2142 // Handle this as two cases: we might be looking for the SwitchCase (if so
2143 // the skipped statements must be skippable) or we might already have it.
2144 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
2145 bool StartedInLiveCode = FoundCase;
2146 unsigned StartSize = ResultStmts.size();
2147
2148 // If we've not found the case yet, scan through looking for it.
2149 if (Case) {
2150 // Keep track of whether we see a skipped declaration. The code could be
2151 // using the declaration even if it is skipped, so we can't optimize out
2152 // the decl if the kept statements might refer to it.
2153 bool HadSkippedDecl = false;
2154
2155 // If we're looking for the case, just see if we can skip each of the
2156 // substatements.
2157 for (; Case && I != E; ++I) {
2158 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
2159
2160 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
2161 case CSFC_Failure: return CSFC_Failure;
2162 case CSFC_Success:
2163 // A successful result means either 1) that the statement doesn't have
2164 // the case and is skippable, or 2) that it does contain the case value
2165 // and also contains the break to exit the switch. In the latter case,
2166 // we just verify the rest of the statements are elidable.
2167 if (FoundCase) {
2168 // If we found the case and skipped declarations, we can't do the
2169 // optimization.
2170 if (HadSkippedDecl)
2171 return CSFC_Failure;
2172
2173 for (++I; I != E; ++I)
2174 if (CodeGenFunction::ContainsLabel(*I, true))
2175 return CSFC_Failure;
2176 return CSFC_Success;
2177 }
2178 break;
2179 case CSFC_FallThrough:
2180 // If we have a fallthrough condition, then we must have found the
2181 // case and started to include statements. Consider the rest of the
2182 // statements in the compound statement as candidates for inclusion.
2183 assert(FoundCase && "Didn't find case but returned fallthrough?");
2184 // We recursively found Case, so we're not looking for it anymore.
2185 Case = nullptr;
2186
2187 // If we found the case and skipped declarations, we can't do the
2188 // optimization.
2189 if (HadSkippedDecl)
2190 return CSFC_Failure;
2191 break;
2192 }
2193 }
2194
2195 if (!FoundCase)
2196 return CSFC_Success;
2197
2198 assert(!HadSkippedDecl && "fallthrough after skipping decl");
2199 }
2200
2201 // If we have statements in our range, then we know that the statements are
2202 // live and need to be added to the set of statements we're tracking.
2203 bool AnyDecls = false;
2204 for (; I != E; ++I) {
2205 AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
2206
2207 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
2208 case CSFC_Failure: return CSFC_Failure;
2209 case CSFC_FallThrough:
2210 // A fallthrough result means that the statement was simple and just
2211 // included in ResultStmts; keep adding the statements that follow.
2212 break;
2213 case CSFC_Success:
2214 // A successful result means that we found the break statement and
2215 // stopped statement inclusion. We just ensure that any leftover stmts
2216 // are skippable and return success ourselves.
2217 for (++I; I != E; ++I)
2218 if (CodeGenFunction::ContainsLabel(*I, true))
2219 return CSFC_Failure;
2220 return CSFC_Success;
2221 }
2222 }
2223
2224 // If we're about to fall out of a scope without hitting a 'break;', we
2225 // can't perform the optimization if there were any decls in that scope
2226 // (we'd lose their end-of-lifetime).
2227 if (AnyDecls) {
2228 // If the entire compound statement was live, there's one more thing we
2229 // can try before giving up: emit the whole thing as a single statement.
2230 // We can do that unless the statement contains a 'break;'.
2231 // FIXME: Such a break must be at the end of a construct within this one.
2232 // We could emit this by just ignoring the BreakStmts entirely.
2233 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
2234 ResultStmts.resize(StartSize);
2235 ResultStmts.push_back(S);
2236 } else {
2237 return CSFC_Failure;
2238 }
2239 }
2240
2241 return CSFC_FallThrough;
2242 }
2243
2244 // Okay, this is some other statement that we don't handle explicitly, like a
2245 // for statement or increment etc. If we are skipping over this statement,
2246 // just verify it doesn't have labels, which would make it invalid to elide.
2247 if (Case) {
2248 if (CodeGenFunction::ContainsLabel(S, true))
2249 return CSFC_Failure;
2250 return CSFC_Success;
2251 }
2252
2253 // Otherwise, we want to include this statement. Everything is cool with that
2254 // so long as it doesn't contain a break out of the switch we're in.
2255 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
2256
2257 // Otherwise, everything is great. Include the statement and tell the caller
2258 // that we fall through and include the next statement as well.
2259 ResultStmts.push_back(S);
2260 return CSFC_FallThrough;
2261}
2262
2263/// FindCaseStatementsForValue - Find the case statement being jumped to and
2264/// then invoke CollectStatementsForCase to find the list of statements to emit
2265/// for a switch on constant. See the comment above CollectStatementsForCase
2266/// for more details.
2267static bool FindCaseStatementsForValue(const SwitchStmt &S,
2268 const llvm::APSInt &ConstantCondValue,
2269 SmallVectorImpl<const Stmt*> &ResultStmts,
2270 ASTContext &C,
2271 const SwitchCase *&ResultCase) {
2272 // First step, find the switch case that is being branched to. We can do this
2273 // efficiently by scanning the SwitchCase list.
2274 const SwitchCase *Case = S.getSwitchCaseList();
2275 const DefaultStmt *DefaultCase = nullptr;
2276
2277 for (; Case; Case = Case->getNextSwitchCase()) {
2278 // It's either a default or case. Just remember the default statement in
2279 // case we're not jumping to any numbered cases.
2280 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
2281 DefaultCase = DS;
2282 continue;
2283 }
2284
2285 // Check to see if this case is the one we're looking for.
2286 const CaseStmt *CS = cast<CaseStmt>(Case);
2287 // Don't handle case ranges yet.
2288 if (CS->getRHS()) return false;
2289
2290 // If we found our case, remember it as 'case'.
2291 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
2292 break;
2293 }
2294
2295 // If we didn't find a matching case, we use a default if it exists, or we
2296 // elide the whole switch body!
2297 if (!Case) {
2298 // It is safe to elide the body of the switch if it doesn't contain labels
2299 // etc. If it is safe, return successfully with an empty ResultStmts list.
2300 if (!DefaultCase)
2301 return !CodeGenFunction::ContainsLabel(&S);
2302 Case = DefaultCase;
2303 }
2304
2305 // Ok, we know which case is being jumped to, try to collect all the
2306 // statements that follow it. This can fail for a variety of reasons. Also,
2307 // check to see that the recursive walk actually found our case statement.
2308 // Insane cases like this can fail to find it in the recursive walk since we
2309 // don't handle every stmt kind:
2310 // switch (4) {
2311 // while (1) {
2312 // case 4: ...
2313 bool FoundCase = false;
2314 ResultCase = Case;
2315 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
2316 ResultStmts) != CSFC_Failure &&
2317 FoundCase;
2318}
2319
2320static std::optional<SmallVector<uint64_t, 16>>
2321getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
2322 // Are there enough branches to weight them?
2323 if (Likelihoods.size() <= 1)
2324 return std::nullopt;
2325
2326 uint64_t NumUnlikely = 0;
2327 uint64_t NumNone = 0;
2328 uint64_t NumLikely = 0;
2329 for (const auto LH : Likelihoods) {
2330 switch (LH) {
2331 case Stmt::LH_Unlikely:
2332 ++NumUnlikely;
2333 break;
2334 case Stmt::LH_None:
2335 ++NumNone;
2336 break;
2337 case Stmt::LH_Likely:
2338 ++NumLikely;
2339 break;
2340 }
2341 }
2342
2343 // Is there a likelihood attribute used?
2344 if (NumUnlikely == 0 && NumLikely == 0)
2345 return std::nullopt;
2346
2347 // When multiple cases share the same code they can be combined during
2348 // optimization. In that case the weights of the branch will be the sum of
2349 // the individual weights. Make sure the combined sum of all neutral cases
2350 // doesn't exceed the value of a single likely attribute.
2351 // The additions both avoid divisions by 0 and make sure the weights of None
2352 // don't exceed the weight of Likely.
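// Worked example (illustrative, editorial): with 1 likely, 2 neutral and 1
// unlikely case, Likely = INT32_MAX / 3, None = Likely / 3 and Unlikely = 0,
// so even if both neutral cases get merged their combined weight (2 * None)
// still stays below a single Likely weight.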
2353 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2354 const uint64_t None = Likely / (NumNone + 1);
2355 const uint64_t Unlikely = 0;
2356
2357 SmallVector<uint64_t, 16> Result;
2358 Result.reserve(Likelihoods.size());
2359 for (const auto LH : Likelihoods) {
2360 switch (LH) {
2361 case Stmt::LH_Unlikely:
2362 Result.push_back(Unlikely);
2363 break;
2364 case Stmt::LH_None:
2365 Result.push_back(None);
2366 break;
2367 case Stmt::LH_Likely:
2368 Result.push_back(Likely);
2369 break;
2370 }
2371 }
2372
2373 return Result;
2374}
2375
2376void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
2377 // Handle nested switch statements.
2378 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2379 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2380 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2381 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2382
2383 // See if we can constant fold the condition of the switch and therefore only
2384 // emit the live case statement (if any) of the switch.
2385 llvm::APSInt ConstantCondValue;
2386 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
2387 SmallVector<const Stmt *, 4> CaseStmts;
2388 const SwitchCase *Case = nullptr;
2389 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2390 getContext(), Case)) {
2391 if (Case)
2392 incrementProfileCounter(Case);
2393 RunCleanupsScope ExecutedScope(*this);
2394
2395 if (S.getInit())
2396 EmitStmt(S.getInit());
2397
2398 // Emit the condition variable if needed inside the entire cleanup scope
2399 // used by this special case for constant folded switches.
2400 if (S.getConditionVariable())
2401 EmitDecl(*S.getConditionVariable(), /*EvaluateConditionDecl=*/true);
2402
2403 // At this point, we are no longer "within" a switch instance, so
2404 // we can temporarily enforce this to ensure that any embedded case
2405 // statements are not emitted.
2406 SwitchInsn = nullptr;
2407
2408 // Okay, we can dead code eliminate everything except this case. Emit the
2409 // specified series of statements and we're good.
2410 for (const Stmt *CaseStmt : CaseStmts)
2411 EmitStmt(CaseStmt);
2412 incrementProfileCounter(&S);
2413 PGO->markStmtMaybeUsed(S.getBody());
2414
2415 // Now we want to restore the saved switch instance so that nested
2416 // switches continue to function properly.
2417 SwitchInsn = SavedSwitchInsn;
2418
2419 return;
2420 }
2421 }
2422
2423 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2424
2425 RunCleanupsScope ConditionScope(*this);
2426
2427 if (S.getInit())
2428 EmitStmt(S.getInit());
2429
2430 if (S.getConditionVariable())
2431 EmitDecl(*S.getConditionVariable(), /*EvaluateConditionDecl=*/true);
2432 llvm::Value *CondV = EmitScalarExpr(S.getCond());
2434
2435 // Create basic block to hold stuff that comes after switch
2436 // statement. We also need to create a default block now so that
2437 // explicit case range tests can have a place to jump to on
2438 // failure.
2439 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2440 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
2441 addInstToNewSourceAtom(SwitchInsn, CondV);
2442
2443 if (HLSLControlFlowAttr != HLSLControlFlowHintAttr::SpellingNotCalculated) {
2444 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2445 llvm::ConstantInt *BranchHintConstant =
2446 HLSLControlFlowAttr ==
2447 HLSLControlFlowHintAttr::Spelling::Microsoft_branch
2448 ? llvm::ConstantInt::get(CGM.Int32Ty, 1)
2449 : llvm::ConstantInt::get(CGM.Int32Ty, 2);
2450 llvm::Metadata *Vals[] = {MDHelper.createString("hlsl.controlflow.hint"),
2451 MDHelper.createConstant(BranchHintConstant)};
2452 SwitchInsn->setMetadata("hlsl.controlflow.hint",
2453 llvm::MDNode::get(CGM.getLLVMContext(), Vals));
2454 }
2455
2456 if (PGO->haveRegionCounts()) {
2457 // Walk the SwitchCase list to find how many there are.
2458 uint64_t DefaultCount = 0;
2459 unsigned NumCases = 0;
2460 for (const SwitchCase *Case = S.getSwitchCaseList();
2461 Case;
2462 Case = Case->getNextSwitchCase()) {
2463 if (isa<DefaultStmt>(Case))
2464 DefaultCount = getProfileCount(Case);
2465 NumCases += 1;
2466 }
2467 SwitchWeights = new SmallVector<uint64_t, 16>();
2468 SwitchWeights->reserve(NumCases);
2469 // The default needs to be first. We store the edge count, so we already
2470 // know the right weight.
2471 SwitchWeights->push_back(DefaultCount);
2472 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2473 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2474 // Initialize the default case.
2475 SwitchLikelihood->push_back(Stmt::LH_None);
2476 }
2477
2478 CaseRangeBlock = DefaultBlock;
2479
2480 // Clear the insertion point to indicate we are in unreachable code.
2481 Builder.ClearInsertionPoint();
2482
2483 // All break statements jump to the switch exit. If BreakContinueStack is
2484 // non-empty, then reuse the last ContinueBlock.
2485 JumpDest OuterContinue;
2486 if (!BreakContinueStack.empty())
2487 OuterContinue = BreakContinueStack.back().ContinueBlock;
2488
2489 BreakContinueStack.push_back(BreakContinue(S, SwitchExit, OuterContinue));
2490
2491 // Emit switch body.
2492 EmitStmt(S.getBody());
2493
2494 BreakContinueStack.pop_back();
2495
2496 // Update the default block in case explicit case range tests have
2497 // been chained on top.
2498 SwitchInsn->setDefaultDest(CaseRangeBlock);
2499
2500 // If a default was never emitted:
2501 if (!DefaultBlock->getParent()) {
2502 // If we have cleanups, emit the default block so that there's a
2503 // place to jump through the cleanups from.
2504 if (ConditionScope.requiresCleanups()) {
2505 EmitBlock(DefaultBlock);
2506
2507 // Otherwise, just forward the default block to the switch end.
2508 } else {
2509 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2510 delete DefaultBlock;
2511 }
2512 }
2513
2514 ConditionScope.ForceCleanup();
2515
2516 // Emit continuation.
2517 EmitBlock(SwitchExit.getBlock(), true);
2518 incrementProfileCounter(&S);
2519
2520 // If the switch has a condition wrapped by __builtin_unpredictable,
2521 // create metadata that specifies that the switch is unpredictable.
2522 // Don't bother if not optimizing because that metadata would not be used.
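  // E.g. (illustrative, editorial): switch (__builtin_unpredictable(x)) { ... }
  // marks the resulting switch instruction with !unpredictable metadata.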
2523 auto *Call = dyn_cast<CallExpr>(S.getCond());
2524 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2525 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2526 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2527 llvm::MDBuilder MDHelper(getLLVMContext());
2528 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2529 MDHelper.createUnpredictable());
2530 }
2531 }
2532
2533 if (SwitchWeights) {
2534 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2535 "switch weights do not match switch cases");
2536 // If there's only one jump destination there's no sense weighting it.
2537 if (SwitchWeights->size() > 1)
2538 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2539 createProfileWeights(*SwitchWeights));
2540 delete SwitchWeights;
2541 } else if (SwitchLikelihood) {
2542 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2543 "switch likelihoods do not match switch cases");
2544 std::optional<SmallVector<uint64_t, 16>> LHW =
2545 getLikelihoodWeights(*SwitchLikelihood);
2546 if (LHW) {
2547 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2548 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2549 createProfileWeights(*LHW));
2550 }
2551 delete SwitchLikelihood;
2552 }
2553 SwitchInsn = SavedSwitchInsn;
2554 SwitchWeights = SavedSwitchWeights;
2555 SwitchLikelihood = SavedSwitchLikelihood;
2556 CaseRangeBlock = SavedCRBlock;
2557}
2558
2559std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2560 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2561 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2562 if (Info.allowsRegister() || !Info.allowsMemory()) {
2563 if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2564 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2565
2566 llvm::Type *Ty = ConvertType(InputType);
2567 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2568 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2569 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2570 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2571
2572 return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
2573 nullptr};
2574 }
2575 }
2576
2577 Address Addr = InputValue.getAddress();
2578 ConstraintStr += '*';
2579 return {InputValue.getPointer(*this), Addr.getElementType()};
2580}
2581std::pair<llvm::Value *, llvm::Type *>
2582CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2583 const Expr *InputExpr,
2584 std::string &ConstraintStr) {
2585 // If this can't be a register or memory, i.e., has to be a constant
2586 // (immediate or symbolic), try to emit it as such.
2587 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2588 if (Info.requiresImmediateConstant()) {
2589 Expr::EvalResult EVResult;
2590 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2591
2592 llvm::APSInt IntResult;
2593 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2594 getContext()))
2595 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2596 }
2597
2598 Expr::EvalResult Result;
2599 if (InputExpr->EvaluateAsInt(Result, getContext()))
2600 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2601 nullptr};
2602 }
2603
2604 if (Info.allowsRegister() || !Info.allowsMemory())
2605 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2606 return {EmitScalarExpr(InputExpr), nullptr};
2607 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2608 return {EmitScalarExpr(InputExpr), nullptr};
2609 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2610 LValue Dest = EmitLValue(InputExpr);
2611 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2612 InputExpr->getExprLoc());
2613}
2614
2615/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2616/// asm call instruction. The !srcloc MDNode contains a list of constant
2617/// integers which are the source locations of the start of each line in the
2618/// asm.
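/// For example (illustrative, editorial): asm("nop\n\tnop") yields a !srcloc
/// node with two entries, the raw encoding of the location of the first 'nop'
/// and that of the byte following the embedded newline.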
2619static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2620 CodeGenFunction &CGF) {
2621 SmallVector<llvm::Metadata *, 8> Locs;
2622 // Add the location of the first line to the MDNode.
2623 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2624 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2625 StringRef StrVal = Str->getString();
2626 if (!StrVal.empty()) {
2627 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2628 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2629 unsigned StartToken = 0;
2630 unsigned ByteOffset = 0;
2631
2632 // Add the location of the start of each subsequent line of the asm to the
2633 // MDNode.
2634 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2635 if (StrVal[i] != '\n') continue;
2636 SourceLocation LineLoc = Str->getLocationOfByte(
2637 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2638 Locs.push_back(llvm::ConstantAsMetadata::get(
2639 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2640 }
2641 }
2642
2643 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2644}
2645
2646static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2647 bool HasUnwindClobber, bool ReadOnly,
2648 bool ReadNone, bool NoMerge, bool NoConvergent,
2649 const AsmStmt &S,
2650 const std::vector<llvm::Type *> &ResultRegTypes,
2651 const std::vector<llvm::Type *> &ArgElemTypes,
2652 CodeGenFunction &CGF,
2653 std::vector<llvm::Value *> &RegResults) {
2654 if (!HasUnwindClobber)
2655 Result.addFnAttr(llvm::Attribute::NoUnwind);
2656
2657 if (NoMerge)
2658 Result.addFnAttr(llvm::Attribute::NoMerge);
2659 // Attach readnone and readonly attributes.
2660 if (!HasSideEffect) {
2661 if (ReadNone)
2662 Result.setDoesNotAccessMemory();
2663 else if (ReadOnly)
2664 Result.setOnlyReadsMemory();
2665 }
2666
2667 // Add elementtype attribute for indirect constraints.
2668 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2669 if (Pair.value()) {
2670 auto Attr = llvm::Attribute::get(
2671 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2672 Result.addParamAttr(Pair.index(), Attr);
2673 }
2674 }
2675
2676 // Slap the source location of the inline asm into a !srcloc metadata on the
2677 // call.
2678 const StringLiteral *SL;
2679 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S);
2680 gccAsmStmt &&
2681 (SL = dyn_cast<StringLiteral>(gccAsmStmt->getAsmStringExpr()))) {
2682 Result.setMetadata("srcloc", getAsmSrcLocInfo(SL, CGF));
2683 } else {
2684 // At least put the line number on MS inline asm blobs and GCC asm constexpr
2685 // strings.
2686 llvm::Constant *Loc =
2687 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2688 Result.setMetadata("srcloc",
2689 llvm::MDNode::get(CGF.getLLVMContext(),
2690 llvm::ConstantAsMetadata::get(Loc)));
2691 }
2692
2693 // Make inline-asm calls Key for the debug info feature Key Instructions.
2694 CGF.addInstToNewSourceAtom(&Result, nullptr);
2695
2696 if (!NoConvergent && CGF.getLangOpts().assumeFunctionsAreConvergent())
2697 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2698 // convergent (meaning, they may call an intrinsically convergent op, such
2699 // as bar.sync, and so can't have certain optimizations applied around
2700 // them) unless it's explicitly marked 'noconvergent'.
2701 Result.addFnAttr(llvm::Attribute::Convergent);
2702 // Extract all of the register value results from the asm.
2703 if (ResultRegTypes.size() == 1) {
2704 RegResults.push_back(&Result);
2705 } else {
2706 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2707 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2708 RegResults.push_back(Tmp);
2709 }
2710 }
2711}
2712
2713static void
2714EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
2715 const llvm::ArrayRef<llvm::Value *> RegResults,
2716 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2717 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2718 const llvm::ArrayRef<LValue> ResultRegDests,
2719 const llvm::ArrayRef<QualType> ResultRegQualTys,
2720 const llvm::BitVector &ResultTypeRequiresCast,
2721 const std::vector<std::optional<std::pair<unsigned, unsigned>>>
2722 &ResultBounds) {
2723 CGBuilderTy &Builder = CGF.Builder;
2724 CodeGenModule &CGM = CGF.CGM;
2725 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2726
2727 assert(RegResults.size() == ResultRegTypes.size());
2728 assert(RegResults.size() == ResultTruncRegTypes.size());
2729 assert(RegResults.size() == ResultRegDests.size());
2730 // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2731 // in which case its size may grow.
2732 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2733 assert(ResultBounds.size() <= ResultRegDests.size());
2734
2735 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2736 llvm::Value *Tmp = RegResults[i];
2737 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2738
2739 if ((i < ResultBounds.size()) && ResultBounds[i].has_value()) {
2740 const auto [LowerBound, UpperBound] = ResultBounds[i].value();
2741 // FIXME: Support for nonzero lower bounds not yet implemented.
2742 assert(LowerBound == 0 && "Output operand lower bound is not zero.");
2743 llvm::Constant *UpperBoundConst =
2744 llvm::ConstantInt::get(Tmp->getType(), UpperBound);
2745 llvm::Value *IsBooleanValue =
2746 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, UpperBoundConst);
2747 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2748 Builder.CreateCall(FnAssume, IsBooleanValue);
2749 }
2750
2751 // If the result type of the LLVM IR asm doesn't match the result type of
2752 // the expression, do the conversion.
2753 if (ResultRegTypes[i] != TruncTy) {
2754
2755 // Truncate the integer result to the right size; note that TruncTy can be
2756 // a pointer.
2757 if (TruncTy->isFloatingPointTy())
2758 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2759 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2760 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2761 Tmp = Builder.CreateTrunc(
2762 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2763 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2764 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2765 uint64_t TmpSize =
2766 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2767 Tmp = Builder.CreatePtrToInt(
2768 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2769 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2770 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2771 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2772 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2773 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2774 }
2775 }
2776
2777 ApplyAtomGroup Grp(CGF.getDebugInfo());
2778 LValue Dest = ResultRegDests[i];
2779 // ResultTypeRequiresCast elements correspond to the first
2780 // ResultTypeRequiresCast.size() elements of RegResults.
2781 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2782 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2783 Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
2784 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2785 llvm::StoreInst *S = Builder.CreateStore(Tmp, A);
2786 CGF.addInstToCurrentSourceAtom(S, S->getValueOperand());
2787 continue;
2788 }
2789
2790 QualType Ty =
2791 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2792 if (Ty.isNull()) {
2793 const Expr *OutExpr = S.getOutputExpr(i);
2794 CGM.getDiags().Report(OutExpr->getExprLoc(),
2795 diag::err_store_value_to_reg);
2796 return;
2797 }
2798 Dest = CGF.MakeAddrLValue(A, Ty);
2799 }
2800 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2801 }
2802}
2803
2804static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
2805 const AsmStmt &S) {
2805 const AsmStmt &S) {
2806 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2807
2808 std::string Asm;
2809 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2810 Asm = GCCAsm->getAsmString();
2811
2812 auto &Ctx = CGF->CGM.getLLVMContext();
2813
2814 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2815 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2816 {StrTy->getType()}, false);
2817 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2818
2819 CGF->Builder.CreateCall(UBF, {StrTy});
2820}
2821
2822void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2823 // Pop all cleanup blocks at the end of the asm statement.
2824 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2825
2826 // Assemble the final asm string.
2827 std::string AsmString = S.generateAsmString(getContext());
2828
2829 // Get all the output and input constraints together.
2830 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2831 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2832
2833 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2834 bool IsValidTargetAsm = true;
2835 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2836 StringRef Name;
2837 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2838 Name = GAS->getOutputName(i);
2839 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2840 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2841 if (IsHipStdPar && !IsValid)
2842 IsValidTargetAsm = false;
2843 else
2844 assert(IsValid && "Failed to parse output constraint");
2845 OutputConstraintInfos.push_back(Info);
2846 }
2847
2848 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2849 StringRef Name;
2850 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2851 Name = GAS->getInputName(i);
2852 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2853 bool IsValid =
2854 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2855 if (IsHipStdPar && !IsValid)
2856 IsValidTargetAsm = false;
2857 else
2858 assert(IsValid && "Failed to parse input constraint");
2859 InputConstraintInfos.push_back(Info);
2860 }
2861
2862 if (!IsValidTargetAsm)
2863 return EmitHipStdParUnsupportedAsm(this, S);
2864
2865 std::string Constraints;
2866
2867 std::vector<LValue> ResultRegDests;
2868 std::vector<QualType> ResultRegQualTys;
2869 std::vector<llvm::Type *> ResultRegTypes;
2870 std::vector<llvm::Type *> ResultTruncRegTypes;
2871 std::vector<llvm::Type *> ArgTypes;
2872 std::vector<llvm::Type *> ArgElemTypes;
2873 std::vector<llvm::Value*> Args;
2874 llvm::BitVector ResultTypeRequiresCast;
2875 std::vector<std::optional<std::pair<unsigned, unsigned>>> ResultBounds;
2876
2877 // Keep track of inout constraints.
2878 std::string InOutConstraints;
2879 std::vector<llvm::Value*> InOutArgs;
2880 std::vector<llvm::Type*> InOutArgTypes;
2881 std::vector<llvm::Type*> InOutArgElemTypes;
2882
2883 // Keep track of out constraints for tied input operand.
2884 std::vector<std::string> OutputConstraints;
2885
2886 // Keep track of defined physregs.
2887 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2888
2889 // An inline asm can be marked readonly if it meets the following conditions:
2890 // - it doesn't have any side effects
2891 // - it doesn't clobber memory
2892 // - it doesn't return a value by-reference
2893 // It can be marked readnone if it doesn't have any input memory constraints
2894 // in addition to meeting the conditions listed above.
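  // E.g. (illustrative, editorial): asm("" : "=r"(x) : "r"(y)) qualifies as
  // readnone, asm("" : "=r"(x) : "m"(y)) only as readonly, and adding a
  // "memory" clobber or a volatile qualifier forfeits both.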
2895 bool ReadOnly = true, ReadNone = true;
2896
2897 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2898 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2899
2900 // Simplify the output constraint.
2901 std::string OutputConstraint(S.getOutputConstraint(i));
2902 OutputConstraint = getTarget().simplifyConstraint(
2903 StringRef(OutputConstraint).substr(1), &OutputConstraintInfos);
2904
2905 const Expr *OutExpr = S.getOutputExpr(i);
2906 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2907
2908 std::string GCCReg;
2909 OutputConstraint = S.addVariableConstraints(
2910 OutputConstraint, *OutExpr, getTarget(), Info.earlyClobber(),
2911 [&](const Stmt *UnspStmt, StringRef Msg) {
2912 CGM.ErrorUnsupported(UnspStmt, Msg);
2913 },
2914 &GCCReg);
2915 // Give an error on multiple outputs to same physreg.
2916 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2917 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2918
2919 OutputConstraints.push_back(OutputConstraint);
2920 LValue Dest = EmitLValue(OutExpr);
2921 if (!Constraints.empty())
2922 Constraints += ',';
2923
2924 // If this is a register output, then make the inline asm return it
2925 // by-value. If this is a memory result, return the value by-reference.
2926 QualType QTy = OutExpr->getType();
2927 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2928 hasAggregateEvaluationKind(QTy);
2929 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2930
2931 Constraints += "=" + OutputConstraint;
2932 ResultRegQualTys.push_back(QTy);
2933 ResultRegDests.push_back(Dest);
2934
2935 ResultBounds.emplace_back(Info.getOutputOperandBounds());
2936
2937 llvm::Type *Ty = ConvertTypeForMem(QTy);
2938 const bool RequiresCast = Info.allowsRegister() &&
2939 (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2940 Ty->isAggregateType());
2941
2942 ResultTruncRegTypes.push_back(Ty);
2943 ResultTypeRequiresCast.push_back(RequiresCast);
2944
2945 if (RequiresCast) {
2946 unsigned Size = getContext().getTypeSize(QTy);
2947 if (Size)
2948 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2949 else
2950 CGM.Error(OutExpr->getExprLoc(), "output size should not be zero");
2951 }
2952 ResultRegTypes.push_back(Ty);
2953 // If this output is tied to an input, and if the input is larger, then
2954 // we need to set the actual result type of the inline asm node to be the
2955 // same as the input type.
2956 if (Info.hasMatchingInput()) {
2957 unsigned InputNo;
2958 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2959 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2960 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2961 break;
2962 }
2963 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2964
2965 QualType InputTy = S.getInputExpr(InputNo)->getType();
2966 QualType OutputType = OutExpr->getType();
2967
2968 uint64_t InputSize = getContext().getTypeSize(InputTy);
2969 if (getContext().getTypeSize(OutputType) < InputSize) {
2970 // Form the asm to return the value as a larger integer or fp type.
2971 ResultRegTypes.back() = ConvertType(InputTy);
2972 }
2973 }
2974 if (llvm::Type* AdjTy =
2975 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2976 ResultRegTypes.back()))
2977 ResultRegTypes.back() = AdjTy;
2978 else {
2979 CGM.getDiags().Report(S.getAsmLoc(),
2980 diag::err_asm_invalid_type_in_input)
2981 << OutExpr->getType() << OutputConstraint;
2982 }
2983
2984 // Update largest vector width for any vector types.
2985 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2986 LargestVectorWidth =
2987 std::max((uint64_t)LargestVectorWidth,
2988 VT->getPrimitiveSizeInBits().getKnownMinValue());
2989 } else {
2990 Address DestAddr = Dest.getAddress();
2991 // Matrix types in memory are represented by arrays, but accessed through
2992 // vector pointers, with the alignment specified on the access operation.
2993 // For inline assembly, update pointer arguments to use vector pointers.
2994 // Otherwise there will be a mis-match if the matrix is also an
2995 // input-argument which is represented as vector.
2996 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2997 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2998
2999 ArgTypes.push_back(DestAddr.getType());
3000 ArgElemTypes.push_back(DestAddr.getElementType());
3001 Args.push_back(DestAddr.emitRawPointer(*this));
3002 Constraints += "=*";
3003 Constraints += OutputConstraint;
3004 ReadOnly = ReadNone = false;
3005 }
3006
3007 if (Info.isReadWrite()) {
3008 InOutConstraints += ',';
3009
3010 const Expr *InputExpr = S.getOutputExpr(i);
3011 llvm::Value *Arg;
3012 llvm::Type *ArgElemType;
3013 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
3014 Info, Dest, InputExpr->getType(), InOutConstraints,
3015 InputExpr->getExprLoc());
3016
3017 if (llvm::Type* AdjTy =
3018 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
3019 Arg->getType()))
3020 Arg = Builder.CreateBitCast(Arg, AdjTy);
3021
3022 // Update largest vector width for any vector types.
3023 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
3024 LargestVectorWidth =
3025 std::max((uint64_t)LargestVectorWidth,
3026 VT->getPrimitiveSizeInBits().getKnownMinValue());
3027 // Only tie earlyclobber physregs.
3028 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
3029 InOutConstraints += llvm::utostr(i);
3030 else
3031 InOutConstraints += OutputConstraint;
3032
3033 InOutArgTypes.push_back(Arg->getType());
3034 InOutArgElemTypes.push_back(ArgElemType);
3035 InOutArgs.push_back(Arg);
3036 }
3037 }
3038
3039 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
3040 // to the return value slot. Only do this when returning in registers.
3041 if (isa<MSAsmStmt>(&S)) {
3042 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
3043 if (RetAI.isDirect() || RetAI.isExtend()) {
3044 // Make a fake lvalue for the return value slot.
3045 LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
3046 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
3047 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
3048 ResultRegDests, AsmString, S.getNumOutputs());
3049 SawAsmBlock = true;
3050 }
3051 }
3052
3053 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
3054 const Expr *InputExpr = S.getInputExpr(i);
3055
3056 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
3057
3058 if (Info.allowsMemory())
3059 ReadNone = false;
3060
3061 if (!Constraints.empty())
3062 Constraints += ',';
3063
3064 // Simplify the input constraint.
3065 std::string InputConstraint(S.getInputConstraint(i));
3066 InputConstraint =
3067 getTarget().simplifyConstraint(InputConstraint, &OutputConstraintInfos);
3068
3069 InputConstraint = S.addVariableConstraints(
3070 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
3071 getTarget(), false /* No EarlyClobber */,
3072 [&](const Stmt *UnspStmt, std::string_view Msg) {
3073 CGM.ErrorUnsupported(UnspStmt, Msg);
3074 });
3075
3076 std::string ReplaceConstraint(InputConstraint);
3077 llvm::Value *Arg;
3078 llvm::Type *ArgElemType;
3079 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
3080
3081 // If this input argument is tied to a larger output result, extend the
3082 // input to be the same size as the output. The LLVM backend wants to see
3083 // the input and output of a matching constraint be the same size. Note
3084 // that GCC does not define what the top bits are here. We use zext because
3085 // that is usually cheaper, but LLVM IR should really get an anyext someday.
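    // E.g. (illustrative, editorial): for asm("..." : "=r"(long_out) :
    // "0"(int_in)) on a 64-bit target, the i32 input is zero-extended to i64
    // to match the tied output register.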
3086 if (Info.hasTiedOperand()) {
3087 unsigned Output = Info.getTiedOperand();
3088 QualType OutputType = S.getOutputExpr(Output)->getType();
3089 QualType InputTy = InputExpr->getType();
3090
3091 if (getContext().getTypeSize(OutputType) >
3092 getContext().getTypeSize(InputTy)) {
3093 // Use ptrtoint as appropriate so that we can do our extension.
3094 if (isa<llvm::PointerType>(Arg->getType()))
3095 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
3096 llvm::Type *OutputTy = ConvertType(OutputType);
3097 if (isa<llvm::IntegerType>(OutputTy))
3098 Arg = Builder.CreateZExt(Arg, OutputTy);
3099 else if (isa<llvm::PointerType>(OutputTy))
3100 Arg = Builder.CreateZExt(Arg, IntPtrTy);
3101 else if (OutputTy->isFloatingPointTy())
3102 Arg = Builder.CreateFPExt(Arg, OutputTy);
3103 }
3104 // Deal with the tied operands' constraint code in adjustInlineAsmType.
3105 ReplaceConstraint = OutputConstraints[Output];
3106 }
3107 if (llvm::Type* AdjTy =
3108 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
3109 Arg->getType()))
3110 Arg = Builder.CreateBitCast(Arg, AdjTy);
3111 else
3112 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
3113 << InputExpr->getType() << InputConstraint;
3114
3115 // Update largest vector width for any vector types.
3116 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
3117 LargestVectorWidth =
3118 std::max((uint64_t)LargestVectorWidth,
3119 VT->getPrimitiveSizeInBits().getKnownMinValue());
3120
3121 ArgTypes.push_back(Arg->getType());
3122 ArgElemTypes.push_back(ArgElemType);
3123 Args.push_back(Arg);
3124 Constraints += InputConstraint;
3125 }
3126
3127 // Append the "input" part of inout constraints.
3128 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
3129 ArgTypes.push_back(InOutArgTypes[i]);
3130 ArgElemTypes.push_back(InOutArgElemTypes[i]);
3131 Args.push_back(InOutArgs[i]);
3132 }
3133 Constraints += InOutConstraints;
3134
3135 // Labels
3136 SmallVector<llvm::BasicBlock *, 16> Transfer;
3137 llvm::BasicBlock *Fallthrough = nullptr;
3138 bool IsGCCAsmGoto = false;
3139 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
3140 IsGCCAsmGoto = GS->isAsmGoto();
3141 if (IsGCCAsmGoto) {
3142 for (const auto *E : GS->labels()) {
3143 JumpDest Dest = getJumpDestForLabel(E->getLabel());
3144 Transfer.push_back(Dest.getBlock());
3145 if (!Constraints.empty())
3146 Constraints += ',';
3147 Constraints += "!i";
3148 }
3149 Fallthrough = createBasicBlock("asm.fallthrough");
3150 }
3151 }
3152
3153 bool HasUnwindClobber = false;
3154
3155 // Clobbers
3156 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
3157 std::string Clobber = S.getClobber(i);
3158
3159 if (Clobber == "memory")
3160 ReadOnly = ReadNone = false;
3161 else if (Clobber == "unwind") {
3162 HasUnwindClobber = true;
3163 continue;
3164 } else if (Clobber != "cc") {
3165 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
3166 if (CGM.getCodeGenOpts().StackClashProtector &&
3167 getTarget().isSPRegName(Clobber)) {
3168 CGM.getDiags().Report(S.getAsmLoc(),
3169 diag::warn_stack_clash_protection_inline_asm);
3170 }
3171 }
3172
3173 if (isa<MSAsmStmt>(&S)) {
3174 if (Clobber == "eax" || Clobber == "edx") {
3175 if (Constraints.find("=&A") != std::string::npos)
3176 continue;
3177 std::string::size_type position1 =
3178 Constraints.find("={" + Clobber + "}");
3179 if (position1 != std::string::npos) {
3180 Constraints.insert(position1 + 1, "&");
3181 continue;
3182 }
3183 std::string::size_type position2 = Constraints.find("=A");
3184 if (position2 != std::string::npos) {
3185 Constraints.insert(position2 + 1, "&");
3186 continue;
3187 }
3188 }
3189 }
3190 if (!Constraints.empty())
3191 Constraints += ',';
3192
3193 Constraints += "~{";
3194 Constraints += Clobber;
3195 Constraints += '}';
3196 }
3197
3198 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
3199 "unwind clobber can't be used with asm goto");
3200
3201 // Add machine specific clobbers
3202 std::string_view MachineClobbers = getTarget().getClobbers();
3203 if (!MachineClobbers.empty()) {
3204 if (!Constraints.empty())
3205 Constraints += ',';
3206 Constraints += MachineClobbers;
3207 }
3208
3209 llvm::Type *ResultType;
3210 if (ResultRegTypes.empty())
3211 ResultType = VoidTy;
3212 else if (ResultRegTypes.size() == 1)
3213 ResultType = ResultRegTypes[0];
3214 else
3215 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
3216
3217 llvm::FunctionType *FTy =
3218 llvm::FunctionType::get(ResultType, ArgTypes, false);
3219
3220 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3221
3222 llvm::InlineAsm::AsmDialect GnuAsmDialect =
3223 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
3224 ? llvm::InlineAsm::AD_ATT
3225 : llvm::InlineAsm::AD_Intel;
3226 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
3227 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3228
3229 llvm::InlineAsm *IA = llvm::InlineAsm::get(
3230 FTy, AsmString, Constraints, HasSideEffect,
3231 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
3232 std::vector<llvm::Value*> RegResults;
3233 llvm::CallBrInst *CBR;
3234 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3235 CBRRegResults;
3236 if (IsGCCAsmGoto) {
3237 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
3238 EmitBlock(Fallthrough);
3239 UpdateAsmCallInst(*CBR, HasSideEffect, /*HasUnwindClobber=*/false, ReadOnly,
3240 ReadNone, InNoMergeAttributedStmt,
3241 InNoConvergentAttributedStmt, S, ResultRegTypes,
3242 ArgElemTypes, *this, RegResults);
3243 // Because we are emitting code top to bottom, we don't have enough
3244 // information at this point to know precisely whether we have a critical
3245 // edge. If we have outputs, split all indirect destinations.
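    // E.g. (illustrative, editorial): for 'asm goto ("..." : "=r"(v) ::: out)',
    // each indirect destination gets an 'out.split' block where the output
    // stores can be placed without affecting other predecessors of 'out'.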
3246 if (!RegResults.empty()) {
3247 unsigned i = 0;
3248 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3249 llvm::Twine SynthName = Dest->getName() + ".split";
3250 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
3251 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3252 Builder.SetInsertPoint(SynthBB);
3253
3254 if (ResultRegTypes.size() == 1) {
3255 CBRRegResults[SynthBB].push_back(CBR);
3256 } else {
3257 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3258 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
3259 CBRRegResults[SynthBB].push_back(Tmp);
3260 }
3261 }
3262
3263 EmitBranch(Dest);
3264 EmitBlock(SynthBB);
3265 CBR->setIndirectDest(i++, SynthBB);
3266 }
3267 }
3268 } else if (HasUnwindClobber) {
3269 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
3270 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/true,
3271 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3272 InNoConvergentAttributedStmt, S, ResultRegTypes,
3273 ArgElemTypes, *this, RegResults);
3274 } else {
3275 llvm::CallInst *Result =
3276 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
3277 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/false,
3278 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3279 InNoConvergentAttributedStmt, S, ResultRegTypes,
3280 ArgElemTypes, *this, RegResults);
3281 }
3282
3283 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3284 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3285 ResultBounds);
3286
3287 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
3288 // different insertion point; one for each indirect destination and with
3289 // CBRRegResults rather than RegResults.
3290 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3291 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3292 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3293 Builder.SetInsertPoint(Succ, --(Succ->end()));
3294 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
3295 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3296 ResultTypeRequiresCast, ResultBounds);
3297 }
3298 }
3299}
3300
3301LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
3302 const RecordDecl *RD = S.getCapturedRecordDecl();
3303 QualType RecordTy = getContext().getCanonicalTagType(RD);
3304
3305 // Initialize the captured struct.
3306 LValue SlotLV =
3307 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
3308
3309 RecordDecl::field_iterator CurField = RD->field_begin();
3310 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
3311 E = S.capture_init_end();
3312 I != E; ++I, ++CurField) {
3313 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
3314 if (CurField->hasCapturedVLAType()) {
3315 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
3316 } else {
3317 EmitInitializerForField(*CurField, LV, *I);
3318 }
3319 }
3320
3321 return SlotLV;
3322}
3323
3324/// Generate an outlined function for the body of a CapturedStmt, store any
3325/// captured variables into the captured struct, and call the outlined function.
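/// Roughly (illustrative, editorial), a captured region over a local 'x'
/// becomes:
///   struct anon { int *x; } agg.captured = { &x };
///   __captured_stmt(&agg.captured);  // outlined body reads x via the struct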
3326llvm::Function *
3327CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
3328 LValue CapStruct = InitCapturedStruct(S);
3329
3330 // Emit the CapturedDecl
3331 CodeGenFunction CGF(CGM, true);
3332 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3333 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3334 delete CGF.CapturedStmtInfo;
3335
3336 // Emit call to the helper function.
3337 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
3338
3339 return F;
3340}
3341
3342Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
3343 LValue CapStruct = InitCapturedStruct(S);
3344 return CapStruct.getAddress();
3345}
3346
3347/// Creates the outlined function for a CapturedStmt.
3348 llvm::Function *
3349CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
3350 assert(CapturedStmtInfo &&
3351 "CapturedStmtInfo should be set when generating the captured function");
3352 const CapturedDecl *CD = S.getCapturedDecl();
3353 const RecordDecl *RD = S.getCapturedRecordDecl();
3354 SourceLocation Loc = S.getBeginLoc();
3355 assert(CD->hasBody() && "missing CapturedDecl body");
3356
3357 // Build the argument list.
3358 ASTContext &Ctx = CGM.getContext();
3359 FunctionArgList Args;
3360 Args.append(CD->param_begin(), CD->param_end());
3361
3362 // Create the function declaration.
3363 const CGFunctionInfo &FuncInfo =
3364 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
3365 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
3366
3367 llvm::Function *F =
3368 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3369 CapturedStmtInfo->getHelperName(), &CGM.getModule());
3370 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
3371 if (CD->isNothrow())
3372 F->addFnAttr(llvm::Attribute::NoUnwind);
3373
3374 // Generate the function.
3375 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
3376 CD->getBody()->getBeginLoc());
3377 // Set the context parameter in CapturedStmtInfo.
3378 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
3379 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
3380
3381 // Initialize variable-length arrays.
3382 LValue Base = MakeNaturalAlignRawAddrLValue(
3383 CapturedStmtInfo->getContextValue(), Ctx.getCanonicalTagType(RD));
3384 for (auto *FD : RD->fields()) {
3385 if (FD->hasCapturedVLAType()) {
3386 auto *ExprArg =
3387 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
3388 .getScalarVal();
3389 auto VAT = FD->getCapturedVLAType();
3390 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3391 }
3392 }
3393
3394 // If 'this' is captured, load it into CXXThisValue.
3395 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3396 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3397 LValue ThisLValue = EmitLValueForField(Base, FD);
3398 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3399 }
3400
3401 PGO->assignRegionCounters(GlobalDecl(CD), F);
3402 CapturedStmtInfo->EmitBody(*this, CD->getBody());
3403 FinishFunction(CD->getBodyRBrace());
3404
3405 return F;
3406}
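// The generated helper has, schematically, the shape
//
//   define internal void @__captured_stmt(ptr noundef %__context) {
//   entry:
//     ; VLA sizes and 'this' are re-loaded from %__context (see above),
//     ; then the captured body is emitted.
//     ret void
//   }
//
// (The exact parameter list and attributes follow the CapturedDecl; this
// sketch is illustrative only.)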
3407
3408// Returns the first convergence entry/loop/anchor instruction found in |BB|,
3409// or nullptr otherwise.
3410static llvm::ConvergenceControlInst *getConvergenceToken(llvm::BasicBlock *BB) {
3411 for (auto &I : *BB) {
3412 if (auto *CI = dyn_cast<llvm::ConvergenceControlInst>(&I))
3413 return CI;
3414 }
3415 return nullptr;
3416}
3417
3418llvm::CallBase *
3419CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input) {
3420 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3421 assert(ParentToken);
3422
3423 llvm::Value *bundleArgs[] = {ParentToken};
3424 llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
3425 auto *Output = llvm::CallBase::addOperandBundle(
3426 Input, llvm::LLVMContext::OB_convergencectrl, OB, Input->getIterator());
3427 Input->replaceAllUsesWith(Output);
3428 Input->eraseFromParent();
3429 return Output;
3430}
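// For example, a convergent call inside a controlled region is rewritten
// from (illustrative IR)
//
//   %r = call i32 @f()
//
// into
//
//   %r = call i32 @f() [ "convergencectrl"(token %parent) ]
//
// where %parent is the innermost token on ConvergenceTokenStack. The old
// call is replaced and erased because operand bundles cannot be added to an
// existing instruction in place.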
3431
3432llvm::ConvergenceControlInst *
3433CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB) {
3434 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3435 assert(ParentToken);
3436 return llvm::ConvergenceControlInst::CreateLoop(*BB, ParentToken);
3437}
3438
3439llvm::ConvergenceControlInst *
3440CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3441 llvm::BasicBlock *BB = &F->getEntryBlock();
3442 llvm::ConvergenceControlInst *Token = getConvergenceToken(BB);
3443 if (Token)
3444 return Token;
3445
3446 // Adding a convergence token requires the function to be marked as
3447 // convergent.
3448 F->setConvergent();
3449 return llvm::ConvergenceControlInst::CreateEntry(*BB);
3450}
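// In IR terms, the tokens materialize as calls to the convergence control
// intrinsics (illustrative):
//
//   entry:
//     %t.entry = call token @llvm.experimental.convergence.entry()
//     ...
//   loop.header:
//     %t.loop = call token @llvm.experimental.convergence.loop()
//                   [ "convergencectrl"(token %t.entry) ]
//
// Convergent calls in each region then carry the matching token via their
// own "convergencectrl" bundle, as shown in addConvergenceControlToken.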