clang 22.0.0git
CGStmt.cpp
Go to the documentation of this file.
1//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Stmt nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGDebugInfo.h"
14#include "CGOpenMPRuntime.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "CodeGenPGO.h"
18#include "TargetInfo.h"
19#include "clang/AST/Attr.h"
20#include "clang/AST/Expr.h"
21#include "clang/AST/Stmt.h"
28#include "llvm/ADT/ArrayRef.h"
29#include "llvm/ADT/DenseMap.h"
30#include "llvm/ADT/SmallSet.h"
31#include "llvm/ADT/StringExtras.h"
32#include "llvm/IR/Assumptions.h"
33#include "llvm/IR/DataLayout.h"
34#include "llvm/IR/InlineAsm.h"
35#include "llvm/IR/Intrinsics.h"
36#include "llvm/IR/MDBuilder.h"
37#include "llvm/Support/SaveAndRestore.h"
38#include <optional>
39
40using namespace clang;
41using namespace CodeGen;
42
43//===----------------------------------------------------------------------===//
44// Statement Emission
45//===----------------------------------------------------------------------===//
46
47namespace llvm {
48extern cl::opt<bool> EnableSingleByteCoverage;
49} // namespace llvm
50
52 if (CGDebugInfo *DI = getDebugInfo()) {
54 Loc = S->getBeginLoc();
55 DI->EmitLocation(Builder, Loc);
56
57 LastStopPoint = Loc;
58 }
59}
60
62 assert(S && "Null statement?");
63 PGO->setCurrentStmt(S);
64
65 // These statements have their own debug info handling.
66 if (EmitSimpleStmt(S, Attrs))
67 return;
68
69 // Check if we are generating unreachable code.
70 if (!HaveInsertPoint()) {
71 // If so, and the statement doesn't contain a label, then we do not need to
72 // generate actual code. This is safe because (1) the current point is
73 // unreachable, so we don't need to execute the code, and (2) we've already
74 // handled the statements which update internal data structures (like the
75 // local variable map) which could be used by subsequent statements.
76 if (!ContainsLabel(S)) {
77 // Verify that any decl statements were handled as simple, they may be in
78 // scope of subsequent reachable statements.
79 assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
80 PGO->markStmtMaybeUsed(S);
81 return;
82 }
83
84 // Otherwise, make a new block to hold the code.
86 }
87
88 // Generate a stoppoint if we are emitting debug info.
90
91 // Ignore all OpenMP directives except for simd if OpenMP with Simd is
92 // enabled.
93 if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
94 if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
96 return;
97 }
98 }
99
100 switch (S->getStmtClass()) {
102 case Stmt::CXXCatchStmtClass:
103 case Stmt::SEHExceptStmtClass:
104 case Stmt::SEHFinallyStmtClass:
105 case Stmt::MSDependentExistsStmtClass:
106 llvm_unreachable("invalid statement class to emit generically");
107 case Stmt::NullStmtClass:
108 case Stmt::CompoundStmtClass:
109 case Stmt::DeclStmtClass:
110 case Stmt::LabelStmtClass:
111 case Stmt::AttributedStmtClass:
112 case Stmt::GotoStmtClass:
113 case Stmt::BreakStmtClass:
114 case Stmt::ContinueStmtClass:
115 case Stmt::DefaultStmtClass:
116 case Stmt::CaseStmtClass:
117 case Stmt::DeferStmtClass:
118 case Stmt::SEHLeaveStmtClass:
119 case Stmt::SYCLKernelCallStmtClass:
120 llvm_unreachable("should have emitted these statements as simple");
121
122#define STMT(Type, Base)
123#define ABSTRACT_STMT(Op)
124#define EXPR(Type, Base) \
125 case Stmt::Type##Class:
126#include "clang/AST/StmtNodes.inc"
127 {
128 // Remember the block we came in on.
129 llvm::BasicBlock *incoming = Builder.GetInsertBlock();
130 assert(incoming && "expression emission must have an insertion point");
131
133
134 llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
135 assert(outgoing && "expression emission cleared block!");
136
137 // The expression emitters assume (reasonably!) that the insertion
138 // point is always set. To maintain that, the call-emission code
139 // for noreturn functions has to enter a new block with no
140 // predecessors. We want to kill that block and mark the current
141 // insertion point unreachable in the common case of a call like
142 // "exit();". Since expression emission doesn't otherwise create
143 // blocks with no predecessors, we can just test for that.
144 // However, we must be careful not to do this to our incoming
145 // block, because *statement* emission does sometimes create
146 // reachable blocks which will have no predecessors until later in
147 // the function. This occurs with, e.g., labels that are not
148 // reachable by fallthrough.
149 if (incoming != outgoing && outgoing->use_empty()) {
150 outgoing->eraseFromParent();
151 Builder.ClearInsertionPoint();
152 }
153 break;
154 }
155
156 case Stmt::IndirectGotoStmtClass:
158
159 case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
160 case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
161 case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
162 case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;
163
164 case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
165
166 case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
167 case Stmt::GCCAsmStmtClass: // Intentional fall-through.
168 case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
169 case Stmt::CoroutineBodyStmtClass:
171 break;
172 case Stmt::CoreturnStmtClass:
174 break;
175 case Stmt::CapturedStmtClass: {
176 const CapturedStmt *CS = cast<CapturedStmt>(S);
178 }
179 break;
180 case Stmt::ObjCAtTryStmtClass:
182 break;
183 case Stmt::ObjCAtCatchStmtClass:
184 llvm_unreachable(
185 "@catch statements should be handled by EmitObjCAtTryStmt");
186 case Stmt::ObjCAtFinallyStmtClass:
187 llvm_unreachable(
188 "@finally statements should be handled by EmitObjCAtTryStmt");
189 case Stmt::ObjCAtThrowStmtClass:
191 break;
192 case Stmt::ObjCAtSynchronizedStmtClass:
194 break;
195 case Stmt::ObjCForCollectionStmtClass:
197 break;
198 case Stmt::ObjCAutoreleasePoolStmtClass:
200 break;
201
202 case Stmt::CXXTryStmtClass:
204 break;
205 case Stmt::CXXForRangeStmtClass:
207 break;
208 case Stmt::SEHTryStmtClass:
210 break;
211 case Stmt::OMPMetaDirectiveClass:
213 break;
214 case Stmt::OMPCanonicalLoopClass:
216 break;
217 case Stmt::OMPParallelDirectiveClass:
219 break;
220 case Stmt::OMPSimdDirectiveClass:
222 break;
223 case Stmt::OMPTileDirectiveClass:
225 break;
226 case Stmt::OMPStripeDirectiveClass:
228 break;
229 case Stmt::OMPUnrollDirectiveClass:
231 break;
232 case Stmt::OMPReverseDirectiveClass:
234 break;
235 case Stmt::OMPInterchangeDirectiveClass:
237 break;
238 case Stmt::OMPFuseDirectiveClass:
240 break;
241 case Stmt::OMPForDirectiveClass:
243 break;
244 case Stmt::OMPForSimdDirectiveClass:
246 break;
247 case Stmt::OMPSectionsDirectiveClass:
249 break;
250 case Stmt::OMPSectionDirectiveClass:
252 break;
253 case Stmt::OMPSingleDirectiveClass:
255 break;
256 case Stmt::OMPMasterDirectiveClass:
258 break;
259 case Stmt::OMPCriticalDirectiveClass:
261 break;
262 case Stmt::OMPParallelForDirectiveClass:
264 break;
265 case Stmt::OMPParallelForSimdDirectiveClass:
267 break;
268 case Stmt::OMPParallelMasterDirectiveClass:
270 break;
271 case Stmt::OMPParallelSectionsDirectiveClass:
273 break;
274 case Stmt::OMPTaskDirectiveClass:
276 break;
277 case Stmt::OMPTaskyieldDirectiveClass:
279 break;
280 case Stmt::OMPErrorDirectiveClass:
282 break;
283 case Stmt::OMPBarrierDirectiveClass:
285 break;
286 case Stmt::OMPTaskwaitDirectiveClass:
288 break;
289 case Stmt::OMPTaskgroupDirectiveClass:
291 break;
292 case Stmt::OMPFlushDirectiveClass:
294 break;
295 case Stmt::OMPDepobjDirectiveClass:
297 break;
298 case Stmt::OMPScanDirectiveClass:
300 break;
301 case Stmt::OMPOrderedDirectiveClass:
303 break;
304 case Stmt::OMPAtomicDirectiveClass:
306 break;
307 case Stmt::OMPTargetDirectiveClass:
309 break;
310 case Stmt::OMPTeamsDirectiveClass:
312 break;
313 case Stmt::OMPCancellationPointDirectiveClass:
315 break;
316 case Stmt::OMPCancelDirectiveClass:
318 break;
319 case Stmt::OMPTargetDataDirectiveClass:
321 break;
322 case Stmt::OMPTargetEnterDataDirectiveClass:
324 break;
325 case Stmt::OMPTargetExitDataDirectiveClass:
327 break;
328 case Stmt::OMPTargetParallelDirectiveClass:
330 break;
331 case Stmt::OMPTargetParallelForDirectiveClass:
333 break;
334 case Stmt::OMPTaskLoopDirectiveClass:
336 break;
337 case Stmt::OMPTaskLoopSimdDirectiveClass:
339 break;
340 case Stmt::OMPMasterTaskLoopDirectiveClass:
342 break;
343 case Stmt::OMPMaskedTaskLoopDirectiveClass:
345 break;
346 case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
349 break;
350 case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
353 break;
354 case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
357 break;
358 case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
361 break;
362 case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
365 break;
366 case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
369 break;
370 case Stmt::OMPDistributeDirectiveClass:
372 break;
373 case Stmt::OMPTargetUpdateDirectiveClass:
375 break;
376 case Stmt::OMPDistributeParallelForDirectiveClass:
379 break;
380 case Stmt::OMPDistributeParallelForSimdDirectiveClass:
383 break;
384 case Stmt::OMPDistributeSimdDirectiveClass:
386 break;
387 case Stmt::OMPTargetParallelForSimdDirectiveClass:
390 break;
391 case Stmt::OMPTargetSimdDirectiveClass:
393 break;
394 case Stmt::OMPTeamsDistributeDirectiveClass:
396 break;
397 case Stmt::OMPTeamsDistributeSimdDirectiveClass:
400 break;
401 case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
404 break;
405 case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
408 break;
409 case Stmt::OMPTargetTeamsDirectiveClass:
411 break;
412 case Stmt::OMPTargetTeamsDistributeDirectiveClass:
415 break;
416 case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
419 break;
420 case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
423 break;
424 case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
427 break;
428 case Stmt::OMPInteropDirectiveClass:
430 break;
431 case Stmt::OMPDispatchDirectiveClass:
432 CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
433 break;
434 case Stmt::OMPScopeDirectiveClass:
436 break;
437 case Stmt::OMPMaskedDirectiveClass:
439 break;
440 case Stmt::OMPGenericLoopDirectiveClass:
442 break;
443 case Stmt::OMPTeamsGenericLoopDirectiveClass:
445 break;
446 case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
449 break;
450 case Stmt::OMPParallelGenericLoopDirectiveClass:
453 break;
454 case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
457 break;
458 case Stmt::OMPParallelMaskedDirectiveClass:
460 break;
461 case Stmt::OMPAssumeDirectiveClass:
463 break;
464 case Stmt::OpenACCComputeConstructClass:
466 break;
467 case Stmt::OpenACCLoopConstructClass:
469 break;
470 case Stmt::OpenACCCombinedConstructClass:
472 break;
473 case Stmt::OpenACCDataConstructClass:
475 break;
476 case Stmt::OpenACCEnterDataConstructClass:
478 break;
479 case Stmt::OpenACCExitDataConstructClass:
481 break;
482 case Stmt::OpenACCHostDataConstructClass:
484 break;
485 case Stmt::OpenACCWaitConstructClass:
487 break;
488 case Stmt::OpenACCInitConstructClass:
490 break;
491 case Stmt::OpenACCShutdownConstructClass:
493 break;
494 case Stmt::OpenACCSetConstructClass:
496 break;
497 case Stmt::OpenACCUpdateConstructClass:
499 break;
500 case Stmt::OpenACCAtomicConstructClass:
502 break;
503 case Stmt::OpenACCCacheConstructClass:
505 break;
506 }
507}
508
511 switch (S->getStmtClass()) {
512 default:
513 return false;
514 case Stmt::NullStmtClass:
515 break;
516 case Stmt::CompoundStmtClass:
518 break;
519 case Stmt::DeclStmtClass:
521 break;
522 case Stmt::LabelStmtClass:
524 break;
525 case Stmt::AttributedStmtClass:
527 break;
528 case Stmt::GotoStmtClass:
530 break;
531 case Stmt::BreakStmtClass:
533 break;
534 case Stmt::ContinueStmtClass:
536 break;
537 case Stmt::DefaultStmtClass:
539 break;
540 case Stmt::CaseStmtClass:
541 EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
542 break;
543 case Stmt::DeferStmtClass:
545 break;
546 case Stmt::SEHLeaveStmtClass:
548 break;
549 case Stmt::SYCLKernelCallStmtClass:
550 // SYCL kernel call statements are generated as wrappers around the body
551 // of functions declared with the sycl_kernel_entry_point attribute. Such
552 // functions are used to specify how a SYCL kernel (a function object) is
553 // to be invoked; the SYCL kernel call statement contains a transformed
554 // variation of the function body and is used to generate a SYCL kernel
555 // caller function; a function that serves as the device side entry point
556 // used to execute the SYCL kernel. The sycl_kernel_entry_point attributed
557 // function is invoked by host code in order to trigger emission of the
558 // device side SYCL kernel caller function and to generate metadata needed
559 // by SYCL run-time library implementations; the function is otherwise
560 // intended to have no effect. As such, the function body is not evaluated
561 // as part of the invocation during host compilation (and the function
562 // should not be called or emitted during device compilation); the SYCL
563 // kernel call statement is thus handled as a null statement for the
564 // purpose of code generation.
565 break;
566 }
567 return true;
568}
569
570/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
571/// this captures the expression result of the last sub-statement and returns it
572/// (for use by the statement expression extension).
574 AggValueSlot AggSlot) {
575 PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
576 "LLVM IR generation of compound statement ('{}')");
577
578 // Keep track of the current cleanup stack depth, including debug scopes.
580
581 return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
582}
583
586 bool GetLast,
587 AggValueSlot AggSlot) {
588
590 E = S.body_end() - GetLast;
591 I != E; ++I)
592 EmitStmt(*I);
593
594 Address RetAlloca = Address::invalid();
595 if (GetLast) {
596 // We have to special case labels here. They are statements, but when put
597 // at the end of a statement expression, they yield the value of their
598 // subexpression. Handle this by walking through all labels we encounter,
599 // emitting them before we evaluate the subexpr.
600 // Similar issues arise for attributed statements.
601 const Stmt *LastStmt = S.body_back();
602 while (!isa<Expr>(LastStmt)) {
603 if (const auto *LS = dyn_cast<LabelStmt>(LastStmt)) {
604 EmitLabel(LS->getDecl());
605 LastStmt = LS->getSubStmt();
606 } else if (const auto *AS = dyn_cast<AttributedStmt>(LastStmt)) {
607 // FIXME: Update this if we ever have attributes that affect the
608 // semantics of an expression.
609 LastStmt = AS->getSubStmt();
610 } else {
611 llvm_unreachable("unknown value statement");
612 }
613 }
614
616
617 const Expr *E = cast<Expr>(LastStmt);
618 QualType ExprTy = E->getType();
619 if (hasAggregateEvaluationKind(ExprTy)) {
620 EmitAggExpr(E, AggSlot);
621 } else {
622 // We can't return an RValue here because there might be cleanups at
623 // the end of the StmtExpr. Because of that, we have to emit the result
624 // here into a temporary alloca.
625 RetAlloca = CreateMemTemp(ExprTy);
626 EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
627 /*IsInit*/ false);
628 }
629 }
630
631 return RetAlloca;
632}
633
635 llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
636
637 // If there is a cleanup stack, then we it isn't worth trying to
638 // simplify this block (we would need to remove it from the scope map
639 // and cleanup entry).
640 if (!EHStack.empty())
641 return;
642
643 // Can only simplify direct branches.
644 if (!BI || !BI->isUnconditional())
645 return;
646
647 // Can only simplify empty blocks.
648 if (BI->getIterator() != BB->begin())
649 return;
650
651 BB->replaceAllUsesWith(BI->getSuccessor(0));
652 BI->eraseFromParent();
653 BB->eraseFromParent();
654}
655
656void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
657 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
658
659 // Fall out of the current block (if necessary).
660 EmitBranch(BB);
661
662 if (IsFinished && BB->use_empty()) {
663 delete BB;
664 return;
665 }
666
667 // Place the block after the current block, if possible, or else at
668 // the end of the function.
669 if (CurBB && CurBB->getParent())
670 CurFn->insert(std::next(CurBB->getIterator()), BB);
671 else
672 CurFn->insert(CurFn->end(), BB);
673 Builder.SetInsertPoint(BB);
674}
675
676void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
677 // Emit a branch from the current block to the target one if this
678 // was a real block. If this was just a fall-through block after a
679 // terminator, don't emit it.
680 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
681
682 if (!CurBB || CurBB->getTerminator()) {
683 // If there is no insert point or the previous block is already
684 // terminated, don't touch it.
685 } else {
686 // Otherwise, create a fall-through branch.
687 Builder.CreateBr(Target);
688 }
689
690 Builder.ClearInsertionPoint();
691}
692
693void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
694 bool inserted = false;
695 for (llvm::User *u : block->users()) {
696 if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
697 CurFn->insert(std::next(insn->getParent()->getIterator()), block);
698 inserted = true;
699 break;
700 }
701 }
702
703 if (!inserted)
704 CurFn->insert(CurFn->end(), block);
705
706 Builder.SetInsertPoint(block);
707}
708
711 JumpDest &Dest = LabelMap[D];
712 if (Dest.isValid()) return Dest;
713
714 // Create, but don't insert, the new block.
715 Dest = JumpDest(createBasicBlock(D->getName()),
718 return Dest;
719}
720
722 // Add this label to the current lexical scope if we're within any
723 // normal cleanups. Jumps "in" to this label --- when permitted by
724 // the language --- may need to be routed around such cleanups.
725 if (EHStack.hasNormalCleanups() && CurLexicalScope)
726 CurLexicalScope->addLabel(D);
727
728 JumpDest &Dest = LabelMap[D];
729
730 // If we didn't need a forward reference to this label, just go
731 // ahead and create a destination at the current scope.
732 if (!Dest.isValid()) {
734
735 // Otherwise, we need to give this label a target depth and remove
736 // it from the branch-fixups list.
737 } else {
738 assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
739 Dest.setScopeDepth(EHStack.stable_begin());
741 }
742
743 EmitBlock(Dest.getBlock());
744
745 // Emit debug info for labels.
746 if (CGDebugInfo *DI = getDebugInfo()) {
747 if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
748 DI->setLocation(D->getLocation());
749 DI->EmitLabel(D, Builder);
750 }
751 }
752
754}
755
756/// Change the cleanup scope of the labels in this lexical scope to
757/// match the scope of the enclosing context.
759 assert(!Labels.empty());
760 EHScopeStack::stable_iterator innermostScope
761 = CGF.EHStack.getInnermostNormalCleanup();
762
763 // Change the scope depth of all the labels.
764 for (const LabelDecl *Label : Labels) {
765 assert(CGF.LabelMap.count(Label));
766 JumpDest &dest = CGF.LabelMap.find(Label)->second;
767 assert(dest.getScopeDepth().isValid());
768 assert(innermostScope.encloses(dest.getScopeDepth()));
769 dest.setScopeDepth(innermostScope);
770 }
771
772 // Reparent the labels if the new scope also has cleanups.
773 if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
774 ParentScope->Labels.append(Labels.begin(), Labels.end());
775 }
776}
777
778
780 EmitLabel(S.getDecl());
781
782 // IsEHa - emit eha.scope.begin if it's a side entry of a scope
783 if (getLangOpts().EHAsynch && S.isSideEntry())
785
786 EmitStmt(S.getSubStmt());
787}
788
790 bool nomerge = false;
791 bool noinline = false;
792 bool alwaysinline = false;
793 bool noconvergent = false;
794 HLSLControlFlowHintAttr::Spelling flattenOrBranch =
795 HLSLControlFlowHintAttr::SpellingNotCalculated;
796 const CallExpr *musttail = nullptr;
797 const AtomicAttr *AA = nullptr;
798
799 for (const auto *A : S.getAttrs()) {
800 switch (A->getKind()) {
801 default:
802 break;
803 case attr::NoMerge:
804 nomerge = true;
805 break;
806 case attr::NoInline:
807 noinline = true;
808 break;
809 case attr::AlwaysInline:
810 alwaysinline = true;
811 break;
812 case attr::NoConvergent:
813 noconvergent = true;
814 break;
815 case attr::MustTail: {
816 const Stmt *Sub = S.getSubStmt();
817 const ReturnStmt *R = cast<ReturnStmt>(Sub);
818 musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
819 } break;
820 case attr::CXXAssume: {
821 const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
822 if (getLangOpts().CXXAssumptions && Builder.GetInsertBlock() &&
823 !Assumption->HasSideEffects(getContext())) {
824 llvm::Value *AssumptionVal = EmitCheckedArgForAssume(Assumption);
825 Builder.CreateAssumption(AssumptionVal);
826 }
827 } break;
828 case attr::Atomic:
829 AA = cast<AtomicAttr>(A);
830 break;
831 case attr::HLSLControlFlowHint: {
832 flattenOrBranch = cast<HLSLControlFlowHintAttr>(A)->getSemanticSpelling();
833 } break;
834 }
835 }
836 SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
837 SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
838 SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
839 SaveAndRestore save_noconvergent(InNoConvergentAttributedStmt, noconvergent);
840 SaveAndRestore save_musttail(MustTailCall, musttail);
841 SaveAndRestore save_flattenOrBranch(HLSLControlFlowAttr, flattenOrBranch);
842 CGAtomicOptionsRAII AORAII(CGM, AA);
843 EmitStmt(S.getSubStmt(), S.getAttrs());
844}
845
847 // If this code is reachable then emit a stop point (if generating
848 // debug info). We have to do this ourselves because we are on the
849 // "simple" statement path.
850 if (HaveInsertPoint())
851 EmitStopPoint(&S);
852
855}
856
857
860 if (const LabelDecl *Target = S.getConstantTarget()) {
862 return;
863 }
864
865 // Ensure that we have an i8* for our PHI node.
866 llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
867 Int8PtrTy, "addr");
868 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
869
870 // Get the basic block for the indirect goto.
871 llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
872
873 // The first instruction in the block has to be the PHI for the switch dest,
874 // add an entry for this branch.
875 cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
876
877 EmitBranch(IndGotoBB);
878 if (CurBB && CurBB->getTerminator())
879 addInstToCurrentSourceAtom(CurBB->getTerminator(), nullptr);
880}
881
883 const Stmt *Else = S.getElse();
884
885 // The else branch of a consteval if statement is always the only branch that
886 // can be runtime evaluated.
887 if (S.isConsteval()) {
888 const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : Else;
889 if (Executed) {
890 RunCleanupsScope ExecutedScope(*this);
891 EmitStmt(Executed);
892 }
893 return;
894 }
895
896 // C99 6.8.4.1: The first substatement is executed if the expression compares
897 // unequal to 0. The condition must be a scalar type.
898 LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
899 ApplyDebugLocation DL(*this, S.getCond());
900
901 if (S.getInit())
902 EmitStmt(S.getInit());
903
904 if (S.getConditionVariable())
906
907 // If the condition constant folds and can be elided, try to avoid emitting
908 // the condition and the dead arm of the if/else.
909 bool CondConstant;
910 if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
911 S.isConstexpr())) {
912 // Figure out which block (then or else) is executed.
913 const Stmt *Executed = S.getThen();
914 const Stmt *Skipped = Else;
915 if (!CondConstant) // Condition false?
916 std::swap(Executed, Skipped);
917
918 // If the skipped block has no labels in it, just emit the executed block.
919 // This avoids emitting dead code and simplifies the CFG substantially.
920 if (S.isConstexpr() || !ContainsLabel(Skipped)) {
921 if (CondConstant)
923 if (Executed) {
925 RunCleanupsScope ExecutedScope(*this);
926 EmitStmt(Executed);
927 }
928 PGO->markStmtMaybeUsed(Skipped);
929 return;
930 }
931 }
932
933 // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
934 // the conditional branch.
935 llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
936 llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
937 llvm::BasicBlock *ElseBlock = ContBlock;
938 if (Else)
939 ElseBlock = createBasicBlock("if.else");
940
941 // Prefer the PGO based weights over the likelihood attribute.
942 // When the build isn't optimized the metadata isn't used, so don't generate
943 // it.
944 // Also, differentiate between disabled PGO and a never executed branch with
945 // PGO. Assuming PGO is in use:
946 // - we want to ignore the [[likely]] attribute if the branch is never
947 // executed,
948 // - assuming the profile is poor, preserving the attribute may still be
949 // beneficial.
950 // As an approximation, preserve the attribute only if both the branch and the
951 // parent context were not executed.
953 uint64_t ThenCount = getProfileCount(S.getThen());
954 if (!ThenCount && !getCurrentProfileCount() &&
955 CGM.getCodeGenOpts().OptimizationLevel)
956 LH = Stmt::getLikelihood(S.getThen(), Else);
957
958 // When measuring MC/DC, always fully evaluate the condition up front using
959 // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
960 // executing the body of the if.then or if.else. This is useful for when
961 // there is a 'return' within the body, but this is particularly beneficial
962 // when one if-stmt is nested within another if-stmt so that all of the MC/DC
963 // updates are kept linear and consistent.
964 if (!CGM.getCodeGenOpts().MCDCCoverage) {
965 EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH,
966 /*ConditionalOp=*/nullptr,
967 /*ConditionalDecl=*/S.getConditionVariable());
968 } else {
969 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
971 Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
972 }
973
974 // Emit the 'then' code.
975 EmitBlock(ThenBlock);
978 else
980 {
981 RunCleanupsScope ThenScope(*this);
982 EmitStmt(S.getThen());
983 }
984 EmitBranch(ContBlock);
985
986 // Emit the 'else' code if present.
987 if (Else) {
988 {
989 // There is no need to emit line number for an unconditional branch.
990 auto NL = ApplyDebugLocation::CreateEmpty(*this);
991 EmitBlock(ElseBlock);
992 }
993 // When single byte coverage mode is enabled, add a counter to else block.
996 {
997 RunCleanupsScope ElseScope(*this);
998 EmitStmt(Else);
999 }
1000 {
1001 // There is no need to emit line number for an unconditional branch.
1002 auto NL = ApplyDebugLocation::CreateEmpty(*this);
1003 EmitBranch(ContBlock);
1004 }
1005 }
1006
1007 // Emit the continuation block for code after the if.
1008 EmitBlock(ContBlock, true);
1009
1010 // When single byte coverage mode is enabled, add a counter to continuation
1011 // block.
1014}
1015
1016bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
1017 bool HasEmptyBody) {
1018 if (CGM.getCodeGenOpts().getFiniteLoops() ==
1020 return false;
1021
1022 // Now apply rules for plain C (see 6.8.5.6 in C11).
1023 // Loops with constant conditions do not have to make progress in any C
1024 // version.
1025 // As an extension, we consisider loops whose constant expression
1026 // can be constant-folded.
1028 bool CondIsConstInt =
1029 !ControllingExpression ||
1030 (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
1031 Result.Val.isInt());
1032
1033 bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
1034 Result.Val.getInt().getBoolValue());
1035
1036 // Loops with non-constant conditions must make progress in C11 and later.
1037 if (getLangOpts().C11 && !CondIsConstInt)
1038 return true;
1039
1040 // [C++26][intro.progress] (DR)
1041 // The implementation may assume that any thread will eventually do one of the
1042 // following:
1043 // [...]
1044 // - continue execution of a trivial infinite loop ([stmt.iter.general]).
1045 if (CGM.getCodeGenOpts().getFiniteLoops() ==
1048 if (HasEmptyBody && CondIsTrue) {
1049 CurFn->removeFnAttr(llvm::Attribute::MustProgress);
1050 return false;
1051 }
1052 return true;
1053 }
1054 return false;
1055}
1056
1057// [C++26][stmt.iter.general] (DR)
1058// A trivially empty iteration statement is an iteration statement matching one
1059// of the following forms:
1060// - while ( expression ) ;
1061// - while ( expression ) { }
1062// - do ; while ( expression ) ;
1063// - do { } while ( expression ) ;
1064// - for ( init-statement expression(opt); ) ;
1065// - for ( init-statement expression(opt); ) { }
1066template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
1067 if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
1068 if (S.getInc())
1069 return false;
1070 }
1071 const Stmt *Body = S.getBody();
1072 if (!Body || isa<NullStmt>(Body))
1073 return true;
1074 if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
1075 return Compound->body_empty();
1076 return false;
1077}
1078
1080 ArrayRef<const Attr *> WhileAttrs) {
1081 // Emit the header for the loop, which will also become
1082 // the continue target.
1083 JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
1084 EmitBlock(LoopHeader.getBlock());
1085
1086 if (CGM.shouldEmitConvergenceTokens())
1087 ConvergenceTokenStack.push_back(
1088 emitConvergenceLoopToken(LoopHeader.getBlock()));
1089
1090 // Create an exit block for when the condition fails, which will
1091 // also become the break target.
1093
1094 // Store the blocks to use for break and continue.
1095 BreakContinueStack.push_back(BreakContinue(S, LoopExit, LoopHeader));
1096
1097 // C++ [stmt.while]p2:
1098 // When the condition of a while statement is a declaration, the
1099 // scope of the variable that is declared extends from its point
1100 // of declaration (3.3.2) to the end of the while statement.
1101 // [...]
1102 // The object created in a condition is destroyed and created
1103 // with each iteration of the loop.
1104 RunCleanupsScope ConditionScope(*this);
1105
1106 if (S.getConditionVariable())
1108
1109 // Evaluate the conditional in the while header. C99 6.8.5.1: The
1110 // evaluation of the controlling expression takes place before each
1111 // execution of the loop body.
1112 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1113
1115
1116 // while(1) is common, avoid extra exit blocks. Be sure
1117 // to correctly handle break/continue though.
1118 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
1119 bool EmitBoolCondBranch = !C || !C->isOne();
1120 const SourceRange &R = S.getSourceRange();
1121 LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
1122 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
1125
1126 // When single byte coverage mode is enabled, add a counter to loop condition.
1129
1130 // As long as the condition is true, go to the loop body.
1131 llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
1132 if (EmitBoolCondBranch) {
1133 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1134 if (ConditionScope.requiresCleanups())
1135 ExitBlock = createBasicBlock("while.exit");
1136 llvm::MDNode *Weights =
1137 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1138 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1139 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1140 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1141 auto *I = Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);
1142 // Key Instructions: Emit the condition and branch as separate source
1143 // location atoms otherwise we may omit a step onto the loop condition in
1144 // favour of the `while` keyword.
1145 // FIXME: We could have the branch as the backup location for the condition,
1146 // which would probably be a better experience. Explore this later.
1147 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
1148 addInstToNewSourceAtom(CondI, nullptr);
1149 addInstToNewSourceAtom(I, nullptr);
1150
1151 if (ExitBlock != LoopExit.getBlock()) {
1152 EmitBlock(ExitBlock);
1154 }
1155 } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
1156 CGM.getDiags().Report(A->getLocation(),
1157 diag::warn_attribute_has_no_effect_on_infinite_loop)
1158 << A << A->getRange();
1159 CGM.getDiags().Report(
1160 S.getWhileLoc(),
1161 diag::note_attribute_has_no_effect_on_infinite_loop_here)
1163 }
1164
1165 // Emit the loop body. We have to emit this in a cleanup scope
1166 // because it might be a singleton DeclStmt.
1167 {
1168 RunCleanupsScope BodyScope(*this);
1169 EmitBlock(LoopBody);
1170 // When single byte coverage mode is enabled, add a counter to the body.
1173 else
1175 EmitStmt(S.getBody());
1176 }
1177
1178 BreakContinueStack.pop_back();
1179
1180 // Immediately force cleanup.
1181 ConditionScope.ForceCleanup();
1182
1183 EmitStopPoint(&S);
1184 // Branch to the loop header again.
1185 EmitBranch(LoopHeader.getBlock());
1186
1187 LoopStack.pop();
1188
1189 // Emit the exit block.
1190 EmitBlock(LoopExit.getBlock(), true);
1191
1192 // The LoopHeader typically is just a branch if we skipped emitting
1193 // a branch, try to erase it.
1194 if (!EmitBoolCondBranch)
1195 SimplifyForwardingBlocks(LoopHeader.getBlock());
1196
1197 // When single byte coverage mode is enabled, add a counter to continuation
1198 // block.
1201
1202 if (CGM.shouldEmitConvergenceTokens())
1203 ConvergenceTokenStack.pop_back();
1204}
1205
1207 ArrayRef<const Attr *> DoAttrs) {
1209 JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
1210
1211 uint64_t ParentCount = getCurrentProfileCount();
1212
1213 // Store the blocks to use for break and continue.
1214 BreakContinueStack.push_back(BreakContinue(S, LoopExit, LoopCond));
1215
1216 // Emit the body of the loop.
1217 llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
1218
1220 EmitBlockWithFallThrough(LoopBody, S.getBody());
1221 else
1222 EmitBlockWithFallThrough(LoopBody, &S);
1223
1224 if (CGM.shouldEmitConvergenceTokens())
1225 ConvergenceTokenStack.push_back(emitConvergenceLoopToken(LoopBody));
1226
1227 {
1228 RunCleanupsScope BodyScope(*this);
1229 EmitStmt(S.getBody());
1230 }
1231
1232 EmitBlock(LoopCond.getBlock());
1233 // When single byte coverage mode is enabled, add a counter to loop condition.
1236
1237 // C99 6.8.5.2: "The evaluation of the controlling expression takes place
1238 // after each execution of the loop body."
1239
1240 // Evaluate the conditional in the while header.
1241 // C99 6.8.5p2/p4: The first substatement is executed if the expression
1242 // compares unequal to 0. The condition must be a scalar type.
1243 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1244
1245 BreakContinueStack.pop_back();
1246
1247 // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
1248 // to correctly handle break/continue though.
1249 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
1250 bool EmitBoolCondBranch = !C || !C->isZero();
1251
1252 const SourceRange &R = S.getSourceRange();
1253 LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
1257
1258 // As long as the condition is true, iterate the loop.
1259 if (EmitBoolCondBranch) {
1260 uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
1261 auto *I = Builder.CreateCondBr(
1262 BoolCondVal, LoopBody, LoopExit.getBlock(),
1263 createProfileWeightsForLoop(S.getCond(), BackedgeCount));
1264
1265 // Key Instructions: Emit the condition and branch as separate source
1266 // location atoms otherwise we may omit a step onto the loop condition in
1267 // favour of the closing brace.
1268 // FIXME: We could have the branch as the backup location for the condition,
1269 // which would probably be a better experience (no jumping to the brace).
1270 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
1271 addInstToNewSourceAtom(CondI, nullptr);
1272 addInstToNewSourceAtom(I, nullptr);
1273 }
1274
1275 LoopStack.pop();
1276
1277 // Emit the exit block.
1278 EmitBlock(LoopExit.getBlock());
1279
1280 // The DoCond block typically is just a branch if we skipped
1281 // emitting a branch, try to erase it.
1282 if (!EmitBoolCondBranch)
1284
1285 // When single byte coverage mode is enabled, add a counter to continuation
1286 // block.
1289
1290 if (CGM.shouldEmitConvergenceTokens())
1291 ConvergenceTokenStack.pop_back();
1292}
1293
// EmitForStmt — emit a C/C++ 'for' loop. NOTE(review): the signature line
// (original 1294) and various body lines were elided by this extraction;
// verify the gaps against upstream CGStmt.cpp.
1295 ArrayRef<const Attr *> ForAttrs) {
1297
1298 std::optional<LexicalScope> ForScope;
1300 ForScope.emplace(*this, S.getSourceRange());
1301
1302 // Evaluate the first part before the loop.
1303 if (S.getInit())
1304 EmitStmt(S.getInit());
1305
1306 // Start the loop with a block that tests the condition.
1307 // If there's an increment, the continue scope will be overwritten
1308 // later.
1309 JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
1310 llvm::BasicBlock *CondBlock = CondDest.getBlock();
1311 EmitBlock(CondBlock);
1312
1313 if (CGM.shouldEmitConvergenceTokens())
1314 ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));
1315
1316 const SourceRange &R = S.getSourceRange();
1317 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
1321
1322 // Create a cleanup scope for the condition variable cleanups.
1323 LexicalScope ConditionScope(*this, S.getSourceRange());
1324
1325 // If the for loop doesn't have an increment we can just use the condition as
1326 // the continue block. Otherwise, if there is no condition variable, we can
1327 // form the continue block now. If there is a condition variable, we can't
1328 // form the continue block until after we've emitted the condition, because
1329 // the condition is in scope in the increment, but Sema's jump diagnostics
1330 // ensure that there are no continues from the condition variable that jump
1331 // to the loop increment.
1332 JumpDest Continue;
1333 if (!S.getInc())
1334 Continue = CondDest;
1335 else if (!S.getConditionVariable())
1336 Continue = getJumpDestInCurrentScope("for.inc");
1337 BreakContinueStack.push_back(BreakContinue(S, LoopExit, Continue));
1338
1339 if (S.getCond()) {
1340 // If the for statement has a condition scope, emit the local variable
1341 // declaration.
1342 if (S.getConditionVariable()) {
// NOTE(review): the EmitDecl of the condition variable (line 1343) was
// elided by the extraction.
1344
1345 // We have entered the condition variable's scope, so we're now able to
1346 // jump to the continue block.
1347 Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
1348 BreakContinueStack.back().ContinueBlock = Continue;
1349 }
1350
1351 // When single byte coverage mode is enabled, add a counter to loop
1352 // condition.
1355
1356 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1357 // If there are any cleanups between here and the loop-exit scope,
1358 // create a block to stage a loop exit along.
1359 if (ForScope && ForScope->requiresCleanups())
1360 ExitBlock = createBasicBlock("for.cond.cleanup");
1361
1362 // As long as the condition is true, iterate the loop.
1363 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
1364
1365 // C99 6.8.5p2/p4: The first substatement is executed if the expression
1366 // compares unequal to 0. The condition must be a scalar type.
1367 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1368
1370
1371 llvm::MDNode *Weights =
1372 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1373 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1374 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1375 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1376
1377 auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
1378 // Key Instructions: Emit the condition and branch as separate atoms to
1379 // match existing loop stepping behaviour. FIXME: We could have the branch
1380 // as the backup location for the condition, which would probably be a
1381 // better experience (no jumping to the brace).
1382 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
1383 addInstToNewSourceAtom(CondI, nullptr);
1384 addInstToNewSourceAtom(I, nullptr);
1385
1386 if (ExitBlock != LoopExit.getBlock()) {
1387 EmitBlock(ExitBlock);
1389 }
1390
1391 EmitBlock(ForBody);
1392 } else {
1393 // Treat it as a non-zero constant. Don't even create a new block for the
1394 // body, just fall into it.
1395 }
1396
1397 // When single byte coverage mode is enabled, add a counter to the body.
1400 else
1402 {
1403 // Create a separate cleanup scope for the body, in case it is not
1404 // a compound statement.
1405 RunCleanupsScope BodyScope(*this);
1406 EmitStmt(S.getBody());
1407 }
1408
1409 // The last block in the loop's body (which unconditionally branches to the
1410 // `inc` block if there is one).
1411 auto *FinalBodyBB = Builder.GetInsertBlock();
1412
1413 // If there is an increment, emit it next.
1414 if (S.getInc()) {
1415 EmitBlock(Continue.getBlock());
1416 EmitStmt(S.getInc());
1419 }
1420
1421 BreakContinueStack.pop_back();
1422
1423 ConditionScope.ForceCleanup();
1424
1425 EmitStopPoint(&S);
1426 EmitBranch(CondBlock);
1427
1428 if (ForScope)
1429 ForScope->ForceCleanup();
1430
1431 LoopStack.pop();
1432
1433 // Emit the fall-through block.
1434 EmitBlock(LoopExit.getBlock(), true);
1435
1436 // When single byte coverage mode is enabled, add a counter to continuation
1437 // block.
1440
1441 if (CGM.shouldEmitConvergenceTokens())
1442 ConvergenceTokenStack.pop_back();
1443
1444 if (FinalBodyBB) {
1445 // Key Instructions: We want the for closing brace to be step-able on to
1446 // match existing behaviour.
1447 addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
1448 }
1449}
1450
// EmitCXXForRangeStmt — emit a C++11 range-based 'for' loop (init, begin/end
// statements, condition, loop variable, body, increment). NOTE(review):
// several lines (e.g. the function name on 1452, range/begin statements near
// 1461-1462) were elided by this extraction; verify against upstream.
1451 void
1453 ArrayRef<const Attr *> ForAttrs) {
1455
1456 LexicalScope ForScope(*this, S.getSourceRange());
1457
1458 // Evaluate the first pieces before the loop.
1459 if (S.getInit())
1460 EmitStmt(S.getInit());
1463 EmitStmt(S.getEndStmt());
1464
1465 // Start the loop with a block that tests the condition.
1466 // If there's an increment, the continue scope will be overwritten
1467 // later.
1468 llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
1469 EmitBlock(CondBlock);
1470
1471 if (CGM.shouldEmitConvergenceTokens())
1472 ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));
1473
1474 const SourceRange &R = S.getSourceRange();
1475 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
1478
1479 // If there are any cleanups between here and the loop-exit scope,
1480 // create a block to stage a loop exit along.
1481 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1482 if (ForScope.requiresCleanups())
1483 ExitBlock = createBasicBlock("for.cond.cleanup");
1484
1485 // The loop body, consisting of the specified body and the loop variable.
1486 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
1487
1488 // The body is executed if the expression, contextually converted
1489 // to bool, is true.
1490 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1491 llvm::MDNode *Weights =
1492 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1493 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1494 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1495 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1496 auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
1497 // Key Instructions: Emit the condition and branch as separate atoms to
1498 // match existing loop stepping behaviour. FIXME: We could have the branch as
1499 // the backup location for the condition, which would probably be a better
1500 // experience.
1501 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
1502 addInstToNewSourceAtom(CondI, nullptr);
1503 addInstToNewSourceAtom(I, nullptr);
1504
1505 if (ExitBlock != LoopExit.getBlock()) {
1506 EmitBlock(ExitBlock);
1508 }
1509
1510 EmitBlock(ForBody);
// NOTE(review): an if/else around lines 1511-1514 (likely the single-byte
// coverage counter) was elided here.
1513 else
1515
1516 // Create a block for the increment. In case of a 'continue', we jump there.
1517 JumpDest Continue = getJumpDestInCurrentScope("for.inc");
1518
1519 // Store the blocks to use for break and continue.
1520 BreakContinueStack.push_back(BreakContinue(S, LoopExit, Continue));
1521
1522 {
1523 // Create a separate cleanup scope for the loop variable and body.
1524 LexicalScope BodyScope(*this, S.getSourceRange());
// NOTE(review): the loop-variable EmitStmt (line 1525) was elided.
1526 EmitStmt(S.getBody());
1527 }
1528 // The last block in the loop's body (which unconditionally branches to the
1529 // `inc` block if there is one).
1530 auto *FinalBodyBB = Builder.GetInsertBlock();
1531
1532 EmitStopPoint(&S);
1533 // If there is an increment, emit it next.
1534 EmitBlock(Continue.getBlock());
1535 EmitStmt(S.getInc());
1536
1537 BreakContinueStack.pop_back();
1538
1539 EmitBranch(CondBlock);
1540
1541 ForScope.ForceCleanup();
1542
1543 LoopStack.pop();
1544
1545 // Emit the fall-through block.
1546 EmitBlock(LoopExit.getBlock(), true);
1547
1548 // When single byte coverage mode is enabled, add a counter to continuation
1549 // block.
1552
1553 if (CGM.shouldEmitConvergenceTokens())
1554 ConvergenceTokenStack.pop_back();
1555
1556 if (FinalBodyBB) {
1557 // We want the for closing brace to be step-able on to match existing
1558 // behaviour.
1559 addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
1560 }
1561}
1562
// EmitReturnOfRValue — store an already-computed RValue of type Ty into the
// function's ReturnValue slot, dispatching on scalar / aggregate / complex.
// NOTE(review): the aggregate-copy and complex-store statements (lines
// 1568-1569, 1571, 1574) were elided by this extraction.
1563 void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
1564 if (RV.isScalar()) {
1565 Builder.CreateStore(RV.getScalarVal(), ReturnValue);
1566 } else if (RV.isAggregate()) {
1567 LValue Dest = MakeAddrLValue(ReturnValue, Ty);
1570 } else {
// Complex case: the store call's first line is elided; this is its
// trailing argument.
1572 /*init*/ true);
1573 }
1575}
1576
1577namespace {
1578// RAII struct used to save and restore a return statment's result expression.
1579struct SaveRetExprRAII {
1580 SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
1581 : OldRetExpr(CGF.RetExpr), CGF(CGF) {
1582 CGF.RetExpr = RetExpr;
1583 }
1584 ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
1585 const Expr *OldRetExpr;
1586 CodeGenFunction &CGF;
1587};
1588} // namespace
1589
1590/// Determine if the given call uses the swiftasync calling convention.
1591static bool isSwiftAsyncCallee(const CallExpr *CE) {
1592 auto calleeQualType = CE->getCallee()->getType();
1593 const FunctionType *calleeType = nullptr;
1594 if (calleeQualType->isFunctionPointerType() ||
1595 calleeQualType->isFunctionReferenceType() ||
1596 calleeQualType->isBlockPointerType() ||
1597 calleeQualType->isMemberFunctionPointerType()) {
1598 calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
1599 } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
1600 calleeType = ty;
1601 } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
1602 if (auto methodDecl = CMCE->getMethodDecl()) {
1603 // getMethodDecl() doesn't handle member pointers at the moment.
1604 calleeType = methodDecl->getType()->castAs<FunctionType>();
1605 } else {
1606 return false;
1607 }
1608 } else {
1609 return false;
1610 }
1611 return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
1612}
1613
1614 /// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
1615 /// if the function returns void, or may be missing one if the function returns
1616 /// non-void. Fun stuff :).
// NOTE(review): the function signature (lines 1617-1618) was elided by this
// extraction, as were a handful of body lines noted below.
1619 if (requiresReturnValueCheck()) {
// Record the return location in a private global so the return-value
// sanitizer check can report where the return happened.
1620 llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
1621 auto *SLocPtr =
1622 new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
1623 llvm::GlobalVariable::PrivateLinkage, SLoc);
1624 SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1625 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
1626 assert(ReturnLocation.isValid() && "No valid return location");
1627 Builder.CreateStore(SLocPtr, ReturnLocation);
1628 }
1629
1630 // Returning from an outlined SEH helper is UB, and we already warn on it.
1631 if (IsOutlinedSEHHelper) {
1632 Builder.CreateUnreachable();
1633 Builder.ClearInsertionPoint();
1634 }
1635
1636 // Emit the result value, even if unused, to evaluate the side effects.
1637 const Expr *RV = S.getRetValue();
1638
1639 // Record the result expression of the return statement. The recorded
1640 // expression is used to determine whether a block capture's lifetime should
1641 // end at the end of the full expression as opposed to the end of the scope
1642 // enclosing the block expression.
1643 //
1644 // This permits a small, easily-implemented exception to our over-conservative
1645 // rules about not jumping to statements following block literals with
1646 // non-trivial cleanups.
1647 SaveRetExprRAII SaveRetExpr(RV, *this);
1648
1649 RunCleanupsScope cleanupScope(*this);
1650 if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
1651 RV = EWC->getSubExpr();
1652
1653 // If we're in a swiftasynccall function, and the return expression is a
1654 // call to a swiftasynccall function, mark the call as the musttail call.
1655 std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
1656 if (RV && CurFnInfo &&
1657 CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
1658 if (auto CE = dyn_cast<CallExpr>(RV)) {
1659 if (isSwiftAsyncCallee(CE)) {
1660 SaveMustTail.emplace(MustTailCall, CE);
1661 }
1662 }
1663 }
1664
1665 // FIXME: Clean this up by using an LValue for ReturnTemp,
1666 // EmitStoreThroughLValue, and EmitAnyExpr.
1667 // Check if the NRVO candidate was not globalized in OpenMP mode.
// NOTE(review): one condition line (1669) inside this '&&' chain was elided.
1668 if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
1670 (!getLangOpts().OpenMP ||
1671 !CGM.getOpenMPRuntime()
1672 .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
1673 .isValid())) {
1674 // Apply the named return value optimization for this return statement,
1675 // which means doing nothing: the appropriate result has already been
1676 // constructed into the NRVO variable.
1677
1678 // If there is an NRVO flag for this variable, set it to 1 into indicate
1679 // that the cleanup code should not destroy the variable.
1680 if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
1681 Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
1682 } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
1683 // Make sure not to return anything, but evaluate the expression
1684 // for side effects.
1685 if (RV) {
1686 EmitAnyExpr(RV);
1687 }
1688 } else if (!RV) {
1689 // Do nothing (return value is left uninitialized)
1690 } else if (FnRetTy->isReferenceType()) {
1691 // If this function returns a reference, take the address of the expression
1692 // rather than the value.
// NOTE(review): the RValue 'Result' computation (line 1693) was elided.
1694 auto *I = Builder.CreateStore(Result.getScalarVal(), ReturnValue);
1695 addInstToCurrentSourceAtom(I, I->getValueOperand());
1696 } else {
1697 switch (getEvaluationKind(RV->getType())) {
1698 case TEK_Scalar: {
1699 llvm::Value *Ret = EmitScalarExpr(RV);
1700 if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
// NOTE(review): the indirect-return store call (line 1701) was elided;
// this is its trailing argument.
1702 /*isInit*/ true);
1703 } else {
1704 auto *I = Builder.CreateStore(Ret, ReturnValue);
1705 addInstToCurrentSourceAtom(I, I->getValueOperand());
1706 }
1707 break;
1708 }
1709 case TEK_Complex:
1711 /*isInit*/ true);
1712 break;
1713 case TEK_Aggregate:
// NOTE(review): the aggregate emission (lines 1714-1719) was elided.
1720 break;
1721 }
1722 }
1723
1724 ++NumReturnExprs;
1725 if (!RV || RV->isEvaluatable(getContext()))
1726 ++NumSimpleReturnExprs;
1727
1728 cleanupScope.ForceCleanup();
1730}
1731
// EmitDeclStmt — emit each declaration in a DeclStmt. NOTE(review): the
// function signature (line 1732) was elided by this extraction.
1733 // As long as debug info is modeled with instructions, we have to ensure we
1734 // have a place to insert here and write the stop point here.
1735 if (HaveInsertPoint())
1736 EmitStopPoint(&S);
1737
1738 for (const auto *I : S.decls())
1739 EmitDecl(*I, /*EvaluateConditionDecl=*/true);
1740}
1741
// Resolve the BreakContinue record a break/continue statement targets:
// the innermost one by default, or the named loop/switch's entry when the
// statement carries a label target. NOTE(review): the signature line (1742)
// was elided by this extraction.
1743 -> const BreakContinue * {
1744 if (!S.hasLabelTarget())
1745 return &BreakContinueStack.back();
1746
// Named target: scan the stack innermost-to-outermost for the matching
// loop or switch statement.
1747 const Stmt *LoopOrSwitch = S.getNamedLoopOrSwitch();
1748 assert(LoopOrSwitch && "break/continue target not set?");
1749 for (const BreakContinue &BC : llvm::reverse(BreakContinueStack))
1750 if (BC.LoopOrSwitch == LoopOrSwitch)
1751 return &BC;
1752
1753 llvm_unreachable("break/continue target not found");
1754}
1755
// EmitBreakStmt — branch to the enclosing (or named) break target.
// NOTE(review): the signature (line 1756) and the branch-through-cleanup
// statements (lines 1765-1766) were elided by this extraction.
1757 assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
1758
1759 // If this code is reachable then emit a stop point (if generating
1760 // debug info). We have to do this ourselves because we are on the
1761 // "simple" statement path.
1762 if (HaveInsertPoint())
1763 EmitStopPoint(&S);
1764
1767}
1768
// EmitContinueStmt — branch to the enclosing (or named) continue target.
// NOTE(review): the signature (line 1769) and the branch-through-cleanup
// statements (lines 1778-1779) were elided by this extraction.
1770 assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1771
1772 // If this code is reachable then emit a stop point (if generating
1773 // debug info). We have to do this ourselves because we are on the
1774 // "simple" statement path.
1775 if (HaveInsertPoint())
1776 EmitStopPoint(&S);
1777
1780}
1781
1782 /// EmitCaseStmtRange - If case statement range is not too big then
1783 /// add multiple cases to switch instruction, one for each value within
1784 /// the range. If range is too big then emit "if" condition check.
// NOTE(review): the signature line (1785) was elided by this extraction.
1786 ArrayRef<const Attr *> Attrs) {
1787 assert(S.getRHS() && "Expected RHS value in CaseStmt");
1788
1789 llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1790 llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1791
1792 // Emit the code for this case. We do this first to make sure it is
1793 // properly chained from our predecessor before generating the
1794 // switch machinery to enter this block.
1795 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1796 EmitBlockWithFallThrough(CaseDest, &S);
1797 EmitStmt(S.getSubStmt());
1798
1799 // If range is empty, do nothing.
1800 if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1801 return;
1802
// NOTE(review): the likelihood ('LH') initialization (line 1803) was elided;
// LH is used below.
1804 llvm::APInt Range = RHS - LHS;
1805 // FIXME: parameters such as this should not be hardcoded.
1806 if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1807 // Range is small enough to add multiple switch instruction cases.
1808 uint64_t Total = getProfileCount(&S);
1809 unsigned NCases = Range.getZExtValue() + 1;
1810 // We only have one region counter for the entire set of cases here, so we
1811 // need to divide the weights evenly between the generated cases, ensuring
1812 // that the total weight is preserved. E.g., a weight of 5 over three cases
1813 // will be distributed as weights of 2, 2, and 1.
1814 uint64_t Weight = Total / NCases, Rem = Total % NCases;
1815 for (unsigned I = 0; I != NCases; ++I) {
1816 if (SwitchWeights)
1817 SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1818 else if (SwitchLikelihood)
1819 SwitchLikelihood->push_back(LH);
1820
1821 if (Rem)
1822 Rem--;
1823 SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1824 ++LHS;
1825 }
1826 return;
1827 }
1828
1829 // The range is too big. Emit "if" condition into a new block,
1830 // making sure to save and restore the current insertion point.
1831 llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1832
1833 // Push this test onto the chain of range checks (which terminates
1834 // in the default basic block). The switch's default will be changed
1835 // to the top of this chain after switch emission is complete.
1836 llvm::BasicBlock *FalseDest = CaseRangeBlock;
1837 CaseRangeBlock = createBasicBlock("sw.caserange");
1838
1839 CurFn->insert(CurFn->end(), CaseRangeBlock);
1840 Builder.SetInsertPoint(CaseRangeBlock);
1841
// Unsigned 'cond - LHS <= Range' covers the whole [LHS, RHS] interval in a
// single compare.
1842 // Emit range check.
1843 llvm::Value *Diff =
1844 Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1845 llvm::Value *Cond =
1846 Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1847
1848 llvm::MDNode *Weights = nullptr;
1849 if (SwitchWeights) {
1850 uint64_t ThisCount = getProfileCount(&S);
1851 uint64_t DefaultCount = (*SwitchWeights)[0];
1852 Weights = createProfileWeights(ThisCount, DefaultCount);
1853
1854 // Since we're chaining the switch default through each large case range, we
1855 // need to update the weight for the default, ie, the first case, to include
1856 // this case.
1857 (*SwitchWeights)[0] += ThisCount;
1858 } else if (SwitchLikelihood)
1859 Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1860
1861 Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1862
1863 // Restore the appropriate insertion point.
1864 if (RestoreBB)
1865 Builder.SetInsertPoint(RestoreBB);
1866 else
1867 Builder.ClearInsertionPoint();
1868}
1869
// EmitCaseStmt — emit a 'case' label inside the current switch, including the
// empty-body 'break' optimization and iterative handling of long fallthrough
// chains. NOTE(review): the signature line (1870) was elided by this
// extraction.
1871 ArrayRef<const Attr *> Attrs) {
1872 // If there is no enclosing switch instance that we're aware of, then this
1873 // case statement and its block can be elided. This situation only happens
1874 // when we've constant-folded the switch, are emitting the constant case,
1875 // and part of the constant case includes another case statement. For
1876 // instance: switch (4) { case 4: do { case 5: } while (1); }
1877 if (!SwitchInsn) {
1878 EmitStmt(S.getSubStmt());
1879 return;
1880 }
1881
1882 // Handle case ranges.
1883 if (S.getRHS()) {
1884 EmitCaseStmtRange(S, Attrs);
1885 return;
1886 }
1887
// NOTE(review): the CaseVal initializer (line 1889) was elided.
1888 llvm::ConstantInt *CaseVal =
1890
1891 // Emit debuginfo for the case value if it is an enum value.
1892 const ConstantExpr *CE;
1893 if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1894 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1895 else
1896 CE = dyn_cast<ConstantExpr>(S.getLHS());
1897 if (CE) {
1898 if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1899 if (CGDebugInfo *Dbg = getDebugInfo())
1900 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1901 Dbg->EmitGlobalVariable(DE->getDecl(),
1902 APValue(llvm::APSInt(CaseVal->getValue())));
1903 }
1904
1905 if (SwitchLikelihood)
1906 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1907
1908 // If the body of the case is just a 'break', try to not emit an empty block.
1909 // If we're profiling or we're not optimizing, leave the block in for better
1910 // debug and coverage analysis.
// NOTE(review): the trailing 'isa<BreakStmt>' style condition (line 1913)
// was elided from this '&&' chain.
1911 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1912 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1914 JumpDest Block = BreakContinueStack.back().BreakBlock;
1915
1916 // Only do this optimization if there are no cleanups that need emitting.
// NOTE(review): the scope-depth check 'if' (line 1917) was elided.
1918 if (SwitchWeights)
1919 SwitchWeights->push_back(getProfileCount(&S));
1920 SwitchInsn->addCase(CaseVal, Block.getBlock());
1921
1922 // If there was a fallthrough into this case, make sure to redirect it to
1923 // the end of the switch as well.
1924 if (Builder.GetInsertBlock()) {
1925 Builder.CreateBr(Block.getBlock());
1926 Builder.ClearInsertionPoint();
1927 }
1928 return;
1929 }
1930 }
1931
1932 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1933 EmitBlockWithFallThrough(CaseDest, &S);
1934 if (SwitchWeights)
1935 SwitchWeights->push_back(getProfileCount(&S));
1936 SwitchInsn->addCase(CaseVal, CaseDest);
1937
1938 // Recursively emitting the statement is acceptable, but is not wonderful for
1939 // code where we have many case statements nested together, i.e.:
1940 // case 1:
1941 // case 2:
1942 // case 3: etc.
1943 // Handling this recursively will create a new block for each case statement
1944 // that falls through to the next case which is IR intensive. It also causes
1945 // deep recursion which can run into stack depth limitations. Handle
1946 // sequential non-range case statements specially.
1947 //
1948 // TODO When the next case has a likelihood attribute the code returns to the
1949 // recursive algorithm. Maybe improve this case if it becomes common practice
1950 // to use a lot of attributes.
1951 const CaseStmt *CurCase = &S;
1952 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1953
1954 // Otherwise, iteratively add consecutive cases to this switch stmt.
1955 while (NextCase && NextCase->getRHS() == nullptr) {
1956 CurCase = NextCase;
1957 llvm::ConstantInt *CaseVal =
1958 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1959
1960 if (SwitchWeights)
1961 SwitchWeights->push_back(getProfileCount(NextCase));
1962 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1963 CaseDest = createBasicBlock("sw.bb");
1964 EmitBlockWithFallThrough(CaseDest, CurCase);
1965 }
1966 // Since this loop is only executed when the CaseStmt has no attributes
1967 // use a hard-coded value.
1968 if (SwitchLikelihood)
1969 SwitchLikelihood->push_back(Stmt::LH_None);
1970
1971 SwitchInsn->addCase(CaseVal, CaseDest);
1972 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1973 }
1974
1975 // Generate a stop point for debug info if the case statement is
1976 // followed by a default statement. A fallthrough case before a
1977 // default case gets its own branch target.
1978 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1979 EmitStopPoint(CurCase);
1980
1981 // Normal default recursion for non-cases.
1982 EmitStmt(CurCase->getSubStmt());
1983}
1984
// EmitDefaultStmt — emit the 'default' label of the current switch into the
// switch instruction's pre-created default destination block. NOTE(review):
// the signature line (1985) was elided by this extraction.
1986 ArrayRef<const Attr *> Attrs) {
1987 // If there is no enclosing switch instance that we're aware of, then this
1988 // default statement can be elided. This situation only happens when we've
1989 // constant-folded the switch.
1990 if (!SwitchInsn) {
1991 EmitStmt(S.getSubStmt());
1992 return;
1993 }
1994
1995 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1996 assert(DefaultBlock->empty() &&
1997 "EmitDefaultStmt: Default block already defined?");
1998
// The default's likelihood lives in slot 0 of the likelihood list.
1999 if (SwitchLikelihood)
2000 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
2001
2002 EmitBlockWithFallThrough(DefaultBlock, &S);
2003
2004 EmitStmt(S.getSubStmt());
2005}
2006
// Cleanup that emits the body of a '_Defer' statement when its enclosing
// scope exits (normally or via EH).
2007 namespace {
2008 struct EmitDeferredStatement final : EHScopeStack::Cleanup {
2009 const DeferStmt &Stmt;
// Note: takes a pointer but stores a reference; the DeferStmt must outlive
// this cleanup (it lives in the AST, so it does).
2010 EmitDeferredStatement(const DeferStmt *Stmt) : Stmt(*Stmt) {}
2011
2012 void Emit(CodeGenFunction &CGF, Flags) override {
2013 // Take care that any cleanups pushed by the body of a '_Defer' statement
2014 // don't clobber the current cleanup slot value.
2015 //
2016 // Assume we have a scope that pushes a cleanup; when that scope is exited,
2017 // we need to run that cleanup; this is accomplished by emitting the cleanup
2018 // into a separate block and then branching to that block at scope exit.
2019 //
2020 // Where this gets complicated is if we exit the scope in multiple different
2021 // ways; e.g. in a 'for' loop, we may exit the scope of its body by falling
2022 // off the end (in which case we need to run the cleanup and then branch to
2023 // the increment), or by 'break'ing out of the loop (in which case we need
2024 // to run the cleanup and then branch to the loop exit block); in both cases
2025 // we first branch to the cleanup block to run the cleanup, but the block we
2026 // need to jump to *after* running the cleanup is different.
2027 //
2028 // This is accomplished using a local integer variable called the 'cleanup
2029 // slot': before branching to the cleanup block, we store a value into that
2030 // slot. Then, in the cleanup block, after running the cleanup, we load the
2031 // value of that variable and 'switch' on it to branch to the appropriate
2032 // continuation block.
2033 //
2034 // The problem that arises once '_Defer' statements are involved is that the
2035 // body of a '_Defer' is an arbitrary statement which itself can create more
2036 // cleanups. This means we may end up overwriting the cleanup slot before we
2037 // ever have a chance to 'switch' on it, which means that once we *do* get
2038 // to the 'switch', we end up in whatever block the cleanup code happened to
2039 // pick as the default 'switch' exit label!
2040 //
2041 // That is, what is normally supposed to happen is something like:
2042 //
2043 // 1. Store 'X' to cleanup slot.
2044 // 2. Branch to cleanup block.
2045 // 3. Execute cleanup.
2046 // 4. Read value from cleanup slot.
2047 // 5. Branch to the block associated with 'X'.
2048 //
2049 // But if we encounter a _Defer' statement that contains a cleanup, then
2050 // what might instead happen is:
2051 //
2052 // 1. Store 'X' to cleanup slot.
2053 // 2. Branch to cleanup block.
2054 // 3. Execute cleanup; this ends up pushing another cleanup, so:
2055 // 3a. Store 'Y' to cleanup slot.
2056 // 3b. Run steps 2–5 recursively.
2057 // 4. Read value from cleanup slot, which is now 'Y' instead of 'X'.
2058 // 5. Branch to the block associated with 'Y'... which doesn't even
2059 // exist because the value 'Y' is only meaningful for the inner
2060 // cleanup. The result is we just branch 'somewhere random'.
2061 //
2062 // The rest of the cleanup code simply isn't prepared to handle this case
2063 // because most other cleanups can't push more cleanups, and thus, emitting
2064 // other cleanups generally cannot clobber the cleanup slot.
2065 //
2066 // To prevent this from happening, save the current cleanup slot value and
2067 // restore it after emitting the '_Defer' statement.
2068 llvm::Value *SavedCleanupDest = nullptr;
2069 if (CGF.NormalCleanupDest.isValid())
2070 SavedCleanupDest =
2071 CGF.Builder.CreateLoad(CGF.NormalCleanupDest, "cleanup.dest.saved");
2072
2073 CGF.EmitStmt(Stmt.getBody());
2074
// Only restore if the body left us with a live insertion point; otherwise
// the store would be emitted into a dead/absent block.
2075 if (SavedCleanupDest && CGF.HaveInsertPoint())
2076 CGF.Builder.CreateStore(SavedCleanupDest, CGF.NormalCleanupDest);
2077
2078 // Cleanups must end with an insert point.
2079 CGF.EnsureInsertPoint();
2080 }
2081 };
2082 } // namespace
2083
// Register the '_Defer' statement's body as a cleanup that fires on both
// normal and exceptional exit from the enclosing scope.
// NOTE(review): the listing omits source line 2084 here (the extractor drops
// link-bearing lines) — presumably the function signature, e.g.
// 'void CodeGenFunction::EmitDeferStmt(const DeferStmt &S) {'; reconcile with
// upstream CGStmt.cpp before editing.
2085 EHStack.pushCleanup<EmitDeferredStatement>(NormalAndEHCleanup, &S);
2086 }
2087
2088/// CollectStatementsForCase - Given the body of a 'switch' statement and a
2089/// constant value that is being switched on, see if we can dead code eliminate
2090/// the body of the switch to a simple series of statements to emit. Basically,
2091/// on a switch (5) we want to find these statements:
2092/// case 5:
2093/// printf(...); <--
2094/// ++i; <--
2095/// break;
2096///
2097/// and add them to the ResultStmts vector. If it is unsafe to do this
2098/// transformation (for example, one of the elided statements contains a label
2099/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
2100/// should include statements after it (e.g. the printf() line is a substmt of
2101/// the case) then return CSFC_FallThrough. If we handled it and found a break
2102/// statement, then return CSFC_Success.
2103///
2104/// If Case is non-null, then we are looking for the specified case, checking
2105/// that nothing we jump over contains labels. If Case is null, then we found
2106/// the case and are looking for the break.
2107///
2108/// If the recursive walk actually finds our Case, then we set FoundCase to
2109/// true.
2110///
// NOTE(review): the listing omits source lines 2111–2112 here — presumably
// the start of the signature, e.g.
// 'static CSFC_Result CollectStatementsForCase(const Stmt *S,'; reconcile
// with upstream CGStmt.cpp before editing. See the doc comment above for the
// function's contract (CSFC_Failure / CSFC_FallThrough / CSFC_Success).
2113 const SwitchCase *Case,
2114 bool &FoundCase,
2115 SmallVectorImpl<const Stmt*> &ResultStmts) {
2116 // If this is a null statement, just succeed.
2117 if (!S)
2118 return Case ? CSFC_Success : CSFC_FallThrough;
2119
2120 // If this is the switchcase (case 4: or default) that we're looking for, then
2121 // we're in business. Just add the substatement.
2122 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
2123 if (S == Case) {
2124 FoundCase = true;
// Found the target case; from here on we collect statements (Case==nullptr
// in the recursive call means "now looking for the break").
2125 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
2126 ResultStmts);
2127 }
2128
2129 // Otherwise, this is some other case or default statement, just ignore it.
2130 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
2131 ResultStmts);
2132 }
2133
2134 // If we are in the live part of the code and we found our break statement,
2135 // return a success!
2136 if (!Case && isa<BreakStmt>(S))
2137 return CSFC_Success;
2138
2139 // If this is a switch statement, then it might contain the SwitchCase, the
2140 // break, or neither.
2141 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
2142 // Handle this as two cases: we might be looking for the SwitchCase (if so
2143 // the skipped statements must be skippable) or we might already have it.
2144 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
2145 bool StartedInLiveCode = FoundCase;
2146 unsigned StartSize = ResultStmts.size();
2147
2148 // If we've not found the case yet, scan through looking for it.
2149 if (Case) {
2150 // Keep track of whether we see a skipped declaration. The code could be
2151 // using the declaration even if it is skipped, so we can't optimize out
2152 // the decl if the kept statements might refer to it.
2153 bool HadSkippedDecl = false;
2154
2155 // If we're looking for the case, just see if we can skip each of the
2156 // substatements.
2157 for (; Case && I != E; ++I) {
2158 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
2159
2160 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
2161 case CSFC_Failure: return CSFC_Failure;
2162 case CSFC_Success:
2163 // A successful result means that either 1) that the statement doesn't
2164 // have the case and is skippable, or 2) does contain the case value
2165 // and also contains the break to exit the switch. In the later case,
2166 // we just verify the rest of the statements are elidable.
2167 if (FoundCase) {
2168 // If we found the case and skipped declarations, we can't do the
2169 // optimization.
2170 if (HadSkippedDecl)
2171 return CSFC_Failure;
2172
2173 for (++I; I != E; ++I)
2174 if (CodeGenFunction::ContainsLabel(*I, true))
2175 return CSFC_Failure;
2176 return CSFC_Success;
2177 }
2178 break;
2179 case CSFC_FallThrough:
2180 // If we have a fallthrough condition, then we must have found the
2181 // case started to include statements. Consider the rest of the
2182 // statements in the compound statement as candidates for inclusion.
2183 assert(FoundCase && "Didn't find case but returned fallthrough?");
2184 // We recursively found Case, so we're not looking for it anymore.
2185 Case = nullptr;
2186
2187 // If we found the case and skipped declarations, we can't do the
2188 // optimization.
2189 if (HadSkippedDecl)
2190 return CSFC_Failure;
2191 break;
2192 }
2193 }
2194
2195 if (!FoundCase)
2196 return CSFC_Success;
2197
2198 assert(!HadSkippedDecl && "fallthrough after skipping decl");
2199 }
2200
2201 // If we have statements in our range, then we know that the statements are
2202 // live and need to be added to the set of statements we're tracking.
2203 bool AnyDecls = false;
2204 for (; I != E; ++I) {
// NOTE(review): the listing omits source line 2205 here — presumably the
// update of AnyDecls, e.g.
// 'AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);' (AnyDecls is
// declared above and tested below but never visibly assigned); confirm
// against upstream.
2206
2207 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
2208 case CSFC_Failure: return CSFC_Failure;
2209 case CSFC_FallThrough:
2210 // A fallthrough result means that the statement was simple and just
2211 // included in ResultStmt, keep adding them afterwards.
2212 break;
2213 case CSFC_Success:
2214 // A successful result means that we found the break statement and
2215 // stopped statement inclusion. We just ensure that any leftover stmts
2216 // are skippable and return success ourselves.
2217 for (++I; I != E; ++I)
2218 if (CodeGenFunction::ContainsLabel(*I, true))
2219 return CSFC_Failure;
2220 return CSFC_Success;
2221 }
2222 }
2223
2224 // If we're about to fall out of a scope without hitting a 'break;', we
2225 // can't perform the optimization if there were any decls in that scope
2226 // (we'd lose their end-of-lifetime).
2227 if (AnyDecls) {
2228 // If the entire compound statement was live, there's one more thing we
2229 // can try before giving up: emit the whole thing as a single statement.
2230 // We can do that unless the statement contains a 'break;'.
2231 // FIXME: Such a break must be at the end of a construct within this one.
2232 // We could emit this by just ignoring the BreakStmts entirely.
2233 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
2234 ResultStmts.resize(StartSize);
2235 ResultStmts.push_back(S);
2236 } else {
2237 return CSFC_Failure;
2238 }
2239 }
2240
2241 return CSFC_FallThrough;
2242 }
2243
2244 // Okay, this is some other statement that we don't handle explicitly, like a
2245 // for statement or increment etc. If we are skipping over this statement,
2246 // just verify it doesn't have labels, which would make it invalid to elide.
2247 if (Case) {
2248 if (CodeGenFunction::ContainsLabel(S, true))
2249 return CSFC_Failure;
2250 return CSFC_Success;
2251 }
2252
2253 // Otherwise, we want to include this statement. Everything is cool with that
2254 // so long as it doesn't contain a break out of the switch we're in.
// NOTE(review): the listing omits source line 2255 here — given the comment
// above, presumably
// 'if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;'; confirm
// against upstream.
2256
2257 // Otherwise, everything is great. Include the statement and tell the caller
2258 // that we fall through and include the next statement as well.
2259 ResultStmts.push_back(S);
2260 return CSFC_FallThrough;
2261 }
2262
2263/// FindCaseStatementsForValue - Find the case statement being jumped to and
2264/// then invoke CollectStatementsForCase to find the list of statements to emit
2265/// for a switch on constant. See the comment above CollectStatementsForCase
2266/// for more details.
// NOTE(review): the listing omits source line 2267 here — presumably the
// first line of the signature, e.g.
// 'static bool FindCaseStatementsForValue(const SwitchStmt &S,'; reconcile
// with upstream CGStmt.cpp before editing. See the doc comment above for the
// overall contract.
2268 const llvm::APSInt &ConstantCondValue,
2269 SmallVectorImpl<const Stmt*> &ResultStmts,
2270 ASTContext &C,
2271 const SwitchCase *&ResultCase) {
2272 // First step, find the switch case that is being branched to. We can do this
2273 // efficiently by scanning the SwitchCase list.
2274 const SwitchCase *Case = S.getSwitchCaseList();
2275 const DefaultStmt *DefaultCase = nullptr;
2276
2277 for (; Case; Case = Case->getNextSwitchCase()) {
2278 // It's either a default or case. Just remember the default statement in
2279 // case we're not jumping to any numbered cases.
2280 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
2281 DefaultCase = DS;
2282 continue;
2283 }
2284
2285 // Check to see if this case is the one we're looking for.
2286 const CaseStmt *CS = cast<CaseStmt>(Case);
2287 // Don't handle case ranges yet.
2288 if (CS->getRHS()) return false;
2289
2290 // If we found our case, remember it as 'case'.
2291 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
2292 break;
2293 }
2294
2295 // If we didn't find a matching case, we use a default if it exists, or we
2296 // elide the whole switch body!
2297 if (!Case) {
2298 // It is safe to elide the body of the switch if it doesn't contain labels
2299 // etc. If it is safe, return successfully with an empty ResultStmts list.
2300 if (!DefaultCase)
// NOTE(review): the listing omits source line 2301 here — per the comment
// above, presumably something like
// 'return !CodeGenFunction::ContainsLabel(S.getBody());'; confirm against
// upstream.
2302 Case = DefaultCase;
2303 }
2304
2305 // Ok, we know which case is being jumped to, try to collect all the
2306 // statements that follow it. This can fail for a variety of reasons. Also,
2307 // check to see that the recursive walk actually found our case statement.
2308 // Insane cases like this can fail to find it in the recursive walk since we
2309 // don't handle every stmt kind:
2310 // switch (4) {
2311 // while (1) {
2312 // case 4: ...
2313 bool FoundCase = false;
2314 ResultCase = Case;
2315 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
2316 ResultStmts) != CSFC_Failure &&
2317 FoundCase;
2318 }
2319
// Translate per-case [[likely]]/[[unlikely]] annotations into branch-weight
// values suitable for !prof metadata. Returns std::nullopt when there are too
// few branches or no likelihood attribute is used at all.
2320 static std::optional<SmallVector<uint64_t, 16>>
// NOTE(review): the listing omits source line 2321 here — presumably the
// function name and parameter, e.g.
// 'getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {'; reconcile
// with upstream CGStmt.cpp before editing.
2322 // Are there enough branches to weight them?
2323 if (Likelihoods.size() <= 1)
2324 return std::nullopt;
2325
2326 uint64_t NumUnlikely = 0;
2327 uint64_t NumNone = 0;
2328 uint64_t NumLikely = 0;
2329 for (const auto LH : Likelihoods) {
2330 switch (LH) {
2331 case Stmt::LH_Unlikely:
2332 ++NumUnlikely;
2333 break;
2334 case Stmt::LH_None:
2335 ++NumNone;
2336 break;
2337 case Stmt::LH_Likely:
2338 ++NumLikely;
2339 break;
2340 }
2341 }
2342
2343 // Is there a likelihood attribute used?
2344 if (NumUnlikely == 0 && NumLikely == 0)
2345 return std::nullopt;
2346
2347 // When multiple cases share the same code they can be combined during
2348 // optimization. In that case the weights of the branch will be the sum of
2349 // the individual weights. Make sure the combined sum of all neutral cases
2350 // doesn't exceed the value of a single likely attribute.
2351 // The additions both avoid divisions by 0 and make sure the weights of None
2352 // don't exceed the weight of Likely.
2353 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2354 const uint64_t None = Likely / (NumNone + 1);
2355 const uint64_t Unlikely = 0;
2356
// NOTE(review): the listing omits source line 2357 here — presumably the
// declaration 'SmallVector<uint64_t, 16> Result;' (Result is used below but
// never visibly declared); confirm against upstream.
2358 Result.reserve(Likelihoods.size());
2359 for (const auto LH : Likelihoods) {
2360 switch (LH) {
2361 case Stmt::LH_Unlikely:
2362 Result.push_back(Unlikely);
2363 break;
2364 case Stmt::LH_None:
2365 Result.push_back(None);
2366 break;
2367 case Stmt::LH_Likely:
2368 Result.push_back(Likely);
2369 break;
2370 }
2371 }
2372
2373 return Result;
2374 }
2375
// Emit a 'switch' statement: either constant-fold the condition and emit only
// the live case, or emit a real llvm::SwitchInst, tracking profile weights /
// likelihood metadata and nesting state (SwitchInsn, CaseRangeBlock, etc.).
// NOTE(review): the listing omits the signature line 2376 — presumably
// 'void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {' — as well as
// several interior lines flagged below; reconcile with upstream CGStmt.cpp
// before editing.
2377 // Handle nested switch statements.
2378 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2379 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2380 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2381 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2382
2383 // See if we can constant fold the condition of the switch and therefore only
2384 // emit the live case statement (if any) of the switch.
2385 llvm::APSInt ConstantCondValue;
2386 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
// NOTE(review): line 2387 omitted — presumably the declaration of CaseStmts,
// e.g. 'SmallVector<const Stmt*, 6> CaseStmts;'; confirm against upstream.
2388 const SwitchCase *Case = nullptr;
2389 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2390 getContext(), Case)) {
2391 if (Case)
// NOTE(review): line 2392 omitted — presumably the then-body of the 'if'
// above (likely a profile-counter update for Case); confirm against upstream.
2393 RunCleanupsScope ExecutedScope(*this);
2394
2395 if (S.getInit())
2396 EmitStmt(S.getInit());
2397
2398 // Emit the condition variable if needed inside the entire cleanup scope
2399 // used by this special case for constant folded switches.
2400 if (S.getConditionVariable())
2401 EmitDecl(*S.getConditionVariable(), /*EvaluateConditionDecl=*/true);
2402
2403 // At this point, we are no longer "within" a switch instance, so
2404 // we can temporarily enforce this to ensure that any embedded case
2405 // statements are not emitted.
2406 SwitchInsn = nullptr;
2407
2408 // Okay, we can dead code eliminate everything except this case. Emit the
2409 // specified series of statements and we're good.
2410 for (const Stmt *CaseStmt : CaseStmts)
// NOTE(review): lines 2411–2412 omitted — presumably the loop body
// 'EmitStmt(CaseStmt);' plus a profile-counter update; confirm against
// upstream.
2413 PGO->markStmtMaybeUsed(S.getBody());
2414
2415 // Now we want to restore the saved switch instance so that nested
2416 // switches continue to function properly
2417 SwitchInsn = SavedSwitchInsn;
2418
2419 return;
2420 }
2421 }
2422
2423 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2424
2425 RunCleanupsScope ConditionScope(*this);
2426
2427 if (S.getInit())
2428 EmitStmt(S.getInit());
2429
2430 if (S.getConditionVariable())
// NOTE(review): lines 2431 and 2433 omitted — 2431 is presumably the
// then-body emitting the condition variable declaration; confirm against
// upstream.
2432 llvm::Value *CondV = EmitScalarExpr(S.getCond());
2434
2435 // Create basic block to hold stuff that comes after switch
2436 // statement. We also need to create a default block now so that
2437 // explicit case ranges tests can have a place to jump to on
2438 // failure.
2439 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2440 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
2441 addInstToNewSourceAtom(SwitchInsn, CondV);
2442
2443 if (HLSLControlFlowAttr != HLSLControlFlowHintAttr::SpellingNotCalculated) {
2444 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2445 llvm::ConstantInt *BranchHintConstant =
// NOTE(review): line 2446 omitted — presumably the left-hand side of the
// comparison, e.g. 'HLSLControlFlowAttr =='; confirm against upstream.
2447 HLSLControlFlowHintAttr::Spelling::Microsoft_branch
2448 ? llvm::ConstantInt::get(CGM.Int32Ty, 1)
2449 : llvm::ConstantInt::get(CGM.Int32Ty, 2);
2450 llvm::Metadata *Vals[] = {MDHelper.createString("hlsl.controlflow.hint"),
2451 MDHelper.createConstant(BranchHintConstant)};
2452 SwitchInsn->setMetadata("hlsl.controlflow.hint",
2453 llvm::MDNode::get(CGM.getLLVMContext(), Vals));
2454 }
2455
2456 if (PGO->haveRegionCounts()) {
2457 // Walk the SwitchCase list to find how many there are.
2458 uint64_t DefaultCount = 0;
2459 unsigned NumCases = 0;
2460 for (const SwitchCase *Case = S.getSwitchCaseList();
2461 Case;
2462 Case = Case->getNextSwitchCase()) {
2463 if (isa<DefaultStmt>(Case))
2464 DefaultCount = getProfileCount(Case);
2465 NumCases += 1;
2466 }
2467 SwitchWeights = new SmallVector<uint64_t, 16>();
2468 SwitchWeights->reserve(NumCases);
2469 // The default needs to be first. We store the edge count, so we already
2470 // know the right weight.
2471 SwitchWeights->push_back(DefaultCount);
2472 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2473 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2474 // Initialize the default case.
2475 SwitchLikelihood->push_back(Stmt::LH_None);
2476 }
2477
2478 CaseRangeBlock = DefaultBlock;
2479
2480 // Clear the insertion point to indicate we are in unreachable code.
2481 Builder.ClearInsertionPoint();
2482
2483 // All break statements jump to NextBlock. If BreakContinueStack is non-empty
2484 // then reuse last ContinueBlock.
2485 JumpDest OuterContinue;
2486 if (!BreakContinueStack.empty())
2487 OuterContinue = BreakContinueStack.back().ContinueBlock;
2488
2489 BreakContinueStack.push_back(BreakContinue(S, SwitchExit, OuterContinue));
2490
2491 // Emit switch body.
2492 EmitStmt(S.getBody());
2493
2494 BreakContinueStack.pop_back();
2495
2496 // Update the default block in case explicit case range tests have
2497 // been chained on top.
2498 SwitchInsn->setDefaultDest(CaseRangeBlock);
2499
2500 // If a default was never emitted:
2501 if (!DefaultBlock->getParent()) {
2502 // If we have cleanups, emit the default block so that there's a
2503 // place to jump through the cleanups from.
2504 if (ConditionScope.requiresCleanups()) {
2505 EmitBlock(DefaultBlock);
2506
2507 // Otherwise, just forward the default block to the switch end.
2508 } else {
2509 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2510 delete DefaultBlock;
2511 }
2512 }
2513
2514 ConditionScope.ForceCleanup();
2515
2516 // Emit continuation.
2517 EmitBlock(SwitchExit.getBlock(), true);
// NOTE(review): line 2518 omitted — presumably a profile-counter update for
// the switch exit; confirm against upstream.
2519
2520 // If the switch has a condition wrapped by __builtin_unpredictable,
2521 // create metadata that specifies that the switch is unpredictable.
2522 // Don't bother if not optimizing because that metadata would not be used.
2523 auto *Call = dyn_cast<CallExpr>(S.getCond());
2524 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2525 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2526 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2527 llvm::MDBuilder MDHelper(getLLVMContext());
2528 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2529 MDHelper.createUnpredictable());
2530 }
2531 }
2532
2533 if (SwitchWeights) {
2534 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2535 "switch weights do not match switch cases");
2536 // If there's only one jump destination there's no sense weighting it.
2537 if (SwitchWeights->size() > 1)
2538 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2539 createProfileWeights(*SwitchWeights));
2540 delete SwitchWeights;
2541 } else if (SwitchLikelihood) {
2542 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2543 "switch likelihoods do not match switch cases");
2544 std::optional<SmallVector<uint64_t, 16>> LHW =
2545 getLikelihoodWeights(*SwitchLikelihood);
2546 if (LHW) {
2547 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2548 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2549 createProfileWeights(*LHW));
2550 }
2551 delete SwitchLikelihood;
2552 }
// Restore the saved nested-switch state.
2553 SwitchInsn = SavedSwitchInsn;
2554 SwitchWeights = SavedSwitchWeights;
2555 SwitchLikelihood = SavedSwitchLikelihood;
2556 CaseRangeBlock = SavedCRBlock;
2557 }
2558
2559/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
2560/// as using a particular register add that as a constraint that will be used
2561/// in this asm stmt.
// If AsmExpr refers to a 'register' variable with an asm label, rewrite the
// constraint to pin the operand to that specific register (returning e.g.
// "{rax}" or "&{rax}" for early-clobber); otherwise return the constraint
// unchanged. See the doc comment above.
2562 static std::string
2563 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
// NOTE(review): the listing omits source line 2564 here — presumably the
// parameters 'const TargetInfo &Target, CodeGenModule &CGM,' (both Target and
// CGM are used below but not visibly declared); confirm against upstream.
2565 const AsmStmt &Stmt, const bool EarlyClobber,
2566 std::string *GCCReg = nullptr) {
2567 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2568 if (!AsmDeclRef)
2569 return Constraint;
2570 const ValueDecl &Value = *AsmDeclRef->getDecl();
2571 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2572 if (!Variable)
2573 return Constraint;
2574 if (Variable->getStorageClass() != SC_Register)
2575 return Constraint;
2576 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2577 if (!Attr)
2578 return Constraint;
2579 StringRef Register = Attr->getLabel();
2580 assert(Target.isValidGCCRegisterName(Register));
2581 // We're using validateOutputConstraint here because we only care if
2582 // this is a register constraint.
2583 TargetInfo::ConstraintInfo Info(Constraint, "");
2584 if (Target.validateOutputConstraint(Info) &&
2585 !Info.allowsRegister()) {
2586 CGM.ErrorUnsupported(&Stmt, "__asm__");
2587 return Constraint;
2588 }
2589 // Canonicalize the register here before returning it.
2590 Register = Target.getNormalizedGCCRegisterName(Register);
2591 if (GCCReg != nullptr)
2592 *GCCReg = Register.str();
2593 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2594 }
2595
// Emit an lvalue asm input operand: load it as a scalar for register-capable
// constraints (widening odd-sized aggregates to an integer when the target
// allows), or fall back to passing its address as an indirect ('*') memory
// operand — returning {value, element-type-for-indirect-or-nullptr}.
2596 std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2597 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2598 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2599 if (Info.allowsRegister() || !Info.allowsMemory()) {
// NOTE(review): the listing omits source line 2600 here — presumably a guard
// such as 'if (CodeGenFunction::hasScalarEvaluationKind(InputType))' governing
// the return below; confirm against upstream.
2601 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2602
2603 llvm::Type *Ty = ConvertType(InputType);
2604 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2605 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2606 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2607 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2608
2609 return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
2610 nullptr};
2611 }
2612 }
2613
// Memory operand: pass the address and mark the constraint as indirect.
2614 Address Addr = InputValue.getAddress();
2615 ConstraintStr += '*';
2616 return {InputValue.getPointer(*this), Addr.getElementType()};
2617 }
2618
// Emit an asm input operand expression: immediate constants for
// constant-only constraints, a scalar value for register-capable constraints,
// otherwise an lvalue passed via EmitAsmInputLValue.
2619 std::pair<llvm::Value *, llvm::Type *>
2620 CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2621 const Expr *InputExpr,
2622 std::string &ConstraintStr) {
2623 // If this can't be a register or memory, i.e., has to be a constant
2624 // (immediate or symbolic), try to emit it as such.
2625 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2626 if (Info.requiresImmediateConstant()) {
2627 Expr::EvalResult EVResult;
2628 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2629
2630 llvm::APSInt IntResult;
2631 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2632 getContext()))
2633 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2634 }
2635
2636 Expr::EvalResult Result;
2637 if (InputExpr->EvaluateAsInt(Result, getContext()))
2638 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2639 nullptr};
2640 }
2641
2642 if (Info.allowsRegister() || !Info.allowsMemory())
// NOTE(review): the listing omits source line 2643 here — presumably a guard
// such as 'if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))'
// governing the return below; confirm against upstream.
2644 return {EmitScalarExpr(InputExpr), nullptr};
2645 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2646 return {EmitScalarExpr(InputExpr), nullptr};
2647 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2648 LValue Dest = EmitLValue(InputExpr);
2649 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2650 InputExpr->getExprLoc());
2651 }
2652
2653/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2654/// asm call instruction. The !srcloc MDNode contains a list of constant
2655/// integers which are the source locations of the start of each line in the
2656/// asm.
// Build the !srcloc MDNode for an inline-asm call: raw source-location
// encodings for the first line of the asm string plus the start of each
// subsequent '\n'-separated line. See the doc comment above.
2657 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2658 CodeGenFunction &CGF) {
// NOTE(review): the listing omits source line 2659 here — presumably the
// declaration of 'Locs', e.g. 'SmallVector<llvm::Metadata *, 8> Locs;' (used
// below but not visibly declared); confirm against upstream.
2660 // Add the location of the first line to the MDNode.
2661 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2662 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2663 StringRef StrVal = Str->getString();
2664 if (!StrVal.empty()) {
// NOTE(review): the listing omits source line 2665 here — presumably the
// declaration of 'SM', e.g.
// 'const SourceManager &SM = CGF.CGM.getContext().getSourceManager();' (SM is
// used in the loop below); confirm against upstream.
2666 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2667 unsigned StartToken = 0;
2668 unsigned ByteOffset = 0;
2669
2670 // Add the location of the start of each subsequent line of the asm to the
2671 // MDNode.
2672 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2673 if (StrVal[i] != '\n') continue;
2674 SourceLocation LineLoc = Str->getLocationOfByte(
2675 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2676 Locs.push_back(llvm::ConstantAsMetadata::get(
2677 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2678 }
2679 }
2680
2681 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2682 }
2683
// Decorate the just-emitted inline-asm call: function attributes (nounwind,
// nomerge, memory effects, convergent), elementtype attributes for indirect
// operands, !srcloc metadata, debug-info atom grouping — then unpack the
// call's result(s) into RegResults (a single value, or extractvalue per
// element of an aggregate return).
2684 static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2685 bool HasUnwindClobber, bool ReadOnly,
2686 bool ReadNone, bool NoMerge, bool NoConvergent,
2687 const AsmStmt &S,
2688 const std::vector<llvm::Type *> &ResultRegTypes,
2689 const std::vector<llvm::Type *> &ArgElemTypes,
2690 CodeGenFunction &CGF,
2691 std::vector<llvm::Value *> &RegResults) {
// An "unwind" clobber means the asm may unwind, so we must not mark it
// nounwind.
2692 if (!HasUnwindClobber)
2693 Result.addFnAttr(llvm::Attribute::NoUnwind);
2694
2695 if (NoMerge)
2696 Result.addFnAttr(llvm::Attribute::NoMerge);
2697 // Attach readnone and readonly attributes.
2698 if (!HasSideEffect) {
2699 if (ReadNone)
2700 Result.setDoesNotAccessMemory();
2701 else if (ReadOnly)
2702 Result.setOnlyReadsMemory();
2703 }
2704
2705 // Add elementtype attribute for indirect constraints.
2706 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2707 if (Pair.value()) {
2708 auto Attr = llvm::Attribute::get(
2709 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2710 Result.addParamAttr(Pair.index(), Attr);
2711 }
2712 }
2713
2714 // Slap the source location of the inline asm into a !srcloc metadata on the
2715 // call.
2716 const StringLiteral *SL;
2717 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S);
2718 gccAsmStmt &&
2719 (SL = dyn_cast<StringLiteral>(gccAsmStmt->getAsmStringExpr()))) {
2720 Result.setMetadata("srcloc", getAsmSrcLocInfo(SL, CGF));
2721 } else {
2722 // At least put the line number on MS inline asm blobs and GCC asm constexpr
2723 // strings.
2724 llvm::Constant *Loc =
2725 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2726 Result.setMetadata("srcloc",
2727 llvm::MDNode::get(CGF.getLLVMContext(),
2728 llvm::ConstantAsMetadata::get(Loc)));
2729 }
2730
2731 // Make inline-asm calls Key for the debug info feature Key Instructions.
2732 CGF.addInstToNewSourceAtom(&Result, nullptr);
2733
2734 if (!NoConvergent && CGF.getLangOpts().assumeFunctionsAreConvergent())
2735 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2736 // convergent (meaning, they may call an intrinsically convergent op, such
2737 // as bar.sync, and so can't have certain optimizations applied around
2738 // them) unless it's explicitly marked 'noconvergent'.
2739 Result.addFnAttr(llvm::Attribute::Convergent);
2740 // Extract all of the register value results from the asm.
2741 if (ResultRegTypes.size() == 1) {
// Single result: the call's return value is the result itself.
2742 RegResults.push_back(&Result);
2743 } else {
// Multiple results come back as a struct; extract each member.
2744 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2745 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2746 RegResults.push_back(Tmp);
2747 }
2748 }
2749 }
2750
// Store the asm call's register results back into their output lvalues,
// emitting llvm.assume range hints for bounded outputs and converting between
// the IR result type and the expression type (fptrunc, int<->ptr, zext/trunc,
// vector bitcast) where they differ.
2751 static void
// NOTE(review): the listing omits source line 2752 here — presumably the
// function name and first parameters, e.g.
// 'EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,'; reconcile with
// upstream CGStmt.cpp before editing.
2753 const llvm::ArrayRef<llvm::Value *> RegResults,
2754 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2755 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2756 const llvm::ArrayRef<LValue> ResultRegDests,
2757 const llvm::ArrayRef<QualType> ResultRegQualTys,
2758 const llvm::BitVector &ResultTypeRequiresCast,
2759 const std::vector<std::optional<std::pair<unsigned, unsigned>>>
2760 &ResultBounds) {
2761 CGBuilderTy &Builder = CGF.Builder;
2762 CodeGenModule &CGM = CGF.CGM;
2763 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2764
2765 assert(RegResults.size() == ResultRegTypes.size());
2766 assert(RegResults.size() == ResultTruncRegTypes.size());
2767 assert(RegResults.size() == ResultRegDests.size());
2768 // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2769 // in which case its size may grow.
2770 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2771 assert(ResultBounds.size() <= ResultRegDests.size());
2772
2773 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2774 llvm::Value *Tmp = RegResults[i];
2775 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2776
// Bounded output: tell the optimizer the value is within [LowerBound,
// UpperBound) via llvm.assume on an unsigned comparison.
2777 if ((i < ResultBounds.size()) && ResultBounds[i].has_value()) {
2778 const auto [LowerBound, UpperBound] = ResultBounds[i].value();
2779 // FIXME: Support for nonzero lower bounds not yet implemented.
2780 assert(LowerBound == 0 && "Output operand lower bound is not zero.");
2781 llvm::Constant *UpperBoundConst =
2782 llvm::ConstantInt::get(Tmp->getType(), UpperBound);
2783 llvm::Value *IsBooleanValue =
2784 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, UpperBoundConst);
2785 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2786 Builder.CreateCall(FnAssume, IsBooleanValue);
2787 }
2788
2789 // If the result type of the LLVM IR asm doesn't match the result type of
2790 // the expression, do the conversion.
2791 if (ResultRegTypes[i] != TruncTy) {
2792
2793 // Truncate the integer result to the right size, note that TruncTy can be
2794 // a pointer.
2795 if (TruncTy->isFloatingPointTy())
2796 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2797 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2798 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2799 Tmp = Builder.CreateTrunc(
2800 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2801 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2802 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2803 uint64_t TmpSize =
2804 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2805 Tmp = Builder.CreatePtrToInt(
2806 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2807 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2808 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2809 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2810 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2811 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2812 }
2813 }
2814
2815 ApplyAtomGroup Grp(CGF.getDebugInfo());
2816 LValue Dest = ResultRegDests[i];
2817 // ResultTypeRequiresCast elements correspond to the first
2818 // ResultTypeRequiresCast.size() elements of RegResults.
2819 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2820 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2821 Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
2822 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2823 llvm::StoreInst *S = Builder.CreateStore(Tmp, A);
2824 CGF.addInstToCurrentSourceAtom(S, S->getValueOperand());
2825 continue;
2826 }
2827
2828 QualType Ty =
2829 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2830 if (Ty.isNull()) {
// No integer type of the required width exists: diagnose and bail out.
2831 const Expr *OutExpr = S.getOutputExpr(i);
2832 CGM.getDiags().Report(OutExpr->getExprLoc(),
2833 diag::err_store_value_to_reg);
2834 return;
2835 }
2836 Dest = CGF.MakeAddrLValue(A, Ty);
2837 }
2838 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2839 }
2840 }
2841
// NOTE(review): the listing omits source line 2842 here — presumably the
// signature start, e.g.
// 'static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,'; reconcile
// with upstream CGStmt.cpp before editing.
// HIPStdPar fallback: replace an asm statement the target can't validate with
// a call to a marker function ("__ASM__hipstdpar_unsupported") carrying the
// asm text, so the failure is reported/handled later instead of at CodeGen.
2843 const AsmStmt &S) {
2844 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2845
2846 std::string Asm;
2847 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2848 Asm = GCCAsm->getAsmString();
2849
2850 auto &Ctx = CGF->CGM.getLLVMContext();
2851
2852 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2853 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2854 {StrTy->getType()}, false);
2855 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2856
2857 CGF->Builder.CreateCall(UBF, {StrTy});
2858 }
2859
// NOTE(review): this is the body of CodeGenFunction::EmitAsmStmt(const
// AsmStmt &S); its signature line (2860) and several hyperlink-bearing
// lines (e.g. 2877, 2890, 2963-2964, 2975, 3081, 3169) are not visible in
// this excerpt. It lowers a GCC/MS inline-asm statement to an LLVM
// inline-asm call (or a callbr for "asm goto"), building the constraint
// string, argument list, clobbers, and result stores along the way.
2861 // Pop all cleanup blocks at the end of the asm statement.
2862 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2863
2864 // Assemble the final asm string.
2865 std::string AsmString = S.generateAsmString(getContext());
2866
2867 // Get all the output and input constraints together.
2868 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2869 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2870
     // Under HIPStdPar device compilation a constraint the target cannot
     // validate is tolerated: instead of asserting, the whole statement is
     // routed to EmitHipStdParUnsupportedAsm below.
2871 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2872 bool IsValidTargetAsm = true;
2873 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2874 StringRef Name;
2875 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2876 Name = GAS->getOutputName(i);
     // (elided line 2877 constructs Info from the output constraint + Name)
2878 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2879 if (IsHipStdPar && !IsValid)
2880 IsValidTargetAsm = false;
2881 else
2882 assert(IsValid && "Failed to parse output constraint");
2883 OutputConstraintInfos.push_back(Info);
2884 }
2885
2886 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2887 StringRef Name;
2888 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2889 Name = GAS->getInputName(i);
     // (elided line 2890 constructs Info from the input constraint + Name)
2891 bool IsValid =
2892 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2893 if (IsHipStdPar && !IsValid)
2894 IsValidTargetAsm = false;
2895 else
2896 assert(IsValid && "Failed to parse input constraint");
2897 InputConstraintInfos.push_back(Info);
2898 }
2899
2900 if (!IsValidTargetAsm)
2901 return EmitHipStdParUnsupportedAsm(this, S);
2902
2903 std::string Constraints;
2904
     // Parallel arrays describing register (by-value) results of the asm:
     // destination lvalues, their QualTypes, the IR types the asm returns,
     // the IR types to truncate/cast back to, and optional bit bounds.
2905 std::vector<LValue> ResultRegDests;
2906 std::vector<QualType> ResultRegQualTys;
2907 std::vector<llvm::Type *> ResultRegTypes;
2908 std::vector<llvm::Type *> ResultTruncRegTypes;
2909 std::vector<llvm::Type *> ArgTypes;
2910 std::vector<llvm::Type *> ArgElemTypes;
2911 std::vector<llvm::Value*> Args;
2912 llvm::BitVector ResultTypeRequiresCast;
2913 std::vector<std::optional<std::pair<unsigned, unsigned>>> ResultBounds;
2914
2915 // Keep track of inout constraints.
2916 std::string InOutConstraints;
2917 std::vector<llvm::Value*> InOutArgs;
2918 std::vector<llvm::Type*> InOutArgTypes;
2919 std::vector<llvm::Type*> InOutArgElemTypes;
2920
2921 // Keep track of out constraints for tied input operand.
2922 std::vector<std::string> OutputConstraints;
2923
2924 // Keep track of defined physregs.
2925 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2926
2927 // An inline asm can be marked readonly if it meets the following conditions:
2928 // - it doesn't have any sideeffects
2929 // - it doesn't clobber memory
2930 // - it doesn't return a value by-reference
2931 // It can be marked readnone if it doesn't have any input memory constraints
2932 // in addition to meeting the conditions listed above.
2933 bool ReadOnly = true, ReadNone = true;
2934
     // --- Phase 1: outputs. Decide per output whether it is returned
     // by-value (register) or written by-reference (memory), and record the
     // extra "input" half of any read-write (+) operand.
2935 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2936 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2937
2938 // Simplify the output constraint.
2939 std::string OutputConstraint(S.getOutputConstraint(i));
2940 OutputConstraint = getTarget().simplifyConstraint(
2941 StringRef(OutputConstraint).substr(1), &OutputConstraintInfos);
2942
2943 const Expr *OutExpr = S.getOutputExpr(i);
2944 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2945
2946 std::string GCCReg;
2947 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2948 getTarget(), CGM, S,
2949 Info.earlyClobber(),
2950 &GCCReg);
2951 // Give an error on multiple outputs to same physreg.
2952 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2953 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2954
2955 OutputConstraints.push_back(OutputConstraint);
2956 LValue Dest = EmitLValue(OutExpr);
2957 if (!Constraints.empty())
2958 Constraints += ',';
2959
2960 // If this is a register output, then make the inline asm return it
2961 // by-value. If this is a memory result, return the value by-reference.
2962 QualType QTy = OutExpr->getType();
2963 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
     // (elided line 2964 completes this condition)
2965 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2966
2967 Constraints += "=" + OutputConstraint;
2968 ResultRegQualTys.push_back(QTy);
2969 ResultRegDests.push_back(Dest);
2970
2971 ResultBounds.emplace_back(Info.getOutputOperandBounds());
2972
2973 llvm::Type *Ty = ConvertTypeForMem(QTy);
2974 const bool RequiresCast = Info.allowsRegister() &&
     // (elided line 2975 completes this condition)
2976 Ty->isAggregateType());
2977
2978 ResultTruncRegTypes.push_back(Ty);
2979 ResultTypeRequiresCast.push_back(RequiresCast);
2980
     // Aggregates returned in registers are modeled as an integer of the
     // same bit width; the stores phase casts back to the real type.
2981 if (RequiresCast) {
2982 unsigned Size = getContext().getTypeSize(QTy);
2983 if (Size)
2984 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2985 else
2986 CGM.Error(OutExpr->getExprLoc(), "output size should not be zero");
2987 }
2988 ResultRegTypes.push_back(Ty);
2989 // If this output is tied to an input, and if the input is larger, then
2990 // we need to set the actual result type of the inline asm node to be the
2991 // same as the input type.
2992 if (Info.hasMatchingInput()) {
2993 unsigned InputNo;
2994 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2995 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2996 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2997 break;
2998 }
2999 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
3000
3001 QualType InputTy = S.getInputExpr(InputNo)->getType();
3002 QualType OutputType = OutExpr->getType();
3003
3004 uint64_t InputSize = getContext().getTypeSize(InputTy);
3005 if (getContext().getTypeSize(OutputType) < InputSize) {
3006 // Form the asm to return the value as a larger integer or fp type.
3007 ResultRegTypes.back() = ConvertType(InputTy);
3008 }
3009 }
3010 if (llvm::Type* AdjTy =
3011 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
3012 ResultRegTypes.back()))
3013 ResultRegTypes.back() = AdjTy;
3014 else {
3015 CGM.getDiags().Report(S.getAsmLoc(),
3016 diag::err_asm_invalid_type_in_input)
3017 << OutExpr->getType() << OutputConstraint;
3018 }
3019
3020 // Update largest vector width for any vector types.
3021 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
3022 LargestVectorWidth =
3023 std::max((uint64_t)LargestVectorWidth,
3024 VT->getPrimitiveSizeInBits().getKnownMinValue())
3025 } else {
3026 Address DestAddr = Dest.getAddress();
3027 // Matrix types in memory are represented by arrays, but accessed through
3028 // vector pointers, with the alignment specified on the access operation.
3029 // For inline assembly, update pointer arguments to use vector pointers.
3030 // Otherwise there will be a mis-match if the matrix is also an
3031 // input-argument which is represented as vector.
3032 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
3033 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
3034
3035 ArgTypes.push_back(DestAddr.getType());
3036 ArgElemTypes.push_back(DestAddr.getElementType());
3037 Args.push_back(DestAddr.emitRawPointer(*this));
3038 Constraints += "=*";
3039 Constraints += OutputConstraint;
3040 ReadOnly = ReadNone = false;
3041 }
3042
     // A "+" (read-write) operand also acts as an input tied back to this
     // output; its input half is accumulated separately and appended after
     // all plain inputs.
3043 if (Info.isReadWrite()) {
3044 InOutConstraints += ',';
3045
3046 const Expr *InputExpr = S.getOutputExpr(i);
3047 llvm::Value *Arg;
3048 llvm::Type *ArgElemType;
3049 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
3050 Info, Dest, InputExpr->getType(), InOutConstraints,
3051 InputExpr->getExprLoc());
3052
3053 if (llvm::Type* AdjTy =
3054 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
3055 Arg->getType()))
3056 Arg = Builder.CreateBitCast(Arg, AdjTy);
3057
3058 // Update largest vector width for any vector types.
3059 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
3060 LargestVectorWidth =
3061 std::max((uint64_t)LargestVectorWidth,
3062 VT->getPrimitiveSizeInBits().getKnownMinValue());
3063 // Only tie earlyclobber physregs.
3064 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
3065 InOutConstraints += llvm::utostr(i);
3066 else
3067 InOutConstraints += OutputConstraint;
3068
3069 InOutArgTypes.push_back(Arg->getType());
3070 InOutArgElemTypes.push_back(ArgElemType);
3071 InOutArgs.push_back(Arg);
3072 }
3073 }
3074
3075 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
3076 // to the return value slot. Only do this when returning in registers.
3077 if (isa<MSAsmStmt>(&S)) {
3078 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
3079 if (RetAI.isDirect() || RetAI.isExtend()) {
3080 // Make a fake lvalue for the return value slot.
     // (elided line 3081 builds the ReturnSlot lvalue)
3082 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
3083 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
3084 ResultRegDests, AsmString, S.getNumOutputs());
3085 SawAsmBlock = true;
3086 }
3087 }
3088
     // --- Phase 2: plain inputs.
3089 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
3090 const Expr *InputExpr = S.getInputExpr(i);
3091
3092 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
3093
3094 if (Info.allowsMemory())
3095 ReadNone = false;
3096
3097 if (!Constraints.empty())
3098 Constraints += ',';
3099
3100 // Simplify the input constraint.
3101 std::string InputConstraint(S.getInputConstraint(i));
3102 InputConstraint =
3103 getTarget().simplifyConstraint(InputConstraint, &OutputConstraintInfos);
3104
3105 InputConstraint = AddVariableConstraints(
3106 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
3107 getTarget(), CGM, S, false /* No EarlyClobber */);
3108
3109 std::string ReplaceConstraint (InputConstraint);
3110 llvm::Value *Arg;
3111 llvm::Type *ArgElemType;
3112 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
3113
3114 // If this input argument is tied to a larger output result, extend the
3115 // input to be the same size as the output. The LLVM backend wants to see
3116 // the input and output of a matching constraint be the same size. Note
3117 // that GCC does not define what the top bits are here. We use zext because
3118 // that is usually cheaper, but LLVM IR should really get an anyext someday.
3119 if (Info.hasTiedOperand()) {
3120 unsigned Output = Info.getTiedOperand();
3121 QualType OutputType = S.getOutputExpr(Output)->getType();
3122 QualType InputTy = InputExpr->getType();
3123
3124 if (getContext().getTypeSize(OutputType) >
3125 getContext().getTypeSize(InputTy)) {
3126 // Use ptrtoint as appropriate so that we can do our extension.
3127 if (isa<llvm::PointerType>(Arg->getType()))
3128 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
3129 llvm::Type *OutputTy = ConvertType(OutputType);
3130 if (isa<llvm::IntegerType>(OutputTy))
3131 Arg = Builder.CreateZExt(Arg, OutputTy);
3132 else if (isa<llvm::PointerType>(OutputTy))
3133 Arg = Builder.CreateZExt(Arg, IntPtrTy);
3134 else if (OutputTy->isFloatingPointTy())
3135 Arg = Builder.CreateFPExt(Arg, OutputTy);
3136 }
3137 // Deal with the tied operands' constraint code in adjustInlineAsmType.
3138 ReplaceConstraint = OutputConstraints[Output];
3139 }
3140 if (llvm::Type* AdjTy =
3141 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
3142 Arg->getType()))
3143 Arg = Builder.CreateBitCast(Arg, AdjTy);
3144 else
3145 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
3146 << InputExpr->getType() << InputConstraint;
3147
3148 // Update largest vector width for any vector types.
3149 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
3150 LargestVectorWidth =
3151 std::max((uint64_t)LargestVectorWidth,
3152 VT->getPrimitiveSizeInBits().getKnownMinValue());
3153
3154 ArgTypes.push_back(Arg->getType());
3155 ArgElemTypes.push_back(ArgElemType);
3156 Args.push_back(Arg);
3157 Constraints += InputConstraint;
3158 }
3159
3160 // Append the "input" part of inout constraints.
3161 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
3162 ArgTypes.push_back(InOutArgTypes[i]);
3163 ArgElemTypes.push_back(InOutArgElemTypes[i]);
3164 Args.push_back(InOutArgs[i]);
3165 }
3166 Constraints += InOutConstraints;
3167
3168 // Labels
     // (elided line 3169 declares the Transfer vector of label blocks)
3170 llvm::BasicBlock *Fallthrough = nullptr;
3171 bool IsGCCAsmGoto = false;
3172 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
3173 IsGCCAsmGoto = GS->isAsmGoto();
3174 if (IsGCCAsmGoto) {
     // Each "asm goto" label becomes an "!i" constraint plus an indirect
     // destination for the callbr emitted below.
3175 for (const auto *E : GS->labels()) {
3176 JumpDest Dest = getJumpDestForLabel(E->getLabel());
3177 Transfer.push_back(Dest.getBlock());
3178 if (!Constraints.empty())
3179 Constraints += ',';
3180 Constraints += "!i";
3181 }
3182 Fallthrough = createBasicBlock("asm.fallthrough");
3183 }
3184 }
3185
3186 bool HasUnwindClobber = false;
3187
3188 // Clobbers
3189 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
3190 std::string Clobber = S.getClobber(i);
3191
3192 if (Clobber == "memory")
3193 ReadOnly = ReadNone = false;
3194 else if (Clobber == "unwind") {
3195 HasUnwindClobber = true;
3196 continue;
3197 } else if (Clobber != "cc") {
3198 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
3199 if (CGM.getCodeGenOpts().StackClashProtector &&
3200 getTarget().isSPRegName(Clobber)) {
3201 CGM.getDiags().Report(S.getAsmLoc(),
3202 diag::warn_stack_clash_protection_inline_asm);
3203 }
3204 }
3205
     // MS asm: an EAX/EDX clobber that already appears as an output is
     // turned into an earlyclobber ("&") on that output rather than a
     // separate ~{reg} entry.
3206 if (isa<MSAsmStmt>(&S)) {
3207 if (Clobber == "eax" || Clobber == "edx") {
3208 if (Constraints.find("=&A") != std::string::npos)
3209 continue;
3210 std::string::size_type position1 =
3211 Constraints.find("={" + Clobber + "}");
3212 if (position1 != std::string::npos) {
3213 Constraints.insert(position1 + 1, "&");
3214 continue;
3215 }
3216 std::string::size_type position2 = Constraints.find("=A");
3217 if (position2 != std::string::npos) {
3218 Constraints.insert(position2 + 1, "&");
3219 continue;
3220 }
3221 }
3222 }
3223 if (!Constraints.empty())
3224 Constraints += ',';
3225
3226 Constraints += "~{";
3227 Constraints += Clobber;
3228 Constraints += '}';
3229 }
3230
3231 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
3232 "unwind clobber can't be used with asm goto");
3233
3234 // Add machine specific clobbers
3235 std::string_view MachineClobbers = getTarget().getClobbers();
3236 if (!MachineClobbers.empty()) {
3237 if (!Constraints.empty())
3238 Constraints += ',';
3239 Constraints += MachineClobbers;
3240 }
3241
     // The asm's result type is void / the single register type / a struct
     // of all register result types.
3242 llvm::Type *ResultType;
3243 if (ResultRegTypes.empty())
3244 ResultType = VoidTy;
3245 else if (ResultRegTypes.size() == 1)
3246 ResultType = ResultRegTypes[0];
3247 else
3248 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
3249
3250 llvm::FunctionType *FTy =
3251 llvm::FunctionType::get(ResultType, ArgTypes, false);
3252
3253 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3254
3255 llvm::InlineAsm::AsmDialect GnuAsmDialect =
3256 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
3257 ? llvm::InlineAsm::AD_ATT
3258 : llvm::InlineAsm::AD_Intel;
3259 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
3260 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3261
3262 llvm::InlineAsm *IA = llvm::InlineAsm::get(
3263 FTy, AsmString, Constraints, HasSideEffect,
3264 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
3265 std::vector<llvm::Value*> RegResults;
3266 llvm::CallBrInst *CBR;
3267 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3268 CBRRegResults;
     // --- Phase 3: emit the call. asm goto -> callbr; "unwind" clobber ->
     // call-or-invoke; otherwise a plain call.
3269 if (IsGCCAsmGoto) {
3270 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
3271 EmitBlock(Fallthrough);
3272 UpdateAsmCallInst(*CBR, HasSideEffect, /*HasUnwindClobber=*/false, ReadOnly,
3273 ReadNone, InNoMergeAttributedStmt,
3274 InNoConvergentAttributedStmt, S, ResultRegTypes,
3275 ArgElemTypes, *this, RegResults);
3276 // Because we are emitting code top to bottom, we don't have enough
3277 // information at this point to know precisely whether we have a critical
3278 // edge. If we have outputs, split all indirect destinations.
3279 if (!RegResults.empty()) {
3280 unsigned i = 0;
3281 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3282 llvm::Twine SynthName = Dest->getName() + ".split";
3283 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
3284 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3285 Builder.SetInsertPoint(SynthBB);
3286
3287 if (ResultRegTypes.size() == 1) {
3288 CBRRegResults[SynthBB].push_back(CBR);
3289 } else {
3290 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3291 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
3292 CBRRegResults[SynthBB].push_back(Tmp);
3293 }
3294 }
3295
3296 EmitBranch(Dest);
3297 EmitBlock(SynthBB);
3298 CBR->setIndirectDest(i++, SynthBB);
3299 }
3300 }
3301 } else if (HasUnwindClobber) {
3302 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
3303 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/true,
3304 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3305 InNoConvergentAttributedStmt, S, ResultRegTypes,
3306 ArgElemTypes, *this, RegResults);
3307 } else {
3308 llvm::CallInst *Result =
3309 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
3310 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/false,
3311 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3312 InNoConvergentAttributedStmt, S, ResultRegTypes,
3313 ArgElemTypes, *this, RegResults);
3314 }
3315
     // --- Phase 4: store register results back to their destinations.
3316 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3317 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3318 ResultBounds);
3319
3320 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
3321 // different insertion point; one for each indirect destination and with
3322 // CBRRegResults rather than RegResults.
3323 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3324 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3325 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3326 Builder.SetInsertPoint(Succ, --(Succ->end()));
3327 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
3328 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3329 ResultTypeRequiresCast, ResultBounds);
3330 }
3331 }
3332}
3333
// NOTE(review): this is the body of CodeGenFunction::InitCapturedStruct
// (signature line 3334 is not visible in this excerpt). It allocates a
// temporary for the capture record of a CapturedStmt and initializes each
// field from the corresponding capture initializer.
3335 const RecordDecl *RD = S.getCapturedRecordDecl();
     // (elided line 3336 derives RecordTy, the QualType of the record)
3337
3338 // Initialize the captured struct.
3339 LValue SlotLV =
3340 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
3341
     // Walk record fields and capture initializers in lockstep; VLA-type
     // captures store the size expression instead of a regular initializer.
3342 RecordDecl::field_iterator CurField = RD->field_begin();
     // (elided line 3343 opens the loop over S.capture_init_begin())
3344 E = S.capture_init_end();
3345 I != E; ++I, ++CurField) {
3346 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
3347 if (CurField->hasCapturedVLAType()) {
3348 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
3349 } else {
3350 EmitInitializerForField(*CurField, LV, *I);
3351 }
3352 }
3353
3354 return SlotLV;
3355}
3356
3357 /// Generate an outlined function for the body of a CapturedStmt, store any
3358 /// captured variables into the captured struct, and call the outlined function.
3359 llvm::Function *
     // NOTE(review): the signature line (3360,
     // CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S,
     // CapturedRegionKind K)) is not visible in this excerpt.
3361 LValue CapStruct = InitCapturedStruct(S);
3362
3363 // Emit the CapturedDecl
     // A fresh CodeGenFunction emits the outlined helper; CapturedStmtInfo
     // is owned manually here and freed once the helper is generated.
3364 CodeGenFunction CGF(CGM, true);
3365 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3366 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3367 delete CGF.CapturedStmtInfo;
3368
3369 // Emit call to the helper function.
3370 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
3371
3372 return F;
3373}
3374
// NOTE(review): this is the body of
// CodeGenFunction::GenerateCapturedStmtArgument (signature line 3375 is not
// visible in this excerpt). It materializes the capture record for S and
// returns its address, for use as the outlined function's context argument.
3376 LValue CapStruct = InitCapturedStruct(S);
3377 return CapStruct.getAddress();
3378}
3379
3380 /// Creates the outlined function for a CapturedStmt.
3381 llvm::Function *
     // NOTE(review): the signature line (3382,
     // CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S))
     // is not visible in this excerpt.
3383 assert(CapturedStmtInfo &&
3384 "CapturedStmtInfo should be set when generating the captured function");
3385 const CapturedDecl *CD = S.getCapturedDecl();
3386 const RecordDecl *RD = S.getCapturedRecordDecl();
3387 SourceLocation Loc = S.getBeginLoc();
3388 assert(CD->hasBody() && "missing CapturedDecl body");
3389
3390 // Build the argument list.
3391 ASTContext &Ctx = CGM.getContext();
3392 FunctionArgList Args;
3393 Args.append(CD->param_begin(), CD->param_end());
3394
3395 // Create the function declaration.
3396 const CGFunctionInfo &FuncInfo =
3397 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
3398 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
3399
     // The helper is internal: it is only ever called from the function that
     // contains the CapturedStmt.
3400 llvm::Function *F =
3401 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3402 CapturedStmtInfo->getHelperName(), &CGM.getModule());
3403 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
3404 if (CD->isNothrow())
3405 F->addFnAttr(llvm::Attribute::NoUnwind);
3406
3407 // Generate the function.
3408 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
3409 CD->getBody()->getBeginLoc());
3410 // Set the context parameter in CapturedStmtInfo.
3411 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
3412 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
3413
3414 // Initialize variable-length arrays.
     // (elided line 3415 builds Base, an lvalue over the context record)
3416 CapturedStmtInfo->getContextValue(), Ctx.getCanonicalTagType(RD));
     // Each captured VLA size expression is loaded from the context record
     // and registered in VLASizeMap so sizeof/indexing inside the body works.
3417 for (auto *FD : RD->fields()) {
3418 if (FD->hasCapturedVLAType()) {
3419 auto *ExprArg =
     // (elided line 3420 loads the field value via EmitLoadOfLValue)
3421 .getScalarVal();
3422 auto VAT = FD->getCapturedVLAType();
3423 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3424 }
3425 }
3426
3427 // If 'this' is captured, load it into CXXThisValue.
3428 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3429 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3430 LValue ThisLValue = EmitLValueForField(Base, FD);
3431 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3432 }
3433
3434 PGO->assignRegionCounters(GlobalDecl(CD), F);
3435 CapturedStmtInfo->EmitBody(*this, CD->getBody());
     // (elided line 3436 calls FinishFunction to close out emission)
3437
3438 return F;
3439}
3440
3441// Returns the first convergence entry/loop/anchor instruction found in |BB|.
3442// std::nullptr otherwise.
3443static llvm::ConvergenceControlInst *getConvergenceToken(llvm::BasicBlock *BB) {
3444 for (auto &I : *BB) {
3445 if (auto *CI = dyn_cast<llvm::ConvergenceControlInst>(&I))
3446 return CI;
3447 }
3448 return nullptr;
3449}
3450
3451llvm::CallBase *
3452CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input) {
3453 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3454 assert(ParentToken);
3455
3456 llvm::Value *bundleArgs[] = {ParentToken};
3457 llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
3458 auto *Output = llvm::CallBase::addOperandBundle(
3459 Input, llvm::LLVMContext::OB_convergencectrl, OB, Input->getIterator());
3460 Input->replaceAllUsesWith(Output);
3461 Input->eraseFromParent();
3462 return Output;
3463}
3464
3465llvm::ConvergenceControlInst *
3466CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB) {
3467 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3468 assert(ParentToken);
3469 return llvm::ConvergenceControlInst::CreateLoop(*BB, ParentToken);
3470}
3471
3472llvm::ConvergenceControlInst *
3473CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3474 llvm::BasicBlock *BB = &F->getEntryBlock();
3475 llvm::ConvergenceControlInst *Token = getConvergenceToken(BB);
3476 if (Token)
3477 return Token;
3478
3479 // Adding a convergence token requires the function to be marked as
3480 // convergent.
3481 F->setConvergent();
3482 return llvm::ConvergenceControlInst::CreateEntry(*BB);
3483}
#define V(N, I)
Defines enum values for all the target-independent builtin functions.
static std::string AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, const TargetInfo &Target, CodeGenModule &CGM, const AsmStmt &Stmt, const bool EarlyClobber, std::string *GCCReg=nullptr)
AddVariableConstraints - Look at AsmExpr and if it is a variable declared as using a particular regis...
Definition CGStmt.cpp:2563
static bool FindCaseStatementsForValue(const SwitchStmt &S, const llvm::APSInt &ConstantCondValue, SmallVectorImpl< const Stmt * > &ResultStmts, ASTContext &C, const SwitchCase *&ResultCase)
FindCaseStatementsForValue - Find the case statement being jumped to and then invoke CollectStatement...
Definition CGStmt.cpp:2267
static llvm::ConvergenceControlInst * getConvergenceToken(llvm::BasicBlock *BB)
Definition CGStmt.cpp:3443
static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF, const AsmStmt &S)
Definition CGStmt.cpp:2842
static std::optional< SmallVector< uint64_t, 16 > > getLikelihoodWeights(ArrayRef< Stmt::Likelihood > Likelihoods)
Definition CGStmt.cpp:2321
static llvm::MDNode * getAsmSrcLocInfo(const StringLiteral *Str, CodeGenFunction &CGF)
getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline asm call instruction.
Definition CGStmt.cpp:2657
static bool isSwiftAsyncCallee(const CallExpr *CE)
Determine if the given call uses the swiftasync calling convention.
Definition CGStmt.cpp:1591
static CSFC_Result CollectStatementsForCase(const Stmt *S, const SwitchCase *Case, bool &FoundCase, SmallVectorImpl< const Stmt * > &ResultStmts)
Definition CGStmt.cpp:2112
static void EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S, const llvm::ArrayRef< llvm::Value * > RegResults, const llvm::ArrayRef< llvm::Type * > ResultRegTypes, const llvm::ArrayRef< llvm::Type * > ResultTruncRegTypes, const llvm::ArrayRef< LValue > ResultRegDests, const llvm::ArrayRef< QualType > ResultRegQualTys, const llvm::BitVector &ResultTypeRequiresCast, const std::vector< std::optional< std::pair< unsigned, unsigned > > > &ResultBounds)
Definition CGStmt.cpp:2752
static bool hasEmptyLoopBody(const LoopStmt &S)
Definition CGStmt.cpp:1066
CSFC_Result
CollectStatementsForCase - Given the body of a 'switch' statement and a constant value that is being ...
Definition CGStmt.cpp:2111
@ CSFC_Failure
Definition CGStmt.cpp:2111
@ CSFC_Success
Definition CGStmt.cpp:2111
@ CSFC_FallThrough
Definition CGStmt.cpp:2111
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect, bool HasUnwindClobber, bool ReadOnly, bool ReadNone, bool NoMerge, bool NoConvergent, const AsmStmt &S, const std::vector< llvm::Type * > &ResultRegTypes, const std::vector< llvm::Type * > &ArgElemTypes, CodeGenFunction &CGF, std::vector< llvm::Value * > &RegResults)
Definition CGStmt.cpp:2684
llvm::MachO::Target Target
Definition MachO.h:51
#define SM(sm)
Defines the PrettyStackTraceEntry class, which is used to make crashes give more contextual informati...
Defines the SourceManager interface.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition APValue.h:122
bool toIntegralConstant(APSInt &Result, QualType SrcTy, const ASTContext &Ctx) const
Try to convert this value to an integral constant.
Definition APValue.cpp:963
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
SourceManager & getSourceManager()
Definition ASTContext.h:852
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth,...
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CanQualType VoidTy
CanQualType getCanonicalTagType(const TagDecl *TD) const
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition Stmt.h:3267
std::string getInputConstraint(unsigned i) const
getInputConstraint - Return the specified input constraint.
Definition Stmt.cpp:477
bool isVolatile() const
Definition Stmt.h:3303
std::string getOutputConstraint(unsigned i) const
getOutputConstraint - Return the constraint string for the specified output operand.
Definition Stmt.cpp:461
SourceLocation getAsmLoc() const
Definition Stmt.h:3297
const Expr * getInputExpr(unsigned i) const
Definition Stmt.cpp:485
unsigned getNumClobbers() const
Definition Stmt.h:3348
const Expr * getOutputExpr(unsigned i) const
Definition Stmt.cpp:469
unsigned getNumOutputs() const
Definition Stmt.h:3316
std::string generateAsmString(const ASTContext &C) const
Assemble final IR asm string.
Definition Stmt.cpp:453
unsigned getNumInputs() const
Definition Stmt.h:3338
std::string getClobber(unsigned i) const
Definition Stmt.cpp:493
Attr - This represents one attribute.
Definition Attr.h:45
Represents an attribute applied to a statement.
Definition Stmt.h:2193
Stmt * getSubStmt()
Definition Stmt.h:2229
ArrayRef< const Attr * > getAttrs() const
Definition Stmt.h:2225
BreakStmt - This represents a break.
Definition Stmt.h:3125
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition StmtCXX.h:135
DeclStmt * getBeginStmt()
Definition StmtCXX.h:163
DeclStmt * getLoopVarStmt()
Definition StmtCXX.h:169
DeclStmt * getEndStmt()
Definition StmtCXX.h:166
DeclStmt * getRangeStmt()
Definition StmtCXX.h:162
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
Expr * getCallee()
Definition Expr.h:3024
Represents the body of a CapturedStmt, and serves as its DeclContext.
Definition Decl.h:4940
ImplicitParamDecl * getContextParam() const
Retrieve the parameter containing captured variables.
Definition Decl.h:4998
bool isNothrow() const
Definition Decl.cpp:5635
param_iterator param_end() const
Retrieve an iterator one past the last parameter decl.
Definition Decl.h:5015
param_iterator param_begin() const
Retrieve an iterator pointing to the first parameter decl.
Definition Decl.h:5013
Stmt * getBody() const override
getBody - If this Decl represents a declaration for a body of code, such as a function or method defi...
Definition Decl.cpp:5632
This captures a statement into a function.
Definition Stmt.h:3917
CapturedDecl * getCapturedDecl()
Retrieve the outlined function declaration.
Definition Stmt.cpp:1455
const RecordDecl * getCapturedRecordDecl() const
Retrieve the record declaration for captured variables.
Definition Stmt.h:4038
capture_init_iterator capture_init_begin()
Retrieve the first initialization argument.
Definition Stmt.h:4094
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.h:4112
capture_init_iterator capture_init_end()
Retrieve the iterator pointing one past the last initialization argument.
Definition Stmt.h:4104
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition Stmt.h:4081
CapturedRegionKind getCapturedRegionKind() const
Retrieve the captured region kind.
Definition Stmt.cpp:1470
CaseStmt - Represent a case statement.
Definition Stmt.h:1910
Stmt * getSubStmt()
Definition Stmt.h:2023
Expr * getLHS()
Definition Stmt.h:1993
Expr * getRHS()
Definition Stmt.h:2005
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
static Address invalid()
Definition Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
An aggregate value slot.
Definition CGValue.h:505
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition CGValue.h:588
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition CGBuilder.h:140
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:112
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition CGDebugInfo.h:59
CGFunctionInfo - Class to encapsulate the information about a function definition.
API for captured statement code generation.
RAII for correct setting/restoring of CapturedStmtInfo.
void rescopeLabels()
Change the cleanup scope of the labels in this lexical scope to match the scope of the enclosing cont...
Definition CGStmt.cpp:758
void ForceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
void ForceCleanup(std::initializer_list< llvm::Value ** > ValuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
bool requiresCleanups() const
Determine whether this scope requires any cleanups.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitOMPParallelMaskedTaskLoopDirective(const OMPParallelMaskedTaskLoopDirective &S)
void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S)
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S)
void EmitCXXTryStmt(const CXXTryStmt &S)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S)
Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
void EmitCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1452
void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S)
void EmitOMPScanDirective(const OMPScanDirective &S)
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
void EmitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &S)
LValue InitCapturedStruct(const CapturedStmt &S)
Definition CGStmt.cpp:3334
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
CGCapturedStmtInfo * CapturedStmtInfo
void EmitOMPDistributeDirective(const OMPDistributeDirective &S)
void EmitOMPParallelForDirective(const OMPParallelForDirective &S)
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
Definition CGCall.cpp:5102
void EmitOMPMasterDirective(const OMPMasterDirective &S)
void EmitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &S)
void EmitOpenACCInitConstruct(const OpenACCInitConstruct &S)
void EmitOMPFlushDirective(const OMPFlushDirective &S)
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S)
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
Definition CGStmt.cpp:710
void EmitCoreturnStmt(const CoreturnStmt &S)
void EmitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &S)
SmallVector< llvm::ConvergenceControlInst *, 4 > ConvergenceTokenStack
Stack to track the controlled convergence tokens.
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
Definition CGObjC.cpp:2129
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S)
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3767
bool EmitSimpleStmt(const Stmt *S, ArrayRef< const Attr * > Attrs)
EmitSimpleStmt - Try to emit a "simple" statement which does not necessarily require an insertion poi...
Definition CGStmt.cpp:509
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S)
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
const LangOptions & getLangOpts() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
Definition CGExpr.cpp:686
void EmitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &S)
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void EmitOpenACCWaitConstruct(const OpenACCWaitConstruct &S)
void EmitBlockAfterUses(llvm::BasicBlock *BB)
EmitBlockAfterUses - Emit the given block somewhere hopefully near its uses, and leave the insertion ...
Definition CGStmt.cpp:693
void SimplifyForwardingBlocks(llvm::BasicBlock *BB)
SimplifyForwardingBlocks - If the given basic block is only a branch to another basic block,...
Definition CGStmt.cpp:634
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
void EmitOMPScopeDirective(const OMPScopeDirective &S)
LValue MakeAddrLValueWithoutTBAA(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
JumpDest ReturnBlock
ReturnBlock - Unified return block.
void EmitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &S)
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field, bool IsInBounds=true)
Definition CGExpr.cpp:5383
const TargetInfo & getTarget() const
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
Definition CGStmt.cpp:585
void EmitGotoStmt(const GotoStmt &S)
Definition CGStmt.cpp:846
void EmitOMPDepobjDirective(const OMPDepobjDirective &S)
void EmitOMPMetaDirective(const OMPMetaDirective &S)
void EmitOMPCriticalDirective(const OMPCriticalDirective &S)
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:244
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S)
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2399
void EmitOMPCancelDirective(const OMPCancelDirective &S)
const Expr * RetExpr
If a return statement is being visited, this holds the return statment's result expression.
void EmitOMPBarrierDirective(const OMPBarrierDirective &S)
void EmitOMPOrderedDirective(const OMPOrderedDirective &S)
void EmitForStmt(const ForStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1294
void EmitOMPSectionsDirective(const OMPSectionsDirective &S)
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S)
Definition CGObjC.cpp:3688
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitOMPInteropDirective(const OMPInteropDirective &S)
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S)
void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S)
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:225
void EmitWhileStmt(const WhileStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1079
void EmitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &S)
void EmitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &S)
Emit combined directive 'target parallel loop' as if its constituent constructs are 'target',...
void EmitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &S)
void ResolveBranchFixups(llvm::BasicBlock *Target)
void EmitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &S)
void EmitOMPMaskedDirective(const OMPMaskedDirective &S)
bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody)
Returns true if a loop must make progress, which means the mustprogress attribute can be added.
Definition CGStmt.cpp:1016
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S)
void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S)
void EmitOMPReverseDirective(const OMPReverseDirective &S)
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S)
void EmitOpenACCDataConstruct(const OpenACCDataConstruct &S)
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
Definition CGExpr.cpp:5557
void EmitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &S)
void EmitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &S)
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
Definition CGObjC.cpp:2125
const TargetCodeGenInfo & getTargetHooks() const
void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S)
void EmitSEHLeaveStmt(const SEHLeaveStmt &S)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S)
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
Definition CGCall.cpp:5030
void EmitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &S)
void EmitCoroutineBody(const CoroutineBodyStmt &S)
void EmitOMPParallelDirective(const OMPParallelDirective &S)
void EmitOMPTaskDirective(const OMPTaskDirective &S)
void EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S)
void EmitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &S)
void EmitOMPAssumeDirective(const OMPAssumeDirective &S)
void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S)
void EmitStopPoint(const Stmt *S)
EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
Definition CGStmt.cpp:51
void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S)
void EmitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &S)
void EmitIfStmt(const IfStmt &S)
Definition CGStmt.cpp:882
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
void EmitDeferStmt(const DeferStmt &S)
Definition CGStmt.cpp:2084
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2596
void EmitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &S)
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S)
Definition CGObjC.cpp:2133
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition CGStmt.cpp:573
void EmitOpenACCCacheConstruct(const OpenACCCacheConstruct &S)
void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S)
void EmitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &S)
void EmitOMPFuseDirective(const OMPFuseDirective &S)
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init)
Definition CGClass.cpp:682
void EmitAsmStmt(const AsmStmt &S)
Definition CGStmt.cpp:2860
void EmitDefaultStmt(const DefaultStmt &S, ArrayRef< const Attr * > Attrs)
Definition CGStmt.cpp:1985
void EmitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &S)
void EmitSwitchStmt(const SwitchStmt &S)
Definition CGStmt.cpp:2376
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
Definition CGExpr.cpp:295
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:266
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs={})
EmitStmt - Emit the code for the statement.
Definition CGStmt.cpp:61
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S)
uint64_t getCurrentProfileCount()
Get the profiler's current count.
llvm::Type * ConvertTypeForMem(QualType T)
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S)
void EmitOMPTargetDirective(const OMPTargetDirective &S)
void EmitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &S)
llvm::Function * EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K)
Generate an outlined function for the body of a CapturedStmt, store any captured variables into the c...
Definition CGStmt.cpp:3360
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void EmitOMPTeamsDirective(const OMPTeamsDirective &S)
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D)
Emit simple code for OpenMP directives in Simd-only mode.
HLSLControlFlowHintAttr::Spelling HLSLControlFlowAttr
HLSL Branch attribute.
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitCaseStmt(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
Definition CGStmt.cpp:1870
void EmitOMPErrorDirective(const OMPErrorDirective &S)
void EmitBreakStmt(const BreakStmt &S)
Definition CGStmt.cpp:1756
void EmitOMPParallelMaskedTaskLoopSimdDirective(const OMPParallelMaskedTaskLoopSimdDirective &S)
void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S)
void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S)
void EmitDoStmt(const DoStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1206
void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S)
Address GenerateCapturedStmtArgument(const CapturedStmt &S)
Definition CGStmt.cpp:3375
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition CGStmt.cpp:676
void EmitOMPSimdDirective(const OMPSimdDirective &S)
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition CGExpr.cpp:188
void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S)
void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S)
RawAddress NormalCleanupDest
i32s containing the indexes of the cleanup destinations.
void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
const BreakContinue * GetDestForLoopControlStmt(const LoopControlStmt &S)
Definition CGStmt.cpp:1742
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
llvm::BasicBlock * GetIndirectGotoBlock()
void EmitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &S)
void EmitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &S)
void EmitOMPUnrollDirective(const OMPUnrollDirective &S)
void EmitOMPStripeDirective(const OMPStripeDirective &S)
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
static bool hasAggregateEvaluationKind(QualType T)
void EmitCaseStmtRange(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
EmitCaseStmtRange - If case statement range is not too big then add multiple cases to switch instruct...
Definition CGStmt.cpp:1785
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitOMPSingleDirective(const OMPSingleDirective &S)
void EmitReturnStmt(const ReturnStmt &S)
EmitReturnStmt - Note that due to GCC extensions, this can have an operand if the function returns vo...
Definition CGStmt.cpp:1617
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
llvm::Function * GenerateCapturedStmtFunction(const CapturedStmt &S)
Creates the outlined function for a CapturedStmt.
Definition CGStmt.cpp:3382
const CGFunctionInfo * CurFnInfo
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitDeclStmt(const DeclStmt &S)
Definition CGStmt.cpp:1732
void EmitLabelStmt(const LabelStmt &S)
Definition CGStmt.cpp:779
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void EmitOMPTileDirective(const OMPTileDirective &S)
void EmitDecl(const Decl &D, bool EvaluateConditionDecl=false)
EmitDecl - Emit a declaration.
Definition CGDecl.cpp:52
void EmitOMPAtomicDirective(const OMPAtomicDirective &S)
void EmitOpenACCSetConstruct(const OpenACCSetConstruct &S)
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1690
void EmitAttributedStmt(const AttributedStmt &S)
Definition CGStmt.cpp:789
void EmitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &S)
void EmitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &S)
void EmitOMPSectionDirective(const OMPSectionDirective &S)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
void EmitOMPForSimdDirective(const OMPForSimdDirective &S)
llvm::LLVMContext & getLLVMContext()
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S)
Definition CGObjC.cpp:1804
void EmitIndirectGotoStmt(const IndirectGotoStmt &S)
Definition CGStmt.cpp:858
void MaybeEmitDeferredVarDeclInit(const VarDecl *var)
Definition CGDecl.cpp:2074
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const
isObviouslyBranchWithoutCleanups - Return true if a branch to the specified destination obviously has...
void EmitSEHTryStmt(const SEHTryStmt &S)
void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S)
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
When instrumenting to collect profile data, the counts for some blocks such as switch cases need to n...
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitOMPForDirective(const OMPForDirective &S)
void EmitLabel(const LabelDecl *D)
EmitLabel - Emit the block for the given label.
Definition CGStmt.cpp:721
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:656
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
void EmitContinueStmt(const ContinueStmt &S)
Definition CGStmt.cpp:1769
This class organizes the cross-function state that is used while generating LLVM code.
llvm::Module & getModule() const
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
const llvm::DataLayout & getDataLayout() const
ASTContext & getContext() const
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
A saved depth on the scope stack.
bool encloses(stable_iterator I) const
Returns true if this scope encloses I.
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
LValue - This represents an lvalue references.
Definition CGValue.h:183
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition CGValue.h:362
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:99
bool isAggregate() const
Definition CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:84
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:72
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition CGValue.h:79
virtual bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, llvm::Type *Ty) const
Target hook to decide whether an inline asm operand can be passed by value.
Definition TargetInfo.h:204
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1730
Stmt *const * const_body_iterator
Definition Stmt.h:1802
body_iterator body_end()
Definition Stmt.h:1795
SourceLocation getLBracLoc() const
Definition Stmt.h:1847
body_iterator body_begin()
Definition Stmt.h:1794
Stmt * body_back()
Definition Stmt.h:1798
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition Expr.h:1082
ContinueStmt - This represents a continue.
Definition Stmt.h:3109
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1270
ValueDecl * getDecl()
Definition Expr.h:1338
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition Stmt.h:1621
decl_range decls()
Definition Stmt.h:1669
SourceLocation getBodyRBrace() const
getBodyRBrace - Gets the right brace of the body, if a body exists.
virtual bool hasBody() const
Returns true if this Decl represents a declaration for a body of code, such as a function or method d...
Definition DeclBase.h:1093
SourceLocation getLocation() const
Definition DeclBase.h:439
Stmt * getSubStmt()
Definition Stmt.h:2071
DeferStmt - This represents a deferred statement.
Definition Stmt.h:3226
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
DoStmt - This represents a 'do/while' stmt.
Definition Stmt.h:2822
Stmt * getBody()
Definition Stmt.h:2847
Expr * getCond()
Definition Stmt.h:2840
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3116
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3085
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3669
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:276
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3160
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition Stmt.h:2878
Stmt * getInit()
Definition Stmt.h:2893
VarDecl * getConditionVariable() const
Retrieve the variable declared in this "for" statement, if any.
Definition Stmt.cpp:1082
Stmt * getBody()
Definition Stmt.h:2922
Expr * getInc()
Definition Stmt.h:2921
Expr * getCond()
Definition Stmt.h:2920
const Expr * getSubExpr() const
Definition Expr.h:1062
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition TypeBase.h:4450
CallingConv getCallConv() const
Definition TypeBase.h:4805
This represents a GCC inline-assembly statement extension.
Definition Stmt.h:3426
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
GotoStmt - This represents a direct goto.
Definition Stmt.h:2959
LabelDecl * getLabel() const
Definition Stmt.h:2972
IfStmt - This represents an if/then/else.
Definition Stmt.h:2249
Stmt * getThen()
Definition Stmt.h:2338
Stmt * getInit()
Definition Stmt.h:2399
Expr * getCond()
Definition Stmt.h:2326
bool isConstexpr() const
Definition Stmt.h:2442
bool isNegatedConsteval() const
Definition Stmt.h:2438
Stmt * getElse()
Definition Stmt.h:2347
bool isConsteval() const
Definition Stmt.h:2429
VarDecl * getConditionVariable()
Retrieve the variable declared in this "if" statement, if any.
Definition Stmt.cpp:1030
IndirectGotoStmt - This represents an indirect goto.
Definition Stmt.h:2998
LabelDecl * getConstantTarget()
getConstantTarget - Returns the fixed target of this indirect goto, if one exists.
Definition Stmt.cpp:1231
Represents the declaration of a label.
Definition Decl.h:524
LabelStmt * getStmt() const
Definition Decl.h:548
LabelStmt - Represents a label, which has a substatement.
Definition Stmt.h:2136
LabelDecl * getDecl() const
Definition Stmt.h:2154
bool isSideEntry() const
Definition Stmt.h:2183
Stmt * getSubStmt()
Definition Stmt.h:2158
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
bool assumeFunctionsAreConvergent() const
Base class for BreakStmt and ContinueStmt.
Definition Stmt.h:3047
Represents a point when we exit a loop.
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
If a crash happens while one of these objects are live, the message is printed out along with the spe...
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
QualType getCanonicalType() const
Definition TypeBase.h:8330
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
Represents a struct/union/class.
Definition Decl.h:4321
field_range fields() const
Definition Decl.h:4524
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4521
field_iterator field_begin() const
Definition Decl.cpp:5209
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition Stmt.h:3150
SourceLocation getBeginLoc() const
Definition Stmt.h:3202
const VarDecl * getNRVOCandidate() const
Retrieve the variable that might be used for the named return value optimization.
Definition Stmt.h:3186
Expr * getRetValue()
Definition Stmt.h:3177
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
This class handles loading and caching of source files into memory.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition Stmt.h:85
@ NoStmtClass
Definition Stmt.h:88
StmtClass getStmtClass() const
Definition Stmt.h:1483
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
Likelihood
The likelihood of a branch being taken.
Definition Stmt.h:1426
@ LH_Unlikely
Branch has the [[unlikely]] attribute.
Definition Stmt.h:1427
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition Stmt.h:1428
@ LH_Likely
Branch has the [[likely]] attribute.
Definition Stmt.h:1430
static const Attr * getLikelihoodAttr(const Stmt *S)
Definition Stmt.cpp:171
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:350
static Likelihood getLikelihood(ArrayRef< const Attr * > Attrs)
Definition Stmt.cpp:163
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1799
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Expr.h:1973
SourceLocation getLocationOfByte(unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, const TargetInfo &Target, unsigned *StartToken=nullptr, unsigned *StartTokenByteOffset=nullptr) const
getLocationOfByte - Return a source location that points to the specified byte of this string literal...
Definition Expr.cpp:1325
StringRef getString() const
Definition Expr.h:1867
const SwitchCase * getNextSwitchCase() const
Definition Stmt.h:1883
SwitchStmt - This represents a 'switch' stmt.
Definition Stmt.h:2499
Expr * getCond()
Definition Stmt.h:2562
Stmt * getBody()
Definition Stmt.h:2574
VarDecl * getConditionVariable()
Retrieve the variable declared in this "switch" statement, if any.
Definition Stmt.cpp:1148
Stmt * getInit()
Definition Stmt.h:2579
SwitchCase * getSwitchCaseList()
Definition Stmt.h:2630
Exposes information about the current target.
Definition TargetInfo.h:226
bool validateInputConstraint(MutableArrayRef< ConstraintInfo > OutputConstraints, ConstraintInfo &info) const
std::string simplifyConstraint(StringRef Constraint, SmallVectorImpl< ConstraintInfo > *OutCons=nullptr) const
StringRef getNormalizedGCCRegisterName(StringRef Name, bool ReturnCanonical=false) const
Returns the "normalized" GCC register name.
bool validateOutputConstraint(ConstraintInfo &Info) const
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
bool isVoidType() const
Definition TypeBase.h:8871
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9158
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
Represents a variable declaration or definition.
Definition Decl.h:926
bool isNRVOVariable() const
Determine whether this local variable can be used with the named return value optimization (NRVO).
Definition Decl.h:1512
WhileStmt - This represents a 'while' stmt.
Definition Stmt.h:2687
Expr * getCond()
Definition Stmt.h:2739
SourceLocation getWhileLoc() const
Definition Stmt.h:2792
SourceLocation getRParenLoc() const
Definition Stmt.h:2797
VarDecl * getConditionVariable()
Retrieve the variable declared in this "while" statement, if any.
Definition Stmt.cpp:1209
Stmt * getBody()
Definition Stmt.h:2751
Defines the clang::TargetInfo interface.
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
@ CPlusPlus11
CapturedRegionKind
The different kinds of captured statement.
@ SC_Register
Definition Specifiers.h:257
Expr * Cond
};
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
@ CC_SwiftAsync
Definition Specifiers.h:294
U cast(CodeGen::Address addr)
Definition Address.h:327
@ None
The alignment was not explicit in code.
Definition ASTContext.h:178
unsigned long uint64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void setScopeDepth(EHScopeStack::stable_iterator depth)
EHScopeStack::stable_iterator getScopeDepth() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool hasMatchingInput() const
Return true if this output operand has a matching (tied) input operand.
std::optional< std::pair< unsigned, unsigned > > getOutputOperandBounds() const
bool hasTiedOperand() const
Return true if this input operand is a matching constraint that ties it to an output operand.