27#include "llvm/ADT/ArrayRef.h"
28#include "llvm/ADT/DenseMap.h"
29#include "llvm/ADT/SmallSet.h"
30#include "llvm/ADT/StringExtras.h"
31#include "llvm/IR/Assumptions.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/InlineAsm.h"
34#include "llvm/IR/Intrinsics.h"
35#include "llvm/IR/MDBuilder.h"
36#include "llvm/Support/SaveAndRestore.h"
40using namespace CodeGen;
50void CodeGenFunction::EmitStopPoint(
const Stmt *S) {
53 Loc = S->getBeginLoc();
61 assert(S &&
"Null statement?");
78 assert(!isa<DeclStmt>(*S) &&
"Unexpected DeclStmt!");
92 if (
const auto *
D = dyn_cast<OMPExecutableDirective>(S)) {
98 switch (S->getStmtClass()) {
100 case Stmt::CXXCatchStmtClass:
101 case Stmt::SEHExceptStmtClass:
102 case Stmt::SEHFinallyStmtClass:
103 case Stmt::MSDependentExistsStmtClass:
104 llvm_unreachable(
"invalid statement class to emit generically");
105 case Stmt::NullStmtClass:
106 case Stmt::CompoundStmtClass:
107 case Stmt::DeclStmtClass:
108 case Stmt::LabelStmtClass:
109 case Stmt::AttributedStmtClass:
110 case Stmt::GotoStmtClass:
111 case Stmt::BreakStmtClass:
112 case Stmt::ContinueStmtClass:
113 case Stmt::DefaultStmtClass:
114 case Stmt::CaseStmtClass:
115 case Stmt::SEHLeaveStmtClass:
116 llvm_unreachable(
"should have emitted these statements as simple");
118#define STMT(Type, Base)
119#define ABSTRACT_STMT(Op)
120#define EXPR(Type, Base) \
121 case Stmt::Type##Class:
122#include "clang/AST/StmtNodes.inc"
125 llvm::BasicBlock *incoming =
Builder.GetInsertBlock();
126 assert(incoming &&
"expression emission must have an insertion point");
130 llvm::BasicBlock *outgoing =
Builder.GetInsertBlock();
131 assert(outgoing &&
"expression emission cleared block!");
145 if (incoming != outgoing && outgoing->use_empty()) {
146 outgoing->eraseFromParent();
152 case Stmt::IndirectGotoStmtClass:
155 case Stmt::IfStmtClass:
EmitIfStmt(cast<IfStmt>(*S));
break;
156 case Stmt::WhileStmtClass:
EmitWhileStmt(cast<WhileStmt>(*S), Attrs);
break;
157 case Stmt::DoStmtClass:
EmitDoStmt(cast<DoStmt>(*S), Attrs);
break;
158 case Stmt::ForStmtClass:
EmitForStmt(cast<ForStmt>(*S), Attrs);
break;
160 case Stmt::ReturnStmtClass:
EmitReturnStmt(cast<ReturnStmt>(*S));
break;
162 case Stmt::SwitchStmtClass:
EmitSwitchStmt(cast<SwitchStmt>(*S));
break;
163 case Stmt::GCCAsmStmtClass:
164 case Stmt::MSAsmStmtClass:
EmitAsmStmt(cast<AsmStmt>(*S));
break;
165 case Stmt::CoroutineBodyStmtClass:
168 case Stmt::CoreturnStmtClass:
171 case Stmt::CapturedStmtClass: {
176 case Stmt::ObjCAtTryStmtClass:
179 case Stmt::ObjCAtCatchStmtClass:
181 "@catch statements should be handled by EmitObjCAtTryStmt");
182 case Stmt::ObjCAtFinallyStmtClass:
184 "@finally statements should be handled by EmitObjCAtTryStmt");
185 case Stmt::ObjCAtThrowStmtClass:
188 case Stmt::ObjCAtSynchronizedStmtClass:
191 case Stmt::ObjCForCollectionStmtClass:
194 case Stmt::ObjCAutoreleasePoolStmtClass:
198 case Stmt::CXXTryStmtClass:
201 case Stmt::CXXForRangeStmtClass:
204 case Stmt::SEHTryStmtClass:
207 case Stmt::OMPMetaDirectiveClass:
210 case Stmt::OMPCanonicalLoopClass:
213 case Stmt::OMPParallelDirectiveClass:
216 case Stmt::OMPSimdDirectiveClass:
219 case Stmt::OMPTileDirectiveClass:
222 case Stmt::OMPUnrollDirectiveClass:
225 case Stmt::OMPReverseDirectiveClass:
228 case Stmt::OMPInterchangeDirectiveClass:
231 case Stmt::OMPForDirectiveClass:
234 case Stmt::OMPForSimdDirectiveClass:
237 case Stmt::OMPSectionsDirectiveClass:
240 case Stmt::OMPSectionDirectiveClass:
243 case Stmt::OMPSingleDirectiveClass:
246 case Stmt::OMPMasterDirectiveClass:
249 case Stmt::OMPCriticalDirectiveClass:
252 case Stmt::OMPParallelForDirectiveClass:
255 case Stmt::OMPParallelForSimdDirectiveClass:
258 case Stmt::OMPParallelMasterDirectiveClass:
261 case Stmt::OMPParallelSectionsDirectiveClass:
264 case Stmt::OMPTaskDirectiveClass:
267 case Stmt::OMPTaskyieldDirectiveClass:
270 case Stmt::OMPErrorDirectiveClass:
273 case Stmt::OMPBarrierDirectiveClass:
276 case Stmt::OMPTaskwaitDirectiveClass:
279 case Stmt::OMPTaskgroupDirectiveClass:
282 case Stmt::OMPFlushDirectiveClass:
285 case Stmt::OMPDepobjDirectiveClass:
288 case Stmt::OMPScanDirectiveClass:
291 case Stmt::OMPOrderedDirectiveClass:
294 case Stmt::OMPAtomicDirectiveClass:
297 case Stmt::OMPTargetDirectiveClass:
300 case Stmt::OMPTeamsDirectiveClass:
303 case Stmt::OMPCancellationPointDirectiveClass:
306 case Stmt::OMPCancelDirectiveClass:
309 case Stmt::OMPTargetDataDirectiveClass:
312 case Stmt::OMPTargetEnterDataDirectiveClass:
315 case Stmt::OMPTargetExitDataDirectiveClass:
318 case Stmt::OMPTargetParallelDirectiveClass:
321 case Stmt::OMPTargetParallelForDirectiveClass:
324 case Stmt::OMPTaskLoopDirectiveClass:
327 case Stmt::OMPTaskLoopSimdDirectiveClass:
330 case Stmt::OMPMasterTaskLoopDirectiveClass:
333 case Stmt::OMPMaskedTaskLoopDirectiveClass:
334 llvm_unreachable(
"masked taskloop directive not supported yet.");
336 case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
338 cast<OMPMasterTaskLoopSimdDirective>(*S));
340 case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
341 llvm_unreachable(
"masked taskloop simd directive not supported yet.");
343 case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
345 cast<OMPParallelMasterTaskLoopDirective>(*S));
347 case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
348 llvm_unreachable(
"parallel masked taskloop directive not supported yet.");
350 case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
352 cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
354 case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
356 "parallel masked taskloop simd directive not supported yet.");
358 case Stmt::OMPDistributeDirectiveClass:
361 case Stmt::OMPTargetUpdateDirectiveClass:
364 case Stmt::OMPDistributeParallelForDirectiveClass:
366 cast<OMPDistributeParallelForDirective>(*S));
368 case Stmt::OMPDistributeParallelForSimdDirectiveClass:
370 cast<OMPDistributeParallelForSimdDirective>(*S));
372 case Stmt::OMPDistributeSimdDirectiveClass:
375 case Stmt::OMPTargetParallelForSimdDirectiveClass:
377 cast<OMPTargetParallelForSimdDirective>(*S));
379 case Stmt::OMPTargetSimdDirectiveClass:
382 case Stmt::OMPTeamsDistributeDirectiveClass:
385 case Stmt::OMPTeamsDistributeSimdDirectiveClass:
387 cast<OMPTeamsDistributeSimdDirective>(*S));
389 case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
391 cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
393 case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
395 cast<OMPTeamsDistributeParallelForDirective>(*S));
397 case Stmt::OMPTargetTeamsDirectiveClass:
400 case Stmt::OMPTargetTeamsDistributeDirectiveClass:
402 cast<OMPTargetTeamsDistributeDirective>(*S));
404 case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
406 cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
408 case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
410 cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
412 case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
414 cast<OMPTargetTeamsDistributeSimdDirective>(*S));
416 case Stmt::OMPInteropDirectiveClass:
419 case Stmt::OMPDispatchDirectiveClass:
422 case Stmt::OMPScopeDirectiveClass:
425 case Stmt::OMPMaskedDirectiveClass:
428 case Stmt::OMPGenericLoopDirectiveClass:
431 case Stmt::OMPTeamsGenericLoopDirectiveClass:
434 case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
436 cast<OMPTargetTeamsGenericLoopDirective>(*S));
438 case Stmt::OMPParallelGenericLoopDirectiveClass:
440 cast<OMPParallelGenericLoopDirective>(*S));
442 case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
444 cast<OMPTargetParallelGenericLoopDirective>(*S));
446 case Stmt::OMPParallelMaskedDirectiveClass:
449 case Stmt::OpenACCComputeConstructClass:
452 case Stmt::OpenACCLoopConstructClass:
460 switch (S->getStmtClass()) {
463 case Stmt::NullStmtClass:
465 case Stmt::CompoundStmtClass:
468 case Stmt::DeclStmtClass:
471 case Stmt::LabelStmtClass:
474 case Stmt::AttributedStmtClass:
477 case Stmt::GotoStmtClass:
480 case Stmt::BreakStmtClass:
483 case Stmt::ContinueStmtClass:
486 case Stmt::DefaultStmtClass:
489 case Stmt::CaseStmtClass:
492 case Stmt::SEHLeaveStmtClass:
505 "LLVM IR generation of compound statement ('{}')");
508 LexicalScope
Scope(*
this, S.getSourceRange());
519 assert((!GetLast || (GetLast &&
ExprResult)) &&
520 "If GetLast is true then the CompoundStmt must have a StmtExprResult");
524 for (
auto *CurStmt : S.body()) {
532 if (
const auto *LS = dyn_cast<LabelStmt>(
ExprResult)) {
535 }
else if (
const auto *AS = dyn_cast<AttributedStmt>(
ExprResult)) {
540 llvm_unreachable(
"unknown value statement");
567 llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
576 if (!BI || !BI->isUnconditional())
580 if (BI->getIterator() != BB->begin())
583 BB->replaceAllUsesWith(BI->getSuccessor(0));
584 BI->eraseFromParent();
585 BB->eraseFromParent();
589 llvm::BasicBlock *CurBB =
Builder.GetInsertBlock();
594 if (IsFinished && BB->use_empty()) {
601 if (CurBB && CurBB->getParent())
602 CurFn->insert(std::next(CurBB->getIterator()), BB);
612 llvm::BasicBlock *CurBB =
Builder.GetInsertBlock();
614 if (!CurBB || CurBB->getTerminator()) {
626 bool inserted =
false;
627 for (llvm::User *u : block->users()) {
628 if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
629 CurFn->insert(std::next(insn->getParent()->getIterator()), block);
641CodeGenFunction::JumpDest
643 JumpDest &Dest = LabelMap[
D];
644 if (Dest.isValid())
return Dest;
660 JumpDest &Dest = LabelMap[
D];
664 if (!Dest.isValid()) {
670 assert(!Dest.getScopeDepth().isValid() &&
"already emitted label!");
691 assert(!Labels.empty());
697 i = Labels.begin(), e = Labels.end(); i != e; ++i) {
698 assert(
CGF.LabelMap.count(*i));
707 ParentScope->Labels.append(Labels.begin(), Labels.end());
723 bool nomerge =
false;
724 bool noinline =
false;
725 bool alwaysinline =
false;
728 for (
const auto *A : S.getAttrs()) {
729 switch (A->getKind()) {
738 case attr::AlwaysInline:
741 case attr::MustTail: {
742 const Stmt *Sub = S.getSubStmt();
746 case attr::CXXAssume: {
747 const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
751 Builder.CreateAssumption(AssumptionVal);
760 EmitStmt(S.getSubStmt(), S.getAttrs());
783 llvm::BasicBlock *CurBB =
Builder.GetInsertBlock();
790 cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(
V, CurBB);
798 if (S.isConsteval()) {
799 const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
801 RunCleanupsScope ExecutedScope(*
this);
809 LexicalScope ConditionScope(*
this, S.getCond()->getSourceRange());
814 if (S.getConditionVariable())
815 EmitDecl(*S.getConditionVariable());
823 const Stmt *Executed = S.getThen();
824 const Stmt *Skipped = S.getElse();
826 std::swap(Executed, Skipped);
834 RunCleanupsScope ExecutedScope(*
this);
845 llvm::BasicBlock *ElseBlock = ContBlock;
876 Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
886 RunCleanupsScope ThenScope(*
this);
892 if (
const Stmt *Else = S.getElse()) {
902 RunCleanupsScope ElseScope(*
this);
933 bool CondIsConstInt =
934 !ControllingExpression ||
938 bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
939 Result.Val.getInt().getBoolValue());
953 if (HasEmptyBody && CondIsTrue) {
954 CurFn->removeFnAttr(llvm::Attribute::MustProgress);
972 if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
976 const Stmt *Body = S.getBody();
977 if (!Body || isa<NullStmt>(Body))
979 if (
const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
980 return Compound->body_empty();
1000 BreakContinueStack.push_back(BreakContinue(
LoopExit, LoopHeader));
1009 RunCleanupsScope ConditionScope(*
this);
1011 if (S.getConditionVariable())
1012 EmitDecl(*S.getConditionVariable());
1021 llvm::ConstantInt *
C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
1022 bool EmitBoolCondBranch = !
C || !
C->isOne();
1035 if (EmitBoolCondBranch) {
1036 llvm::BasicBlock *ExitBlock =
LoopExit.getBlock();
1037 if (ConditionScope.requiresCleanups())
1039 llvm::MDNode *Weights =
1040 createProfileWeightsForLoop(S.getCond(),
getProfileCount(S.getBody()));
1042 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1044 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);
1046 if (ExitBlock !=
LoopExit.getBlock()) {
1052 diag::warn_attribute_has_no_effect_on_infinite_loop)
1053 << A << A->getRange();
1056 diag::note_attribute_has_no_effect_on_infinite_loop_here)
1057 <<
SourceRange(S.getWhileLoc(), S.getRParenLoc());
1063 RunCleanupsScope BodyScope(*
this);
1073 BreakContinueStack.pop_back();
1076 ConditionScope.ForceCleanup();
1089 if (!EmitBoolCondBranch)
1109 BreakContinueStack.push_back(BreakContinue(
LoopExit, LoopCond));
1124 RunCleanupsScope BodyScope(*
this);
1141 BreakContinueStack.pop_back();
1145 llvm::ConstantInt *
C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
1146 bool EmitBoolCondBranch = !
C || !
C->isZero();
1155 if (EmitBoolCondBranch) {
1158 BoolCondVal, LoopBody,
LoopExit.getBlock(),
1159 createProfileWeightsForLoop(S.getCond(), BackedgeCount));
1169 if (!EmitBoolCondBranch)
1185 LexicalScope ForScope(*
this, S.getSourceRange());
1195 llvm::BasicBlock *CondBlock = CondDest.getBlock();
1209 LexicalScope ConditionScope(*
this, S.getSourceRange());
1220 Continue = CondDest;
1221 else if (!S.getConditionVariable())
1223 BreakContinueStack.push_back(BreakContinue(
LoopExit, Continue));
1228 if (S.getConditionVariable()) {
1229 EmitDecl(*S.getConditionVariable());
1234 BreakContinueStack.back().ContinueBlock = Continue;
1242 llvm::BasicBlock *ExitBlock =
LoopExit.getBlock();
1245 if (ForScope.requiresCleanups())
1254 llvm::MDNode *Weights =
1255 createProfileWeightsForLoop(S.getCond(),
getProfileCount(S.getBody()));
1257 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1260 Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
1262 if (ExitBlock !=
LoopExit.getBlock()) {
1281 RunCleanupsScope BodyScope(*
this);
1293 BreakContinueStack.pop_back();
1295 ConditionScope.ForceCleanup();
1300 ForScope.ForceCleanup();
1321 LexicalScope ForScope(*
this, S.getSourceRange());
1347 llvm::BasicBlock *ExitBlock =
LoopExit.getBlock();
1348 if (ForScope.requiresCleanups())
1357 llvm::MDNode *Weights =
1358 createProfileWeightsForLoop(S.getCond(),
getProfileCount(S.getBody()));
1360 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1362 Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
1364 if (ExitBlock !=
LoopExit.getBlock()) {
1379 BreakContinueStack.push_back(BreakContinue(
LoopExit, Continue));
1383 LexicalScope BodyScope(*
this, S.getSourceRange());
1393 BreakContinueStack.pop_back();
1397 ForScope.ForceCleanup();
1413void CodeGenFunction::EmitReturnOfRValue(
RValue RV,
QualType Ty) {
1429struct SaveRetExprRAII {
1431 : OldRetExpr(CGF.RetExpr), CGF(CGF) {
1434 ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
1435 const Expr *OldRetExpr;
1444 if (calleeQualType->isFunctionPointerType() ||
1445 calleeQualType->isFunctionReferenceType() ||
1446 calleeQualType->isBlockPointerType() ||
1447 calleeQualType->isMemberFunctionPointerType()) {
1449 }
else if (
auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
1451 }
else if (
auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
1452 if (
auto methodDecl = CMCE->getMethodDecl()) {
1468 if (requiresReturnValueCheck()) {
1471 new llvm::GlobalVariable(
CGM.
getModule(), SLoc->getType(),
false,
1472 llvm::GlobalVariable::PrivateLinkage, SLoc);
1473 SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1475 assert(ReturnLocation.
isValid() &&
"No valid return location");
1482 Builder.ClearInsertionPoint();
1486 const Expr *RV = S.getRetValue();
1496 SaveRetExprRAII SaveRetExpr(RV, *
this);
1498 RunCleanupsScope cleanupScope(*
this);
1499 if (
const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
1500 RV = EWC->getSubExpr();
1504 std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
1507 if (
auto CE = dyn_cast<CallExpr>(RV)) {
1517 if (
getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
1518 S.getNRVOCandidate()->isNRVOVariable() &&
1529 if (llvm::Value *NRVOFlag =
NRVOFlags[S.getNRVOCandidate()])
1572 ++NumSimpleReturnExprs;
1574 cleanupScope.ForceCleanup();
1584 for (
const auto *I : S.decls())
1589 assert(!BreakContinueStack.empty() &&
"break stmt not in a loop or switch!");
1601 assert(!BreakContinueStack.empty() &&
"continue stmt not in a loop!");
1617 assert(S.getRHS() &&
"Expected RHS value in CaseStmt");
1619 llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(
getContext());
1620 llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(
getContext());
1630 if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1634 llvm::APInt
Range = RHS - LHS;
1636 if (
Range.ult(llvm::APInt(
Range.getBitWidth(), 64))) {
1639 unsigned NCases =
Range.getZExtValue() + 1;
1644 uint64_t Weight = Total / NCases,
Rem = Total % NCases;
1645 for (
unsigned I = 0; I != NCases; ++I) {
1647 SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1648 else if (SwitchLikelihood)
1649 SwitchLikelihood->push_back(LH);
1653 SwitchInsn->addCase(
Builder.getInt(LHS), CaseDest);
1661 llvm::BasicBlock *RestoreBB =
Builder.GetInsertBlock();
1666 llvm::BasicBlock *FalseDest = CaseRangeBlock;
1670 Builder.SetInsertPoint(CaseRangeBlock);
1674 Builder.CreateSub(SwitchInsn->getCondition(),
Builder.getInt(LHS));
1678 llvm::MDNode *Weights =
nullptr;
1679 if (SwitchWeights) {
1681 uint64_t DefaultCount = (*SwitchWeights)[0];
1682 Weights = createProfileWeights(ThisCount, DefaultCount);
1687 (*SwitchWeights)[0] += ThisCount;
1688 }
else if (SwitchLikelihood)
1689 Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1691 Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1695 Builder.SetInsertPoint(RestoreBB);
1697 Builder.ClearInsertionPoint();
1718 llvm::ConstantInt *CaseVal =
1723 if (
auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1724 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1726 CE = dyn_cast<ConstantExpr>(S.getLHS());
1728 if (
auto DE = dyn_cast<DeclRefExpr>(CE->
getSubExpr()))
1731 Dbg->EmitGlobalVariable(DE->getDecl(),
1732 APValue(llvm::APSInt(CaseVal->getValue())));
1735 if (SwitchLikelihood)
1743 isa<BreakStmt>(S.getSubStmt())) {
1744 JumpDest
Block = BreakContinueStack.back().BreakBlock;
1750 SwitchInsn->addCase(CaseVal,
Block.getBlock());
1754 if (
Builder.GetInsertBlock()) {
1756 Builder.ClearInsertionPoint();
1766 SwitchInsn->addCase(CaseVal, CaseDest);
1782 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1785 while (NextCase && NextCase->
getRHS() ==
nullptr) {
1787 llvm::ConstantInt *CaseVal =
1798 if (SwitchLikelihood)
1801 SwitchInsn->addCase(CaseVal, CaseDest);
1802 NextCase = dyn_cast<CaseStmt>(CurCase->
getSubStmt());
1825 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1826 assert(DefaultBlock->empty() &&
1827 "EmitDefaultStmt: Default block already defined?");
1829 if (SwitchLikelihood)
1871 if (
const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1885 if (!Case && isa<BreakStmt>(S))
1890 if (
const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1894 bool StartedInLiveCode = FoundCase;
1895 unsigned StartSize = ResultStmts.size();
1902 bool HadSkippedDecl =
false;
1906 for (; Case && I !=
E; ++I) {
1922 for (++I; I !=
E; ++I)
1932 assert(FoundCase &&
"Didn't find case but returned fallthrough?");
1947 assert(!HadSkippedDecl &&
"fallthrough after skipping decl");
1952 bool AnyDecls =
false;
1953 for (; I !=
E; ++I) {
1966 for (++I; I !=
E; ++I)
1983 ResultStmts.resize(StartSize);
1984 ResultStmts.push_back(S);
2008 ResultStmts.push_back(S);
2017 const llvm::APSInt &ConstantCondValue,
2023 const SwitchCase *Case = S.getSwitchCaseList();
2029 if (
const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
2035 const CaseStmt *CS = cast<CaseStmt>(Case);
2037 if (CS->
getRHS())
return false;
2062 bool FoundCase =
false;
2069static std::optional<SmallVector<uint64_t, 16>>
2072 if (Likelihoods.size() <= 1)
2073 return std::nullopt;
2075 uint64_t NumUnlikely = 0;
2076 uint64_t NumNone = 0;
2077 uint64_t NumLikely = 0;
2078 for (
const auto LH : Likelihoods) {
2093 if (NumUnlikely == 0 && NumLikely == 0)
2094 return std::nullopt;
2102 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2103 const uint64_t
None = Likely / (NumNone + 1);
2104 const uint64_t Unlikely = 0;
2107 Result.reserve(Likelihoods.size());
2108 for (
const auto LH : Likelihoods) {
2111 Result.push_back(Unlikely);
2117 Result.push_back(Likely);
2127 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2130 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2134 llvm::APSInt ConstantCondValue;
2142 RunCleanupsScope ExecutedScope(*
this);
2149 if (S.getConditionVariable())
2150 EmitDecl(*S.getConditionVariable());
2155 SwitchInsn =
nullptr;
2159 for (
unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
2165 SwitchInsn = SavedSwitchInsn;
2173 RunCleanupsScope ConditionScope(*
this);
2178 if (S.getConditionVariable())
2179 EmitDecl(*S.getConditionVariable());
2187 SwitchInsn =
Builder.CreateSwitch(CondV, DefaultBlock);
2191 unsigned NumCases = 0;
2192 for (
const SwitchCase *Case = S.getSwitchCaseList();
2195 if (isa<DefaultStmt>(Case))
2200 SwitchWeights->reserve(NumCases);
2203 SwitchWeights->push_back(DefaultCount);
2210 CaseRangeBlock = DefaultBlock;
2213 Builder.ClearInsertionPoint();
2217 JumpDest OuterContinue;
2218 if (!BreakContinueStack.empty())
2219 OuterContinue = BreakContinueStack.back().ContinueBlock;
2221 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
2226 BreakContinueStack.pop_back();
2230 SwitchInsn->setDefaultDest(CaseRangeBlock);
2233 if (!DefaultBlock->getParent()) {
2236 if (ConditionScope.requiresCleanups()) {
2241 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2242 delete DefaultBlock;
2246 ConditionScope.ForceCleanup();
2255 auto *
Call = dyn_cast<CallExpr>(S.getCond());
2257 auto *FD = dyn_cast_or_null<FunctionDecl>(
Call->getCalleeDecl());
2258 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2260 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2261 MDHelper.createUnpredictable());
2265 if (SwitchWeights) {
2266 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2267 "switch weights do not match switch cases");
2269 if (SwitchWeights->size() > 1)
2270 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2271 createProfileWeights(*SwitchWeights));
2272 delete SwitchWeights;
2273 }
else if (SwitchLikelihood) {
2274 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2275 "switch likelihoods do not match switch cases");
2276 std::optional<SmallVector<uint64_t, 16>> LHW =
2280 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2281 createProfileWeights(*LHW));
2283 delete SwitchLikelihood;
2285 SwitchInsn = SavedSwitchInsn;
2286 SwitchWeights = SavedSwitchWeights;
2287 SwitchLikelihood = SavedSwitchLikelihood;
2288 CaseRangeBlock = SavedCRBlock;
2296 while (*Constraint) {
2297 switch (*Constraint) {
2309 while (Constraint[1] && Constraint[1] !=
',')
2315 while (Constraint[1] && Constraint[1] == *Constraint)
2326 "Must pass output names to constraints with a symbolic name");
2328 bool result =
Target.resolveSymbolicName(Constraint, *OutCons, Index);
2329 assert(result &&
"Could not resolve symbolic name"); (void)result;
2330 Result += llvm::utostr(Index);
2348 std::string *GCCReg =
nullptr) {
2349 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2361 StringRef Register =
Attr->getLabel();
2362 assert(
Target.isValidGCCRegisterName(Register));
2366 if (
Target.validateOutputConstraint(Info) &&
2372 Register =
Target.getNormalizedGCCRegisterName(Register);
2373 if (GCCReg !=
nullptr)
2374 *GCCReg = Register.str();
2375 return (EarlyClobber ?
"&{" :
"{") + Register.str() +
"}";
2378std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2387 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2397 ConstraintStr +=
'*';
2401std::pair<llvm::Value *, llvm::Type *>
2403 const Expr *InputExpr,
2404 std::string &ConstraintStr) {
2412 llvm::APSInt IntResult;
2415 return {llvm::ConstantInt::get(
getLLVMContext(), IntResult),
nullptr};
2427 if (InputExpr->
getStmtClass() == Expr::CXXThisExprClass)
2431 return EmitAsmInputLValue(Info, Dest, InputExpr->
getType(), ConstraintStr,
2443 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2446 if (!StrVal.empty()) {
2449 unsigned StartToken = 0;
2450 unsigned ByteOffset = 0;
2454 for (
unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2455 if (StrVal[i] !=
'\n')
continue;
2457 i + 1,
SM, LangOpts, CGF.
getTarget(), &StartToken, &ByteOffset);
2458 Locs.push_back(llvm::ConstantAsMetadata::get(
2467 bool HasUnwindClobber,
bool ReadOnly,
2468 bool ReadNone,
bool NoMerge,
const AsmStmt &S,
2469 const std::vector<llvm::Type *> &ResultRegTypes,
2470 const std::vector<llvm::Type *> &ArgElemTypes,
2472 std::vector<llvm::Value *> &RegResults) {
2473 if (!HasUnwindClobber)
2474 Result.addFnAttr(llvm::Attribute::NoUnwind);
2477 Result.addFnAttr(llvm::Attribute::NoMerge);
2479 if (!HasSideEffect) {
2481 Result.setDoesNotAccessMemory();
2483 Result.setOnlyReadsMemory();
2487 for (
auto Pair : llvm::enumerate(ArgElemTypes)) {
2489 auto Attr = llvm::Attribute::get(
2490 CGF.
getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2497 if (
const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2498 Result.setMetadata(
"srcloc",
2502 llvm::Constant *
Loc =
2503 llvm::ConstantInt::get(CGF.
Int64Ty, S.getAsmLoc().getRawEncoding());
2504 Result.setMetadata(
"srcloc",
2506 llvm::ConstantAsMetadata::get(
Loc)));
2514 Result.addFnAttr(llvm::Attribute::Convergent);
2516 if (ResultRegTypes.size() == 1) {
2517 RegResults.push_back(&
Result);
2519 for (
unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2520 llvm::Value *Tmp = CGF.
Builder.CreateExtractValue(&
Result, i,
"asmresult");
2521 RegResults.push_back(Tmp);
2533 const llvm::BitVector &ResultTypeRequiresCast,
2534 const llvm::BitVector &ResultRegIsFlagReg) {
2539 assert(RegResults.size() == ResultRegTypes.size());
2540 assert(RegResults.size() == ResultTruncRegTypes.size());
2541 assert(RegResults.size() == ResultRegDests.size());
2544 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2545 assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2547 for (
unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2548 llvm::Value *Tmp = RegResults[i];
2549 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2551 if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2554 llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
2555 llvm::Value *IsBooleanValue =
2556 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
2558 Builder.CreateCall(FnAssume, IsBooleanValue);
2563 if (ResultRegTypes[i] != TruncTy) {
2567 if (TruncTy->isFloatingPointTy())
2568 Tmp =
Builder.CreateFPTrunc(Tmp, TruncTy);
2569 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2572 Tmp, llvm::IntegerType::get(CTX, (
unsigned)ResSize));
2573 Tmp =
Builder.CreateIntToPtr(Tmp, TruncTy);
2574 }
else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2578 Tmp, llvm::IntegerType::get(CTX, (
unsigned)TmpSize));
2579 Tmp =
Builder.CreateTrunc(Tmp, TruncTy);
2580 }
else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2581 Tmp =
Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2582 }
else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2583 Tmp =
Builder.CreateBitCast(Tmp, TruncTy);
2587 LValue Dest = ResultRegDests[i];
2590 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2601 const Expr *OutExpr = S.getOutputExpr(i);
2603 diag::err_store_value_to_reg);
2614 constexpr auto Name =
"__ASM__hipstdpar_unsupported";
2617 if (
auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2618 Asm = GCCAsm->getAsmString()->getString();
2622 auto StrTy = llvm::ConstantDataArray::getString(Ctx,
Asm);
2623 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2624 {StrTy->getType()},
false);
2625 auto UBF = CGF->
CGM.
getModule().getOrInsertFunction(Name, FnTy);
2627 CGF->
Builder.CreateCall(UBF, {StrTy});
2632 CodeGenFunction::RunCleanupsScope Cleanups(*
this);
2635 std::string AsmString = S.generateAsmString(
getContext());
2642 bool IsValidTargetAsm =
true;
2643 for (
unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2645 if (
const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2646 Name = GAS->getOutputName(i);
2649 if (IsHipStdPar && !IsValid)
2650 IsValidTargetAsm =
false;
2652 assert(IsValid &&
"Failed to parse output constraint");
2653 OutputConstraintInfos.push_back(Info);
2656 for (
unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2658 if (
const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2659 Name = GAS->getInputName(i);
2663 if (IsHipStdPar && !IsValid)
2664 IsValidTargetAsm =
false;
2666 assert(IsValid &&
"Failed to parse input constraint");
2667 InputConstraintInfos.push_back(Info);
2670 if (!IsValidTargetAsm)
2673 std::string Constraints;
2675 std::vector<LValue> ResultRegDests;
2676 std::vector<QualType> ResultRegQualTys;
2677 std::vector<llvm::Type *> ResultRegTypes;
2678 std::vector<llvm::Type *> ResultTruncRegTypes;
2679 std::vector<llvm::Type *> ArgTypes;
2680 std::vector<llvm::Type *> ArgElemTypes;
2681 std::vector<llvm::Value*> Args;
2682 llvm::BitVector ResultTypeRequiresCast;
2683 llvm::BitVector ResultRegIsFlagReg;
2686 std::string InOutConstraints;
2687 std::vector<llvm::Value*> InOutArgs;
2688 std::vector<llvm::Type*> InOutArgTypes;
2689 std::vector<llvm::Type*> InOutArgElemTypes;
2692 std::vector<std::string> OutputConstraints;
2695 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2703 bool ReadOnly =
true, ReadNone =
true;
2705 for (
unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2709 std::string OutputConstraint(S.getOutputConstraint(i));
2713 const Expr *OutExpr = S.getOutputExpr(i);
2722 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2723 CGM.
Error(S.getAsmLoc(),
"multiple outputs to hard register: " + GCCReg);
2725 OutputConstraints.push_back(OutputConstraint);
2727 if (!Constraints.empty())
2737 Constraints +=
"=" + OutputConstraint;
2738 ResultRegQualTys.push_back(QTy);
2739 ResultRegDests.push_back(Dest);
2741 bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with(
"{@cc");
2742 ResultRegIsFlagReg.push_back(IsFlagReg);
2747 Ty->isAggregateType());
2749 ResultTruncRegTypes.push_back(Ty);
2750 ResultTypeRequiresCast.push_back(RequiresCast);
2756 ResultRegTypes.push_back(Ty);
2762 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2767 assert(InputNo != S.getNumInputs() &&
"Didn't find matching input!");
2769 QualType InputTy = S.getInputExpr(InputNo)->getType();
2778 if (llvm::Type* AdjTy =
2780 ResultRegTypes.back()))
2781 ResultRegTypes.back() = AdjTy;
2784 diag::err_asm_invalid_type_in_input)
2785 << OutExpr->
getType() << OutputConstraint;
2789 if (
auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2790 LargestVectorWidth =
2791 std::max((uint64_t)LargestVectorWidth,
2792 VT->getPrimitiveSizeInBits().getKnownMinValue());
2803 ArgTypes.push_back(DestAddr.
getType());
2806 Constraints +=
"=*";
2807 Constraints += OutputConstraint;
2808 ReadOnly = ReadNone =
false;
2812 InOutConstraints +=
',';
2814 const Expr *InputExpr = S.getOutputExpr(i);
2816 llvm::Type *ArgElemType;
2817 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2818 Info, Dest, InputExpr->
getType(), InOutConstraints,
2821 if (llvm::Type* AdjTy =
2824 Arg =
Builder.CreateBitCast(Arg, AdjTy);
2827 if (
auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2828 LargestVectorWidth =
2829 std::max((uint64_t)LargestVectorWidth,
2830 VT->getPrimitiveSizeInBits().getKnownMinValue());
2833 InOutConstraints += llvm::utostr(i);
2835 InOutConstraints += OutputConstraint;
2837 InOutArgTypes.push_back(Arg->getType());
2838 InOutArgElemTypes.push_back(ArgElemType);
2839 InOutArgs.push_back(Arg);
2845 if (isa<MSAsmStmt>(&S)) {
2851 *
this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2852 ResultRegDests, AsmString, S.getNumOutputs());
2857 for (
unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2858 const Expr *InputExpr = S.getInputExpr(i);
2865 if (!Constraints.empty())
2869 std::string InputConstraint(S.getInputConstraint(i));
2871 &OutputConstraintInfos);
2877 std::string ReplaceConstraint (InputConstraint);
2879 llvm::Type *ArgElemType;
2880 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
2889 QualType OutputType = S.getOutputExpr(Output)->getType();
2895 if (isa<llvm::PointerType>(Arg->getType()))
2898 if (isa<llvm::IntegerType>(OutputTy))
2899 Arg =
Builder.CreateZExt(Arg, OutputTy);
2900 else if (isa<llvm::PointerType>(OutputTy))
2902 else if (OutputTy->isFloatingPointTy())
2903 Arg =
Builder.CreateFPExt(Arg, OutputTy);
2906 ReplaceConstraint = OutputConstraints[Output];
2908 if (llvm::Type* AdjTy =
2911 Arg =
Builder.CreateBitCast(Arg, AdjTy);
2914 << InputExpr->
getType() << InputConstraint;
2917 if (
auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2918 LargestVectorWidth =
2919 std::max((uint64_t)LargestVectorWidth,
2920 VT->getPrimitiveSizeInBits().getKnownMinValue());
2922 ArgTypes.push_back(Arg->getType());
2923 ArgElemTypes.push_back(ArgElemType);
2924 Args.push_back(Arg);
2925 Constraints += InputConstraint;
2929 for (
unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2930 ArgTypes.push_back(InOutArgTypes[i]);
2931 ArgElemTypes.push_back(InOutArgElemTypes[i]);
2932 Args.push_back(InOutArgs[i]);
2934 Constraints += InOutConstraints;
2938 llvm::BasicBlock *Fallthrough =
nullptr;
2939 bool IsGCCAsmGoto =
false;
2940 if (
const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2941 IsGCCAsmGoto = GS->isAsmGoto();
2943 for (
const auto *
E : GS->labels()) {
2945 Transfer.push_back(Dest.getBlock());
2946 if (!Constraints.empty())
2948 Constraints +=
"!i";
2954 bool HasUnwindClobber =
false;
2957 for (
unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2958 StringRef Clobber = S.getClobber(i);
2960 if (Clobber ==
"memory")
2961 ReadOnly = ReadNone =
false;
2962 else if (Clobber ==
"unwind") {
2963 HasUnwindClobber =
true;
2965 }
else if (Clobber !=
"cc") {
2970 diag::warn_stack_clash_protection_inline_asm);
2974 if (isa<MSAsmStmt>(&S)) {
2975 if (Clobber ==
"eax" || Clobber ==
"edx") {
2976 if (Constraints.find(
"=&A") != std::string::npos)
2978 std::string::size_type position1 =
2979 Constraints.find(
"={" + Clobber.str() +
"}");
2980 if (position1 != std::string::npos) {
2981 Constraints.insert(position1 + 1,
"&");
2984 std::string::size_type position2 = Constraints.find(
"=A");
2985 if (position2 != std::string::npos) {
2986 Constraints.insert(position2 + 1,
"&");
2991 if (!Constraints.empty())
2994 Constraints +=
"~{";
2995 Constraints += Clobber;
2999 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
3000 "unwind clobber can't be used with asm goto");
3004 if (!MachineClobbers.empty()) {
3005 if (!Constraints.empty())
3007 Constraints += MachineClobbers;
3010 llvm::Type *ResultType;
3011 if (ResultRegTypes.empty())
3013 else if (ResultRegTypes.size() == 1)
3014 ResultType = ResultRegTypes[0];
3016 ResultType = llvm::StructType::get(
getLLVMContext(), ResultRegTypes);
3018 llvm::FunctionType *FTy =
3019 llvm::FunctionType::get(ResultType, ArgTypes,
false);
3021 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3023 llvm::InlineAsm::AsmDialect GnuAsmDialect =
3025 ? llvm::InlineAsm::AD_ATT
3026 : llvm::InlineAsm::AD_Intel;
3027 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
3028 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3030 llvm::InlineAsm *IA = llvm::InlineAsm::get(
3031 FTy, AsmString, Constraints, HasSideEffect,
3032 false, AsmDialect, HasUnwindClobber);
3033 std::vector<llvm::Value*> RegResults;
3034 llvm::CallBrInst *CBR;
3035 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3038 CBR =
Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
3046 if (!RegResults.empty()) {
3048 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3049 llvm::Twine SynthName = Dest->getName() +
".split";
3051 llvm::IRBuilderBase::InsertPointGuard IPG(
Builder);
3052 Builder.SetInsertPoint(SynthBB);
3054 if (ResultRegTypes.size() == 1) {
3055 CBRRegResults[SynthBB].push_back(CBR);
3057 for (
unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3058 llvm::Value *Tmp =
Builder.CreateExtractValue(CBR, j,
"asmresult");
3059 CBRRegResults[SynthBB].push_back(Tmp);
3065 CBR->setIndirectDest(i++, SynthBB);
3068 }
else if (HasUnwindClobber) {
3081 EmitAsmStores(*
this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3082 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3083 ResultRegIsFlagReg);
3088 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3089 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3090 llvm::IRBuilderBase::InsertPointGuard IPG(
Builder);
3091 Builder.SetInsertPoint(Succ, --(Succ->end()));
3092 EmitAsmStores(*
this, S, CBRRegResults[Succ], ResultRegTypes,
3093 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3094 ResultTypeRequiresCast, ResultRegIsFlagReg);
3100 const RecordDecl *RD = S.getCapturedRecordDecl();
3109 E = S.capture_init_end();
3110 I !=
E; ++I, ++CurField) {
3112 if (CurField->hasCapturedVLAType()) {
3130 CGCapturedStmtRAII CapInfoRAII(CGF,
new CGCapturedStmtInfo(S, K));
3131 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3132 delete CGF.CapturedStmtInfo;
3149 "CapturedStmtInfo should be set when generating the captured function");
3151 const RecordDecl *RD = S.getCapturedRecordDecl();
3153 assert(CD->
hasBody() &&
"missing CapturedDecl body");
3166 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3170 F->addFnAttr(llvm::Attribute::NoUnwind);
3182 for (
auto *FD : RD->
fields()) {
3183 if (FD->hasCapturedVLAType()) {
3187 auto VAT = FD->getCapturedVLAType();
3188 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3209llvm::IntrinsicInst *getConvergenceToken(llvm::BasicBlock *BB) {
3210 for (
auto &I : *BB) {
3211 auto *II = dyn_cast<llvm::IntrinsicInst>(&I);
3212 if (II && llvm::isConvergenceControlIntrinsic(II->getIntrinsicID()))
3221CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input,
3222 llvm::Value *ParentToken) {
3223 llvm::Value *bundleArgs[] = {ParentToken};
3224 llvm::OperandBundleDef OB(
"convergencectrl", bundleArgs);
3225 auto Output = llvm::CallBase::addOperandBundle(
3226 Input, llvm::LLVMContext::OB_convergencectrl, OB, Input);
3227 Input->replaceAllUsesWith(Output);
3228 Input->eraseFromParent();
3232llvm::IntrinsicInst *
3233CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB,
3234 llvm::Value *ParentToken) {
3235 CGBuilderTy::InsertPoint IP =
Builder.saveIP();
3239 Builder.SetInsertPoint(BB->getFirstInsertionPt());
3241 llvm::CallBase *CB =
Builder.CreateIntrinsic(
3242 llvm::Intrinsic::experimental_convergence_loop, {}, {});
3245 llvm::CallBase *I = addConvergenceControlToken(CB, ParentToken);
3246 return cast<llvm::IntrinsicInst>(I);
3249llvm::IntrinsicInst *
3250CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3251 llvm::BasicBlock *BB = &F->getEntryBlock();
3252 llvm::IntrinsicInst *
Token = getConvergenceToken(BB);
3260 CGBuilderTy::InsertPoint IP =
Builder.saveIP();
3261 Builder.SetInsertPoint(&BB->front());
3262 llvm::CallBase *I =
Builder.CreateIntrinsic(
3263 llvm::Intrinsic::experimental_convergence_entry, {}, {});
3264 assert(isa<llvm::IntrinsicInst>(I));
3267 return cast<llvm::IntrinsicInst>(I);
Defines enum values for all the target-independent builtin functions.
static std::string AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, const TargetInfo &Target, CodeGenModule &CGM, const AsmStmt &Stmt, const bool EarlyClobber, std::string *GCCReg=nullptr)
AddVariableConstraints - Look at AsmExpr and if it is a variable declared as using a particular regis...
static bool FindCaseStatementsForValue(const SwitchStmt &S, const llvm::APSInt &ConstantCondValue, SmallVectorImpl< const Stmt * > &ResultStmts, ASTContext &C, const SwitchCase *&ResultCase)
FindCaseStatementsForValue - Find the case statement being jumped to and then invoke CollectStatement...
static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF, const AsmStmt &S)
static std::optional< SmallVector< uint64_t, 16 > > getLikelihoodWeights(ArrayRef< Stmt::Likelihood > Likelihoods)
static llvm::MDNode * getAsmSrcLocInfo(const StringLiteral *Str, CodeGenFunction &CGF)
getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline asm call instruction.
static std::string SimplifyConstraint(const char *Constraint, const TargetInfo &Target, SmallVectorImpl< TargetInfo::ConstraintInfo > *OutCons=nullptr)
static bool isSwiftAsyncCallee(const CallExpr *CE)
Determine if the given call uses the swiftasync calling convention.
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect, bool HasUnwindClobber, bool ReadOnly, bool ReadNone, bool NoMerge, const AsmStmt &S, const std::vector< llvm::Type * > &ResultRegTypes, const std::vector< llvm::Type * > &ArgElemTypes, CodeGenFunction &CGF, std::vector< llvm::Value * > &RegResults)
static CSFC_Result CollectStatementsForCase(const Stmt *S, const SwitchCase *Case, bool &FoundCase, SmallVectorImpl< const Stmt * > &ResultStmts)
static void EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S, const llvm::ArrayRef< llvm::Value * > RegResults, const llvm::ArrayRef< llvm::Type * > ResultRegTypes, const llvm::ArrayRef< llvm::Type * > ResultTruncRegTypes, const llvm::ArrayRef< LValue > ResultRegDests, const llvm::ArrayRef< QualType > ResultRegQualTys, const llvm::BitVector &ResultTypeRequiresCast, const llvm::BitVector &ResultRegIsFlagReg)
static bool hasEmptyLoopBody(const LoopStmt &S)
CSFC_Result
CollectStatementsForCase - Given the body of a 'switch' statement and a constant value that is being ...
llvm::MachO::Target Target
Defines the PrettyStackTraceEntry class, which is used to make crashes give more contextual informati...
Defines the SourceManager interface.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
bool toIntegralConstant(APSInt &Result, QualType SrcTy, const ASTContext &Ctx) const
Try to convert this value to an integral constant.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
SourceManager & getSourceManager()
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
QualType getRecordType(const RecordDecl *Decl) const
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth,...
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Attr - This represents one attribute.
Represents an attribute applied to a statement.
BreakStmt - This represents a break.
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Represents the body of a CapturedStmt, and serves as its DeclContext.
ImplicitParamDecl * getContextParam() const
Retrieve the parameter containing captured variables.
param_iterator param_end() const
Retrieve an iterator one past the last parameter decl.
param_iterator param_begin() const
Retrieve an iterator pointing to the first parameter decl.
Stmt * getBody() const override
getBody - If this Decl represents a declaration for a body of code, such as a function or method defi...
This captures a statement into a function.
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
CapturedRegionKind getCapturedRegionKind() const
Retrieve the captured region kind.
CaseStmt - Represent a case statement.
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
bool hasReducedDebugInfo() const
Check if type and variable info should be emitted.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
llvm::PointerType * getType() const
Return the type of the pointer value.
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::StoreInst * CreateFlagStore(bool Value, llvm::Value *Addr)
Emit a store to an i1 flag variable.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
CGFunctionInfo - Class to encapsulate the information about a function definition.
ABIArgInfo & getReturnInfo()
CallingConv getASTCallingConvention() const
getASTCallingConvention() - Return the AST-specified calling convention.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD)
Gets the OpenMP-specific address of the local variable.
virtual llvm::Value * getContextValue() const
virtual void setContextValue(llvm::Value *V)
bool isCXXThisExprCaptured() const
virtual FieldDecl * getThisFieldDecl() const
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S)
Emit the captured statement body.
virtual StringRef getHelperName() const
Get the name of the capture helper.
void rescopeLabels()
Change the cleanup scope of the labels in this lexical scope to match the scope of the enclosing cont...
void addLabel(const LabelDecl *label)
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitForStmt(const ForStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EmitGotoStmt(const GotoStmt &S)
void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S)
bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody)
Returns true if a loop must make progress, which means the mustprogress attribute can be added.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
void EmitSehCppScopeBegin()
void EmitIfStmt(const IfStmt &S)
void EmitOMPOrderedDirective(const OMPOrderedDirective &S)
void EmitOMPTargetDirective(const OMPTargetDirective &S)
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
void EmitOMPAtomicDirective(const OMPAtomicDirective &S)
void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S)
void EmitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &S)
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
void EmitOMPReverseDirective(const OMPReverseDirective &S)
static bool hasScalarEvaluationKind(QualType T)
CGCapturedStmtInfo * CapturedStmtInfo
void EmitIndirectGotoStmt(const IndirectGotoStmt &S)
void EmitDecl(const Decl &D)
EmitDecl - Emit a declaration.
void EmitCXXTryStmt(const CXXTryStmt &S)
bool EmitSimpleStmt(const Stmt *S, ArrayRef< const Attr * > Attrs)
EmitSimpleStmt - Try to emit a "simple" statement which does not necessarily require an insertion poi...
void EmitLabel(const LabelDecl *D)
EmitLabel - Emit the block for the given label.
void EmitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &S)
void EmitOMPTaskDirective(const OMPTaskDirective &S)
void EmitOMPScanDirective(const OMPScanDirective &S)
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S)
void EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitCaseStmt(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitDefaultStmt(const DefaultStmt &S, ArrayRef< const Attr * > Attrs)
const LangOptions & getLangOpts() const
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S)
void EmitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &S)
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S)
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S)
void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S)
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
void SimplifyForwardingBlocks(llvm::BasicBlock *BB)
SimplifyForwardingBlocks - If the given basic block is only a branch to another basic block,...
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
void EmitOMPParallelDirective(const OMPParallelDirective &S)
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
void EmitBlockAfterUses(llvm::BasicBlock *BB)
EmitBlockAfterUses - Emit the given block somewhere hopefully near its uses, and leave the insertion ...
void EmitContinueStmt(const ContinueStmt &S)
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
llvm::Type * ConvertTypeForMem(QualType T)
void EmitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &S)
LValue MakeAddrLValueWithoutTBAA(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitOMPForSimdDirective(const OMPForSimdDirective &S)
JumpDest ReturnBlock
ReturnBlock - Unified return block.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
void EmitOMPFlushDirective(const OMPFlushDirective &S)
void EmitSEHLeaveStmt(const SEHLeaveStmt &S)
void EmitAttributedStmt(const AttributedStmt &S)
void EmitOMPCancelDirective(const OMPCancelDirective &S)
void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S)
void EmitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &S)
void EmitCaseStmtRange(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
const TargetInfo & getTarget() const
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S)
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitLabelStmt(const LabelStmt &S)
void EmitOMPDepobjDirective(const OMPDepobjDirective &S)
const Expr * RetExpr
If a return statement is being visited, this holds the return statement's result expression.
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init)
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
void EmitOMPSingleDirective(const OMPSingleDirective &S)
void EmitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &S)
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D)
Emit simple code for OpenMP directives in Simd-only mode.
void EmitOMPDistributeDirective(const OMPDistributeDirective &S)
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
void EmitOMPParallelForDirective(const OMPParallelForDirective &S)
void EmitOMPTeamsDirective(const OMPTeamsDirective &S)
uint64_t getCurrentProfileCount()
Get the profiler's current count.
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitOMPUnrollDirective(const OMPUnrollDirective &S)
void EmitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &S)
void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void EmitSwitchStmt(const SwitchStmt &S)
CGDebugInfo * getDebugInfo()
void EmitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &S)
void EmitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &S)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
llvm::Function * GenerateCapturedStmtFunction(const CapturedStmt &S)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
void EmitSEHTryStmt(const SEHTryStmt &S)
void EmitOMPInteropDirective(const OMPInteropDirective &S)
SmallVector< llvm::IntrinsicInst *, 4 > ConvergenceTokenStack
Stack to track the controlled convergence tokens.
const TargetCodeGenInfo & getTargetHooks() const
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
void EmitDeclStmt(const DeclStmt &S)
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S)
void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S)
void EmitCoroutineBody(const CoroutineBodyStmt &S)
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitDoStmt(const DoStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S)
ASTContext & getContext() const
void EmitCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S)
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void EmitOMPTileDirective(const OMPTileDirective &S)
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S)
llvm::Function * EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K)
llvm::Type * ConvertType(QualType T)
void EmitOMPSectionsDirective(const OMPSectionsDirective &S)
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S)
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S)
void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S)
void EmitOMPSimdDirective(const OMPSimdDirective &S)
void EmitOMPCriticalDirective(const OMPCriticalDirective &S)
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S)
void EmitOMPForDirective(const OMPForDirective &S)
void EmitOMPMetaDirective(const OMPMetaDirective &S)
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S)
LValue InitCapturedStruct(const CapturedStmt &S)
void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S)
void EmitReturnStmt(const ReturnStmt &S)
unsigned NextCleanupDestIndex
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
void EmitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &S)
void EmitOMPMasterDirective(const OMPMasterDirective &S)
void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S)
void EmitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &S)
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
void EmitOMPMaskedDirective(const OMPMaskedDirective &S)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
static bool hasAggregateEvaluationKind(QualType T)
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S)
void EmitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &S)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
void EmitBreakStmt(const BreakStmt &S)
Address GenerateCapturedStmtArgument(const CapturedStmt &S)
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
const CallExpr * MustTailCall
const CGFunctionInfo * CurFnInfo
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const
isObviouslyBranchWithoutCleanups - Return true if a branch to the specified destination obviously has...
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitCoreturnStmt(const CoreturnStmt &S)
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S)
void EmitOMPErrorDirective(const OMPErrorDirective &S)
void EmitOMPSectionDirective(const OMPSectionDirective &S)
void EmitOMPBarrierDirective(const OMPBarrierDirective &S)
void EmitStopPoint(const Stmt *S)
EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S)
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs=std::nullopt)
EmitStmt - Emit the code for the statement.
void EmitWhileStmt(const WhileStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S)
llvm::LLVMContext & getLLVMContext()
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S)
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void ResolveBranchFixups(llvm::BasicBlock *Target)
void EmitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &S)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S)
void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S)
Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S)
llvm::BasicBlock * GetIndirectGotoBlock()
void EmitAsmStmt(const AsmStmt &S)
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S)
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
This class organizes the cross-function state that is used while generating LLVM code.
void SetInternalFunctionAttributes(GlobalDecl GD, llvm::Function *F, const CGFunctionInfo &FI)
Set the attributes on the LLVM function for the given decl and function info.
llvm::Module & getModule() const
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
CodeGenTypes & getTypes()
const llvm::DataLayout & getDataLayout() const
void Error(SourceLocation loc, StringRef error)
Emit a general error that something can't be done.
bool shouldEmitConvergenceTokens() const
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
SanitizerMetadata * getSanitizerMetadata()
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys=std::nullopt)
void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn)
Assign counters to regions and configure them for PGO of a given function.
void setCurrentStmt(const Stmt *S)
If the execution count for the current statement is known, record that as the current count.
bool haveRegionCounts() const
Whether or not we have PGO region data for the current function.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given function info.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
A saved depth on the scope stack.
bool encloses(stable_iterator I) const
Returns true if this scope encloses I.
static stable_iterator invalid()
stable_iterator getInnermostNormalCleanup() const
Returns the innermost normal cleanup on the stack, or stable_end() if there are no normal cleanups.
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
bool empty() const
Determines whether the exception-scopes stack is empty.
bool hasNormalCleanups() const
Determines whether there are any normal cleanups on the stack.
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
LValue - This represents an lvalue reference.
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
void pop()
End the current loop.
void push(llvm::BasicBlock *Header, const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc)
Begin a new structured loop.
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
static RValue get(llvm::Value *V)
Address getAggregateAddress() const
getAggregateAddress() - Return the Value* of the address of the aggregate.
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
virtual void addReturnRegisterOutputs(CodeGen::CodeGenFunction &CGF, CodeGen::LValue ReturnValue, std::string &Constraints, std::vector< llvm::Type * > &ResultRegTypes, std::vector< llvm::Type * > &ResultTruncRegTypes, std::vector< CodeGen::LValue > &ResultRegDests, std::string &AsmString, unsigned NumOutputs) const
Adds constraints and types for result registers.
virtual bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, llvm::Type *Ty) const
Target hook to decide whether an inline asm operand can be passed by value.
CompoundStmt - This represents a group of statements like { stmt stmt }.
Stmt *const * const_body_iterator
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
ContinueStmt - This represents a continue.
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
A reference to a declared variable, function, enum, etc.
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
SourceLocation getBodyRBrace() const
getBodyRBrace - Gets the right brace of the body, if a body exists.
virtual bool hasBody() const
Returns true if this Decl represents a declaration for a body of code, such as a function or method d...
SourceLocation getLocation() const
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
DoStmt - This represents a 'do/while' stmt.
This represents one expression.
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx, SmallVectorImpl< PartialDiagnosticAt > *Diag=nullptr) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Represents a member of a struct/union/class.
ForStmt - This represents a 'for (init;cond;inc)' stmt.
const Expr * getSubExpr() const
FunctionType - C99 6.7.5.3 - Function Declarators.
CallingConv getCallConv() const
This represents a GCC inline-assembly statement extension.
GlobalDecl - represents a global declaration.
GotoStmt - This represents a direct goto.
IfStmt - This represents an if/then/else.
IndirectGotoStmt - This represents an indirect goto.
Represents the declaration of a label.
LabelStmt - Represents a label, which has a substatement.
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
bool assumeFunctionsAreConvergent() const
Represents a point when we exit a loop.
If a crash happens while one of these objects is live, the message is printed out along with the spe...
A (possibly-)qualified type.
bool isNull() const
Return true if this QualType doesn't point to a type yet.
QualType getCanonicalType() const
The collection of all-type qualifiers we support.
Represents a struct/union/class.
field_range fields() const
field_iterator field_begin() const
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Scope - A scope is a transient data structure that is used while parsing the program.
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
This class handles loading and caching of source files into memory.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
StmtClass getStmtClass() const
Likelihood
The likelihood of a branch being taken.
@ LH_Unlikely
Branch has the [[unlikely]] attribute.
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
@ LH_Likely
Branch has the [[likely]] attribute.
static const Attr * getLikelihoodAttr(const Stmt *S)
SourceLocation getBeginLoc() const LLVM_READONLY
static Likelihood getLikelihood(ArrayRef< const Attr * > Attrs)
StringLiteral - This represents a string literal expression, e.g.
SourceLocation getBeginLoc() const LLVM_READONLY
SourceLocation getLocationOfByte(unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, const TargetInfo &Target, unsigned *StartToken=nullptr, unsigned *StartTokenByteOffset=nullptr) const
getLocationOfByte - Return a source location that points to the specified byte of this string literal...
StringRef getString() const
const SwitchCase * getNextSwitchCase() const
SwitchStmt - This represents a 'switch' stmt.
Exposes information about the current target.
bool validateInputConstraint(MutableArrayRef< ConstraintInfo > OutputConstraints, ConstraintInfo &info) const
StringRef getNormalizedGCCRegisterName(StringRef Name, bool ReturnCanonical=false) const
Returns the "normalized" GCC register name.
bool validateOutputConstraint(ConstraintInfo &Info) const
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
Token - This structure provides full information about a lexed token.
const T * castAs() const
Member-template castAs<specific type>.
bool isReferenceType() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Represents the declaration of a variable (in which case it is an lvalue) or a function (in which case it ...
Represents a variable declaration or definition.
StorageClass getStorageClass() const
Returns the storage class as written in the source.
WhileStmt - This represents a 'while' stmt.
Defines the clang::TargetInfo interface.
bool Ret(InterpState &S, CodePtr &PC, APValue &Result)
bool Rem(InterpState &S, CodePtr OpPC)
1) Pops the RHS from the stack.
The JSON file list parser is used to communicate input to InstallAPI.
CapturedRegionKind
The different kinds of captured statement.
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
Diagnostic wrappers for TextAPI types for error reporting.
cl::opt< bool > EnableSingleByteCoverage
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void setScopeDepth(EHScopeStack::stable_iterator depth)
EHScopeStack::stable_iterator getScopeDepth() const
llvm::IntegerType * Int64Ty
llvm::IntegerType * IntPtrTy
llvm::PointerType * Int8PtrTy
EvalResult is a struct with detailed info about an evaluated expression.
APValue Val
Val - This is the value the expression can be folded to.
bool hasMatchingInput() const
Return true if this output operand has a matching (tied) input operand.
unsigned getTiedOperand() const
bool allowsMemory() const
bool requiresImmediateConstant() const
bool hasTiedOperand() const
Return true if this input operand is a matching constraint that ties it to an output operand.
bool allowsRegister() const