//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

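/// EmitStmt - Emit the code for the statement \arg S. It is legal to call
/// this function even if there is no current insertion point.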
void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple; they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set. To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors. We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();". Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function. This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:     EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass:  EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:     EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:    EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S));      break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass:  EmitAsmStmt(cast<AsmStmt>(*S));            break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
  }
  break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPReverseDirectiveClass:
    EmitOMPReverseDirective(cast<OMPReverseDirective>(*S));
    break;
  case Stmt::OMPInterchangeDirectiveClass:
    EmitOMPInterchangeDirective(cast<OMPInterchangeDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPErrorDirectiveClass:
    EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
    llvm_unreachable("masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable("masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
    llvm_unreachable("parallel masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable(
        "parallel masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
    break;
  case Stmt::OMPDispatchDirectiveClass:
    CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
    break;
  case Stmt::OMPScopeDirectiveClass:
    CGM.ErrorUnsupported(S, "scope with FE outlining");
    break;
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
    EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
    EmitOMPTargetTeamsGenericLoopDirective(
        cast<OMPTargetTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelGenericLoopDirectiveClass:
    EmitOMPParallelGenericLoopDirective(
        cast<OMPParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
    EmitOMPTargetParallelGenericLoopDirective(
        cast<OMPTargetParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedDirectiveClass:
    EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
    break;
  case Stmt::OMPAssumeDirectiveClass:
    EmitOMPAssumeDirective(cast<OMPAssumeDirective>(*S));
    break;
  case Stmt::OpenACCComputeConstructClass:
    EmitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*S));
    break;
  case Stmt::OpenACCLoopConstructClass:
    EmitOpenACCLoopConstruct(cast<OpenACCLoopConstruct>(*S));
    break;
  }
}

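/// EmitSimpleStmt - Try to emit the given statement on the "simple"
/// statement path, where each handled statement does its own debug info
/// handling. Returns true if the statement was handled here.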
bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is
/// true, this captures the expression result of the last sub-statement and
/// returns it (for use by the statement expression extension).
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
                                S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here. They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression. Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr. Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}

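/// SimplifyForwardingBlocks - If the given basic block is just an
/// unconditional branch to another block, fold it away; this assumes no
/// other code could reference the block.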
void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

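/// EmitBlock - Emit the given block \arg BB and set it as the insert point,
/// adding a fall-through branch from the current insert block if necessary.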
void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->insert(std::next(CurBB->getIterator()), BB);
  else
    CurFn->insert(CurFn->end(), BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block. If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->insert(std::next(insn->getParent()->getIterator()), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->insert(CurFn->end(), block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

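/// EmitLabel - Emit the block for the label \arg D, resolving any pending
/// forward references to it, and make it the current insertion point.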
void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups. Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

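/// EmitAttributedStmt - Record which statement-level attributes (nomerge,
/// noinline, always_inline, noconvergent, musttail, assume) are in effect,
/// then emit the sub-statement under those settings.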
void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  bool noinline = false;
  bool alwaysinline = false;
  bool noconvergent = false;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    switch (A->getKind()) {
    default:
      break;
    case attr::NoMerge:
      nomerge = true;
      break;
    case attr::NoInline:
      noinline = true;
      break;
    case attr::AlwaysInline:
      alwaysinline = true;
      break;
    case attr::NoConvergent:
      noconvergent = true;
      break;
    case attr::MustTail: {
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
    } break;
    case attr::CXXAssume: {
      const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
      if (getLangOpts().CXXAssumptions &&
          !Assumption->HasSideEffects(getContext())) {
        llvm::Value *AssumptionVal = EvaluateExprAsBool(Assumption);
        Builder.CreateAssumption(AssumptionVal);
      }
    } break;
    }
  }
  SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
  SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
  SaveAndRestore save_noconvergent(InNoConvergentAttributedStmt, noconvergent);
  SaveAndRestore save_musttail(MustTailCall, musttail);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

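/// EmitIfStmt - Emit an if statement, folding away the unreachable arm when
/// the condition constant-folds and handling consteval if, condition
/// variables, profile counters, and likelihood metadata.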
void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // The else branch of a consteval if statement is always the only branch that
  // can be runtime evaluated.
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0. The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  // Also, differentiate between disabled PGO and a never executed branch with
  // PGO. Assuming PGO is in use:
  // - we want to ignore the [[likely]] attribute if the branch is never
  //   executed,
  // - assuming the profile is poor, preserving the attribute may still be
  //   beneficial.
  // As an approximation, preserve the attribute only if both the branch and
  // the parent context were not executed.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t ThenCount = getProfileCount(S.getThen());
  if (!ThenCount && !getCurrentProfileCount() &&
      CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), S.getElse());

  // When measuring MC/DC, always fully evaluate the condition up front using
  // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
  // executing the body of the if.then or if.else. This is useful for when
  // there is a 'return' within the body, but this is particularly beneficial
  // when one if-stmt is nested within another if-stmt so that all of the MC/DC
  // updates are kept linear and consistent.
  if (!CGM.getCodeGenOpts().MCDCCoverage)
    EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH);
  else {
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
  }

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getThen());
  else
    incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    // When single byte coverage mode is enabled, add a counter to else block.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(Else);
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);
}

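/// Decide whether the loop with the given controlling expression must make
/// forward progress, based on the finite-loops mode, the language version,
/// and whether the condition is a constant. A trivial infinite loop also
/// clears the function's mustprogress attribute.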
bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
                                              bool HasEmptyBody) {
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
      CodeGenOptions::FiniteLoopsKind::Never)
    return false;

  // Now apply rules for plain C (see 6.8.5.6 in C11).
  // Loops with constant conditions do not have to make progress in any C
  // version.
  // As an extension, we consider loops whose constant expression
  // can be constant-folded.
  Expr::EvalResult Result;
  bool CondIsConstInt =
      !ControllingExpression ||
      (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
       Result.Val.isInt());

  bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
                                       Result.Val.getInt().getBoolValue());

  // Loops with non-constant conditions must make progress in C11 and later.
  if (getLangOpts().C11 && !CondIsConstInt)
    return true;

  // [C++26][intro.progress] (DR)
  // The implementation may assume that any thread will eventually do one of
  // the following:
  // [...]
  // - continue execution of a trivial infinite loop ([stmt.iter.general]).
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
          CodeGenOptions::FiniteLoopsKind::Always ||
      getLangOpts().CPlusPlus11) {
    if (HasEmptyBody && CondIsTrue) {
      CurFn->removeFnAttr(llvm::Attribute::MustProgress);
      return false;
    }
    return true;
  }
  return false;
}

// [C++26][stmt.iter.general] (DR)
// A trivially empty iteration statement is an iteration statement matching
// one of the following forms:
//  - while ( expression ) ;
//  - while ( expression ) { }
//  - do ; while ( expression ) ;
//  - do { } while ( expression ) ;
//  - for ( init-statement expression(opt); ) ;
//  - for ( init-statement expression(opt); ) { }
template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
  if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
    if (S.getInc())
      return false;
  }
  const Stmt *Body = S.getBody();
  if (!Body || isa<NullStmt>(Body))
    return true;
  if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
    return Compound->body_empty();
  return false;
}

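/// EmitWhileStmt - Emit a while statement: the loop header (which doubles as
/// the continue target), the condition check, the body, and the exit block
/// (which doubles as the break target).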
void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(
        LoopHeader.getBlock(), ConvergenceTokenStack.back()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header. C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // When single byte coverage mode is enabled, add a counter to loop condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body. We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    // When single byte coverage mode is enabled, add a counter to the body.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getBody());
    else
      incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

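/// EmitDoStmt - Emit a do/while statement: the body comes first, then the
/// condition block, which serves as the continue target.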
void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  if (llvm::EnableSingleByteCoverage)
    EmitBlockWithFallThrough(LoopBody, S.getBody());
  else
    EmitBlockWithFallThrough(LoopBody, &S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(LoopBody, ConvergenceTokenStack.back()));

  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());
  // When single byte coverage mode is enabled, add a counter to loop condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped emitting
  // a branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

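/// EmitForStmt - Emit a for statement: the init statement, the condition
/// block, the body, and the increment, which serves as the continue target
/// when present.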
void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(CondBlock, ConvergenceTokenStack.back()));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block. Otherwise, if there is no condition variable, we can
  // form the continue block now. If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    // When single byte coverage mode is enabled, add a counter to loop
    // condition.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getCond());

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0. The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant. Don't even create a new block for the
    // body, just fall into it.
  }

  // When single byte coverage mode is enabled, add a counter to the body.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);
  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

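/// EmitCXXForRangeStmt - Emit a C++11 range-based for statement using the
/// range, begin, and end statements that Sema synthesized for the loop.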
void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(CondBlock, ConvergenceTokenStack.back()));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// Determine if the given call uses the swiftasync calling convention.
static bool isSwiftAsyncCallee(const CallExpr *CE) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return false;
    }
  } else {
    return false;
  }
  return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
}

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(SLocPtr, ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();

  // If we're in a swiftasynccall function, and the return expression is a
  // call to a swiftasynccall function, mark the call as the musttail call.
  std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
  if (RV && CurFnInfo &&
      CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
    if (auto CE = dyn_cast<CallExpr>(RV)) {
      if (isSwiftAsyncCallee(CE)) {
        SaveMustTail.emplace(MustTailCall, CE);
      }
    }
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar: {
      llvm::Value *Ret = EmitScalarExpr(RV);
      if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect)
        EmitStoreOfScalar(Ret, MakeAddrLValue(ReturnValue, RV->getType()),
                          /*isInit*/ true);
      else
        Builder.CreateStore(Ret, ReturnValue);
      break;
    }
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

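/// EmitDeclStmt - Emit a declaration statement by emitting each declaration
/// it contains.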
void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

1620/// EmitCaseStmtRange - If case statement range is not too big then
1621/// add multiple cases to switch instruction, one for each value within
1622/// the range. If range is too big then emit "if" condition check.
1624 ArrayRef<const Attr *> Attrs) {
1625 assert(S.getRHS() && "Expected RHS value in CaseStmt");
1626
1627 llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1628 llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1629
1630 // Emit the code for this case. We do this first to make sure it is
1631 // properly chained from our predecessor before generating the
1632 // switch machinery to enter this block.
1633 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1634 EmitBlockWithFallThrough(CaseDest, &S);
1635 EmitStmt(S.getSubStmt());
1636
1637 // If range is empty, do nothing.
1638 if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1639 return;
1640
1642 llvm::APInt Range = RHS - LHS;
1643 // FIXME: parameters such as this should not be hardcoded.
1644 if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1645 // Range is small enough to add multiple switch instruction cases.
1646 uint64_t Total = getProfileCount(&S);
1647 unsigned NCases = Range.getZExtValue() + 1;
1648 // We only have one region counter for the entire set of cases here, so we
1649 // need to divide the weights evenly between the generated cases, ensuring
1650 // that the total weight is preserved. E.g., a weight of 5 over three cases
1651 // will be distributed as weights of 2, 2, and 1.
1652 uint64_t Weight = Total / NCases, Rem = Total % NCases;
1653 for (unsigned I = 0; I != NCases; ++I) {
1654 if (SwitchWeights)
1655 SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1656 else if (SwitchLikelihood)
1657 SwitchLikelihood->push_back(LH);
1658
1659 if (Rem)
1660 Rem--;
1661 SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1662 ++LHS;
1663 }
1664 return;
1665 }
1666
1667 // The range is too big. Emit "if" condition into a new block,
1668 // making sure to save and restore the current insertion point.
1669 llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1670
1671 // Push this test onto the chain of range checks (which terminates
1672 // in the default basic block). The switch's default will be changed
1673 // to the top of this chain after switch emission is complete.
1674 llvm::BasicBlock *FalseDest = CaseRangeBlock;
1675 CaseRangeBlock = createBasicBlock("sw.caserange");
1676
1677 CurFn->insert(CurFn->end(), CaseRangeBlock);
1678 Builder.SetInsertPoint(CaseRangeBlock);
1679
1680 // Emit range check.
1681 llvm::Value *Diff =
1682 Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1683 llvm::Value *Cond =
1684 Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1685
1686 llvm::MDNode *Weights = nullptr;
1687 if (SwitchWeights) {
1688 uint64_t ThisCount = getProfileCount(&S);
1689 uint64_t DefaultCount = (*SwitchWeights)[0];
1690 Weights = createProfileWeights(ThisCount, DefaultCount);
1691
1692 // Since we're chaining the switch default through each large case range, we
1693 // need to update the weight for the default, i.e., the first case, to include
1694 // this case.
1695 (*SwitchWeights)[0] += ThisCount;
1696 } else if (SwitchLikelihood)
1697 Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1698
1699 Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1700
1701 // Restore the appropriate insertion point.
1702 if (RestoreBB)
1703 Builder.SetInsertPoint(RestoreBB);
1704 else
1705 Builder.ClearInsertionPoint();
1706}
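// A minimal sketch of the two strategies above (illustrative source, using
// the hard-coded threshold of 64 noted in the FIXME):
//
//   switch (n) {
//   case 0 ... 15:        // small range: 16 discrete cases are added to
//     f(); break;         //   the switch instruction
//   case 100 ... 100000:  // large range: chained check, roughly
//     g(); break;         //   "if ((n - 100) <=u 99900) goto body"
//   }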
1707
1708void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1709 ArrayRef<const Attr *> Attrs) {
1710 // If there is no enclosing switch instance that we're aware of, then this
1711 // case statement and its block can be elided. This situation only happens
1712 // when we've constant-folded the switch, are emitting the constant case,
1713 // and part of the constant case includes another case statement. For
1714 // instance: switch (4) { case 4: do { case 5: } while (1); }
1715 if (!SwitchInsn) {
1716 EmitStmt(S.getSubStmt());
1717 return;
1718 }
1719
1720 // Handle case ranges.
1721 if (S.getRHS()) {
1722 EmitCaseStmtRange(S, Attrs);
1723 return;
1724 }
1725
1726 llvm::ConstantInt *CaseVal =
1727 Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1728
1729 // Emit debuginfo for the case value if it is an enum value.
1730 const ConstantExpr *CE;
1731 if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1732 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1733 else
1734 CE = dyn_cast<ConstantExpr>(S.getLHS());
1735 if (CE) {
1736 if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1737 if (CGDebugInfo *Dbg = getDebugInfo())
1738 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1739 Dbg->EmitGlobalVariable(DE->getDecl(),
1740 APValue(llvm::APSInt(CaseVal->getValue())));
1741 }
1742
1743 if (SwitchLikelihood)
1744 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1745
1746 // If the body of the case is just a 'break', try to not emit an empty block.
1747 // If we're profiling or we're not optimizing, leave the block in for better
1748 // debug and coverage analysis.
1749 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1750 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1751 isa<BreakStmt>(S.getSubStmt())) {
1752 JumpDest Block = BreakContinueStack.back().BreakBlock;
1753
1754 // Only do this optimization if there are no cleanups that need emitting.
1755 if (isObviouslyBranchWithoutCleanups(Block)) {
1756 if (SwitchWeights)
1757 SwitchWeights->push_back(getProfileCount(&S));
1758 SwitchInsn->addCase(CaseVal, Block.getBlock());
1759
1760 // If there was a fallthrough into this case, make sure to redirect it to
1761 // the end of the switch as well.
1762 if (Builder.GetInsertBlock()) {
1763 Builder.CreateBr(Block.getBlock());
1764 Builder.ClearInsertionPoint();
1765 }
1766 return;
1767 }
1768 }
1769
1770 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1771 EmitBlockWithFallThrough(CaseDest, &S);
1772 if (SwitchWeights)
1773 SwitchWeights->push_back(getProfileCount(&S));
1774 SwitchInsn->addCase(CaseVal, CaseDest);
1775
1776 // Recursively emitting the statement is acceptable, but is not wonderful for
1777 // code where we have many case statements nested together, i.e.:
1778 // case 1:
1779 // case 2:
1780 // case 3: etc.
1781 // Handling this recursively will create a new block for each case statement
1782 // that falls through to the next case which is IR intensive. It also causes
1783 // deep recursion which can run into stack depth limitations. Handle
1784 // sequential non-range case statements specially.
1785 //
1786 // TODO: When the next case has a likelihood attribute, the code returns to
1787 // the recursive algorithm. Maybe improve this case if it becomes common
1788 // practice to use a lot of attributes.
1789 const CaseStmt *CurCase = &S;
1790 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1791
1792 // Otherwise, iteratively add consecutive cases to this switch stmt.
1793 while (NextCase && NextCase->getRHS() == nullptr) {
1794 CurCase = NextCase;
1795 llvm::ConstantInt *CaseVal =
1796 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1797
1798 if (SwitchWeights)
1799 SwitchWeights->push_back(getProfileCount(NextCase));
1800 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1801 CaseDest = createBasicBlock("sw.bb");
1802 EmitBlockWithFallThrough(CaseDest, CurCase);
1803 }
1804 // Since this loop is only executed when the CaseStmt has no attributes,
1805 // use a hard-coded value.
1806 if (SwitchLikelihood)
1807 SwitchLikelihood->push_back(Stmt::LH_None);
1808
1809 SwitchInsn->addCase(CaseVal, CaseDest);
1810 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1811 }
1812
1813 // Generate a stop point for debug info if the case statement is
1814 // followed by a default statement. A fallthrough case before a
1815 // default case gets its own branch target.
1816 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1817 EmitStopPoint(CurCase);
1818
1819 // Normal default recursion for non-cases.
1820 EmitStmt(CurCase->getSubStmt());
1821}
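// Illustrative example (hypothetical source): the iterative path above folds
// consecutive labels onto the same destination block instead of recursing
// once per label:
//
//   switch (c) {
//   case 'a':
//   case 'b':
//   case 'c':   // all three labels map to one "sw.bb" block
//     handle(c); break;
//   }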
1822
1823void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1824 ArrayRef<const Attr *> Attrs) {
1825 // If there is no enclosing switch instance that we're aware of, then this
1826 // default statement can be elided. This situation only happens when we've
1827 // constant-folded the switch.
1828 if (!SwitchInsn) {
1829 EmitStmt(S.getSubStmt());
1830 return;
1831 }
1832
1833 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1834 assert(DefaultBlock->empty() &&
1835 "EmitDefaultStmt: Default block already defined?");
1836
1837 if (SwitchLikelihood)
1838 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1839
1840 EmitBlockWithFallThrough(DefaultBlock, &S);
1841
1842 EmitStmt(S.getSubStmt());
1843}
1844
1845/// CollectStatementsForCase - Given the body of a 'switch' statement and a
1846/// constant value that is being switched on, see if we can dead code eliminate
1847/// the body of the switch to a simple series of statements to emit. Basically,
1848/// on a switch (5) we want to find these statements:
1849/// case 5:
1850/// printf(...); <--
1851/// ++i; <--
1852/// break;
1853///
1854/// and add them to the ResultStmts vector. If it is unsafe to do this
1855/// transformation (for example, one of the elided statements contains a label
1856/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1857/// should include statements after it (e.g. the printf() line is a substmt of
1858/// the case) then return CSFC_FallThrough. If we handled it and found a break
1859/// statement, then return CSFC_Success.
1860///
1861/// If Case is non-null, then we are looking for the specified case, checking
1862/// that nothing we jump over contains labels. If Case is null, then we found
1863/// the case and are looking for the break.
1864///
1865/// If the recursive walk actually finds our Case, then we set FoundCase to
1866/// true.
1867///
1870 const SwitchCase *Case,
1871 bool &FoundCase,
1872 SmallVectorImpl<const Stmt*> &ResultStmts) {
1873 // If this is a null statement, just succeed.
1874 if (!S)
1875 return Case ? CSFC_Success : CSFC_FallThrough;
1876
1877 // If this is the switchcase (case 4: or default) that we're looking for, then
1878 // we're in business. Just add the substatement.
1879 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1880 if (S == Case) {
1881 FoundCase = true;
1882 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1883 ResultStmts);
1884 }
1885
1886 // Otherwise, this is some other case or default statement, just ignore it.
1887 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1888 ResultStmts);
1889 }
1890
1891 // If we are in the live part of the code and we found our break statement,
1892 // return a success!
1893 if (!Case && isa<BreakStmt>(S))
1894 return CSFC_Success;
1895
1896 // If this is a compound statement, then it might contain the SwitchCase,
1897 // the break, or neither.
1898 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1899 // Handle this as two cases: we might be looking for the SwitchCase (if so
1900 // the skipped statements must be skippable) or we might already have it.
1901 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1902 bool StartedInLiveCode = FoundCase;
1903 unsigned StartSize = ResultStmts.size();
1904
1905 // If we've not found the case yet, scan through looking for it.
1906 if (Case) {
1907 // Keep track of whether we see a skipped declaration. The code could be
1908 // using the declaration even if it is skipped, so we can't optimize out
1909 // the decl if the kept statements might refer to it.
1910 bool HadSkippedDecl = false;
1911
1912 // If we're looking for the case, just see if we can skip each of the
1913 // substatements.
1914 for (; Case && I != E; ++I) {
1915 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1916
1917 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1918 case CSFC_Failure: return CSFC_Failure;
1919 case CSFC_Success:
1920 // A successful result means that either 1) the statement doesn't
1921 // have the case and is skippable, or 2) it does contain the case value
1922 // and also contains the break to exit the switch. In the latter case,
1923 // we just verify that the rest of the statements are elidable.
1924 if (FoundCase) {
1925 // If we found the case and skipped declarations, we can't do the
1926 // optimization.
1927 if (HadSkippedDecl)
1928 return CSFC_Failure;
1929
1930 for (++I; I != E; ++I)
1931 if (CodeGenFunction::ContainsLabel(*I, true))
1932 return CSFC_Failure;
1933 return CSFC_Success;
1934 }
1935 break;
1936 case CSFC_FallThrough:
1937 // If we have a fallthrough condition, then we must have found the
1938 // case and started to include statements. Consider the rest of the
1939 // statements in the compound statement as candidates for inclusion.
1940 assert(FoundCase && "Didn't find case but returned fallthrough?");
1941 // We recursively found Case, so we're not looking for it anymore.
1942 Case = nullptr;
1943
1944 // If we found the case and skipped declarations, we can't do the
1945 // optimization.
1946 if (HadSkippedDecl)
1947 return CSFC_Failure;
1948 break;
1949 }
1950 }
1951
1952 if (!FoundCase)
1953 return CSFC_Success;
1954
1955 assert(!HadSkippedDecl && "fallthrough after skipping decl");
1956 }
1957
1958 // If we have statements in our range, then we know that the statements are
1959 // live and need to be added to the set of statements we're tracking.
1960 bool AnyDecls = false;
1961 for (; I != E; ++I) {
1962 AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1963
1964 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1965 case CSFC_Failure: return CSFC_Failure;
1966 case CSFC_FallThrough:
1967 // A fallthrough result means that the statement was simple and just
1968 // included in ResultStmts; keep adding the statements that follow.
1969 break;
1970 case CSFC_Success:
1971 // A successful result means that we found the break statement and
1972 // stopped statement inclusion. We just ensure that any leftover stmts
1973 // are skippable and return success ourselves.
1974 for (++I; I != E; ++I)
1975 if (CodeGenFunction::ContainsLabel(*I, true))
1976 return CSFC_Failure;
1977 return CSFC_Success;
1978 }
1979 }
1980
1981 // If we're about to fall out of a scope without hitting a 'break;', we
1982 // can't perform the optimization if there were any decls in that scope
1983 // (we'd lose their end-of-lifetime).
1984 if (AnyDecls) {
1985 // If the entire compound statement was live, there's one more thing we
1986 // can try before giving up: emit the whole thing as a single statement.
1987 // We can do that unless the statement contains a 'break;'.
1988 // FIXME: Such a break must be at the end of a construct within this one.
1989 // We could emit this by just ignoring the BreakStmts entirely.
1990 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1991 ResultStmts.resize(StartSize);
1992 ResultStmts.push_back(S);
1993 } else {
1994 return CSFC_Failure;
1995 }
1996 }
1997
1998 return CSFC_FallThrough;
1999 }
2000
2001 // Okay, this is some other statement that we don't handle explicitly, like a
2002 // for statement or increment etc. If we are skipping over this statement,
2003 // just verify it doesn't have labels, which would make it invalid to elide.
2004 if (Case) {
2005 if (CodeGenFunction::ContainsLabel(S, true))
2006 return CSFC_Failure;
2007 return CSFC_Success;
2008 }
2009
2010 // Otherwise, we want to include this statement. Everything is cool with that
2011 // so long as it doesn't contain a break out of the switch we're in.
2012 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
2013
2014 // Otherwise, everything is great. Include the statement and tell the caller
2015 // that we fall through and include the next statement as well.
2016 ResultStmts.push_back(S);
2017 return CSFC_FallThrough;
2018}
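// Hedged example of the dead-code elimination this walk enables: for
//
//   switch (4) { case 4: puts("x"); ++i; break; default: rare(); }
//
// the walk collects the two live statements (the puts and the increment)
// into ResultStmts, so the caller can emit them without any switch
// machinery at all.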
2019
2020/// FindCaseStatementsForValue - Find the case statement being jumped to and
2021/// then invoke CollectStatementsForCase to find the list of statements to emit
2022/// for a switch on constant. See the comment above CollectStatementsForCase
2023/// for more details.
2024static bool FindCaseStatementsForValue(const SwitchStmt &S,
2025 const llvm::APSInt &ConstantCondValue,
2026 SmallVectorImpl<const Stmt*> &ResultStmts,
2027 ASTContext &C,
2028 const SwitchCase *&ResultCase) {
2029 // First step, find the switch case that is being branched to. We can do this
2030 // efficiently by scanning the SwitchCase list.
2031 const SwitchCase *Case = S.getSwitchCaseList();
2032 const DefaultStmt *DefaultCase = nullptr;
2033
2034 for (; Case; Case = Case->getNextSwitchCase()) {
2035 // It's either a default or case. Just remember the default statement in
2036 // case we're not jumping to any numbered cases.
2037 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
2038 DefaultCase = DS;
2039 continue;
2040 }
2041
2042 // Check to see if this case is the one we're looking for.
2043 const CaseStmt *CS = cast<CaseStmt>(Case);
2044 // Don't handle case ranges yet.
2045 if (CS->getRHS()) return false;
2046
2047 // If we found our case, remember it as 'case'.
2048 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
2049 break;
2050 }
2051
2052 // If we didn't find a matching case, we use a default if it exists, or we
2053 // elide the whole switch body!
2054 if (!Case) {
2055 // It is safe to elide the body of the switch if it doesn't contain labels
2056 // etc. If it is safe, return successfully with an empty ResultStmts list.
2057 if (!DefaultCase)
2058 return !CodeGenFunction::ContainsLabel(&S);
2059 Case = DefaultCase;
2060 }
2061
2062 // Ok, we know which case is being jumped to, try to collect all the
2063 // statements that follow it. This can fail for a variety of reasons. Also,
2064 // check to see that the recursive walk actually found our case statement.
2065 // Insane cases like this can fail to find it in the recursive walk since we
2066 // don't handle every stmt kind:
2067 // switch (4) {
2068 // while (1) {
2069 // case 4: ...
2070 bool FoundCase = false;
2071 ResultCase = Case;
2072 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
2073 ResultStmts) != CSFC_Failure &&
2074 FoundCase;
2075}
2076
2077static std::optional<SmallVector<uint64_t, 16>>
2078getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
2079 // Are there enough branches to weight them?
2080 if (Likelihoods.size() <= 1)
2081 return std::nullopt;
2082
2083 uint64_t NumUnlikely = 0;
2084 uint64_t NumNone = 0;
2085 uint64_t NumLikely = 0;
2086 for (const auto LH : Likelihoods) {
2087 switch (LH) {
2088 case Stmt::LH_Unlikely:
2089 ++NumUnlikely;
2090 break;
2091 case Stmt::LH_None:
2092 ++NumNone;
2093 break;
2094 case Stmt::LH_Likely:
2095 ++NumLikely;
2096 break;
2097 }
2098 }
2099
2100 // Is there a likelihood attribute used?
2101 if (NumUnlikely == 0 && NumLikely == 0)
2102 return std::nullopt;
2103
2104 // When multiple cases share the same code they can be combined during
2105 // optimization. In that case the weights of the branch will be the sum of
2106 // the individual weights. Make sure the combined sum of all neutral cases
2107 // doesn't exceed the value of a single likely attribute.
2108 // The additions both avoid divisions by 0 and make sure the weights of None
2109 // don't exceed the weight of Likely.
2110 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2111 const uint64_t None = Likely / (NumNone + 1);
2112 const uint64_t Unlikely = 0;
2113
2114 SmallVector<uint64_t, 16> Result;
2115 Result.reserve(Likelihoods.size());
2116 for (const auto LH : Likelihoods) {
2117 switch (LH) {
2118 case Stmt::LH_Unlikely:
2119 Result.push_back(Unlikely);
2120 break;
2121 case Stmt::LH_None:
2122 Result.push_back(None);
2123 break;
2124 case Stmt::LH_Likely:
2125 Result.push_back(Likely);
2126 break;
2127 }
2128 }
2129
2130 return Result;
2131}
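// Worked example (assumed inputs, for illustration): with one [[likely]]
// case and two unannotated cases, NumLikely == 1 and NumNone == 2, so
//   Likely   = INT32_MAX / 3 = 715827882
//   None     = Likely / 3    = 238609294
//   Unlikely = 0
// Even if the optimizer merges both neutral cases, their combined weight
// (477218588) stays below the single Likely weight, as intended above.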
2132
2133void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
2134 // Handle nested switch statements.
2135 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2136 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2137 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2138 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2139
2140 // See if we can constant fold the condition of the switch and therefore only
2141 // emit the live case statement (if any) of the switch.
2142 llvm::APSInt ConstantCondValue;
2143 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
2144 SmallVector<const Stmt *, 4> CaseStmts;
2145 const SwitchCase *Case = nullptr;
2146 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2147 getContext(), Case)) {
2148 if (Case)
2149 incrementProfileCounter(Case);
2150 RunCleanupsScope ExecutedScope(*this);
2151
2152 if (S.getInit())
2153 EmitStmt(S.getInit());
2154
2155 // Emit the condition variable if needed inside the entire cleanup scope
2156 // used by this special case for constant-folded switches.
2157 if (S.getConditionVariable())
2158 EmitDecl(*S.getConditionVariable());
2159
2160 // At this point, we are no longer "within" a switch instance, so
2161 // we can temporarily enforce this to ensure that any embedded case
2162 // statements are not emitted.
2163 SwitchInsn = nullptr;
2164
2165 // Okay, we can dead code eliminate everything except this case. Emit the
2166 // specified series of statements and we're good.
2167 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
2168 EmitStmt(CaseStmts[i]);
2169 incrementProfileCounter(&S);
2170
2171 // Now we want to restore the saved switch instance so that nested
2172 // switches continue to function properly.
2173 SwitchInsn = SavedSwitchInsn;
2174
2175 return;
2176 }
2177 }
2178
2179 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2180
2181 RunCleanupsScope ConditionScope(*this);
2182
2183 if (S.getInit())
2184 EmitStmt(S.getInit());
2185
2186 if (S.getConditionVariable())
2187 EmitDecl(*S.getConditionVariable());
2188 llvm::Value *CondV = EmitScalarExpr(S.getCond());
2189
2190 // Create basic block to hold stuff that comes after switch
2191 // statement. We also need to create a default block now so that
2192 // explicit case range tests can have a place to jump to on
2193 // failure.
2194 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2195 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
2196 if (PGO.haveRegionCounts()) {
2197 // Walk the SwitchCase list to find how many there are.
2198 uint64_t DefaultCount = 0;
2199 unsigned NumCases = 0;
2200 for (const SwitchCase *Case = S.getSwitchCaseList();
2201 Case;
2202 Case = Case->getNextSwitchCase()) {
2203 if (isa<DefaultStmt>(Case))
2204 DefaultCount = getProfileCount(Case);
2205 NumCases += 1;
2206 }
2207 SwitchWeights = new SmallVector<uint64_t, 16>();
2208 SwitchWeights->reserve(NumCases);
2209 // The default needs to be first. We store the edge count, so we already
2210 // know the right weight.
2211 SwitchWeights->push_back(DefaultCount);
2212 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2213 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2214 // Initialize the default case.
2215 SwitchLikelihood->push_back(Stmt::LH_None);
2216 }
2217
2218 CaseRangeBlock = DefaultBlock;
2219
2220 // Clear the insertion point to indicate we are in unreachable code.
2221 Builder.ClearInsertionPoint();
2222
2223 // All break statements jump to SwitchExit. If BreakContinueStack is
2224 // non-empty, then reuse the last ContinueBlock.
2225 JumpDest OuterContinue;
2226 if (!BreakContinueStack.empty())
2227 OuterContinue = BreakContinueStack.back().ContinueBlock;
2228
2229 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
2230
2231 // Emit switch body.
2232 EmitStmt(S.getBody());
2233
2234 BreakContinueStack.pop_back();
2235
2236 // Update the default block in case explicit case range tests have
2237 // been chained on top.
2238 SwitchInsn->setDefaultDest(CaseRangeBlock);
2239
2240 // If a default was never emitted:
2241 if (!DefaultBlock->getParent()) {
2242 // If we have cleanups, emit the default block so that there's a
2243 // place to jump through the cleanups from.
2244 if (ConditionScope.requiresCleanups()) {
2245 EmitBlock(DefaultBlock);
2246
2247 // Otherwise, just forward the default block to the switch end.
2248 } else {
2249 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2250 delete DefaultBlock;
2251 }
2252 }
2253
2254 ConditionScope.ForceCleanup();
2255
2256 // Emit continuation.
2257 EmitBlock(SwitchExit.getBlock(), true);
2258 incrementProfileCounter(&S);
2259
2260 // If the switch has a condition wrapped by __builtin_unpredictable,
2261 // create metadata that specifies that the switch is unpredictable.
2262 // Don't bother if not optimizing because that metadata would not be used.
2263 auto *Call = dyn_cast<CallExpr>(S.getCond());
2264 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2265 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2266 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2267 llvm::MDBuilder MDHelper(getLLVMContext());
2268 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2269 MDHelper.createUnpredictable());
2270 }
2271 }
2272
2273 if (SwitchWeights) {
2274 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2275 "switch weights do not match switch cases");
2276 // If there's only one jump destination there's no sense weighting it.
2277 if (SwitchWeights->size() > 1)
2278 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2279 createProfileWeights(*SwitchWeights));
2280 delete SwitchWeights;
2281 } else if (SwitchLikelihood) {
2282 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2283 "switch likelihoods do not match switch cases");
2284 std::optional<SmallVector<uint64_t, 16>> LHW =
2285 getLikelihoodWeights(*SwitchLikelihood);
2286 if (LHW) {
2287 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2288 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2289 createProfileWeights(*LHW));
2290 }
2291 delete SwitchLikelihood;
2292 }
2293 SwitchInsn = SavedSwitchInsn;
2294 SwitchWeights = SavedSwitchWeights;
2295 SwitchLikelihood = SavedSwitchLikelihood;
2296 CaseRangeBlock = SavedCRBlock;
2297}
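// Hedged usage sketch of the unpredictable-switch detection above: at -O1
// and higher, wrapping the condition marks the emitted switch instruction
// with !unpredictable metadata:
//
//   switch (__builtin_unpredictable(tag)) { case 0: ...; default: ...; }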
2298
2299static std::string
2300SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2301 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
2302 std::string Result;
2303
2304 while (*Constraint) {
2305 switch (*Constraint) {
2306 default:
2307 Result += Target.convertConstraint(Constraint);
2308 break;
2309 // Ignore these
2310 case '*':
2311 case '?':
2312 case '!':
2313 case '=': // Will see this and the following in multi-alternative constraints.
2314 case '+':
2315 break;
2316 case '#': // Ignore the rest of the constraint alternative.
2317 while (Constraint[1] && Constraint[1] != ',')
2318 Constraint++;
2319 break;
2320 case '&':
2321 case '%':
2322 Result += *Constraint;
2323 while (Constraint[1] && Constraint[1] == *Constraint)
2324 Constraint++;
2325 break;
2326 case ',':
2327 Result += "|";
2328 break;
2329 case 'g':
2330 Result += "imr";
2331 break;
2332 case '[': {
2333 assert(OutCons &&
2334 "Must pass output names to constraints with a symbolic name");
2335 unsigned Index;
2336 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2337 assert(result && "Could not resolve symbolic name"); (void)result;
2338 Result += llvm::utostr(Index);
2339 break;
2340 }
2341 }
2342
2343 Constraint++;
2344 }
2345
2346 return Result;
2347}
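// Illustrative rewrites performed by the loop above (inferred behavior on
// typical targets, not an exhaustive specification):
//   "g"   -> "imr"   'g' (general operand) expands to imm/mem/reg
//   "r,m" -> "r|m"   multi-alternative commas become '|'
//   "*m"  -> "m"     '*' is dropped; 'm' passes through convertConstraint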
2348
2349/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
2350/// as using a particular register add that as a constraint that will be used
2351/// in this asm stmt.
2352static std::string
2353AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2354 const TargetInfo &Target, CodeGenModule &CGM,
2355 const AsmStmt &Stmt, const bool EarlyClobber,
2356 std::string *GCCReg = nullptr) {
2357 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2358 if (!AsmDeclRef)
2359 return Constraint;
2360 const ValueDecl &Value = *AsmDeclRef->getDecl();
2361 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2362 if (!Variable)
2363 return Constraint;
2364 if (Variable->getStorageClass() != SC_Register)
2365 return Constraint;
2366 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2367 if (!Attr)
2368 return Constraint;
2369 StringRef Register = Attr->getLabel();
2370 assert(Target.isValidGCCRegisterName(Register));
2371 // We're using validateOutputConstraint here because we only care if
2372 // this is a register constraint.
2373 TargetInfo::ConstraintInfo Info(Constraint, "");
2374 if (Target.validateOutputConstraint(Info) &&
2375 !Info.allowsRegister()) {
2376 CGM.ErrorUnsupported(&Stmt, "__asm__");
2377 return Constraint;
2378 }
2379 // Canonicalize the register here before returning it.
2380 Register = Target.getNormalizedGCCRegisterName(Register);
2381 if (GCCReg != nullptr)
2382 *GCCReg = Register.str();
2383 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2384}
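// Hedged sketch of the source pattern handled above (x86-64 register names,
// illustrative): an operand that names a register variable is pinned to that
// physical register.
//
//   register unsigned rv asm("r10") = seed();
//   asm volatile("..." : "+r"(rv));  // "r" becomes "{r10}" here
//                                    // ("&{r10}" when earlyclobber)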
2385
2386std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2387 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2388 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2389 if (Info.allowsRegister() || !Info.allowsMemory()) {
2390 if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2391 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2392
2393 llvm::Type *Ty = ConvertType(InputType);
2394 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2395 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2396 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2397 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2398
2399 return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
2400 nullptr};
2401 }
2402 }
2403
2404 Address Addr = InputValue.getAddress();
2405 ConstraintStr += '*';
2406 return {InputValue.getPointer(*this), Addr.getElementType()};
2407}
2408
2409std::pair<llvm::Value *, llvm::Type *>
2410CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2411 const Expr *InputExpr,
2412 std::string &ConstraintStr) {
2413 // If this can't be a register or memory, i.e., has to be a constant
2414 // (immediate or symbolic), try to emit it as such.
2415 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2416 if (Info.requiresImmediateConstant()) {
2417 Expr::EvalResult EVResult;
2418 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2419
2420 llvm::APSInt IntResult;
2421 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2422 getContext()))
2423 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2424 }
2425
2426 Expr::EvalResult Result;
2427 if (InputExpr->EvaluateAsInt(Result, getContext()))
2428 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2429 nullptr};
2430 }
2431
2432 if (Info.allowsRegister() || !Info.allowsMemory())
2433 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2434 return {EmitScalarExpr(InputExpr), nullptr};
2435 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2436 return {EmitScalarExpr(InputExpr), nullptr};
2437 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2438 LValue Dest = EmitLValue(InputExpr);
2439 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2440 InputExpr->getExprLoc());
2441}
2442
2443/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2444/// asm call instruction. The !srcloc MDNode contains a list of constant
2445/// integers which are the source locations of the start of each line in the
2446/// asm.
2447static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2448 CodeGenFunction &CGF) {
2449 SmallVector<llvm::Metadata *, 8> Locs;
2450 // Add the location of the first line to the MDNode.
2451 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2452 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2453 StringRef StrVal = Str->getString();
2454 if (!StrVal.empty()) {
2455 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2456 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2457 unsigned StartToken = 0;
2458 unsigned ByteOffset = 0;
2459
2460 // Add the location of the start of each subsequent line of the asm to the
2461 // MDNode.
2462 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2463 if (StrVal[i] != '\n') continue;
2464 SourceLocation LineLoc = Str->getLocationOfByte(
2465 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2466 Locs.push_back(llvm::ConstantAsMetadata::get(
2467 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2468 }
2469 }
2470
2471 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2472}
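// Illustrative input (hypothetical): for a two-line asm string such as
//
//   asm("movl %1, %%eax\n\t"
//       "addl %%eax, %0" : "=r"(y) : "r"(x));
//
// the !srcloc node holds two encoded SourceLocations, one per line, so
// backend diagnostics can point at the exact offending asm line.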
2473
2474static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2475 bool HasUnwindClobber, bool ReadOnly,
2476 bool ReadNone, bool NoMerge, bool NoConvergent,
2477 const AsmStmt &S,
2478 const std::vector<llvm::Type *> &ResultRegTypes,
2479 const std::vector<llvm::Type *> &ArgElemTypes,
2480 CodeGenFunction &CGF,
2481 std::vector<llvm::Value *> &RegResults) {
2482 if (!HasUnwindClobber)
2483 Result.addFnAttr(llvm::Attribute::NoUnwind);
2484
2485 if (NoMerge)
2486 Result.addFnAttr(llvm::Attribute::NoMerge);
2487 // Attach readnone and readonly attributes.
2488 if (!HasSideEffect) {
2489 if (ReadNone)
2490 Result.setDoesNotAccessMemory();
2491 else if (ReadOnly)
2492 Result.setOnlyReadsMemory();
2493 }
2494
2495 // Add elementtype attribute for indirect constraints.
2496 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2497 if (Pair.value()) {
2498 auto Attr = llvm::Attribute::get(
2499 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2500 Result.addParamAttr(Pair.index(), Attr);
2501 }
2502 }
2503
2504 // Slap the source location of the inline asm into a !srcloc metadata on the
2505 // call.
2506 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2507 Result.setMetadata("srcloc",
2508 getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2509 else {
2510 // At least put the line number on MS inline asm blobs.
2511 llvm::Constant *Loc =
2512 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2513 Result.setMetadata("srcloc",
2514 llvm::MDNode::get(CGF.getLLVMContext(),
2515 llvm::ConstantAsMetadata::get(Loc)));
2516 }
2517
2518 if (!NoConvergent && CGF.getLangOpts().assumeFunctionsAreConvergent())
2519 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2520 // convergent (meaning, they may call an intrinsically convergent op, such
2521 // as bar.sync, and so can't have certain optimizations applied around
2522 // them) unless it's explicitly marked 'noconvergent'.
2523 Result.addFnAttr(llvm::Attribute::Convergent);
2524 // Extract all of the register value results from the asm.
2525 if (ResultRegTypes.size() == 1) {
2526 RegResults.push_back(&Result);
2527 } else {
2528 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2529 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2530 RegResults.push_back(Tmp);
2531 }
2532 }
2533}
2534
2535static void
2536EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
2537 const llvm::ArrayRef<llvm::Value *> RegResults,
2538 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2539 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2540 const llvm::ArrayRef<LValue> ResultRegDests,
2541 const llvm::ArrayRef<QualType> ResultRegQualTys,
2542 const llvm::BitVector &ResultTypeRequiresCast,
2543 const llvm::BitVector &ResultRegIsFlagReg) {
2544 CGBuilderTy &Builder = CGF.Builder;
2545 CodeGenModule &CGM = CGF.CGM;
2546 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2547
2548 assert(RegResults.size() == ResultRegTypes.size());
2549 assert(RegResults.size() == ResultTruncRegTypes.size());
2550 assert(RegResults.size() == ResultRegDests.size());
2551 // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
2552 // in which case its size may grow.
2553 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2554 assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2555
2556 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2557 llvm::Value *Tmp = RegResults[i];
2558 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2559
2560 if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2561 // Target must guarantee the Value `Tmp` here is lowered to a boolean
2562 // value.
2563 llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
2564 llvm::Value *IsBooleanValue =
2565 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
2566 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2567 Builder.CreateCall(FnAssume, IsBooleanValue);
2568 }
2569
2570 // If the result type of the LLVM IR asm doesn't match the result type of
2571 // the expression, do the conversion.
2572 if (ResultRegTypes[i] != TruncTy) {
2573
2574 // Truncate the integer result to the right size; note that TruncTy can be
2575 // a pointer.
2576 if (TruncTy->isFloatingPointTy())
2577 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2578 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2579 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2580 Tmp = Builder.CreateTrunc(
2581 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2582 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2583 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2584 uint64_t TmpSize =
2585 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2586 Tmp = Builder.CreatePtrToInt(
2587 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2588 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2589 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2590 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2591 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2592 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2593 }
2594 }
2595
2596 LValue Dest = ResultRegDests[i];
2597 // ResultTypeRequiresCast elements correspond to the first
2598 // ResultTypeRequiresCast.size() elements of RegResults.
2599 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2600 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2601 Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
2602 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2603 Builder.CreateStore(Tmp, A);
2604 continue;
2605 }
2606
2607 QualType Ty =
2608 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2609 if (Ty.isNull()) {
2610 const Expr *OutExpr = S.getOutputExpr(i);
2611 CGM.getDiags().Report(OutExpr->getExprLoc(),
2612 diag::err_store_value_to_reg);
2613 return;
2614 }
2615 Dest = CGF.MakeAddrLValue(A, Ty);
2616 }
2617 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2618 }
2619}
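// Hedged example of the "{@cc...}" flag-output path above (x86 condition
// codes; variable names are illustrative):
//
//   int eq;
//   asm("cmpl %2, %1" : "=@ccz"(eq) : "r"(a), "r"(b));
//
// The raw register result is guaranteed to be 0 or 1, which the
// llvm.assume of "value < 2" emitted above communicates to the optimizer
// before the value is widened and stored to 'eq'.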
2620
2621static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
2622 const AsmStmt &S) {
2623 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2624
2625 StringRef Asm;
2626 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2627 Asm = GCCAsm->getAsmString()->getString();
2628
2629 auto &Ctx = CGF->CGM.getLLVMContext();
2630
2631 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2632 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2633 {StrTy->getType()}, false);
2634 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2635
2636 CGF->Builder.CreateCall(UBF, {StrTy});
2637}
2638
2639void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2640 // Pop all cleanup blocks at the end of the asm statement.
2641 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2642
2643 // Assemble the final asm string.
2644 std::string AsmString = S.generateAsmString(getContext());
2645
2646 // Get all the output and input constraints together.
2647 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2648 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2649
2650 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2651 bool IsValidTargetAsm = true;
2652 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2653 StringRef Name;
2654 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2655 Name = GAS->getOutputName(i);
2656 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2657 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2658 if (IsHipStdPar && !IsValid)
2659 IsValidTargetAsm = false;
2660 else
2661 assert(IsValid && "Failed to parse output constraint");
2662 OutputConstraintInfos.push_back(Info);
2663 }
2664
2665 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2666 StringRef Name;
2667 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2668 Name = GAS->getInputName(i);
2669 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2670 bool IsValid =
2671 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2672 if (IsHipStdPar && !IsValid)
2673 IsValidTargetAsm = false;
2674 else
2675 assert(IsValid && "Failed to parse input constraint");
2676 InputConstraintInfos.push_back(Info);
2677 }
2678
2679 if (!IsValidTargetAsm)
2680 return EmitHipStdParUnsupportedAsm(this, S);
2681
2682 std::string Constraints;
2683
2684 std::vector<LValue> ResultRegDests;
2685 std::vector<QualType> ResultRegQualTys;
2686 std::vector<llvm::Type *> ResultRegTypes;
2687 std::vector<llvm::Type *> ResultTruncRegTypes;
2688 std::vector<llvm::Type *> ArgTypes;
2689 std::vector<llvm::Type *> ArgElemTypes;
2690 std::vector<llvm::Value*> Args;
2691 llvm::BitVector ResultTypeRequiresCast;
2692 llvm::BitVector ResultRegIsFlagReg;
2693
2694 // Keep track of inout constraints.
2695 std::string InOutConstraints;
2696 std::vector<llvm::Value*> InOutArgs;
2697 std::vector<llvm::Type*> InOutArgTypes;
2698 std::vector<llvm::Type*> InOutArgElemTypes;
2699
2700 // Keep track of out constraints for tied input operand.
2701 std::vector<std::string> OutputConstraints;
2702
2703 // Keep track of defined physregs.
2704 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2705
2706 // An inline asm can be marked readonly if it meets the following conditions:
2707 // - it doesn't have any sideeffects
2708 // - it doesn't clobber memory
2709 // - it doesn't return a value by-reference
2710 // It can be marked readnone if it doesn't have any input memory constraints
2711 // in addition to meeting the conditions listed above.
2712 bool ReadOnly = true, ReadNone = true;
2713
2714 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2715 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2716
2717 // Simplify the output constraint.
2718 std::string OutputConstraint(S.getOutputConstraint(i));
2719 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2720 getTarget(), &OutputConstraintInfos);
2721
2722 const Expr *OutExpr = S.getOutputExpr(i);
2723 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2724
2725 std::string GCCReg;
2726 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2727 getTarget(), CGM, S,
2728 Info.earlyClobber(),
2729 &GCCReg);
2730 // Give an error on multiple outputs to the same physreg.
2731 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2732 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2733
2734 OutputConstraints.push_back(OutputConstraint);
2735 LValue Dest = EmitLValue(OutExpr);
2736 if (!Constraints.empty())
2737 Constraints += ',';
2738
2739 // If this is a register output, then make the inline asm return it
2740 // by-value. If this is a memory result, return the value by-reference.
2741 QualType QTy = OutExpr->getType();
2742 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2743 hasAggregateEvaluationKind(QTy);
2744 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2745
2746 Constraints += "=" + OutputConstraint;
2747 ResultRegQualTys.push_back(QTy);
2748 ResultRegDests.push_back(Dest);
2749
2750 bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc");
2751 ResultRegIsFlagReg.push_back(IsFlagReg);
2752
2753 llvm::Type *Ty = ConvertTypeForMem(QTy);
2754 const bool RequiresCast = Info.allowsRegister() &&
2755 (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2756 Ty->isAggregateType());
2757
2758 ResultTruncRegTypes.push_back(Ty);
2759 ResultTypeRequiresCast.push_back(RequiresCast);
2760
2761 if (RequiresCast) {
2762 unsigned Size = getContext().getTypeSize(QTy);
2763 if (Size)
2764 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2765 else
2766 CGM.Error(OutExpr->getExprLoc(), "output size should not be zero");
2767 }
2768 ResultRegTypes.push_back(Ty);
2769 // If this output is tied to an input, and if the input is larger, then
2770 // we need to set the actual result type of the inline asm node to be the
2771 // same as the input type.
2772 if (Info.hasMatchingInput()) {
2773 unsigned InputNo;
2774 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2775 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2776 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2777 break;
2778 }
2779 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2780
2781 QualType InputTy = S.getInputExpr(InputNo)->getType();
2782 QualType OutputType = OutExpr->getType();
2783
2784 uint64_t InputSize = getContext().getTypeSize(InputTy);
2785 if (getContext().getTypeSize(OutputType) < InputSize) {
2786 // Form the asm to return the value as a larger integer or fp type.
2787 ResultRegTypes.back() = ConvertType(InputTy);
2788 }
2789 }
2790 if (llvm::Type* AdjTy =
2791 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2792 ResultRegTypes.back()))
2793 ResultRegTypes.back() = AdjTy;
2794 else {
2795 CGM.getDiags().Report(S.getAsmLoc(),
2796 diag::err_asm_invalid_type_in_input)
2797 << OutExpr->getType() << OutputConstraint;
2798 }
2799
2800 // Update largest vector width for any vector types.
2801 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2802 LargestVectorWidth =
2803 std::max((uint64_t)LargestVectorWidth,
2804 VT->getPrimitiveSizeInBits().getKnownMinValue());
2805 } else {
2806 Address DestAddr = Dest.getAddress();
2807 // Matrix types in memory are represented by arrays, but accessed through
2808 // vector pointers, with the alignment specified on the access operation.
2809 // For inline assembly, update pointer arguments to use vector pointers.
2810 // Otherwise there will be a mismatch if the matrix is also an
2811 // input argument, which is represented as a vector.
2812 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2813 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2814
2815 ArgTypes.push_back(DestAddr.getType());
2816 ArgElemTypes.push_back(DestAddr.getElementType());
2817 Args.push_back(DestAddr.emitRawPointer(*this));
2818 Constraints += "=*";
2819 Constraints += OutputConstraint;
2820 ReadOnly = ReadNone = false;
2821 }
2822
2823 if (Info.isReadWrite()) {
2824 InOutConstraints += ',';
2825
2826 const Expr *InputExpr = S.getOutputExpr(i);
2827 llvm::Value *Arg;
2828 llvm::Type *ArgElemType;
2829 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2830 Info, Dest, InputExpr->getType(), InOutConstraints,
2831 InputExpr->getExprLoc());
2832
2833 if (llvm::Type* AdjTy =
2834 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2835 Arg->getType()))
2836 Arg = Builder.CreateBitCast(Arg, AdjTy);
2837
2838 // Update largest vector width for any vector types.
2839 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2840 LargestVectorWidth =
2841 std::max((uint64_t)LargestVectorWidth,
2842 VT->getPrimitiveSizeInBits().getKnownMinValue());
2843 // Only tie earlyclobber physregs.
2844 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2845 InOutConstraints += llvm::utostr(i);
2846 else
2847 InOutConstraints += OutputConstraint;
2848
2849 InOutArgTypes.push_back(Arg->getType());
2850 InOutArgElemTypes.push_back(ArgElemType);
2851 InOutArgs.push_back(Arg);
2852 }
2853 }
2854
2855 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2856 // to the return value slot. Only do this when returning in registers.
2857 if (isa<MSAsmStmt>(&S)) {
2858 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2859 if (RetAI.isDirect() || RetAI.isExtend()) {
2860 // Make a fake lvalue for the return value slot.
2861 LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy);
2862 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2863 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2864 ResultRegDests, AsmString, S.getNumOutputs());
2865 SawAsmBlock = true;
2866 }
2867 }
2868
2869 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2870 const Expr *InputExpr = S.getInputExpr(i);
2871
2872 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2873
2874 if (Info.allowsMemory())
2875 ReadNone = false;
2876
2877 if (!Constraints.empty())
2878 Constraints += ',';
2879
2880 // Simplify the input constraint.
2881 std::string InputConstraint(S.getInputConstraint(i));
2882 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2883 &OutputConstraintInfos);
2884
2885 InputConstraint = AddVariableConstraints(
2886 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2887 getTarget(), CGM, S, false /* No EarlyClobber */);
2888
2889 std::string ReplaceConstraint (InputConstraint);
2890 llvm::Value *Arg;
2891 llvm::Type *ArgElemType;
2892 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
2893
2894 // If this input argument is tied to a larger output result, extend the
2895 // input to be the same size as the output. The LLVM backend wants to see
2896 // the input and output of a matching constraint be the same size. Note
2897 // that GCC does not define what the top bits are here. We use zext because
2898 // that is usually cheaper, but LLVM IR should really get an anyext someday.
2899 if (Info.hasTiedOperand()) {
2900 unsigned Output = Info.getTiedOperand();
2901 QualType OutputType = S.getOutputExpr(Output)->getType();
2902 QualType InputTy = InputExpr->getType();
2903
2904 if (getContext().getTypeSize(OutputType) >
2905 getContext().getTypeSize(InputTy)) {
2906 // Use ptrtoint as appropriate so that we can do our extension.
2907 if (isa<llvm::PointerType>(Arg->getType()))
2908 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2909 llvm::Type *OutputTy = ConvertType(OutputType);
2910 if (isa<llvm::IntegerType>(OutputTy))
2911 Arg = Builder.CreateZExt(Arg, OutputTy);
2912 else if (isa<llvm::PointerType>(OutputTy))
2913 Arg = Builder.CreateZExt(Arg, IntPtrTy);
2914 else if (OutputTy->isFloatingPointTy())
2915 Arg = Builder.CreateFPExt(Arg, OutputTy);
2916 }
2917 // Deal with the tied operands' constraint code in adjustInlineAsmType.
2918 ReplaceConstraint = OutputConstraints[Output];
2919 }
2920 if (llvm::Type* AdjTy =
2921 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2922 Arg->getType()))
2923 Arg = Builder.CreateBitCast(Arg, AdjTy);
2924 else
2925 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2926 << InputExpr->getType() << InputConstraint;
2927
2928 // Update largest vector width for any vector types.
2929 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2930 LargestVectorWidth =
2931 std::max((uint64_t)LargestVectorWidth,
2932 VT->getPrimitiveSizeInBits().getKnownMinValue());
2933
2934 ArgTypes.push_back(Arg->getType());
2935 ArgElemTypes.push_back(ArgElemType);
2936 Args.push_back(Arg);
2937 Constraints += InputConstraint;
2938 }
2939
2940 // Append the "input" part of inout constraints.
2941 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2942 ArgTypes.push_back(InOutArgTypes[i]);
2943 ArgElemTypes.push_back(InOutArgElemTypes[i]);
2944 Args.push_back(InOutArgs[i]);
2945 }
2946 Constraints += InOutConstraints;
2947
2948 // Labels
2949 SmallVector<llvm::BasicBlock *, 16> Transfer;
2950 llvm::BasicBlock *Fallthrough = nullptr;
2951 bool IsGCCAsmGoto = false;
2952 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2953 IsGCCAsmGoto = GS->isAsmGoto();
2954 if (IsGCCAsmGoto) {
2955 for (const auto *E : GS->labels()) {
2956 JumpDest Dest = getJumpDestForLabel(E->getLabel());
2957 Transfer.push_back(Dest.getBlock());
2958 if (!Constraints.empty())
2959 Constraints += ',';
2960 Constraints += "!i";
2961 }
2962 Fallthrough = createBasicBlock("asm.fallthrough");
2963 }
2964 }
2965
2966 bool HasUnwindClobber = false;
2967
2968 // Clobbers
2969 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2970 StringRef Clobber = S.getClobber(i);
2971
2972 if (Clobber == "memory")
2973 ReadOnly = ReadNone = false;
2974 else if (Clobber == "unwind") {
2975 HasUnwindClobber = true;
2976 continue;
2977 } else if (Clobber != "cc") {
2978 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2979 if (CGM.getCodeGenOpts().StackClashProtector &&
2980 getTarget().isSPRegName(Clobber)) {
2981 CGM.getDiags().Report(S.getAsmLoc(),
2982 diag::warn_stack_clash_protection_inline_asm);
2983 }
2984 }
2985
2986 if (isa<MSAsmStmt>(&S)) {
2987 if (Clobber == "eax" || Clobber == "edx") {
2988 if (Constraints.find("=&A") != std::string::npos)
2989 continue;
2990 std::string::size_type position1 =
2991 Constraints.find("={" + Clobber.str() + "}");
2992 if (position1 != std::string::npos) {
2993 Constraints.insert(position1 + 1, "&");
2994 continue;
2995 }
2996 std::string::size_type position2 = Constraints.find("=A");
2997 if (position2 != std::string::npos) {
2998 Constraints.insert(position2 + 1, "&");
2999 continue;
3000 }
3001 }
3002 }
3003 if (!Constraints.empty())
3004 Constraints += ',';
3005
3006 Constraints += "~{";
3007 Constraints += Clobber;
3008 Constraints += '}';
3009 }
3010
3011 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
3012 "unwind clobber can't be used with asm goto");
3013
3014 // Add machine specific clobbers
3015 std::string_view MachineClobbers = getTarget().getClobbers();
3016 if (!MachineClobbers.empty()) {
3017 if (!Constraints.empty())
3018 Constraints += ',';
3019 Constraints += MachineClobbers;
3020 }
3021
3022 llvm::Type *ResultType;
3023 if (ResultRegTypes.empty())
3024 ResultType = VoidTy;
3025 else if (ResultRegTypes.size() == 1)
3026 ResultType = ResultRegTypes[0];
3027 else
3028 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
3029
3030 llvm::FunctionType *FTy =
3031 llvm::FunctionType::get(ResultType, ArgTypes, false);
3032
3033 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3034
3035 llvm::InlineAsm::AsmDialect GnuAsmDialect =
3036 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
3037 ? llvm::InlineAsm::AD_ATT
3038 : llvm::InlineAsm::AD_Intel;
3039 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
3040 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3041
3042 llvm::InlineAsm *IA = llvm::InlineAsm::get(
3043 FTy, AsmString, Constraints, HasSideEffect,
3044 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
3045 std::vector<llvm::Value*> RegResults;
3046 llvm::CallBrInst *CBR;
3047 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3048 CBRRegResults;
3049 if (IsGCCAsmGoto) {
3050 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
3051 EmitBlock(Fallthrough);
3052 UpdateAsmCallInst(*CBR, HasSideEffect, /*HasUnwindClobber=*/false, ReadOnly,
3053 ReadNone, InNoMergeAttributedStmt,
3054 InNoConvergentAttributedStmt, S, ResultRegTypes,
3055 ArgElemTypes, *this, RegResults);
3056 // Because we are emitting code top to bottom, we don't have enough
3057 // information at this point to know precisely whether we have a critical
3058 // edge. If we have outputs, split all indirect destinations.
3059 if (!RegResults.empty()) {
3060 unsigned i = 0;
3061 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3062 llvm::Twine SynthName = Dest->getName() + ".split";
3063 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
3064 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3065 Builder.SetInsertPoint(SynthBB);
3066
3067 if (ResultRegTypes.size() == 1) {
3068 CBRRegResults[SynthBB].push_back(CBR);
3069 } else {
3070 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3071 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
3072 CBRRegResults[SynthBB].push_back(Tmp);
3073 }
3074 }
3075
3076 EmitBranch(Dest);
3077 EmitBlock(SynthBB);
3078 CBR->setIndirectDest(i++, SynthBB);
3079 }
3080 }
3081 } else if (HasUnwindClobber) {
3082 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
3083 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/true,
3084 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3085 InNoConvergentAttributedStmt, S, ResultRegTypes,
3086 ArgElemTypes, *this, RegResults);
3087 } else {
3088 llvm::CallInst *Result =
3089 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
3090 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/false,
3091 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3092 InNoConvergentAttributedStmt, S, ResultRegTypes,
3093 ArgElemTypes, *this, RegResults);
3094 }
3095
3096 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3097 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3098 ResultRegIsFlagReg);
3099
3100 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
3101 // different insertion point; one for each indirect destination and with
3102 // CBRRegResults rather than RegResults.
3103 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3104 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3105 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3106 Builder.SetInsertPoint(Succ, --(Succ->end()));
3107 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
3108 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3109 ResultTypeRequiresCast, ResultRegIsFlagReg);
3110 }
3111 }
3112}
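// A hedged sketch of the "asm goto with outputs" case handled above:
//
//   int out;
//   asm goto("..." : "=r"(out) : : : onerr);
//   use(out);        // fallthrough edge
//  onerr: ;          // indirect edge
//
// This lowers to a callbr instruction; each indirect destination gets a
// synthesized ".split" block so the output stores can be repeated on every
// edge without creating critical-edge problems.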
3113
3114LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
3115 const RecordDecl *RD = S.getCapturedRecordDecl();
3116 QualType RecordTy = getContext().getRecordType(RD);
3117
3118 // Initialize the captured struct.
3119 LValue SlotLV =
3120 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
3121
3122 RecordDecl::field_iterator CurField = RD->field_begin();
3123 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
3124 E = S.capture_init_end();
3125 I != E; ++I, ++CurField) {
3126 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
3127 if (CurField->hasCapturedVLAType()) {
3128 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
3129 } else {
3130 EmitInitializerForField(*CurField, LV, *I);
3131 }
3132 }
3133
3134 return SlotLV;
3135}
3136
3137/// Generate an outlined function for the body of a CapturedStmt, store any
3138/// captured variables into the captured struct, and call the outlined function.
3139llvm::Function *
3140CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
3141 LValue CapStruct = InitCapturedStruct(S);
3142
3143 // Emit the CapturedDecl
3144 CodeGenFunction CGF(CGM, true);
3145 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3146 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3147 delete CGF.CapturedStmtInfo;
3148
3149 // Emit call to the helper function.
3150 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
3151
3152 return F;
3153}
3154
3155Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
3156 LValue CapStruct = InitCapturedStruct(S);
3157 return CapStruct.getAddress();
3158}
3159
3160/// Creates the outlined function for a CapturedStmt.
3161llvm::Function *
3162CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
3163 assert(CapturedStmtInfo &&
3164 "CapturedStmtInfo should be set when generating the captured function");
3165 const CapturedDecl *CD = S.getCapturedDecl();
3166 const RecordDecl *RD = S.getCapturedRecordDecl();
3167 SourceLocation Loc = S.getBeginLoc();
3168 assert(CD->hasBody() && "missing CapturedDecl body");
3169
3170 // Build the argument list.
3171 ASTContext &Ctx = CGM.getContext();
3172 FunctionArgList Args;
3173 Args.append(CD->param_begin(), CD->param_end());
3174
3175 // Create the function declaration.
3176 const CGFunctionInfo &FuncInfo =
3177 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
3178 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
3179
3180 llvm::Function *F =
3181 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3182 CapturedStmtInfo->getHelperName(), &CGM.getModule());
3183 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
3184 if (CD->isNothrow())
3185 F->addFnAttr(llvm::Attribute::NoUnwind);
3186
3187 // Generate the function.
3188 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
3189 CD->getBody()->getBeginLoc());
3190 // Set the context parameter in CapturedStmtInfo.
3191 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
3192 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
3193
3194 // Initialize variable-length arrays.
3195 LValue Base = MakeNaturalAlignRawAddrLValue(
3196 CapturedStmtInfo->getContextValue(), Ctx.getTagDeclType(RD));
3197 for (auto *FD : RD->fields()) {
3198 if (FD->hasCapturedVLAType()) {
3199 auto *ExprArg =
3200 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
3201 .getScalarVal();
3202 auto VAT = FD->getCapturedVLAType();
3203 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3204 }
3205 }
3206
3207 // If 'this' is captured, load it into CXXThisValue.
3208 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3209 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3208 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3209 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3210 LValue ThisLValue = EmitLValueForField(Base, FD);
3211 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3212 }
3213
3214 PGO.assignRegionCounters(GlobalDecl(CD), F);
3215 CapturedStmtInfo->EmitBody(*this, CD->getBody());
3216 FinishFunction(CD->getBodyRBrace());
3217
3218 return F;
3219}
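// Rough shape of the generated helper (editor's sketch; the parameter list
// is the CapturedDecl's params, the first being the context pointer):
//
//   define internal void @__captured_stmt(ptr %__context) #0 {
//     ; load VLA bounds and, if captured, 'this' from %__context,
//     ; then emit the captured body and finish the function
//   }
//
// where #0 carries nounwind when the CapturedDecl is nothrow.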
3220
3221namespace {
3222// Returns the first convergence entry/loop/anchor instruction found in |BB|,
3223// or nullptr if there is none.
3224llvm::IntrinsicInst *getConvergenceToken(llvm::BasicBlock *BB) {
3225 for (auto &I : *BB) {
3226 auto *II = dyn_cast<llvm::IntrinsicInst>(&I);
3227 if (II && llvm::isConvergenceControlIntrinsic(II->getIntrinsicID()))
3228 return II;
3229 }
3230 return nullptr;
3231}
3232
3233} // namespace
3234
3235llvm::CallBase *
3236CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input,
3237 llvm::Value *ParentToken) {
3238 llvm::Value *bundleArgs[] = {ParentToken};
3239 llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
3240 auto Output = llvm::CallBase::addOperandBundle(
3241 Input, llvm::LLVMContext::OB_convergencectrl, OB, Input->getIterator());
3242 Input->replaceAllUsesWith(Output);
3243 Input->eraseFromParent();
3244 return Output;
3245}
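// Editor's sketch of the rewrite above, in IR terms:
//
//   before: %tok = call token @llvm.experimental.convergence.loop()
//   after:  %tok = call token @llvm.experimental.convergence.loop()
//                    [ "convergencectrl"(token %parent) ]
//
// Operand bundles cannot be added in place, so addOperandBundle builds a
// replacement call; the original is RAUW'd and erased.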
3246
3247llvm::IntrinsicInst *
3248CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB,
3249 llvm::Value *ParentToken) {
3250 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3251 if (BB->empty())
3252 Builder.SetInsertPoint(BB);
3253 else
3254 Builder.SetInsertPoint(BB->getFirstInsertionPt());
3255
3256 llvm::CallBase *CB = Builder.CreateIntrinsic(
3257 llvm::Intrinsic::experimental_convergence_loop, {}, {});
3258 Builder.restoreIP(IP);
3259
3260 llvm::CallBase *I = addConvergenceControlToken(CB, ParentToken);
3261 return cast<llvm::IntrinsicInst>(I);
3262}
3263
3264llvm::IntrinsicInst *
3265CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3266 llvm::BasicBlock *BB = &F->getEntryBlock();
3267 llvm::IntrinsicInst *Token = getConvergenceToken(BB);
3268 if (Token)
3269 return Token;
3270
3271 // Adding a convergence token requires the function to be marked as
3272 // convergent.
3273 F->setConvergent();
3274
3275 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3276 Builder.SetInsertPoint(&BB->front());
3277 llvm::CallBase *I = Builder.CreateIntrinsic(
3278 llvm::Intrinsic::experimental_convergence_entry, {}, {});
3279 assert(isa<llvm::IntrinsicInst>(I));
3280 Builder.restoreIP(IP);
3281
3282 return cast<llvm::IntrinsicInst>(I);
3283}
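// Net effect (illustrative): a convergent function's entry block begins with
//
//   %entry.tok = call token @llvm.experimental.convergence.entry()
//
// and each loop header chains a loop token off its parent via the
// "convergencectrl" bundle emitted by emitConvergenceLoopToken above.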