1//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Stmt nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGDebugInfo.h"
14#include "CGOpenMPRuntime.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "TargetInfo.h"
18#include "clang/AST/Attr.h"
19#include "clang/AST/Expr.h"
20#include "clang/AST/Stmt.h"
27#include "llvm/ADT/ArrayRef.h"
28#include "llvm/ADT/DenseMap.h"
29#include "llvm/ADT/SmallSet.h"
30#include "llvm/ADT/StringExtras.h"
31#include "llvm/IR/Assumptions.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/InlineAsm.h"
34#include "llvm/IR/Intrinsics.h"
35#include "llvm/IR/MDBuilder.h"
36#include "llvm/Support/SaveAndRestore.h"
37#include <optional>
38
39using namespace clang;
40using namespace CodeGen;
41
42//===----------------------------------------------------------------------===//
43// Statement Emission
44//===----------------------------------------------------------------------===//
45
46namespace llvm {
47extern cl::opt<bool> EnableSingleByteCoverage;
48} // namespace llvm
49
50void CodeGenFunction::EmitStopPoint(const Stmt *S) {
51 if (CGDebugInfo *DI = getDebugInfo()) {
52 SourceLocation Loc;
53 Loc = S->getBeginLoc();
54 DI->EmitLocation(Builder, Loc);
55
56 LastStopPoint = Loc;
57 }
58}
59
60void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
61 assert(S && "Null statement?");
62 PGO.setCurrentStmt(S);
63
64 // These statements have their own debug info handling.
65 if (EmitSimpleStmt(S, Attrs))
66 return;
67
68 // Check if we are generating unreachable code.
69 if (!HaveInsertPoint()) {
70 // If so, and the statement doesn't contain a label, then we do not need to
71 // generate actual code. This is safe because (1) the current point is
72 // unreachable, so we don't need to execute the code, and (2) we've already
73 // handled the statements which update internal data structures (like the
74 // local variable map) which could be used by subsequent statements.
75 if (!ContainsLabel(S)) {
76 // Verify that any decl statements were handled as simple, they may be in
77 // scope of subsequent reachable statements.
78 assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
79 return;
80 }
81
82 // Otherwise, make a new block to hold the code.
83 EnsureInsertPoint();
84 }
85
86 // Generate a stoppoint if we are emitting debug info.
87 EmitStopPoint(S);
88
89 // Ignore all OpenMP directives except for simd if OpenMP with Simd is
90 // enabled.
91 if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
92 if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
93 EmitSimpleOMPExecutableDirective(*D);
94 return;
95 }
96 }
97
98 switch (S->getStmtClass()) {
99 case Stmt::NoStmtClass:
100 case Stmt::CXXCatchStmtClass:
101 case Stmt::SEHExceptStmtClass:
102 case Stmt::SEHFinallyStmtClass:
103 case Stmt::MSDependentExistsStmtClass:
104 llvm_unreachable("invalid statement class to emit generically");
105 case Stmt::NullStmtClass:
106 case Stmt::CompoundStmtClass:
107 case Stmt::DeclStmtClass:
108 case Stmt::LabelStmtClass:
109 case Stmt::AttributedStmtClass:
110 case Stmt::GotoStmtClass:
111 case Stmt::BreakStmtClass:
112 case Stmt::ContinueStmtClass:
113 case Stmt::DefaultStmtClass:
114 case Stmt::CaseStmtClass:
115 case Stmt::SEHLeaveStmtClass:
116 llvm_unreachable("should have emitted these statements as simple");
117
118#define STMT(Type, Base)
119#define ABSTRACT_STMT(Op)
120#define EXPR(Type, Base) \
121 case Stmt::Type##Class:
122#include "clang/AST/StmtNodes.inc"
123 {
124 // Remember the block we came in on.
125 llvm::BasicBlock *incoming = Builder.GetInsertBlock();
126 assert(incoming && "expression emission must have an insertion point");
127
128 EmitIgnoredExpr(cast<Expr>(S));
129
130 llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
131 assert(outgoing && "expression emission cleared block!");
132
133 // The expression emitters assume (reasonably!) that the insertion
134 // point is always set. To maintain that, the call-emission code
135 // for noreturn functions has to enter a new block with no
136 // predecessors. We want to kill that block and mark the current
137 // insertion point unreachable in the common case of a call like
138 // "exit();". Since expression emission doesn't otherwise create
139 // blocks with no predecessors, we can just test for that.
140 // However, we must be careful not to do this to our incoming
141 // block, because *statement* emission does sometimes create
142 // reachable blocks which will have no predecessors until later in
143 // the function. This occurs with, e.g., labels that are not
144 // reachable by fallthrough.
145 if (incoming != outgoing && outgoing->use_empty()) {
146 outgoing->eraseFromParent();
147 Builder.ClearInsertionPoint();
148 }
149 break;
150 }
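  // Illustrative example (hypothetical user code, names illustrative): an
  // expression statement whose callee is noreturn. Call emission opens a fresh
  // block after the call; nothing ever branches to it, so the check above
  // erases it and clears the insertion point.
  //
  //   #include <stdlib.h>
  //   void fatal(void) {
  //     exit(1);   // noreturn call emitted as an ignored expression
  //     /* nothing after this point gets a reachable block */
  //   }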
151
152 case Stmt::IndirectGotoStmtClass:
153 EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
154
155 case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
156 case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
157 case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
158 case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;
159
160 case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
161
162 case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
163 case Stmt::GCCAsmStmtClass: // Intentional fall-through.
164 case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
165 case Stmt::CoroutineBodyStmtClass:
166 EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
167 break;
168 case Stmt::CoreturnStmtClass:
169 EmitCoreturnStmt(cast<CoreturnStmt>(*S));
170 break;
171 case Stmt::CapturedStmtClass: {
172 const CapturedStmt *CS = cast<CapturedStmt>(S);
173 EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
174 }
175 break;
176 case Stmt::ObjCAtTryStmtClass:
177 EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
178 break;
179 case Stmt::ObjCAtCatchStmtClass:
180 llvm_unreachable(
181 "@catch statements should be handled by EmitObjCAtTryStmt");
182 case Stmt::ObjCAtFinallyStmtClass:
183 llvm_unreachable(
184 "@finally statements should be handled by EmitObjCAtTryStmt");
185 case Stmt::ObjCAtThrowStmtClass:
186 EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
187 break;
188 case Stmt::ObjCAtSynchronizedStmtClass:
189 EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
190 break;
191 case Stmt::ObjCForCollectionStmtClass:
192 EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
193 break;
194 case Stmt::ObjCAutoreleasePoolStmtClass:
195 EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
196 break;
197
198 case Stmt::CXXTryStmtClass:
199 EmitCXXTryStmt(cast<CXXTryStmt>(*S));
200 break;
201 case Stmt::CXXForRangeStmtClass:
202 EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
203 break;
204 case Stmt::SEHTryStmtClass:
205 EmitSEHTryStmt(cast<SEHTryStmt>(*S));
206 break;
207 case Stmt::OMPMetaDirectiveClass:
208 EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
209 break;
210 case Stmt::OMPCanonicalLoopClass:
211 EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
212 break;
213 case Stmt::OMPParallelDirectiveClass:
214 EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
215 break;
216 case Stmt::OMPSimdDirectiveClass:
217 EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
218 break;
219 case Stmt::OMPTileDirectiveClass:
220 EmitOMPTileDirective(cast<OMPTileDirective>(*S));
221 break;
222 case Stmt::OMPUnrollDirectiveClass:
223 EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
224 break;
225 case Stmt::OMPReverseDirectiveClass:
226 EmitOMPReverseDirective(cast<OMPReverseDirective>(*S));
227 break;
228 case Stmt::OMPInterchangeDirectiveClass:
229 EmitOMPInterchangeDirective(cast<OMPInterchangeDirective>(*S));
230 break;
231 case Stmt::OMPForDirectiveClass:
232 EmitOMPForDirective(cast<OMPForDirective>(*S));
233 break;
234 case Stmt::OMPForSimdDirectiveClass:
235 EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
236 break;
237 case Stmt::OMPSectionsDirectiveClass:
238 EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
239 break;
240 case Stmt::OMPSectionDirectiveClass:
241 EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
242 break;
243 case Stmt::OMPSingleDirectiveClass:
244 EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
245 break;
246 case Stmt::OMPMasterDirectiveClass:
247 EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
248 break;
249 case Stmt::OMPCriticalDirectiveClass:
250 EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
251 break;
252 case Stmt::OMPParallelForDirectiveClass:
253 EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
254 break;
255 case Stmt::OMPParallelForSimdDirectiveClass:
256 EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
257 break;
258 case Stmt::OMPParallelMasterDirectiveClass:
259 EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
260 break;
261 case Stmt::OMPParallelSectionsDirectiveClass:
262 EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
263 break;
264 case Stmt::OMPTaskDirectiveClass:
265 EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
266 break;
267 case Stmt::OMPTaskyieldDirectiveClass:
268 EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
269 break;
270 case Stmt::OMPErrorDirectiveClass:
271 EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
272 break;
273 case Stmt::OMPBarrierDirectiveClass:
274 EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
275 break;
276 case Stmt::OMPTaskwaitDirectiveClass:
277 EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
278 break;
279 case Stmt::OMPTaskgroupDirectiveClass:
280 EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
281 break;
282 case Stmt::OMPFlushDirectiveClass:
283 EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
284 break;
285 case Stmt::OMPDepobjDirectiveClass:
286 EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
287 break;
288 case Stmt::OMPScanDirectiveClass:
289 EmitOMPScanDirective(cast<OMPScanDirective>(*S));
290 break;
291 case Stmt::OMPOrderedDirectiveClass:
292 EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
293 break;
294 case Stmt::OMPAtomicDirectiveClass:
295 EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
296 break;
297 case Stmt::OMPTargetDirectiveClass:
298 EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
299 break;
300 case Stmt::OMPTeamsDirectiveClass:
301 EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
302 break;
303 case Stmt::OMPCancellationPointDirectiveClass:
304 EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
305 break;
306 case Stmt::OMPCancelDirectiveClass:
307 EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
308 break;
309 case Stmt::OMPTargetDataDirectiveClass:
310 EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
311 break;
312 case Stmt::OMPTargetEnterDataDirectiveClass:
313 EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
314 break;
315 case Stmt::OMPTargetExitDataDirectiveClass:
316 EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
317 break;
318 case Stmt::OMPTargetParallelDirectiveClass:
319 EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
320 break;
321 case Stmt::OMPTargetParallelForDirectiveClass:
322 EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
323 break;
324 case Stmt::OMPTaskLoopDirectiveClass:
325 EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
326 break;
327 case Stmt::OMPTaskLoopSimdDirectiveClass:
328 EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
329 break;
330 case Stmt::OMPMasterTaskLoopDirectiveClass:
331 EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
332 break;
333 case Stmt::OMPMaskedTaskLoopDirectiveClass:
334 llvm_unreachable("masked taskloop directive not supported yet.");
335 break;
336 case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
337 EmitOMPMasterTaskLoopSimdDirective(
338 cast<OMPMasterTaskLoopSimdDirective>(*S));
339 break;
340 case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
341 llvm_unreachable("masked taskloop simd directive not supported yet.");
342 break;
343 case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
344 EmitOMPParallelMasterTaskLoopDirective(
345 cast<OMPParallelMasterTaskLoopDirective>(*S));
346 break;
347 case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
348 llvm_unreachable("parallel masked taskloop directive not supported yet.");
349 break;
350 case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
351 EmitOMPParallelMasterTaskLoopSimdDirective(
352 cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
353 break;
354 case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
355 llvm_unreachable(
356 "parallel masked taskloop simd directive not supported yet.");
357 break;
358 case Stmt::OMPDistributeDirectiveClass:
359 EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
360 break;
361 case Stmt::OMPTargetUpdateDirectiveClass:
362 EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
363 break;
364 case Stmt::OMPDistributeParallelForDirectiveClass:
365 EmitOMPDistributeParallelForDirective(
366 cast<OMPDistributeParallelForDirective>(*S));
367 break;
368 case Stmt::OMPDistributeParallelForSimdDirectiveClass:
369 EmitOMPDistributeParallelForSimdDirective(
370 cast<OMPDistributeParallelForSimdDirective>(*S));
371 break;
372 case Stmt::OMPDistributeSimdDirectiveClass:
373 EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
374 break;
375 case Stmt::OMPTargetParallelForSimdDirectiveClass:
376 EmitOMPTargetParallelForSimdDirective(
377 cast<OMPTargetParallelForSimdDirective>(*S));
378 break;
379 case Stmt::OMPTargetSimdDirectiveClass:
380 EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
381 break;
382 case Stmt::OMPTeamsDistributeDirectiveClass:
383 EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
384 break;
385 case Stmt::OMPTeamsDistributeSimdDirectiveClass:
386 EmitOMPTeamsDistributeSimdDirective(
387 cast<OMPTeamsDistributeSimdDirective>(*S));
388 break;
389 case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
390 EmitOMPTeamsDistributeParallelForSimdDirective(
391 cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
392 break;
393 case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
394 EmitOMPTeamsDistributeParallelForDirective(
395 cast<OMPTeamsDistributeParallelForDirective>(*S));
396 break;
397 case Stmt::OMPTargetTeamsDirectiveClass:
398 EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
399 break;
400 case Stmt::OMPTargetTeamsDistributeDirectiveClass:
401 EmitOMPTargetTeamsDistributeDirective(
402 cast<OMPTargetTeamsDistributeDirective>(*S));
403 break;
404 case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
405 EmitOMPTargetTeamsDistributeParallelForDirective(
406 cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
407 break;
408 case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
409 EmitOMPTargetTeamsDistributeParallelForSimdDirective(
410 cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
411 break;
412 case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
413 EmitOMPTargetTeamsDistributeSimdDirective(
414 cast<OMPTargetTeamsDistributeSimdDirective>(*S));
415 break;
416 case Stmt::OMPInteropDirectiveClass:
417 EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
418 break;
419 case Stmt::OMPDispatchDirectiveClass:
420 CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
421 break;
422 case Stmt::OMPScopeDirectiveClass:
423 CGM.ErrorUnsupported(S, "scope with FE outlining");
424 break;
425 case Stmt::OMPMaskedDirectiveClass:
426 EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
427 break;
428 case Stmt::OMPGenericLoopDirectiveClass:
429 EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
430 break;
431 case Stmt::OMPTeamsGenericLoopDirectiveClass:
432 EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
433 break;
434 case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
435 EmitOMPTargetTeamsGenericLoopDirective(
436 cast<OMPTargetTeamsGenericLoopDirective>(*S));
437 break;
438 case Stmt::OMPParallelGenericLoopDirectiveClass:
439 EmitOMPParallelGenericLoopDirective(
440 cast<OMPParallelGenericLoopDirective>(*S));
441 break;
442 case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
443 EmitOMPTargetParallelGenericLoopDirective(
444 cast<OMPTargetParallelGenericLoopDirective>(*S));
445 break;
446 case Stmt::OMPParallelMaskedDirectiveClass:
447 EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
448 break;
449 case Stmt::OpenACCComputeConstructClass:
450 EmitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*S));
451 break;
452 case Stmt::OpenACCLoopConstructClass:
453 EmitOpenACCLoopConstruct(cast<OpenACCLoopConstruct>(*S));
454 break;
455 }
456}
457
458bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
459 ArrayRef<const Attr *> Attrs) {
460 switch (S->getStmtClass()) {
461 default:
462 return false;
463 case Stmt::NullStmtClass:
464 break;
465 case Stmt::CompoundStmtClass:
466 EmitCompoundStmt(cast<CompoundStmt>(*S));
467 break;
468 case Stmt::DeclStmtClass:
469 EmitDeclStmt(cast<DeclStmt>(*S));
470 break;
471 case Stmt::LabelStmtClass:
472 EmitLabelStmt(cast<LabelStmt>(*S));
473 break;
474 case Stmt::AttributedStmtClass:
475 EmitAttributedStmt(cast<AttributedStmt>(*S));
476 break;
477 case Stmt::GotoStmtClass:
478 EmitGotoStmt(cast<GotoStmt>(*S));
479 break;
480 case Stmt::BreakStmtClass:
481 EmitBreakStmt(cast<BreakStmt>(*S));
482 break;
483 case Stmt::ContinueStmtClass:
484 EmitContinueStmt(cast<ContinueStmt>(*S));
485 break;
486 case Stmt::DefaultStmtClass:
487 EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
488 break;
489 case Stmt::CaseStmtClass:
490 EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
491 break;
492 case Stmt::SEHLeaveStmtClass:
493 EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
494 break;
495 }
496 return true;
497}
498
499/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
500/// this captures the expression result of the last sub-statement and returns it
501/// (for use by the statement expression extension).
502Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
503 AggValueSlot AggSlot) {
504 PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
505 "LLVM IR generation of compound statement ('{}')");
506
507 // Keep track of the current cleanup stack depth, including debug scopes.
508 LexicalScope Scope(*this, S.getSourceRange());
509
510 return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
511}
512
513Address
514CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
515 bool GetLast,
516 AggValueSlot AggSlot) {
517
518 const Stmt *ExprResult = S.getStmtExprResult();
519 assert((!GetLast || (GetLast && ExprResult)) &&
520 "If GetLast is true then the CompoundStmt must have a StmtExprResult");
521
522 Address RetAlloca = Address::invalid();
523
524 for (auto *CurStmt : S.body()) {
525 if (GetLast && ExprResult == CurStmt) {
526 // We have to special case labels here. They are statements, but when put
527 // at the end of a statement expression, they yield the value of their
528 // subexpression. Handle this by walking through all labels we encounter,
529 // emitting them before we evaluate the subexpr.
530 // Similar issues arise for attributed statements.
531 while (!isa<Expr>(ExprResult)) {
532 if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
533 EmitLabel(LS->getDecl());
534 ExprResult = LS->getSubStmt();
535 } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
536 // FIXME: Update this if we ever have attributes that affect the
537 // semantics of an expression.
538 ExprResult = AS->getSubStmt();
539 } else {
540 llvm_unreachable("unknown value statement");
541 }
542 }
543
544 EnsureInsertPoint();
545
546 const Expr *E = cast<Expr>(ExprResult);
547 QualType ExprTy = E->getType();
548 if (hasAggregateEvaluationKind(ExprTy)) {
549 EmitAggExpr(E, AggSlot);
550 } else {
551 // We can't return an RValue here because there might be cleanups at
552 // the end of the StmtExpr. Because of that, we have to emit the result
553 // here into a temporary alloca.
554 RetAlloca = CreateMemTemp(ExprTy);
555 EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
556 /*IsInit*/ false);
557 }
558 } else {
559 EmitStmt(CurStmt);
560 }
561 }
562
563 return RetAlloca;
564}
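// Illustrative example (hypothetical user code): a GNU statement expression
// whose result is wrapped in a label, the case the loop above unwraps by
// emitting the label first and then evaluating its subexpression.
//
//   int pick(int c) {
//     return ({ if (c) goto done; done: 42; });
//   }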
565
566void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
567 llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
568
569 // If there is a cleanup stack, then it isn't worth trying to
570 // simplify this block (we would need to remove it from the scope map
571 // and cleanup entry).
572 if (!EHStack.empty())
573 return;
574
575 // Can only simplify direct branches.
576 if (!BI || !BI->isUnconditional())
577 return;
578
579 // Can only simplify empty blocks.
580 if (BI->getIterator() != BB->begin())
581 return;
582
583 BB->replaceAllUsesWith(BI->getSuccessor(0));
584 BI->eraseFromParent();
585 BB->eraseFromParent();
586}
587
588void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
589 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
590
591 // Fall out of the current block (if necessary).
592 EmitBranch(BB);
593
594 if (IsFinished && BB->use_empty()) {
595 delete BB;
596 return;
597 }
598
599 // Place the block after the current block, if possible, or else at
600 // the end of the function.
601 if (CurBB && CurBB->getParent())
602 CurFn->insert(std::next(CurBB->getIterator()), BB);
603 else
604 CurFn->insert(CurFn->end(), BB);
605 Builder.SetInsertPoint(BB);
606}
607
608void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
609 // Emit a branch from the current block to the target one if this
610 // was a real block. If this was just a fall-through block after a
611 // terminator, don't emit it.
612 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
613
614 if (!CurBB || CurBB->getTerminator()) {
615 // If there is no insert point or the previous block is already
616 // terminated, don't touch it.
617 } else {
618 // Otherwise, create a fall-through branch.
619 Builder.CreateBr(Target);
620 }
621
622 Builder.ClearInsertionPoint();
623}
624
625void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
626 bool inserted = false;
627 for (llvm::User *u : block->users()) {
628 if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
629 CurFn->insert(std::next(insn->getParent()->getIterator()), block);
630 inserted = true;
631 break;
632 }
633 }
634
635 if (!inserted)
636 CurFn->insert(CurFn->end(), block);
637
638 Builder.SetInsertPoint(block);
639}
640
641CodeGenFunction::JumpDest
642CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
643 JumpDest &Dest = LabelMap[D];
644 if (Dest.isValid()) return Dest;
645
646 // Create, but don't insert, the new block.
647 Dest = JumpDest(createBasicBlock(D->getName()),
648 EHScopeStack::stable_iterator::invalid(),
649 NextCleanupDestIndex++);
650 return Dest;
651}
652
653void CodeGenFunction::EmitLabel(const LabelDecl *D) {
654 // Add this label to the current lexical scope if we're within any
655 // normal cleanups. Jumps "in" to this label --- when permitted by
656 // the language --- may need to be routed around such cleanups.
657 if (EHStack.hasNormalCleanups() && CurLexicalScope)
658 CurLexicalScope->addLabel(D);
659
660 JumpDest &Dest = LabelMap[D];
661
662 // If we didn't need a forward reference to this label, just go
663 // ahead and create a destination at the current scope.
664 if (!Dest.isValid()) {
665 Dest = getJumpDestInCurrentScope(D->getName());
666
667 // Otherwise, we need to give this label a target depth and remove
668 // it from the branch-fixups list.
669 } else {
670 assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
671 Dest.setScopeDepth(EHStack.stable_begin());
672 ResolveBranchFixups(Dest.getBlock());
673 }
674
675 EmitBlock(Dest.getBlock());
676
677 // Emit debug info for labels.
678 if (CGDebugInfo *DI = getDebugInfo()) {
679 if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
680 DI->setLocation(D->getLocation());
681 DI->EmitLabel(D, Builder);
682 }
683 }
684
685 incrementProfileCounter(D->getStmt());
686}
687
688/// Change the cleanup scope of the labels in this lexical scope to
689/// match the scope of the enclosing context.
690void CodeGenFunction::LexicalScope::rescopeLabels() {
691 assert(!Labels.empty());
692 EHScopeStack::stable_iterator innermostScope
693 = CGF.EHStack.getInnermostNormalCleanup();
694
695 // Change the scope depth of all the labels.
696 for (SmallVectorImpl<const LabelDecl*>::const_iterator
697 i = Labels.begin(), e = Labels.end(); i != e; ++i) {
698 assert(CGF.LabelMap.count(*i));
699 JumpDest &dest = CGF.LabelMap.find(*i)->second;
700 assert(dest.getScopeDepth().isValid());
701 assert(innermostScope.encloses(dest.getScopeDepth()));
702 dest.setScopeDepth(innermostScope);
703 }
704
705 // Reparent the labels if the new scope also has cleanups.
706 if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
707 ParentScope->Labels.append(Labels.begin(), Labels.end());
708 }
709}
710
711
712void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
713 EmitLabel(S.getDecl());
714
715 // IsEHa - emit eha.scope.begin if it's a side entry of a scope
716 if (getLangOpts().EHAsynch && S.isSideEntry())
717 EmitSehCppScopeBegin();
718
719 EmitStmt(S.getSubStmt());
720}
721
722void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
723 bool nomerge = false;
724 bool noinline = false;
725 bool alwaysinline = false;
726 const CallExpr *musttail = nullptr;
727
728 for (const auto *A : S.getAttrs()) {
729 switch (A->getKind()) {
730 default:
731 break;
732 case attr::NoMerge:
733 nomerge = true;
734 break;
735 case attr::NoInline:
736 noinline = true;
737 break;
738 case attr::AlwaysInline:
739 alwaysinline = true;
740 break;
741 case attr::MustTail: {
742 const Stmt *Sub = S.getSubStmt();
743 const ReturnStmt *R = cast<ReturnStmt>(Sub);
744 musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
745 } break;
746 case attr::CXXAssume: {
747 const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
748 if (getLangOpts().CXXAssumptions &&
749 !Assumption->HasSideEffects(getContext())) {
750 llvm::Value *AssumptionVal = EvaluateExprAsBool(Assumption);
751 Builder.CreateAssumption(AssumptionVal);
752 }
753 } break;
754 }
755 }
756 SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
757 SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
758 SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
759 SaveAndRestore save_musttail(MustTailCall, musttail);
760 EmitStmt(S.getSubStmt(), S.getAttrs());
761}
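// Illustrative example (hypothetical user code) of statement attributes the
// switch above records: an assumption is lowered to llvm.assume, and musttail
// pins the return's call expression for the call emitter.
//
//   int callee(int);
//   int caller(int x) {
//     [[assume(x > 0)]];                            // attr::CXXAssume
//     __attribute__((musttail)) return callee(x);   // attr::MustTail
//   }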
762
763void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
764 // If this code is reachable then emit a stop point (if generating
765 // debug info). We have to do this ourselves because we are on the
766 // "simple" statement path.
767 if (HaveInsertPoint())
768 EmitStopPoint(&S);
769
771}
772
773
774void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
775 if (const LabelDecl *Target = S.getConstantTarget()) {
776 EmitBranchThroughCleanup(getJumpDestForLabel(Target));
777 return;
778 }
779
780 // Ensure that we have an i8* for our PHI node.
781 llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
782 Int8PtrTy, "addr");
783 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
784
785 // Get the basic block for the indirect goto.
786 llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
787
788 // The first instruction in the block has to be the PHI for the switch dest,
789 // add an entry for this branch.
790 cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
791
792 EmitBranch(IndGotoBB);
793}
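// Illustrative example (hypothetical user code): a GNU computed goto. Every
// indirect goto funnels into the shared indirect-goto block, and the address
// operand becomes one more incoming value of that block's leading PHI.
//
//   void dispatch(int op) {
//     static void *tbl[] = { &&op_add, &&op_done };
//     goto *tbl[op];
//   op_add:  /* ... */ ;
//   op_done: return;
//   }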
794
795void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
796 // Only one branch of a consteval if statement can ever be evaluated at run
797 // time: the else branch for `if consteval`, the then branch for `if !consteval`.
798 if (S.isConsteval()) {
799 const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
800 if (Executed) {
801 RunCleanupsScope ExecutedScope(*this);
802 EmitStmt(Executed);
803 }
804 return;
805 }
806
807 // C99 6.8.4.1: The first substatement is executed if the expression compares
808 // unequal to 0. The condition must be a scalar type.
809 LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
810
811 if (S.getInit())
812 EmitStmt(S.getInit());
813
814 if (S.getConditionVariable())
815 EmitDecl(*S.getConditionVariable());
816
817 // If the condition constant folds and can be elided, try to avoid emitting
818 // the condition and the dead arm of the if/else.
819 bool CondConstant;
820 if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
821 S.isConstexpr())) {
822 // Figure out which block (then or else) is executed.
823 const Stmt *Executed = S.getThen();
824 const Stmt *Skipped = S.getElse();
825 if (!CondConstant) // Condition false?
826 std::swap(Executed, Skipped);
827
828 // If the skipped block has no labels in it, just emit the executed block.
829 // This avoids emitting dead code and simplifies the CFG substantially.
830 if (S.isConstexpr() || !ContainsLabel(Skipped)) {
831 if (CondConstant)
832 incrementProfileCounter(&S);
833 if (Executed) {
834 RunCleanupsScope ExecutedScope(*this);
835 EmitStmt(Executed);
836 }
837 return;
838 }
839 }
840
841 // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
842 // the conditional branch.
843 llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
844 llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
845 llvm::BasicBlock *ElseBlock = ContBlock;
846 if (S.getElse())
847 ElseBlock = createBasicBlock("if.else");
848
849 // Prefer the PGO based weights over the likelihood attribute.
850 // When the build isn't optimized the metadata isn't used, so don't generate
851 // it.
852 // Also, differentiate between disabled PGO and a never executed branch with
853 // PGO. Assuming PGO is in use:
854 // - we want to ignore the [[likely]] attribute if the branch is never
855 // executed,
856 // - assuming the profile is poor, preserving the attribute may still be
857 // beneficial.
858 // As an approximation, preserve the attribute only if both the branch and the
859 // parent context were not executed.
860 Stmt::Likelihood LH = Stmt::LH_None;
861 uint64_t ThenCount = getProfileCount(S.getThen());
862 if (!ThenCount && !getCurrentProfileCount() &&
863 CGM.getCodeGenOpts().OptimizationLevel)
864 LH = Stmt::getLikelihood(S.getThen(), S.getElse());
865
866 // When measuring MC/DC, always fully evaluate the condition up front using
867 // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
868 // executing the body of the if.then or if.else. This is useful for when
869 // there is a 'return' within the body, but this is particularly beneficial
870 // when one if-stmt is nested within another if-stmt so that all of the MC/DC
871 // updates are kept linear and consistent.
872 if (!CGM.getCodeGenOpts().MCDCCoverage)
873 EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH);
874 else {
875 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
876 Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
877 }
878
879 // Emit the 'then' code.
880 EmitBlock(ThenBlock);
881 if (llvm::EnableSingleByteCoverage)
882 incrementProfileCounter(S.getThen());
883 else
884 incrementProfileCounter(&S);
885 {
886 RunCleanupsScope ThenScope(*this);
887 EmitStmt(S.getThen());
888 }
889 EmitBranch(ContBlock);
890
891 // Emit the 'else' code if present.
892 if (const Stmt *Else = S.getElse()) {
893 {
894 // There is no need to emit line number for an unconditional branch.
895 auto NL = ApplyDebugLocation::CreateEmpty(*this);
896 EmitBlock(ElseBlock);
897 }
898 // When single byte coverage mode is enabled, add a counter to else block.
901 {
902 RunCleanupsScope ElseScope(*this);
903 EmitStmt(Else);
904 }
905 {
906 // There is no need to emit line number for an unconditional branch.
907 auto NL = ApplyDebugLocation::CreateEmpty(*this);
908 EmitBranch(ContBlock);
909 }
910 }
911
912 // Emit the continuation block for code after the if.
913 EmitBlock(ContBlock, true);
914
915 // When single byte coverage mode is enabled, add a counter to continuation
916 // block.
919}
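// Illustrative example (hypothetical user code; callee names are made up) of
// the constant-folding path above: the condition folds to false and the
// skipped arm contains no labels, so neither the condition nor the dead branch
// is emitted.
//
//   void f(void) {
//     if (0)
//       expensive_path();   // never emitted
//     else
//       cheap_path();       // emitted directly, with no conditional branch
//   }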
920
921bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
922 bool HasEmptyBody) {
923 if (CGM.getCodeGenOpts().getFiniteLoops() ==
924 CodeGenOptions::FiniteLoopsKind::Never)
925 return false;
926
927 // Now apply rules for plain C (see 6.8.5.6 in C11).
928 // Loops with constant conditions do not have to make progress in any C
929 // version.
930 // As an extension, we consider loops whose constant expression
931 // can be constant-folded.
932 Expr::EvalResult Result;
933 bool CondIsConstInt =
934 !ControllingExpression ||
935 (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
936 Result.Val.isInt());
937
938 bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
939 Result.Val.getInt().getBoolValue());
940
941 // Loops with non-constant conditions must make progress in C11 and later.
942 if (getLangOpts().C11 && !CondIsConstInt)
943 return true;
944
945 // [C++26][intro.progress] (DR)
946 // The implementation may assume that any thread will eventually do one of the
947 // following:
948 // [...]
949 // - continue execution of a trivial infinite loop ([stmt.iter.general]).
950 if (CGM.getCodeGenOpts().getFiniteLoops() ==
951 CodeGenOptions::FiniteLoopsKind::Always ||
952 getLangOpts().CPlusPlus11) {
953 if (HasEmptyBody && CondIsTrue) {
954 CurFn->removeFnAttr(llvm::Attribute::MustProgress);
955 return false;
956 }
957 return true;
958 }
959 return false;
960}
961
962// [C++26][stmt.iter.general] (DR)
963// A trivially empty iteration statement is an iteration statement matching one
964// of the following forms:
965// - while ( expression ) ;
966// - while ( expression ) { }
967// - do ; while ( expression ) ;
968// - do { } while ( expression ) ;
969// - for ( init-statement expression(opt); ) ;
970// - for ( init-statement expression(opt); ) { }
971template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
972 if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
973 if (S.getInc())
974 return false;
975 }
976 const Stmt *Body = S.getBody();
977 if (!Body || isa<NullStmt>(Body))
978 return true;
979 if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
980 return Compound->body_empty();
981 return false;
982}
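// Illustrative example (hypothetical user code): a trivially empty iteration
// statement in the sense above. The condition folds to true and the body is
// empty, so checkIfLoopMustProgress() drops the function's mustprogress
// attribute instead of letting the loop be assumed to terminate.
//
//   void wait_forever() {
//     while (true) { }
//   }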
983
984void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
985 ArrayRef<const Attr *> WhileAttrs) {
986 // Emit the header for the loop, which will also become
987 // the continue target.
988 JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
989 EmitBlock(LoopHeader.getBlock());
990
992 ConvergenceTokenStack.push_back(emitConvergenceLoopToken(
993 LoopHeader.getBlock(), ConvergenceTokenStack.back()));
994
995 // Create an exit block for when the condition fails, which will
996 // also become the break target.
997 JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
998
999 // Store the blocks to use for break and continue.
1000 BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
1001
1002 // C++ [stmt.while]p2:
1003 // When the condition of a while statement is a declaration, the
1004 // scope of the variable that is declared extends from its point
1005 // of declaration (3.3.2) to the end of the while statement.
1006 // [...]
1007 // The object created in a condition is destroyed and created
1008 // with each iteration of the loop.
1009 RunCleanupsScope ConditionScope(*this);
1010
1011 if (S.getConditionVariable())
1012 EmitDecl(*S.getConditionVariable());
1013
1014 // Evaluate the conditional in the while header. C99 6.8.5.1: The
1015 // evaluation of the controlling expression takes place before each
1016 // execution of the loop body.
1017 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1018
1019 // while(1) is common, avoid extra exit blocks. Be sure
1020 // to correctly handle break/continue though.
1021 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
1022 bool EmitBoolCondBranch = !C || !C->isOne();
1023 const SourceRange &R = S.getSourceRange();
1024 LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
1025 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
1026 SourceLocToDebugLoc(R.getEnd()),
1027 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));
1028
1029 // When single byte coverage mode is enabled, add a counter to loop condition.
1031 incrementProfileCounter(S.getCond());
1032
1033 // As long as the condition is true, go to the loop body.
1034 llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
1035 if (EmitBoolCondBranch) {
1036 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1037 if (ConditionScope.requiresCleanups())
1038 ExitBlock = createBasicBlock("while.exit");
1039 llvm::MDNode *Weights =
1040 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1041 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1042 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1043 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1044 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);
1045
1046 if (ExitBlock != LoopExit.getBlock()) {
1047 EmitBlock(ExitBlock);
1048 EmitBranchThroughCleanup(LoopExit);
1049 }
1050 } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
1051 CGM.getDiags().Report(A->getLocation(),
1052 diag::warn_attribute_has_no_effect_on_infinite_loop)
1053 << A << A->getRange();
1054 CGM.getDiags().Report(
1055 S.getWhileLoc(),
1056 diag::note_attribute_has_no_effect_on_infinite_loop_here)
1057 << SourceRange(S.getWhileLoc(), S.getRParenLoc());
1058 }
1059
1060 // Emit the loop body. We have to emit this in a cleanup scope
1061 // because it might be a singleton DeclStmt.
1062 {
1063 RunCleanupsScope BodyScope(*this);
1064 EmitBlock(LoopBody);
1065 // When single byte coverage mode is enabled, add a counter to the body.
1067 incrementProfileCounter(S.getBody());
1068 else
1070 EmitStmt(S.getBody());
1071 }
1072
1073 BreakContinueStack.pop_back();
1074
1075 // Immediately force cleanup.
1076 ConditionScope.ForceCleanup();
1077
1078 EmitStopPoint(&S);
1079 // Branch to the loop header again.
1080 EmitBranch(LoopHeader.getBlock());
1081
1082 LoopStack.pop();
1083
1084 // Emit the exit block.
1085 EmitBlock(LoopExit.getBlock(), true);
1086
1087 // If we skipped emitting the conditional branch, the LoopHeader block is
1088 // typically just a forwarding branch; try to erase it.
1089 if (!EmitBoolCondBranch)
1090 SimplifyForwardingBlocks(LoopHeader.getBlock());
1091
1092 // When single byte coverage mode is enabled, add a counter to continuation
1093 // block.
1096
1098 ConvergenceTokenStack.pop_back();
1099}
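// Illustrative example (hypothetical user code): for `while (1)` the condition
// folds to constant true, so no conditional branch is emitted and the
// "while.cond" header degenerates into a plain branch that
// SimplifyForwardingBlocks() can fold away; a `break` still reaches
// "while.end" through the BreakContinue entry pushed above.
//
//   void pump(int (*next)(void)) {
//     while (1) {
//       if (next() < 0)
//         break;
//     }
//   }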
1100
1101void CodeGenFunction::EmitDoStmt(const DoStmt &S,
1102 ArrayRef<const Attr *> DoAttrs) {
1103 JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
1104 JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
1105
1106 uint64_t ParentCount = getCurrentProfileCount();
1107
1108 // Store the blocks to use for break and continue.
1109 BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
1110
1111 // Emit the body of the loop.
1112 llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
1113
1115 EmitBlockWithFallThrough(LoopBody, S.getBody());
1116 else
1117 EmitBlockWithFallThrough(LoopBody, &S);
1118
1120 ConvergenceTokenStack.push_back(
1121 emitConvergenceLoopToken(LoopBody, ConvergenceTokenStack.back()));
1122
1123 {
1124 RunCleanupsScope BodyScope(*this);
1125 EmitStmt(S.getBody());
1126 }
1127
1128 EmitBlock(LoopCond.getBlock());
1129 // When single byte coverage mode is enabled, add a counter to loop condition.
1131 incrementProfileCounter(S.getCond());
1132
1133 // C99 6.8.5.2: "The evaluation of the controlling expression takes place
1134 // after each execution of the loop body."
1135
1136 // Evaluate the conditional in the while header.
1137 // C99 6.8.5p2/p4: The first substatement is executed if the expression
1138 // compares unequal to 0. The condition must be a scalar type.
1139 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1140
1141 BreakContinueStack.pop_back();
1142
1143 // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
1144 // to correctly handle break/continue though.
1145 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
1146 bool EmitBoolCondBranch = !C || !C->isZero();
1147
1148 const SourceRange &R = S.getSourceRange();
1149 LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
1150 SourceLocToDebugLoc(R.getBegin()),
1151 SourceLocToDebugLoc(R.getEnd()),
1152 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));
1153
1154 // As long as the condition is true, iterate the loop.
1155 if (EmitBoolCondBranch) {
1156 uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
1157 Builder.CreateCondBr(
1158 BoolCondVal, LoopBody, LoopExit.getBlock(),
1159 createProfileWeightsForLoop(S.getCond(), BackedgeCount));
1160 }
1161
1162 LoopStack.pop();
1163
1164 // Emit the exit block.
1165 EmitBlock(LoopExit.getBlock());
1166
1167 // If we skipped emitting the conditional branch, the DoCond block is
1168 // typically just a forwarding branch; try to erase it.
1169 if (!EmitBoolCondBranch)
1170 SimplifyForwardingBlocks(LoopCond.getBlock());
1171
1172 // When single byte coverage mode is enabled, add a counter to continuation
1173 // block.
1176
1178 ConvergenceTokenStack.pop_back();
1179}
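// Illustrative example (hypothetical user code): the classic statement-like
// macro. The controlling expression folds to constant false, so no backedge
// branch is emitted and the "do.cond" forwarding block is simplified away.
//
//   #define SWAP(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)
//   void order(int *x, int *y) {
//     if (*x > *y)
//       SWAP(*x, *y);
//   }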
1180
1181void CodeGenFunction::EmitForStmt(const ForStmt &S,
1182 ArrayRef<const Attr *> ForAttrs) {
1183 JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
1184
1185 LexicalScope ForScope(*this, S.getSourceRange());
1186
1187 // Evaluate the first part before the loop.
1188 if (S.getInit())
1189 EmitStmt(S.getInit());
1190
1191 // Start the loop with a block that tests the condition.
1192 // If there's an increment, the continue scope will be overwritten
1193 // later.
1194 JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
1195 llvm::BasicBlock *CondBlock = CondDest.getBlock();
1196 EmitBlock(CondBlock);
1197
1199 ConvergenceTokenStack.push_back(
1200 emitConvergenceLoopToken(CondBlock, ConvergenceTokenStack.back()));
1201
1202 const SourceRange &R = S.getSourceRange();
1203 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
1204 SourceLocToDebugLoc(R.getBegin()),
1205 SourceLocToDebugLoc(R.getEnd()),
1206 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));
1207
1208 // Create a cleanup scope for the condition variable cleanups.
1209 LexicalScope ConditionScope(*this, S.getSourceRange());
1210
1211 // If the for loop doesn't have an increment we can just use the condition as
1212 // the continue block. Otherwise, if there is no condition variable, we can
1213 // form the continue block now. If there is a condition variable, we can't
1214 // form the continue block until after we've emitted the condition, because
1215 // the condition is in scope in the increment, but Sema's jump diagnostics
1216 // ensure that there are no continues from the condition variable that jump
1217 // to the loop increment.
1218 JumpDest Continue;
1219 if (!S.getInc())
1220 Continue = CondDest;
1221 else if (!S.getConditionVariable())
1222 Continue = getJumpDestInCurrentScope("for.inc");
1223 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1224
1225 if (S.getCond()) {
1226 // If the for statement has a condition scope, emit the local variable
1227 // declaration.
1228 if (S.getConditionVariable()) {
1229 EmitDecl(*S.getConditionVariable());
1230
1231 // We have entered the condition variable's scope, so we're now able to
1232 // jump to the continue block.
1233 Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
1234 BreakContinueStack.back().ContinueBlock = Continue;
1235 }
1236
1237 // When single byte coverage mode is enabled, add a counter to loop
1238 // condition.
1240 incrementProfileCounter(S.getCond());
1241
1242 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1243 // If there are any cleanups between here and the loop-exit scope,
1244 // create a block to stage a loop exit along.
1245 if (ForScope.requiresCleanups())
1246 ExitBlock = createBasicBlock("for.cond.cleanup");
1247
1248 // As long as the condition is true, iterate the loop.
1249 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
1250
1251 // C99 6.8.5p2/p4: The first substatement is executed if the expression
1252 // compares unequal to 0. The condition must be a scalar type.
1253 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1254 llvm::MDNode *Weights =
1255 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1256 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1257 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1258 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1259
1260 Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
1261
1262 if (ExitBlock != LoopExit.getBlock()) {
1263 EmitBlock(ExitBlock);
1264 EmitBranchThroughCleanup(LoopExit);
1265 }
1266
1267 EmitBlock(ForBody);
1268 } else {
1269 // Treat it as a non-zero constant. Don't even create a new block for the
1270 // body, just fall into it.
1271 }
1272
1273 // When single byte coverage mode is enabled, add a counter to the body.
1275 incrementProfileCounter(S.getBody());
1276 else
1278 {
1279 // Create a separate cleanup scope for the body, in case it is not
1280 // a compound statement.
1281 RunCleanupsScope BodyScope(*this);
1282 EmitStmt(S.getBody());
1283 }
1284
1285 // If there is an increment, emit it next.
1286 if (S.getInc()) {
1287 EmitBlock(Continue.getBlock());
1288 EmitStmt(S.getInc());
1290 incrementProfileCounter(S.getInc());
1291 }
1292
1293 BreakContinueStack.pop_back();
1294
1295 ConditionScope.ForceCleanup();
1296
1297 EmitStopPoint(&S);
1298 EmitBranch(CondBlock);
1299
1300 ForScope.ForceCleanup();
1301
1302 LoopStack.pop();
1303
1304 // Emit the fall-through block.
1305 EmitBlock(LoopExit.getBlock(), true);
1306
1307 // When single byte coverage mode is enabled, add a counter to continuation
1308 // block.
1311
1313 ConvergenceTokenStack.pop_back();
1314}
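// Illustrative example (hypothetical user code): a for-loop with both a
// condition variable and an increment. The "for.inc" continue target can only
// be formed once the condition variable is in scope, which is the case handled
// above; a `continue` in the body then branches to the increment block rather
// than back to "for.cond".
//
//   #include <cstddef>
//   void scale(int *a, std::size_t n) {
//     for (std::size_t i = 0; bool more = i < n; ++i) {
//       if (a[i] == 0)
//         continue;          // jumps to ++i via "for.inc"
//       a[i] *= 2;
//     }
//   }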
1315
1316void
1317CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
1318 ArrayRef<const Attr *> ForAttrs) {
1319 JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
1320
1321 LexicalScope ForScope(*this, S.getSourceRange());
1322
1323 // Evaluate the first pieces before the loop.
1324 if (S.getInit())
1325 EmitStmt(S.getInit());
1326 EmitStmt(S.getRangeStmt());
1327 EmitStmt(S.getBeginStmt());
1328 EmitStmt(S.getEndStmt());
1329
1330 // Start the loop with a block that tests the condition.
1331 // If there's an increment, the continue scope will be overwritten
1332 // later.
1333 llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
1334 EmitBlock(CondBlock);
1335
1337 ConvergenceTokenStack.push_back(
1338 emitConvergenceLoopToken(CondBlock, ConvergenceTokenStack.back()));
1339
1340 const SourceRange &R = S.getSourceRange();
1341 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
1342 SourceLocToDebugLoc(R.getBegin()),
1343 SourceLocToDebugLoc(R.getEnd()));
1344
1345 // If there are any cleanups between here and the loop-exit scope,
1346 // create a block to stage a loop exit along.
1347 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1348 if (ForScope.requiresCleanups())
1349 ExitBlock = createBasicBlock("for.cond.cleanup");
1350
1351 // The loop body, consisting of the specified body and the loop variable.
1352 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
1353
1354 // The body is executed if the expression, contextually converted
1355 // to bool, is true.
1356 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1357 llvm::MDNode *Weights =
1358 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1359 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1360 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1361 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1362 Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
1363
1364 if (ExitBlock != LoopExit.getBlock()) {
1365 EmitBlock(ExitBlock);
1366 EmitBranchThroughCleanup(LoopExit);
1367 }
1368
1369 EmitBlock(ForBody);
1371 incrementProfileCounter(S.getBody());
1372 else
1374
1375 // Create a block for the increment. In case of a 'continue', we jump there.
1376 JumpDest Continue = getJumpDestInCurrentScope("for.inc");
1377
1378 // Store the blocks to use for break and continue.
1379 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1380
1381 {
1382 // Create a separate cleanup scope for the loop variable and body.
1383 LexicalScope BodyScope(*this, S.getSourceRange());
1384 EmitStmt(S.getLoopVarStmt());
1385 EmitStmt(S.getBody());
1386 }
1387
1388 EmitStopPoint(&S);
1389 // If there is an increment, emit it next.
1390 EmitBlock(Continue.getBlock());
1391 EmitStmt(S.getInc());
1392
1393 BreakContinueStack.pop_back();
1394
1395 EmitBranch(CondBlock);
1396
1397 ForScope.ForceCleanup();
1398
1399 LoopStack.pop();
1400
1401 // Emit the fall-through block.
1402 EmitBlock(LoopExit.getBlock(), true);
1403
1404 // When single byte coverage mode is enabled, add a counter to continuation
1405 // block.
1408
1410 ConvergenceTokenStack.pop_back();
1411}
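// Illustrative example (hypothetical user code) with the rough desugaring this
// function emits piece by piece: range, begin and end statements first, then
// the "for.cond" comparison, then the loop variable plus body, then the
// increment as the continue target.
//
//   #include <vector>
//   int total(const std::vector<int> &v) {
//     int sum = 0;
//     for (int x : v)    // ~ auto &&__range = v;
//       sum += x;        //   auto __begin = __range.begin(), __end = __range.end();
//     return sum;        //   for (; __begin != __end; ++__begin) { int x = *__begin; ... }
//   }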
1412
1413void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
1414 if (RV.isScalar()) {
1415 Builder.CreateStore(RV.getScalarVal(), ReturnValue);
1416 } else if (RV.isAggregate()) {
1417 LValue Dest = MakeAddrLValue(ReturnValue, Ty);
1420 } else {
1422 /*init*/ true);
1423 }
1425}
1426
1427namespace {
1428// RAII struct used to save and restore a return statement's result expression.
1429struct SaveRetExprRAII {
1430 SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
1431 : OldRetExpr(CGF.RetExpr), CGF(CGF) {
1432 CGF.RetExpr = RetExpr;
1433 }
1434 ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
1435 const Expr *OldRetExpr;
1436 CodeGenFunction &CGF;
1437};
1438} // namespace
1439
1440/// Determine if the given call uses the swiftasync calling convention.
1441static bool isSwiftAsyncCallee(const CallExpr *CE) {
1442 auto calleeQualType = CE->getCallee()->getType();
1443 const FunctionType *calleeType = nullptr;
1444 if (calleeQualType->isFunctionPointerType() ||
1445 calleeQualType->isFunctionReferenceType() ||
1446 calleeQualType->isBlockPointerType() ||
1447 calleeQualType->isMemberFunctionPointerType()) {
1448 calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
1449 } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
1450 calleeType = ty;
1451 } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
1452 if (auto methodDecl = CMCE->getMethodDecl()) {
1453 // getMethodDecl() doesn't handle member pointers at the moment.
1454 calleeType = methodDecl->getType()->castAs<FunctionType>();
1455 } else {
1456 return false;
1457 }
1458 } else {
1459 return false;
1460 }
1461 return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
1462}
1463
1464/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
1465/// if the function returns void, or may be missing one if the function returns
1466/// non-void. Fun stuff :).
1467void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
1468 if (requiresReturnValueCheck()) {
1469 llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
1470 auto *SLocPtr =
1471 new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
1472 llvm::GlobalVariable::PrivateLinkage, SLoc);
1473 SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1475 assert(ReturnLocation.isValid() && "No valid return location");
1476 Builder.CreateStore(SLocPtr, ReturnLocation);
1477 }
1478
1479 // Returning from an outlined SEH helper is UB, and we already warn on it.
1480 if (IsOutlinedSEHHelper) {
1481 Builder.CreateUnreachable();
1482 Builder.ClearInsertionPoint();
1483 }
1484
1485 // Emit the result value, even if unused, to evaluate the side effects.
1486 const Expr *RV = S.getRetValue();
1487
1488 // Record the result expression of the return statement. The recorded
1489 // expression is used to determine whether a block capture's lifetime should
1490 // end at the end of the full expression as opposed to the end of the scope
1491 // enclosing the block expression.
1492 //
1493 // This permits a small, easily-implemented exception to our over-conservative
1494 // rules about not jumping to statements following block literals with
1495 // non-trivial cleanups.
1496 SaveRetExprRAII SaveRetExpr(RV, *this);
1497
1498 RunCleanupsScope cleanupScope(*this);
1499 if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
1500 RV = EWC->getSubExpr();
1501
1502 // If we're in a swiftasynccall function, and the return expression is a
1503 // call to a swiftasynccall function, mark the call as the musttail call.
1504 std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
1505 if (RV && CurFnInfo &&
1506 CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
1507 if (auto CE = dyn_cast<CallExpr>(RV)) {
1508 if (isSwiftAsyncCallee(CE)) {
1509 SaveMustTail.emplace(MustTailCall, CE);
1510 }
1511 }
1512 }
1513
1514 // FIXME: Clean this up by using an LValue for ReturnTemp,
1515 // EmitStoreThroughLValue, and EmitAnyExpr.
1516 // Check if the NRVO candidate was not globalized in OpenMP mode.
1517 if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
1518 S.getNRVOCandidate()->isNRVOVariable() &&
1519 (!getLangOpts().OpenMP ||
1520 !CGM.getOpenMPRuntime()
1521 .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
1522 .isValid())) {
1523 // Apply the named return value optimization for this return statement,
1524 // which means doing nothing: the appropriate result has already been
1525 // constructed into the NRVO variable.
1526
1527 // If there is an NRVO flag for this variable, set it to 1 to indicate
1528 // that the cleanup code should not destroy the variable.
1529 if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
1530 Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
1531 } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
1532 // Make sure not to return anything, but evaluate the expression
1533 // for side effects.
1534 if (RV) {
1535 EmitAnyExpr(RV);
1536 }
1537 } else if (!RV) {
1538 // Do nothing (return value is left uninitialized)
1539 } else if (FnRetTy->isReferenceType()) {
1540 // If this function returns a reference, take the address of the expression
1541 // rather than the value.
1542 RValue Result = EmitReferenceBindingToExpr(RV);
1543 Builder.CreateStore(Result.getScalarVal(), ReturnValue);
1544 } else {
1545 switch (getEvaluationKind(RV->getType())) {
1546 case TEK_Scalar: {
1547 llvm::Value *Ret = EmitScalarExpr(RV);
1550 /*isInit*/ true);
1551 else
1553 break;
1554 }
1555 case TEK_Complex:
1557 /*isInit*/ true);
1558 break;
1559 case TEK_Aggregate:
1566 break;
1567 }
1568 }
1569
1570 ++NumReturnExprs;
1571 if (!RV || RV->isEvaluatable(getContext()))
1572 ++NumSimpleReturnExprs;
1573
1574 cleanupScope.ForceCleanup();
1575 EmitBranchThroughCleanup(ReturnBlock);
1576}
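// Illustrative example (hypothetical user code) of the NRVO branch above: the
// named local is constructed directly in the return slot, so the return itself
// emits no copy or move; it only sets the NRVO flag (when one exists) so the
// cleanup code skips the destructor on this path.
//
//   #include <string>
//   std::string greet() {
//     std::string s = "hello";
//     s += " world";
//     return s;          // NRVO candidate
//   }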
1577
1578void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
1579 // As long as debug info is modeled with instructions, we have to ensure we
1580 // have a place to insert here and write the stop point here.
1581 if (HaveInsertPoint())
1582 EmitStopPoint(&S);
1583
1584 for (const auto *I : S.decls())
1585 EmitDecl(*I);
1586}
1587
1588void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
1589 assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
1590
1591 // If this code is reachable then emit a stop point (if generating
1592 // debug info). We have to do this ourselves because we are on the
1593 // "simple" statement path.
1594 if (HaveInsertPoint())
1595 EmitStopPoint(&S);
1596
1597 EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
1598}
1599
1600void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
1601 assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1602
1603 // If this code is reachable then emit a stop point (if generating
1604 // debug info). We have to do this ourselves because we are on the
1605 // "simple" statement path.
1606 if (HaveInsertPoint())
1607 EmitStopPoint(&S);
1608
1609 EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
1610}
1611
1612/// EmitCaseStmtRange - If case statement range is not too big then
1613/// add multiple cases to switch instruction, one for each value within
1614/// the range. If range is too big then emit "if" condition check.
1615void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
1616 ArrayRef<const Attr *> Attrs) {
1617 assert(S.getRHS() && "Expected RHS value in CaseStmt");
1618
1619 llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1620 llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1621
1622 // Emit the code for this case. We do this first to make sure it is
1623 // properly chained from our predecessor before generating the
1624 // switch machinery to enter this block.
1625 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1626 EmitBlockWithFallThrough(CaseDest, &S);
1627 EmitStmt(S.getSubStmt());
1628
1629 // If range is empty, do nothing.
1630 if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1631 return;
1632
1633 Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
1634 llvm::APInt Range = RHS - LHS;
1635 // FIXME: parameters such as this should not be hardcoded.
1636 if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1637 // Range is small enough to add multiple switch instruction cases.
1638 uint64_t Total = getProfileCount(&S);
1639 unsigned NCases = Range.getZExtValue() + 1;
1640 // We only have one region counter for the entire set of cases here, so we
1641 // need to divide the weights evenly between the generated cases, ensuring
1642 // that the total weight is preserved. E.g., a weight of 5 over three cases
1643 // will be distributed as weights of 2, 2, and 1.
1644 uint64_t Weight = Total / NCases, Rem = Total % NCases;
1645 for (unsigned I = 0; I != NCases; ++I) {
1646 if (SwitchWeights)
1647 SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1648 else if (SwitchLikelihood)
1649 SwitchLikelihood->push_back(LH);
1650
1651 if (Rem)
1652 Rem--;
1653 SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1654 ++LHS;
1655 }
1656 return;
1657 }
1658
1659 // The range is too big. Emit "if" condition into a new block,
1660 // making sure to save and restore the current insertion point.
1661 llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1662
1663 // Push this test onto the chain of range checks (which terminates
1664 // in the default basic block). The switch's default will be changed
1665 // to the top of this chain after switch emission is complete.
1666 llvm::BasicBlock *FalseDest = CaseRangeBlock;
1667 CaseRangeBlock = createBasicBlock("sw.caserange");
1668
1669 CurFn->insert(CurFn->end(), CaseRangeBlock);
1670 Builder.SetInsertPoint(CaseRangeBlock);
1671
1672 // Emit range check.
1673 llvm::Value *Diff =
1674 Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1675 llvm::Value *Cond =
1676 Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1677
1678 llvm::MDNode *Weights = nullptr;
1679 if (SwitchWeights) {
1680 uint64_t ThisCount = getProfileCount(&S);
1681 uint64_t DefaultCount = (*SwitchWeights)[0];
1682 Weights = createProfileWeights(ThisCount, DefaultCount);
1683
1684 // Since we're chaining the switch default through each large case range, we
1685 // need to update the weight for the default, i.e., the first case, to include
1686 // this case.
1687 (*SwitchWeights)[0] += ThisCount;
1688 } else if (SwitchLikelihood)
1689 Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1690
1691 Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1692
1693 // Restore the appropriate insertion point.
1694 if (RestoreBB)
1695 Builder.SetInsertPoint(RestoreBB);
1696 else
1697 Builder.ClearInsertionPoint();
1698}
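// Illustration (not from CGStmt.cpp): the GNU case-range extension this
// function lowers. A hypothetical input:
//
//   int classify(unsigned c) {
//     switch (c) {
//     case 'a' ... 'z':      // 26 values, Range = 25 < 64: added as
//       return 1;            //   individual cases on the SwitchInst
//     case 128 ... 100000:   // Range = 99872: lowered to the chained
//       return 2;            //   "sub + icmp ule" check built above
//     default:
//       return 0;
//     }
//   }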
1699
1700void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1701 ArrayRef<const Attr *> Attrs) {
1702 // If there is no enclosing switch instance that we're aware of, then this
1703 // case statement and its block can be elided. This situation only happens
1704 // when we've constant-folded the switch, are emitting the constant case,
1705 // and part of the constant case includes another case statement. For
1706 // instance: switch (4) { case 4: do { case 5: } while (1); }
1707 if (!SwitchInsn) {
1708 EmitStmt(S.getSubStmt());
1709 return;
1710 }
1711
1712 // Handle case ranges.
1713 if (S.getRHS()) {
1714 EmitCaseStmtRange(S, Attrs);
1715 return;
1716 }
1717
1718 llvm::ConstantInt *CaseVal =
1719 Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1720
1721 // Emit debuginfo for the case value if it is an enum value.
1722 const ConstantExpr *CE;
1723 if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1724 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1725 else
1726 CE = dyn_cast<ConstantExpr>(S.getLHS());
1727 if (CE) {
1728 if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1729 if (CGDebugInfo *Dbg = getDebugInfo())
1730 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1731 Dbg->EmitGlobalVariable(DE->getDecl(),
1732 APValue(llvm::APSInt(CaseVal->getValue())));
1733 }
1734
1735 if (SwitchLikelihood)
1736 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1737
1738 // If the body of the case is just a 'break', try to not emit an empty block.
1739 // If we're profiling or we're not optimizing, leave the block in for better
1740 // debug and coverage analysis.
1741 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1742 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1743 isa<BreakStmt>(S.getSubStmt())) {
1744 JumpDest Block = BreakContinueStack.back().BreakBlock;
1745
1746 // Only do this optimization if there are no cleanups that need emitting.
1747 if (isObviouslyBranchWithoutCleanups(Block)) {
1748 if (SwitchWeights)
1749 SwitchWeights->push_back(getProfileCount(&S));
1750 SwitchInsn->addCase(CaseVal, Block.getBlock());
1751
1752 // If there was a fallthrough into this case, make sure to redirect it to
1753 // the end of the switch as well.
1754 if (Builder.GetInsertBlock()) {
1755 Builder.CreateBr(Block.getBlock());
1756 Builder.ClearInsertionPoint();
1757 }
1758 return;
1759 }
1760 }
1761
1762 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1763 EmitBlockWithFallThrough(CaseDest, &S);
1764 if (SwitchWeights)
1765 SwitchWeights->push_back(getProfileCount(&S));
1766 SwitchInsn->addCase(CaseVal, CaseDest);
1767
1768 // Recursively emitting the statement is acceptable, but is not wonderful for
1769 // code where we have many case statements nested together, i.e.:
1770 // case 1:
1771 // case 2:
1772 // case 3: etc.
1773 // Handling this recursively will create a new block for each case statement
1774 // that falls through to the next case which is IR intensive. It also causes
1775 // deep recursion which can run into stack depth limitations. Handle
1776 // sequential non-range case statements specially.
1777 //
1778 // TODO When the next case has a likelihood attribute the code returns to the
1779 // recursive algorithm. Maybe improve this case if it becomes common practice
1780 // to use a lot of attributes.
1781 const CaseStmt *CurCase = &S;
1782 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1783
1784 // Otherwise, iteratively add consecutive cases to this switch stmt.
1785 while (NextCase && NextCase->getRHS() == nullptr) {
1786 CurCase = NextCase;
1787 llvm::ConstantInt *CaseVal =
1788 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1789
1790 if (SwitchWeights)
1791 SwitchWeights->push_back(getProfileCount(NextCase));
1792 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1793 CaseDest = createBasicBlock("sw.bb");
1794 EmitBlockWithFallThrough(CaseDest, CurCase);
1795 }
1796 // Since this loop is only executed when the CaseStmt has no attributes
1797 // use a hard-coded value.
1798 if (SwitchLikelihood)
1799 SwitchLikelihood->push_back(Stmt::LH_None);
1800
1801 SwitchInsn->addCase(CaseVal, CaseDest);
1802 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1803 }
1804
1805 // Generate a stop point for debug info if the case statement is
1806 // followed by a default statement. A fallthrough case before a
1807 // default case gets its own branch target.
1808 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1809 EmitStopPoint(CurCase);
1810
1811 // Normal default recursion for non-cases.
1812 EmitStmt(CurCase->getSubStmt());
1813}
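// Illustration (not from CGStmt.cpp): the consecutive-case pattern that the
// loop above folds iteratively instead of recursing once per label:
//
//   switch (n) {
//   case 1:
//   case 2:
//   case 3:
//     return small(n);   // hypothetical callee
//   default:
//     return other(n);   // hypothetical callee
//   }
//
// Without instrumentation all three values are added to the same destination
// block, avoiding one block per empty case and deep recursion.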
1814
1815void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1816 ArrayRef<const Attr *> Attrs) {
1817 // If there is no enclosing switch instance that we're aware of, then this
1818 // default statement can be elided. This situation only happens when we've
1819 // constant-folded the switch.
1820 if (!SwitchInsn) {
1821 EmitStmt(S.getSubStmt());
1822 return;
1823 }
1824
1825 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1826 assert(DefaultBlock->empty() &&
1827 "EmitDefaultStmt: Default block already defined?");
1828
1829 if (SwitchLikelihood)
1830 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1831
1832 EmitBlockWithFallThrough(DefaultBlock, &S);
1833
1834 EmitStmt(S.getSubStmt());
1835}
1836
1837/// CollectStatementsForCase - Given the body of a 'switch' statement and a
1838/// constant value that is being switched on, see if we can dead code eliminate
1839/// the body of the switch to a simple series of statements to emit. Basically,
1840/// on a switch (5) we want to find these statements:
1841/// case 5:
1842/// printf(...); <--
1843/// ++i; <--
1844/// break;
1845///
1846/// and add them to the ResultStmts vector. If it is unsafe to do this
1847/// transformation (for example, one of the elided statements contains a label
1848/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1849/// should include statements after it (e.g. the printf() line is a substmt of
1850/// the case) then return CSFC_FallThrough. If we handled it and found a break
1851/// statement, then return CSFC_Success.
1852///
1853/// If Case is non-null, then we are looking for the specified case, checking
1854/// that nothing we jump over contains labels. If Case is null, then we found
1855/// the case and are looking for the break.
1856///
1857/// If the recursive walk actually finds our Case, then we set FoundCase to
1858/// true.
1859///
1860enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1861static CSFC_Result CollectStatementsForCase(const Stmt *S,
1862 const SwitchCase *Case,
1863 bool &FoundCase,
1864 SmallVectorImpl<const Stmt*> &ResultStmts) {
1865 // If this is a null statement, just succeed.
1866 if (!S)
1867 return Case ? CSFC_Success : CSFC_FallThrough;
1868
1869 // If this is the switchcase (case 4: or default) that we're looking for, then
1870 // we're in business. Just add the substatement.
1871 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1872 if (S == Case) {
1873 FoundCase = true;
1874 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1875 ResultStmts);
1876 }
1877
1878 // Otherwise, this is some other case or default statement, just ignore it.
1879 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1880 ResultStmts);
1881 }
1882
1883 // If we are in the live part of the code and we found our break statement,
1884 // return a success!
1885 if (!Case && isa<BreakStmt>(S))
1886 return CSFC_Success;
1887
1888 // If this is a compound statement, then it might contain the SwitchCase, the
1889 // break, or neither.
1890 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1891 // Handle this as two cases: we might be looking for the SwitchCase (if so
1892 // the skipped statements must be skippable) or we might already have it.
1893 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1894 bool StartedInLiveCode = FoundCase;
1895 unsigned StartSize = ResultStmts.size();
1896
1897 // If we've not found the case yet, scan through looking for it.
1898 if (Case) {
1899 // Keep track of whether we see a skipped declaration. The code could be
1900 // using the declaration even if it is skipped, so we can't optimize out
1901 // the decl if the kept statements might refer to it.
1902 bool HadSkippedDecl = false;
1903
1904 // If we're looking for the case, just see if we can skip each of the
1905 // substatements.
1906 for (; Case && I != E; ++I) {
1907 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1908
1909 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1910 case CSFC_Failure: return CSFC_Failure;
1911 case CSFC_Success:
1912 // A successful result means that either 1) the statement doesn't
1913 // have the case and is skippable, or 2) does contain the case value
1914 // and also contains the break to exit the switch. In the latter case,
1915 // we just verify the rest of the statements are elidable.
1916 if (FoundCase) {
1917 // If we found the case and skipped declarations, we can't do the
1918 // optimization.
1919 if (HadSkippedDecl)
1920 return CSFC_Failure;
1921
1922 for (++I; I != E; ++I)
1923 if (CodeGenFunction::ContainsLabel(*I, true))
1924 return CSFC_Failure;
1925 return CSFC_Success;
1926 }
1927 break;
1928 case CSFC_FallThrough:
1929 // If we have a fallthrough condition, then we must have found the
1930 // case and started to include statements. Consider the rest of the
1931 // statements in the compound statement as candidates for inclusion.
1932 assert(FoundCase && "Didn't find case but returned fallthrough?");
1933 // We recursively found Case, so we're not looking for it anymore.
1934 Case = nullptr;
1935
1936 // If we found the case and skipped declarations, we can't do the
1937 // optimization.
1938 if (HadSkippedDecl)
1939 return CSFC_Failure;
1940 break;
1941 }
1942 }
1943
1944 if (!FoundCase)
1945 return CSFC_Success;
1946
1947 assert(!HadSkippedDecl && "fallthrough after skipping decl");
1948 }
1949
1950 // If we have statements in our range, then we know that the statements are
1951 // live and need to be added to the set of statements we're tracking.
1952 bool AnyDecls = false;
1953 for (; I != E; ++I) {
1954 AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1955
1956 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1957 case CSFC_Failure: return CSFC_Failure;
1958 case CSFC_FallThrough:
1959 // A fallthrough result means that the statement was simple and just
1960 // included in ResultStmt, keep adding them afterwards.
1961 break;
1962 case CSFC_Success:
1963 // A successful result means that we found the break statement and
1964 // stopped statement inclusion. We just ensure that any leftover stmts
1965 // are skippable and return success ourselves.
1966 for (++I; I != E; ++I)
1967 if (CodeGenFunction::ContainsLabel(*I, true))
1968 return CSFC_Failure;
1969 return CSFC_Success;
1970 }
1971 }
1972
1973 // If we're about to fall out of a scope without hitting a 'break;', we
1974 // can't perform the optimization if there were any decls in that scope
1975 // (we'd lose their end-of-lifetime).
1976 if (AnyDecls) {
1977 // If the entire compound statement was live, there's one more thing we
1978 // can try before giving up: emit the whole thing as a single statement.
1979 // We can do that unless the statement contains a 'break;'.
1980 // FIXME: Such a break must be at the end of a construct within this one.
1981 // We could emit this by just ignoring the BreakStmts entirely.
1982 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1983 ResultStmts.resize(StartSize);
1984 ResultStmts.push_back(S);
1985 } else {
1986 return CSFC_Failure;
1987 }
1988 }
1989
1990 return CSFC_FallThrough;
1991 }
1992
1993 // Okay, this is some other statement that we don't handle explicitly, like a
1994 // for statement or increment etc. If we are skipping over this statement,
1995 // just verify it doesn't have labels, which would make it invalid to elide.
1996 if (Case) {
1997 if (CodeGenFunction::ContainsLabel(S, true))
1998 return CSFC_Failure;
1999 return CSFC_Success;
2000 }
2001
2002 // Otherwise, we want to include this statement. Everything is cool with that
2003 // so long as it doesn't contain a break out of the switch we're in.
2004 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
2005
2006 // Otherwise, everything is great. Include the statement and tell the caller
2007 // that we fall through and include the next statement as well.
2008 ResultStmts.push_back(S);
2009 return CSFC_FallThrough;
2010}
2011
2012/// FindCaseStatementsForValue - Find the case statement being jumped to and
2013/// then invoke CollectStatementsForCase to find the list of statements to emit
2014/// for a switch on constant. See the comment above CollectStatementsForCase
2015/// for more details.
2016static bool FindCaseStatementsForValue(const SwitchStmt &S,
2017 const llvm::APSInt &ConstantCondValue,
2018 SmallVectorImpl<const Stmt*> &ResultStmts,
2019 ASTContext &C,
2020 const SwitchCase *&ResultCase) {
2021 // First step, find the switch case that is being branched to. We can do this
2022 // efficiently by scanning the SwitchCase list.
2023 const SwitchCase *Case = S.getSwitchCaseList();
2024 const DefaultStmt *DefaultCase = nullptr;
2025
2026 for (; Case; Case = Case->getNextSwitchCase()) {
2027 // It's either a default or case. Just remember the default statement in
2028 // case we're not jumping to any numbered cases.
2029 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
2030 DefaultCase = DS;
2031 continue;
2032 }
2033
2034 // Check to see if this case is the one we're looking for.
2035 const CaseStmt *CS = cast<CaseStmt>(Case);
2036 // Don't handle case ranges yet.
2037 if (CS->getRHS()) return false;
2038
2039 // If we found our case, remember it as 'case'.
2040 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
2041 break;
2042 }
2043
2044 // If we didn't find a matching case, we use a default if it exists, or we
2045 // elide the whole switch body!
2046 if (!Case) {
2047 // It is safe to elide the body of the switch if it doesn't contain labels
2048 // etc. If it is safe, return successfully with an empty ResultStmts list.
2049 if (!DefaultCase)
2050 return !CodeGenFunction::ContainsLabel(&S);
2051 Case = DefaultCase;
2052 }
2053
2054 // Ok, we know which case is being jumped to, try to collect all the
2055 // statements that follow it. This can fail for a variety of reasons. Also,
2056 // check to see that the recursive walk actually found our case statement.
2057 // Insane cases like this can fail to find it in the recursive walk since we
2058 // don't handle every stmt kind:
2059 // switch (4) {
2060 // while (1) {
2061 // case 4: ...
2062 bool FoundCase = false;
2063 ResultCase = Case;
2064 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
2065 ResultStmts) != CSFC_Failure &&
2066 FoundCase;
2067}
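// Illustration (not from CGStmt.cpp): a constant-folded switch that the two
// helpers above reduce to straight-line code:
//
//   enum { kMode = 2 };
//   switch (kMode) {
//   case 1: fast_path(); break;   // elided
//   case 2: slow_path(); break;   // only this statement list is emitted
//   default: fallback(); break;   // elided
//   }
//
// FindCaseStatementsForValue locates "case 2:" and CollectStatementsForCase
// gathers the statements up to the break; the fold is abandoned (and the full
// switch emitted) if a skipped region contains a label or declaration that
// later code might still reach.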
2068
2069static std::optional<SmallVector<uint64_t, 16>>
2070getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
2071 // Are there enough branches to weight them?
2072 if (Likelihoods.size() <= 1)
2073 return std::nullopt;
2074
2075 uint64_t NumUnlikely = 0;
2076 uint64_t NumNone = 0;
2077 uint64_t NumLikely = 0;
2078 for (const auto LH : Likelihoods) {
2079 switch (LH) {
2080 case Stmt::LH_Unlikely:
2081 ++NumUnlikely;
2082 break;
2083 case Stmt::LH_None:
2084 ++NumNone;
2085 break;
2086 case Stmt::LH_Likely:
2087 ++NumLikely;
2088 break;
2089 }
2090 }
2091
2092 // Is there a likelihood attribute used?
2093 if (NumUnlikely == 0 && NumLikely == 0)
2094 return std::nullopt;
2095
2096 // When multiple cases share the same code they can be combined during
2097 // optimization. In that case the weights of the branch will be the sum of
2098 // the individual weights. Make sure the combined sum of all neutral cases
2099 // doesn't exceed the value of a single likely attribute.
2100 // The additions both avoid divisions by 0 and make sure the weights of None
2101 // don't exceed the weight of Likely.
2102 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2103 const uint64_t None = Likely / (NumNone + 1);
2104 const uint64_t Unlikely = 0;
2105
2106 SmallVector<uint64_t, 16> Result;
2107 Result.reserve(Likelihoods.size());
2108 for (const auto LH : Likelihoods) {
2109 switch (LH) {
2110 case Stmt::LH_Unlikely:
2111 Result.push_back(Unlikely);
2112 break;
2113 case Stmt::LH_None:
2114 Result.push_back(None);
2115 break;
2116 case Stmt::LH_Likely:
2117 Result.push_back(Likely);
2118 break;
2119 }
2120 }
2121
2122 return Result;
2123}
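// Worked example (not from the source): for a likelihood list containing one
// LH_Likely, two LH_None (say the default slot plus one plain case) and one
// LH_Unlikely entry, Likely = INT32_MAX / (1 + 2) = 715827882,
// None = Likely / (2 + 1) = 238609294 and Unlikely = 0; even if the two
// neutral destinations are later merged by the optimizer, their combined
// weight (477218588) stays below the single likely weight.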
2124
2125void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
2126 // Handle nested switch statements.
2127 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2128 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2129 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2130 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2131
2132 // See if we can constant fold the condition of the switch and therefore only
2133 // emit the live case statement (if any) of the switch.
2134 llvm::APSInt ConstantCondValue;
2135 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
2136 SmallVector<const Stmt*, 4> CaseStmts;
2137 const SwitchCase *Case = nullptr;
2138 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2139 getContext(), Case)) {
2140 if (Case)
2141 incrementProfileCounter(Case);
2142 RunCleanupsScope ExecutedScope(*this);
2143
2144 if (S.getInit())
2145 EmitStmt(S.getInit());
2146
2147 // Emit the condition variable if needed inside the entire cleanup scope
2148 // used by this special case for constant folded switches.
2149 if (S.getConditionVariable())
2150 EmitDecl(*S.getConditionVariable());
2151
2152 // At this point, we are no longer "within" a switch instance, so
2153 // we can temporarily enforce this to ensure that any embedded case
2154 // statements are not emitted.
2155 SwitchInsn = nullptr;
2156
2157 // Okay, we can dead code eliminate everything except this case. Emit the
2158 // specified series of statements and we're good.
2159 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
2160 EmitStmt(CaseStmts[i]);
2161 incrementProfileCounter(&S);
2162
2163 // Now we want to restore the saved switch instance so that nested
2164 // switches continue to function properly
2165 SwitchInsn = SavedSwitchInsn;
2166
2167 return;
2168 }
2169 }
2170
2171 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2172
2173 RunCleanupsScope ConditionScope(*this);
2174
2175 if (S.getInit())
2176 EmitStmt(S.getInit());
2177
2178 if (S.getConditionVariable())
2179 EmitDecl(*S.getConditionVariable());
2180 llvm::Value *CondV = EmitScalarExpr(S.getCond());
2181
2182 // Create basic block to hold stuff that comes after switch
2183 // statement. We also need to create a default block now so that
2184 // explicit case ranges tests can have a place to jump to on
2185 // failure.
2186 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2187 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
2188 if (PGO.haveRegionCounts()) {
2189 // Walk the SwitchCase list to find how many there are.
2190 uint64_t DefaultCount = 0;
2191 unsigned NumCases = 0;
2192 for (const SwitchCase *Case = S.getSwitchCaseList();
2193 Case;
2194 Case = Case->getNextSwitchCase()) {
2195 if (isa<DefaultStmt>(Case))
2196 DefaultCount = getProfileCount(Case);
2197 NumCases += 1;
2198 }
2199 SwitchWeights = new SmallVector<uint64_t, 16>();
2200 SwitchWeights->reserve(NumCases);
2201 // The default needs to be first. We store the edge count, so we already
2202 // know the right weight.
2203 SwitchWeights->push_back(DefaultCount);
2204 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2205 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2206 // Initialize the default case.
2207 SwitchLikelihood->push_back(Stmt::LH_None);
2208 }
2209
2210 CaseRangeBlock = DefaultBlock;
2211
2212 // Clear the insertion point to indicate we are in unreachable code.
2213 Builder.ClearInsertionPoint();
2214
2215 // All break statements jump to NextBlock. If BreakContinueStack is non-empty
2216 // then reuse last ContinueBlock.
2217 JumpDest OuterContinue;
2218 if (!BreakContinueStack.empty())
2219 OuterContinue = BreakContinueStack.back().ContinueBlock;
2220
2221 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
2222
2223 // Emit switch body.
2224 EmitStmt(S.getBody());
2225
2226 BreakContinueStack.pop_back();
2227
2228 // Update the default block in case explicit case range tests have
2229 // been chained on top.
2230 SwitchInsn->setDefaultDest(CaseRangeBlock);
2231
2232 // If a default was never emitted:
2233 if (!DefaultBlock->getParent()) {
2234 // If we have cleanups, emit the default block so that there's a
2235 // place to jump through the cleanups from.
2236 if (ConditionScope.requiresCleanups()) {
2237 EmitBlock(DefaultBlock);
2238
2239 // Otherwise, just forward the default block to the switch end.
2240 } else {
2241 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2242 delete DefaultBlock;
2243 }
2244 }
2245
2246 ConditionScope.ForceCleanup();
2247
2248 // Emit continuation.
2249 EmitBlock(SwitchExit.getBlock(), true);
2250 incrementProfileCounter(&S);
2251
2252 // If the switch has a condition wrapped by __builtin_unpredictable,
2253 // create metadata that specifies that the switch is unpredictable.
2254 // Don't bother if not optimizing because that metadata would not be used.
2255 auto *Call = dyn_cast<CallExpr>(S.getCond());
2256 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2257 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2258 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2259 llvm::MDBuilder MDHelper(getLLVMContext());
2260 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2261 MDHelper.createUnpredictable());
2262 }
2263 }
2264
2265 if (SwitchWeights) {
2266 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2267 "switch weights do not match switch cases");
2268 // If there's only one jump destination there's no sense weighting it.
2269 if (SwitchWeights->size() > 1)
2270 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2271 createProfileWeights(*SwitchWeights));
2272 delete SwitchWeights;
2273 } else if (SwitchLikelihood) {
2274 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2275 "switch likelihoods do not match switch cases");
2276 std::optional<SmallVector<uint64_t, 16>> LHW =
2277 getLikelihoodWeights(*SwitchLikelihood);
2278 if (LHW) {
2279 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2280 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2281 createProfileWeights(*LHW));
2282 }
2283 delete SwitchLikelihood;
2284 }
2285 SwitchInsn = SavedSwitchInsn;
2286 SwitchWeights = SavedSwitchWeights;
2287 SwitchLikelihood = SavedSwitchLikelihood;
2288 CaseRangeBlock = SavedCRBlock;
2289}
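// Illustration (not from CGStmt.cpp): the __builtin_unpredictable handling at
// the end of EmitSwitchStmt. When optimizing, a switch such as
//
//   switch (__builtin_unpredictable(next_token(p))) {   // hypothetical lexer
//   case TOK_PLUS:  return parse_add(p);
//   case TOK_MINUS: return parse_sub(p);
//   default:        return parse_primary(p);
//   }
//
// gets !unpredictable metadata on the generated SwitchInst, telling later
// passes not to assume any one case dominates.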
2290
2291static std::string
2292SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2293 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
2294 std::string Result;
2295
2296 while (*Constraint) {
2297 switch (*Constraint) {
2298 default:
2299 Result += Target.convertConstraint(Constraint);
2300 break;
2301 // Ignore these
2302 case '*':
2303 case '?':
2304 case '!':
2305 case '=': // Will see this and the following in mult-alt constraints.
2306 case '+':
2307 break;
2308 case '#': // Ignore the rest of the constraint alternative.
2309 while (Constraint[1] && Constraint[1] != ',')
2310 Constraint++;
2311 break;
2312 case '&':
2313 case '%':
2314 Result += *Constraint;
2315 while (Constraint[1] && Constraint[1] == *Constraint)
2316 Constraint++;
2317 break;
2318 case ',':
2319 Result += "|";
2320 break;
2321 case 'g':
2322 Result += "imr";
2323 break;
2324 case '[': {
2325 assert(OutCons &&
2326 "Must pass output names to constraints with a symbolic name");
2327 unsigned Index;
2328 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2329 assert(result && "Could not resolve symbolic name"); (void)result;
2330 Result += llvm::utostr(Index);
2331 break;
2332 }
2333 }
2334
2335 Constraint++;
2336 }
2337
2338 return Result;
2339}
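// Illustration (not from CGStmt.cpp): the rewriting SimplifyConstraint applies
// to GCC constraint strings before they reach LLVM. For a statement like
//
//   asm("add %1, %0" : "=r"(out) : "g"(in));
//
// the input constraint "g" becomes "imr", alternatives separated by ',' are
// joined with '|', and modifiers such as '=', '+', '*' and '?' are dropped
// here (the '=' prefix for outputs is re-added by the caller).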
2340
2341/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
2342/// as using a particular register add that as a constraint that will be used
2343/// in this asm stmt.
2344static std::string
2345AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2346 const TargetInfo &Target, CodeGenModule &CGM,
2347 const AsmStmt &Stmt, const bool EarlyClobber,
2348 std::string *GCCReg = nullptr) {
2349 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2350 if (!AsmDeclRef)
2351 return Constraint;
2352 const ValueDecl &Value = *AsmDeclRef->getDecl();
2353 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2354 if (!Variable)
2355 return Constraint;
2356 if (Variable->getStorageClass() != SC_Register)
2357 return Constraint;
2358 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2359 if (!Attr)
2360 return Constraint;
2361 StringRef Register = Attr->getLabel();
2362 assert(Target.isValidGCCRegisterName(Register));
2363 // We're using validateOutputConstraint here because we only care if
2364 // this is a register constraint.
2365 TargetInfo::ConstraintInfo Info(Constraint, "");
2366 if (Target.validateOutputConstraint(Info) &&
2367 !Info.allowsRegister()) {
2368 CGM.ErrorUnsupported(&Stmt, "__asm__");
2369 return Constraint;
2370 }
2371 // Canonicalize the register here before returning it.
2372 Register = Target.getNormalizedGCCRegisterName(Register);
2373 if (GCCReg != nullptr)
2374 *GCCReg = Register.str();
2375 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2376}
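// Illustration (not from CGStmt.cpp): the explicit register variable that
// AddVariableConstraints recognizes. Given something like
//
//   register unsigned long sp asm("sp");
//   asm volatile("" : "=r"(sp));
//
// the "r" constraint is rewritten to "{sp}" (or "&{sp}" for an early-clobber
// output), pinning the operand to that physical register.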
2377
2378std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2379 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2380 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2381 if (Info.allowsRegister() || !Info.allowsMemory()) {
2382 if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2383 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2384
2385 llvm::Type *Ty = ConvertType(InputType);
2386 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2387 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2388 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2389 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2390
2391 return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
2392 nullptr};
2393 }
2394 }
2395
2396 Address Addr = InputValue.getAddress();
2397 ConstraintStr += '*';
2398 return {InputValue.getPointer(*this), Addr.getElementType()};
2399}
2400
2401std::pair<llvm::Value *, llvm::Type *>
2402CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2403 const Expr *InputExpr,
2404 std::string &ConstraintStr) {
2405 // If this can't be a register or memory, i.e., has to be a constant
2406 // (immediate or symbolic), try to emit it as such.
2407 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2408 if (Info.requiresImmediateConstant()) {
2409 Expr::EvalResult EVResult;
2410 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2411
2412 llvm::APSInt IntResult;
2413 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2414 getContext()))
2415 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2416 }
2417
2418 Expr::EvalResult Result;
2419 if (InputExpr->EvaluateAsInt(Result, getContext()))
2420 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2421 nullptr};
2422 }
2423
2424 if (Info.allowsRegister() || !Info.allowsMemory())
2425 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2426 return {EmitScalarExpr(InputExpr), nullptr};
2427 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2428 return {EmitScalarExpr(InputExpr), nullptr};
2429 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2430 LValue Dest = EmitLValue(InputExpr);
2431 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2432 InputExpr->getExprLoc());
2433}
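// Illustration (not from CGStmt.cpp): an operand that can be neither a
// register nor memory. For
//
//   asm volatile("int %0" : : "i"(0x80));
//
// the "i" constraint requires an immediate, so EmitAsmInput evaluates the
// expression to a constant and passes an llvm::ConstantInt straight through
// instead of emitting a load.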
2434
2435/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2436/// asm call instruction. The !srcloc MDNode contains a list of constant
2437/// integers which are the source locations of the start of each line in the
2438/// asm.
2439static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2440 CodeGenFunction &CGF) {
2441 SmallVector<llvm::Metadata *, 8> Locs;
2442 // Add the location of the first line to the MDNode.
2443 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2444 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2445 StringRef StrVal = Str->getString();
2446 if (!StrVal.empty()) {
2447 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2448 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2449 unsigned StartToken = 0;
2450 unsigned ByteOffset = 0;
2451
2452 // Add the location of the start of each subsequent line of the asm to the
2453 // MDNode.
2454 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2455 if (StrVal[i] != '\n') continue;
2456 SourceLocation LineLoc = Str->getLocationOfByte(
2457 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2458 Locs.push_back(llvm::ConstantAsMetadata::get(
2459 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2460 }
2461 }
2462
2463 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2464}
2465
2466static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2467 bool HasUnwindClobber, bool ReadOnly,
2468 bool ReadNone, bool NoMerge, const AsmStmt &S,
2469 const std::vector<llvm::Type *> &ResultRegTypes,
2470 const std::vector<llvm::Type *> &ArgElemTypes,
2471 CodeGenFunction &CGF,
2472 std::vector<llvm::Value *> &RegResults) {
2473 if (!HasUnwindClobber)
2474 Result.addFnAttr(llvm::Attribute::NoUnwind);
2475
2476 if (NoMerge)
2477 Result.addFnAttr(llvm::Attribute::NoMerge);
2478 // Attach readnone and readonly attributes.
2479 if (!HasSideEffect) {
2480 if (ReadNone)
2481 Result.setDoesNotAccessMemory();
2482 else if (ReadOnly)
2483 Result.setOnlyReadsMemory();
2484 }
2485
2486 // Add elementtype attribute for indirect constraints.
2487 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2488 if (Pair.value()) {
2489 auto Attr = llvm::Attribute::get(
2490 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2491 Result.addParamAttr(Pair.index(), Attr);
2492 }
2493 }
2494
2495 // Slap the source location of the inline asm into a !srcloc metadata on the
2496 // call.
2497 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2498 Result.setMetadata("srcloc",
2499 getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2500 else {
2501 // At least put the line number on MS inline asm blobs.
2502 llvm::Constant *Loc =
2503 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2504 Result.setMetadata("srcloc",
2505 llvm::MDNode::get(CGF.getLLVMContext(),
2506 llvm::ConstantAsMetadata::get(Loc)));
2507 }
2508
2509 if (CGF.getLangOpts().assumeFunctionsAreConvergent())
2510 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2511 // convergent (meaning, they may call an intrinsically convergent op, such
2512 // as bar.sync, and so can't have certain optimizations applied around
2513 // them).
2514 Result.addFnAttr(llvm::Attribute::Convergent);
2515 // Extract all of the register value results from the asm.
2516 if (ResultRegTypes.size() == 1) {
2517 RegResults.push_back(&Result);
2518 } else {
2519 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2520 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2521 RegResults.push_back(Tmp);
2522 }
2523 }
2524}
2525
2526static void
2527EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
2528 const llvm::ArrayRef<llvm::Value *> RegResults,
2529 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2530 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2531 const llvm::ArrayRef<LValue> ResultRegDests,
2532 const llvm::ArrayRef<QualType> ResultRegQualTys,
2533 const llvm::BitVector &ResultTypeRequiresCast,
2534 const llvm::BitVector &ResultRegIsFlagReg) {
2535 CGBuilderTy &Builder = CGF.Builder;
2536 CodeGenModule &CGM = CGF.CGM;
2537 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2538
2539 assert(RegResults.size() == ResultRegTypes.size());
2540 assert(RegResults.size() == ResultTruncRegTypes.size());
2541 assert(RegResults.size() == ResultRegDests.size());
2542 // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2543 // in which case its size may grow.
2544 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2545 assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2546
2547 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2548 llvm::Value *Tmp = RegResults[i];
2549 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2550
2551 if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2552 // Target must guarantee the Value `Tmp` here is lowered to a boolean
2553 // value.
2554 llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
2555 llvm::Value *IsBooleanValue =
2556 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
2557 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2558 Builder.CreateCall(FnAssume, IsBooleanValue);
2559 }
2560
2561 // If the result type of the LLVM IR asm doesn't match the result type of
2562 // the expression, do the conversion.
2563 if (ResultRegTypes[i] != TruncTy) {
2564
2565 // Truncate the integer result to the right size, note that TruncTy can be
2566 // a pointer.
2567 if (TruncTy->isFloatingPointTy())
2568 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2569 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2570 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2571 Tmp = Builder.CreateTrunc(
2572 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2573 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2574 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2575 uint64_t TmpSize =
2576 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2577 Tmp = Builder.CreatePtrToInt(
2578 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2579 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2580 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2581 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2582 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2583 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2584 }
2585 }
2586
2587 LValue Dest = ResultRegDests[i];
2588 // ResultTypeRequiresCast elements correspond to the first
2589 // ResultTypeRequiresCast.size() elements of RegResults.
2590 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2591 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2592 Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
2593 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2594 Builder.CreateStore(Tmp, A);
2595 continue;
2596 }
2597
2598 QualType Ty =
2599 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2600 if (Ty.isNull()) {
2601 const Expr *OutExpr = S.getOutputExpr(i);
2602 CGM.getDiags().Report(OutExpr->getExprLoc(),
2603 diag::err_store_value_to_reg);
2604 return;
2605 }
2606 Dest = CGF.MakeAddrLValue(A, Ty);
2607 }
2608 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2609 }
2610}
2611
2612static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
2613 const AsmStmt &S) {
2614 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2615
2616 StringRef Asm;
2617 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2618 Asm = GCCAsm->getAsmString()->getString();
2619
2620 auto &Ctx = CGF->CGM.getLLVMContext();
2621
2622 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2623 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2624 {StrTy->getType()}, false);
2625 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2626
2627 CGF->Builder.CreateCall(UBF, {StrTy});
2628}
2629
2630void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2631 // Pop all cleanup blocks at the end of the asm statement.
2632 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2633
2634 // Assemble the final asm string.
2635 std::string AsmString = S.generateAsmString(getContext());
2636
2637 // Get all the output and input constraints together.
2638 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2639 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2640
2641 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2642 bool IsValidTargetAsm = true;
2643 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2644 StringRef Name;
2645 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2646 Name = GAS->getOutputName(i);
2647 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2648 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2649 if (IsHipStdPar && !IsValid)
2650 IsValidTargetAsm = false;
2651 else
2652 assert(IsValid && "Failed to parse output constraint");
2653 OutputConstraintInfos.push_back(Info);
2654 }
2655
2656 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2657 StringRef Name;
2658 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2659 Name = GAS->getInputName(i);
2660 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2661 bool IsValid =
2662 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2663 if (IsHipStdPar && !IsValid)
2664 IsValidTargetAsm = false;
2665 else
2666 assert(IsValid && "Failed to parse input constraint");
2667 InputConstraintInfos.push_back(Info);
2668 }
2669
2670 if (!IsValidTargetAsm)
2671 return EmitHipStdParUnsupportedAsm(this, S);
2672
2673 std::string Constraints;
2674
2675 std::vector<LValue> ResultRegDests;
2676 std::vector<QualType> ResultRegQualTys;
2677 std::vector<llvm::Type *> ResultRegTypes;
2678 std::vector<llvm::Type *> ResultTruncRegTypes;
2679 std::vector<llvm::Type *> ArgTypes;
2680 std::vector<llvm::Type *> ArgElemTypes;
2681 std::vector<llvm::Value*> Args;
2682 llvm::BitVector ResultTypeRequiresCast;
2683 llvm::BitVector ResultRegIsFlagReg;
2684
2685 // Keep track of inout constraints.
2686 std::string InOutConstraints;
2687 std::vector<llvm::Value*> InOutArgs;
2688 std::vector<llvm::Type*> InOutArgTypes;
2689 std::vector<llvm::Type*> InOutArgElemTypes;
2690
2691 // Keep track of out constraints for tied input operand.
2692 std::vector<std::string> OutputConstraints;
2693
2694 // Keep track of defined physregs.
2695 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2696
2697 // An inline asm can be marked readonly if it meets the following conditions:
2698 // - it doesn't have any sideeffects
2699 // - it doesn't clobber memory
2700 // - it doesn't return a value by-reference
2701 // It can be marked readnone if it doesn't have any input memory constraints
2702 // in addition to meeting the conditions listed above.
2703 bool ReadOnly = true, ReadNone = true;
2704
2705 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2706 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2707
2708 // Simplify the output constraint.
2709 std::string OutputConstraint(S.getOutputConstraint(i));
2710 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2711 getTarget(), &OutputConstraintInfos);
2712
2713 const Expr *OutExpr = S.getOutputExpr(i);
2714 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2715
2716 std::string GCCReg;
2717 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2718 getTarget(), CGM, S,
2719 Info.earlyClobber(),
2720 &GCCReg);
2721 // Give an error on multiple outputs to same physreg.
2722 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2723 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2724
2725 OutputConstraints.push_back(OutputConstraint);
2726 LValue Dest = EmitLValue(OutExpr);
2727 if (!Constraints.empty())
2728 Constraints += ',';
2729
2730 // If this is a register output, then make the inline asm return it
2731 // by-value. If this is a memory result, return the value by-reference.
2732 QualType QTy = OutExpr->getType();
2733 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2734 hasAggregateEvaluationKind(QTy);
2735 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2736
2737 Constraints += "=" + OutputConstraint;
2738 ResultRegQualTys.push_back(QTy);
2739 ResultRegDests.push_back(Dest);
2740
2741 bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc");
2742 ResultRegIsFlagReg.push_back(IsFlagReg);
2743
2744 llvm::Type *Ty = ConvertTypeForMem(QTy);
2745 const bool RequiresCast = Info.allowsRegister() &&
2746 (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2747 Ty->isAggregateType());
2748
2749 ResultTruncRegTypes.push_back(Ty);
2750 ResultTypeRequiresCast.push_back(RequiresCast);
2751
2752 if (RequiresCast) {
2753 unsigned Size = getContext().getTypeSize(QTy);
2754 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2755 }
2756 ResultRegTypes.push_back(Ty);
2757 // If this output is tied to an input, and if the input is larger, then
2758 // we need to set the actual result type of the inline asm node to be the
2759 // same as the input type.
2760 if (Info.hasMatchingInput()) {
2761 unsigned InputNo;
2762 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2763 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2764 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2765 break;
2766 }
2767 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2768
2769 QualType InputTy = S.getInputExpr(InputNo)->getType();
2770 QualType OutputType = OutExpr->getType();
2771
2772 uint64_t InputSize = getContext().getTypeSize(InputTy);
2773 if (getContext().getTypeSize(OutputType) < InputSize) {
2774 // Form the asm to return the value as a larger integer or fp type.
2775 ResultRegTypes.back() = ConvertType(InputTy);
2776 }
2777 }
2778 if (llvm::Type* AdjTy =
2779 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2780 ResultRegTypes.back()))
2781 ResultRegTypes.back() = AdjTy;
2782 else {
2783 CGM.getDiags().Report(S.getAsmLoc(),
2784 diag::err_asm_invalid_type_in_input)
2785 << OutExpr->getType() << OutputConstraint;
2786 }
2787
2788 // Update largest vector width for any vector types.
2789 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2790 LargestVectorWidth =
2791 std::max((uint64_t)LargestVectorWidth,
2792 VT->getPrimitiveSizeInBits().getKnownMinValue());
2793 } else {
2794 Address DestAddr = Dest.getAddress();
2795 // Matrix types in memory are represented by arrays, but accessed through
2796 // vector pointers, with the alignment specified on the access operation.
2797 // For inline assembly, update pointer arguments to use vector pointers.
2798 // Otherwise there will be a mis-match if the matrix is also an
2799 // input-argument which is represented as vector.
2800 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2801 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2802
2803 ArgTypes.push_back(DestAddr.getType());
2804 ArgElemTypes.push_back(DestAddr.getElementType());
2805 Args.push_back(DestAddr.emitRawPointer(*this));
2806 Constraints += "=*";
2807 Constraints += OutputConstraint;
2808 ReadOnly = ReadNone = false;
2809 }
2810
2811 if (Info.isReadWrite()) {
2812 InOutConstraints += ',';
2813
2814 const Expr *InputExpr = S.getOutputExpr(i);
2815 llvm::Value *Arg;
2816 llvm::Type *ArgElemType;
2817 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2818 Info, Dest, InputExpr->getType(), InOutConstraints,
2819 InputExpr->getExprLoc());
2820
2821 if (llvm::Type* AdjTy =
2822 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2823 Arg->getType()))
2824 Arg = Builder.CreateBitCast(Arg, AdjTy);
2825
2826 // Update largest vector width for any vector types.
2827 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2828 LargestVectorWidth =
2829 std::max((uint64_t)LargestVectorWidth,
2830 VT->getPrimitiveSizeInBits().getKnownMinValue());
2831 // Only tie earlyclobber physregs.
2832 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2833 InOutConstraints += llvm::utostr(i);
2834 else
2835 InOutConstraints += OutputConstraint;
2836
2837 InOutArgTypes.push_back(Arg->getType());
2838 InOutArgElemTypes.push_back(ArgElemType);
2839 InOutArgs.push_back(Arg);
2840 }
2841 }
2842
2843 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2844 // to the return value slot. Only do this when returning in registers.
2845 if (isa<MSAsmStmt>(&S)) {
2846 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2847 if (RetAI.isDirect() || RetAI.isExtend()) {
2848 // Make a fake lvalue for the return value slot.
2849 LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy);
2850 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2851 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2852 ResultRegDests, AsmString, S.getNumOutputs());
2853 SawAsmBlock = true;
2854 }
2855 }
2856
2857 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2858 const Expr *InputExpr = S.getInputExpr(i);
2859
2860 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2861
2862 if (Info.allowsMemory())
2863 ReadNone = false;
2864
2865 if (!Constraints.empty())
2866 Constraints += ',';
2867
2868 // Simplify the input constraint.
2869 std::string InputConstraint(S.getInputConstraint(i));
2870 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2871 &OutputConstraintInfos);
2872
2873 InputConstraint = AddVariableConstraints(
2874 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2875 getTarget(), CGM, S, false /* No EarlyClobber */);
2876
2877 std::string ReplaceConstraint (InputConstraint);
2878 llvm::Value *Arg;
2879 llvm::Type *ArgElemType;
2880 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
2881
2882 // If this input argument is tied to a larger output result, extend the
2883 // input to be the same size as the output. The LLVM backend wants to see
2884 // the input and output of a matching constraint be the same size. Note
2885 // that GCC does not define what the top bits are here. We use zext because
2886 // that is usually cheaper, but LLVM IR should really get an anyext someday.
2887 if (Info.hasTiedOperand()) {
2888 unsigned Output = Info.getTiedOperand();
2889 QualType OutputType = S.getOutputExpr(Output)->getType();
2890 QualType InputTy = InputExpr->getType();
2891
2892 if (getContext().getTypeSize(OutputType) >
2893 getContext().getTypeSize(InputTy)) {
2894 // Use ptrtoint as appropriate so that we can do our extension.
2895 if (isa<llvm::PointerType>(Arg->getType()))
2896 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2897 llvm::Type *OutputTy = ConvertType(OutputType);
2898 if (isa<llvm::IntegerType>(OutputTy))
2899 Arg = Builder.CreateZExt(Arg, OutputTy);
2900 else if (isa<llvm::PointerType>(OutputTy))
2901 Arg = Builder.CreateZExt(Arg, IntPtrTy);
2902 else if (OutputTy->isFloatingPointTy())
2903 Arg = Builder.CreateFPExt(Arg, OutputTy);
2904 }
2905 // Deal with the tied operands' constraint code in adjustInlineAsmType.
2906 ReplaceConstraint = OutputConstraints[Output];
2907 }
2908 if (llvm::Type* AdjTy =
2909 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2910 Arg->getType()))
2911 Arg = Builder.CreateBitCast(Arg, AdjTy);
2912 else
2913 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2914 << InputExpr->getType() << InputConstraint;
2915
2916 // Update largest vector width for any vector types.
2917 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2918 LargestVectorWidth =
2919 std::max((uint64_t)LargestVectorWidth,
2920 VT->getPrimitiveSizeInBits().getKnownMinValue());
2921
2922 ArgTypes.push_back(Arg->getType());
2923 ArgElemTypes.push_back(ArgElemType);
2924 Args.push_back(Arg);
2925 Constraints += InputConstraint;
2926 }
2927
2928 // Append the "input" part of inout constraints.
2929 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2930 ArgTypes.push_back(InOutArgTypes[i]);
2931 ArgElemTypes.push_back(InOutArgElemTypes[i]);
2932 Args.push_back(InOutArgs[i]);
2933 }
2934 Constraints += InOutConstraints;
2935
2936 // Labels
2937 SmallVector<llvm::BasicBlock *, 16> Transfer;
2938 llvm::BasicBlock *Fallthrough = nullptr;
2939 bool IsGCCAsmGoto = false;
2940 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2941 IsGCCAsmGoto = GS->isAsmGoto();
2942 if (IsGCCAsmGoto) {
2943 for (const auto *E : GS->labels()) {
2944 JumpDest Dest = getJumpDestForLabel(E->getLabel());
2945 Transfer.push_back(Dest.getBlock());
2946 if (!Constraints.empty())
2947 Constraints += ',';
2948 Constraints += "!i";
2949 }
2950 Fallthrough = createBasicBlock("asm.fallthrough");
2951 }
2952 }
2953
2954 bool HasUnwindClobber = false;
2955
2956 // Clobbers
2957 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2958 StringRef Clobber = S.getClobber(i);
2959
2960 if (Clobber == "memory")
2961 ReadOnly = ReadNone = false;
2962 else if (Clobber == "unwind") {
2963 HasUnwindClobber = true;
2964 continue;
2965 } else if (Clobber != "cc") {
2966 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2967 if (CGM.getCodeGenOpts().StackClashProtector &&
2968 getTarget().isSPRegName(Clobber)) {
2969 CGM.getDiags().Report(S.getAsmLoc(),
2970 diag::warn_stack_clash_protection_inline_asm);
2971 }
2972 }
2973
2974 if (isa<MSAsmStmt>(&S)) {
2975 if (Clobber == "eax" || Clobber == "edx") {
2976 if (Constraints.find("=&A") != std::string::npos)
2977 continue;
2978 std::string::size_type position1 =
2979 Constraints.find("={" + Clobber.str() + "}");
2980 if (position1 != std::string::npos) {
2981 Constraints.insert(position1 + 1, "&");
2982 continue;
2983 }
2984 std::string::size_type position2 = Constraints.find("=A");
2985 if (position2 != std::string::npos) {
2986 Constraints.insert(position2 + 1, "&");
2987 continue;
2988 }
2989 }
2990 }
2991 if (!Constraints.empty())
2992 Constraints += ',';
2993
2994 Constraints += "~{";
2995 Constraints += Clobber;
2996 Constraints += '}';
2997 }
2998
2999 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
3000 "unwind clobber can't be used with asm goto");
3001
3002 // Add machine specific clobbers
3003 std::string_view MachineClobbers = getTarget().getClobbers();
3004 if (!MachineClobbers.empty()) {
3005 if (!Constraints.empty())
3006 Constraints += ',';
3007 Constraints += MachineClobbers;
3008 }
3009
3010 llvm::Type *ResultType;
3011 if (ResultRegTypes.empty())
3012 ResultType = VoidTy;
3013 else if (ResultRegTypes.size() == 1)
3014 ResultType = ResultRegTypes[0];
3015 else
3016 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
3017
3018 llvm::FunctionType *FTy =
3019 llvm::FunctionType::get(ResultType, ArgTypes, false);
3020
3021 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3022
3023 llvm::InlineAsm::AsmDialect GnuAsmDialect =
3024 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
3025 ? llvm::InlineAsm::AD_ATT
3026 : llvm::InlineAsm::AD_Intel;
3027 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
3028 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3029
3030 llvm::InlineAsm *IA = llvm::InlineAsm::get(
3031 FTy, AsmString, Constraints, HasSideEffect,
3032 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
3033 std::vector<llvm::Value*> RegResults;
3034 llvm::CallBrInst *CBR;
3035 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3036 CBRRegResults;
3037 if (IsGCCAsmGoto) {
3038 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
3039 EmitBlock(Fallthrough);
3040 UpdateAsmCallInst(*CBR, HasSideEffect, false, ReadOnly, ReadNone,
3041 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
3042 *this, RegResults);
3043 // Because we are emitting code top to bottom, we don't have enough
3044 // information at this point to know precisely whether we have a critical
3045 // edge. If we have outputs, split all indirect destinations.
3046 if (!RegResults.empty()) {
3047 unsigned i = 0;
3048 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3049 llvm::Twine SynthName = Dest->getName() + ".split";
3050 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
3051 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3052 Builder.SetInsertPoint(SynthBB);
3053
3054 if (ResultRegTypes.size() == 1) {
3055 CBRRegResults[SynthBB].push_back(CBR);
3056 } else {
3057 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3058 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
3059 CBRRegResults[SynthBB].push_back(Tmp);
3060 }
3061 }
3062
3063 EmitBranch(Dest);
3064 EmitBlock(SynthBB);
3065 CBR->setIndirectDest(i++, SynthBB);
3066 }
3067 }
3068 } else if (HasUnwindClobber) {
3069 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
3070 UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
3071 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
3072 *this, RegResults);
3073 } else {
3074 llvm::CallInst *Result =
3075 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
3076 UpdateAsmCallInst(*Result, HasSideEffect, false, ReadOnly, ReadNone,
3077 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
3078 *this, RegResults);
3079 }
3080
3081 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3082 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3083 ResultRegIsFlagReg);
3084
3085 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
3086 // different insertion point; one for each indirect destination and with
3087 // CBRRegResults rather than RegResults.
3088 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3089 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3090 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3091 Builder.SetInsertPoint(Succ, --(Succ->end()));
3092 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
3093 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3094 ResultTypeRequiresCast, ResultRegIsFlagReg);
3095 }
3096 }
3097}
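// Illustration (not from CGStmt.cpp): an "asm goto" with an output, the case
// emitted above through CreateCallBr. A hypothetical use:
//
//   int v;
//   asm goto("# body elided" : "=r"(v) : : : error);
//   return v;
//   error:
//   return -1;
//
// Each label contributes a "!i" constraint and an indirect destination of the
// callbr; when the asm has outputs, every indirect edge is split into a
// synthetic block so the results can be stored on those paths as well as on
// the fallthrough.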
3098
3099LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
3100 const RecordDecl *RD = S.getCapturedRecordDecl();
3101 QualType RecordTy = getContext().getRecordType(RD);
3102
3103 // Initialize the captured struct.
3104 LValue SlotLV =
3105 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
3106
3107 RecordDecl::field_iterator CurField = RD->field_begin();
3108 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
3109 E = S.capture_init_end();
3110 I != E; ++I, ++CurField) {
3111 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
3112 if (CurField->hasCapturedVLAType()) {
3113 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
3114 } else {
3115 EmitInitializerForField(*CurField, LV, *I);
3116 }
3117 }
3118
3119 return SlotLV;
3120}
3121
3122/// Generate an outlined function for the body of a CapturedStmt, store any
3123/// captured variables into the captured struct, and call the outlined function.
3124llvm::Function *
3125CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
3126 LValue CapStruct = InitCapturedStruct(S);
3127
3128 // Emit the CapturedDecl
3129 CodeGenFunction CGF(CGM, true);
3130 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3131 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3132 delete CGF.CapturedStmtInfo;
3133
3134 // Emit call to the helper function.
3135 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
3136
3137 return F;
3138}
3139
3140Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
3141 LValue CapStruct = InitCapturedStruct(S);
3142 return CapStruct.getAddress();
3143}
3144
3145/// Creates the outlined function for a CapturedStmt.
3146llvm::Function *
3147CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
3148 assert(CapturedStmtInfo &&
3149 "CapturedStmtInfo should be set when generating the captured function");
3150 const CapturedDecl *CD = S.getCapturedDecl();
3151 const RecordDecl *RD = S.getCapturedRecordDecl();
3152 SourceLocation Loc = S.getBeginLoc();
3153 assert(CD->hasBody() && "missing CapturedDecl body");
3154
3155 // Build the argument list.
3156 ASTContext &Ctx = CGM.getContext();
3157 FunctionArgList Args;
3158 Args.append(CD->param_begin(), CD->param_end());
3159
3160 // Create the function declaration.
3161 const CGFunctionInfo &FuncInfo =
3162 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
3163 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
3164
3165 llvm::Function *F =
3166 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3167 CapturedStmtInfo->getHelperName(), &CGM.getModule());
3168 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
3169 if (CD->isNothrow())
3170 F->addFnAttr(llvm::Attribute::NoUnwind);
3171
3172 // Generate the function.
3173 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
3174 CD->getBody()->getBeginLoc());
3175 // Set the context parameter in CapturedStmtInfo.
3176 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
3177 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
3178
3179 // Initialize variable-length arrays.
3180 LValue Base = MakeNaturalAlignRawAddrLValue(CapturedStmtInfo->getContextValue(),
3181 Ctx.getTagDeclType(RD));
3182 for (auto *FD : RD->fields()) {
3183 if (FD->hasCapturedVLAType()) {
3184 auto *ExprArg =
3185 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
3186 .getScalarVal();
3187 auto VAT = FD->getCapturedVLAType();
3188 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3189 }
3190 }
3191
3192 // If 'this' is captured, load it into CXXThisValue.
3193 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3194 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3195 LValue ThisLValue = EmitLValueForField(Base, FD);
3196 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3197 }
3198
3199 PGO.assignRegionCounters(GlobalDecl(CD), F);
3200 CapturedStmtInfo->EmitBody(*this, CD->getBody());
3201 FinishFunction(CD->getBodyRBrace());
3202
3203 return F;
3204}
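// Illustration (not from CGStmt.cpp): the kind of region the AST models as a
// CapturedStmt and that the helpers above outline, for example the body of
//
//   #pragma omp parallel
//   { use(x); }                   // 'use' is a hypothetical callee
//
// InitCapturedStruct materializes the capture record (here holding 'x') in
// the caller, and GenerateCapturedStmtFunction builds an internal helper that
// receives that record through its context parameter and runs the body.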
3205
3206namespace {
3207// Returns the first convergence entry/loop/anchor instruction found in |BB|.
3208// std::nullptr otherwise.
3209llvm::IntrinsicInst *getConvergenceToken(llvm::BasicBlock *BB) {
3210 for (auto &I : *BB) {
3211 auto *II = dyn_cast<llvm::IntrinsicInst>(&I);
3212 if (II && llvm::isConvergenceControlIntrinsic(II->getIntrinsicID()))
3213 return II;
3214 }
3215 return nullptr;
3216}
3217
3218} // namespace
3219
3220llvm::CallBase *
3221CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input,
3222 llvm::Value *ParentToken) {
3223 llvm::Value *bundleArgs[] = {ParentToken};
3224 llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
3225 auto Output = llvm::CallBase::addOperandBundle(
3226 Input, llvm::LLVMContext::OB_convergencectrl, OB, Input);
3227 Input->replaceAllUsesWith(Output);
3228 Input->eraseFromParent();
3229 return Output;
3230}
3231
3232llvm::IntrinsicInst *
3233CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB,
3234 llvm::Value *ParentToken) {
3235 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3236 if (BB->empty())
3237 Builder.SetInsertPoint(BB);
3238 else
3239 Builder.SetInsertPoint(BB->getFirstInsertionPt());
3240
3241 llvm::CallBase *CB = Builder.CreateIntrinsic(
3242 llvm::Intrinsic::experimental_convergence_loop, {}, {});
3243 Builder.restoreIP(IP);
3244
3245 llvm::CallBase *I = addConvergenceControlToken(CB, ParentToken);
3246 return cast<llvm::IntrinsicInst>(I);
3247}
3248
3249llvm::IntrinsicInst *
3250CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3251 llvm::BasicBlock *BB = &F->getEntryBlock();
3252 llvm::IntrinsicInst *Token = getConvergenceToken(BB);
3253 if (Token)
3254 return Token;
3255
3256 // Adding a convergence token requires the function to be marked as
3257 // convergent.
3258 F->setConvergent();
3259
3260 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3261 Builder.SetInsertPoint(&BB->front());
3262 llvm::CallBase *I = Builder.CreateIntrinsic(
3263 llvm::Intrinsic::experimental_convergence_entry, {}, {});
3264 assert(isa<llvm::IntrinsicInst>(I));
3265 Builder.restoreIP(IP);
3266
3267 return cast<llvm::IntrinsicInst>(I);
3268}
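An editor's aside (not part of CGStmt.cpp): the three helpers above wire up LLVM's experimental convergence control. An entry token is materialized as the first instruction of the entry block, loop tokens are nested beneath it, and each convergent call is tied to its innermost token through a "convergencectrl" operand bundle. The stand-alone sketch below rebuilds the same pattern for a single call using only public IRBuilder APIs; it illustrates the IR shape, not the exact code path CodeGenFunction takes.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("convergence-demo", Ctx);
  IRBuilder<> B(Ctx);

  auto *VoidFnTy = FunctionType::get(B.getVoidTy(), /*isVarArg=*/false);
  Function *F =
      Function::Create(VoidFnTy, GlobalValue::ExternalLinkage, "demo", &M);
  F->setConvergent(); // Required before convergence tokens may appear.
  B.SetInsertPoint(BasicBlock::Create(Ctx, "entry", F));

  // Equivalent of getOrEmitConvergenceEntryToken(): the entry intrinsic is the
  // first instruction of the entry block.
  CallInst *EntryTok =
      B.CreateIntrinsic(Intrinsic::experimental_convergence_entry, {}, {});

  // A convergent callee whose call carries the token, as in
  // addConvergenceControlToken().
  Function *Callee = Function::Create(VoidFnTy, GlobalValue::ExternalLinkage,
                                      "convergent_fn", &M);
  Callee->setConvergent();
  Value *BundleArgs[] = {EntryTok};
  B.CreateCall(Callee, {}, {OperandBundleDef("convergencectrl", BundleArgs)});

  B.CreateRetVoid();
  M.print(outs(), nullptr);
  return verifyModule(M, &errs()); // 0 (false) when the module is well formed.
}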