1//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Stmt nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGDebugInfo.h"
14#include "CGOpenMPRuntime.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "TargetInfo.h"
18#include "clang/AST/Attr.h"
19#include "clang/AST/Expr.h"
20#include "clang/AST/Stmt.h"
27#include "llvm/ADT/ArrayRef.h"
28#include "llvm/ADT/DenseMap.h"
29#include "llvm/ADT/SmallSet.h"
30#include "llvm/ADT/StringExtras.h"
31#include "llvm/IR/Assumptions.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/InlineAsm.h"
34#include "llvm/IR/Intrinsics.h"
35#include "llvm/IR/MDBuilder.h"
36#include "llvm/Support/SaveAndRestore.h"
37#include <optional>
38
39using namespace clang;
40using namespace CodeGen;
41
42//===----------------------------------------------------------------------===//
43// Statement Emission
44//===----------------------------------------------------------------------===//
45
46namespace llvm {
47extern cl::opt<bool> EnableSingleByteCoverage;
48} // namespace llvm
49
50void CodeGenFunction::EmitStopPoint(const Stmt *S) {
51 if (CGDebugInfo *DI = getDebugInfo()) {
52 SourceLocation Loc;
53 Loc = S->getBeginLoc();
54 DI->EmitLocation(Builder, Loc);
55
56 LastStopPoint = Loc;
57 }
58}
59
60void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
61 assert(S && "Null statement?");
62 PGO.setCurrentStmt(S);
63
64 // These statements have their own debug info handling.
65 if (EmitSimpleStmt(S, Attrs))
66 return;
67
68 // Check if we are generating unreachable code.
69 if (!HaveInsertPoint()) {
70 // If so, and the statement doesn't contain a label, then we do not need to
71 // generate actual code. This is safe because (1) the current point is
72 // unreachable, so we don't need to execute the code, and (2) we've already
73 // handled the statements which update internal data structures (like the
74 // local variable map) which could be used by subsequent statements.
75 if (!ContainsLabel(S)) {
76 // Verify that any decl statements were handled as simple, they may be in
77 // scope of subsequent reachable statements.
78 assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
79 return;
80 }
81
82 // Otherwise, make a new block to hold the code.
83 EnsureInsertPoint();
84 }
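 // For example, in C code such as:
 //   if (err) goto fail;
 //   return 0;        // the insertion point becomes unreachable here
 //   fail: cleanup(); // still reachable via the goto, so a new block is needed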
85
86 // Generate a stoppoint if we are emitting debug info.
87 EmitStopPoint(S);
88
89 // Ignore all OpenMP directives except for simd if OpenMP with Simd is
90 // enabled.
91 if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
92 if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
93 EmitSimpleOMPExecutableDirective(*D);
94 return;
95 }
96 }
97
98 switch (S->getStmtClass()) {
99 case Stmt::NoStmtClass:
100 case Stmt::CXXCatchStmtClass:
101 case Stmt::SEHExceptStmtClass:
102 case Stmt::SEHFinallyStmtClass:
103 case Stmt::MSDependentExistsStmtClass:
104 llvm_unreachable("invalid statement class to emit generically");
105 case Stmt::NullStmtClass:
106 case Stmt::CompoundStmtClass:
107 case Stmt::DeclStmtClass:
108 case Stmt::LabelStmtClass:
109 case Stmt::AttributedStmtClass:
110 case Stmt::GotoStmtClass:
111 case Stmt::BreakStmtClass:
112 case Stmt::ContinueStmtClass:
113 case Stmt::DefaultStmtClass:
114 case Stmt::CaseStmtClass:
115 case Stmt::SEHLeaveStmtClass:
116 llvm_unreachable("should have emitted these statements as simple");
117
118#define STMT(Type, Base)
119#define ABSTRACT_STMT(Op)
120#define EXPR(Type, Base) \
121 case Stmt::Type##Class:
122#include "clang/AST/StmtNodes.inc"
123 {
124 // Remember the block we came in on.
125 llvm::BasicBlock *incoming = Builder.GetInsertBlock();
126 assert(incoming && "expression emission must have an insertion point");
127
128 EmitIgnoredExpr(cast<Expr>(S));
129
130 llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
131 assert(outgoing && "expression emission cleared block!");
132
133 // The expression emitters assume (reasonably!) that the insertion
134 // point is always set. To maintain that, the call-emission code
135 // for noreturn functions has to enter a new block with no
136 // predecessors. We want to kill that block and mark the current
137 // insertion point unreachable in the common case of a call like
138 // "exit();". Since expression emission doesn't otherwise create
139 // blocks with no predecessors, we can just test for that.
140 // However, we must be careful not to do this to our incoming
141 // block, because *statement* emission does sometimes create
142 // reachable blocks which will have no predecessors until later in
143 // the function. This occurs with, e.g., labels that are not
144 // reachable by fallthrough.
145 if (incoming != outgoing && outgoing->use_empty()) {
146 outgoing->eraseFromParent();
147 Builder.ClearInsertionPoint();
148 }
149 break;
150 }
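 // For example:
 //   exit(1);     // noreturn call: the trailing empty block is erased here
 //   lbl: use(x); // a label emitted by *statement* code is kept even though
 //                // it has no predecessors until a later "goto lbl;"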
151
152 case Stmt::IndirectGotoStmtClass:
153 EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
154
155 case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
156 case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
157 case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
158 case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;
159
160 case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
161
162 case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
163 case Stmt::GCCAsmStmtClass: // Intentional fall-through.
164 case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
165 case Stmt::CoroutineBodyStmtClass:
166 EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
167 break;
168 case Stmt::CoreturnStmtClass:
169 EmitCoreturnStmt(cast<CoreturnStmt>(*S));
170 break;
171 case Stmt::CapturedStmtClass: {
172 const CapturedStmt *CS = cast<CapturedStmt>(S);
173 EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
174 }
175 break;
176 case Stmt::ObjCAtTryStmtClass:
177 EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
178 break;
179 case Stmt::ObjCAtCatchStmtClass:
180 llvm_unreachable(
181 "@catch statements should be handled by EmitObjCAtTryStmt");
182 case Stmt::ObjCAtFinallyStmtClass:
183 llvm_unreachable(
184 "@finally statements should be handled by EmitObjCAtTryStmt");
185 case Stmt::ObjCAtThrowStmtClass:
186 EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
187 break;
188 case Stmt::ObjCAtSynchronizedStmtClass:
189 EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
190 break;
191 case Stmt::ObjCForCollectionStmtClass:
192 EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
193 break;
194 case Stmt::ObjCAutoreleasePoolStmtClass:
195 EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
196 break;
197
198 case Stmt::CXXTryStmtClass:
199 EmitCXXTryStmt(cast<CXXTryStmt>(*S));
200 break;
201 case Stmt::CXXForRangeStmtClass:
202 EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
203 break;
204 case Stmt::SEHTryStmtClass:
205 EmitSEHTryStmt(cast<SEHTryStmt>(*S));
206 break;
207 case Stmt::OMPMetaDirectiveClass:
208 EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
209 break;
210 case Stmt::OMPCanonicalLoopClass:
211 EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
212 break;
213 case Stmt::OMPParallelDirectiveClass:
214 EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
215 break;
216 case Stmt::OMPSimdDirectiveClass:
217 EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
218 break;
219 case Stmt::OMPTileDirectiveClass:
220 EmitOMPTileDirective(cast<OMPTileDirective>(*S));
221 break;
222 case Stmt::OMPUnrollDirectiveClass:
223 EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
224 break;
225 case Stmt::OMPForDirectiveClass:
226 EmitOMPForDirective(cast<OMPForDirective>(*S));
227 break;
228 case Stmt::OMPForSimdDirectiveClass:
229 EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
230 break;
231 case Stmt::OMPSectionsDirectiveClass:
232 EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
233 break;
234 case Stmt::OMPSectionDirectiveClass:
235 EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
236 break;
237 case Stmt::OMPSingleDirectiveClass:
238 EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
239 break;
240 case Stmt::OMPMasterDirectiveClass:
241 EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
242 break;
243 case Stmt::OMPCriticalDirectiveClass:
244 EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
245 break;
246 case Stmt::OMPParallelForDirectiveClass:
247 EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
248 break;
249 case Stmt::OMPParallelForSimdDirectiveClass:
250 EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
251 break;
252 case Stmt::OMPParallelMasterDirectiveClass:
253 EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
254 break;
255 case Stmt::OMPParallelSectionsDirectiveClass:
256 EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
257 break;
258 case Stmt::OMPTaskDirectiveClass:
259 EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
260 break;
261 case Stmt::OMPTaskyieldDirectiveClass:
262 EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
263 break;
264 case Stmt::OMPErrorDirectiveClass:
265 EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
266 break;
267 case Stmt::OMPBarrierDirectiveClass:
268 EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
269 break;
270 case Stmt::OMPTaskwaitDirectiveClass:
271 EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
272 break;
273 case Stmt::OMPTaskgroupDirectiveClass:
274 EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
275 break;
276 case Stmt::OMPFlushDirectiveClass:
277 EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
278 break;
279 case Stmt::OMPDepobjDirectiveClass:
280 EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
281 break;
282 case Stmt::OMPScanDirectiveClass:
283 EmitOMPScanDirective(cast<OMPScanDirective>(*S));
284 break;
285 case Stmt::OMPOrderedDirectiveClass:
286 EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
287 break;
288 case Stmt::OMPAtomicDirectiveClass:
289 EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
290 break;
291 case Stmt::OMPTargetDirectiveClass:
292 EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
293 break;
294 case Stmt::OMPTeamsDirectiveClass:
295 EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
296 break;
297 case Stmt::OMPCancellationPointDirectiveClass:
298 EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
299 break;
300 case Stmt::OMPCancelDirectiveClass:
301 EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
302 break;
303 case Stmt::OMPTargetDataDirectiveClass:
304 EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
305 break;
306 case Stmt::OMPTargetEnterDataDirectiveClass:
307 EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
308 break;
309 case Stmt::OMPTargetExitDataDirectiveClass:
310 EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
311 break;
312 case Stmt::OMPTargetParallelDirectiveClass:
313 EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
314 break;
315 case Stmt::OMPTargetParallelForDirectiveClass:
316 EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
317 break;
318 case Stmt::OMPTaskLoopDirectiveClass:
319 EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
320 break;
321 case Stmt::OMPTaskLoopSimdDirectiveClass:
322 EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
323 break;
324 case Stmt::OMPMasterTaskLoopDirectiveClass:
325 EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
326 break;
327 case Stmt::OMPMaskedTaskLoopDirectiveClass:
328 llvm_unreachable("masked taskloop directive not supported yet.");
329 break;
330 case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
331 EmitOMPMasterTaskLoopSimdDirective(
332 cast<OMPMasterTaskLoopSimdDirective>(*S));
333 break;
334 case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
335 llvm_unreachable("masked taskloop simd directive not supported yet.");
336 break;
337 case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
338 EmitOMPParallelMasterTaskLoopDirective(
339 cast<OMPParallelMasterTaskLoopDirective>(*S));
340 break;
341 case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
342 llvm_unreachable("parallel masked taskloop directive not supported yet.");
343 break;
344 case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
345 EmitOMPParallelMasterTaskLoopSimdDirective(
346 cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
347 break;
348 case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
349 llvm_unreachable(
350 "parallel masked taskloop simd directive not supported yet.");
351 break;
352 case Stmt::OMPDistributeDirectiveClass:
353 EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
354 break;
355 case Stmt::OMPTargetUpdateDirectiveClass:
356 EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
357 break;
358 case Stmt::OMPDistributeParallelForDirectiveClass:
359 EmitOMPDistributeParallelForDirective(
360 cast<OMPDistributeParallelForDirective>(*S));
361 break;
362 case Stmt::OMPDistributeParallelForSimdDirectiveClass:
363 EmitOMPDistributeParallelForSimdDirective(
364 cast<OMPDistributeParallelForSimdDirective>(*S));
365 break;
366 case Stmt::OMPDistributeSimdDirectiveClass:
367 EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
368 break;
369 case Stmt::OMPTargetParallelForSimdDirectiveClass:
370 EmitOMPTargetParallelForSimdDirective(
371 cast<OMPTargetParallelForSimdDirective>(*S));
372 break;
373 case Stmt::OMPTargetSimdDirectiveClass:
374 EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
375 break;
376 case Stmt::OMPTeamsDistributeDirectiveClass:
377 EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
378 break;
379 case Stmt::OMPTeamsDistributeSimdDirectiveClass:
380 EmitOMPTeamsDistributeSimdDirective(
381 cast<OMPTeamsDistributeSimdDirective>(*S));
382 break;
383 case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
384 EmitOMPTeamsDistributeParallelForSimdDirective(
385 cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
386 break;
387 case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
388 EmitOMPTeamsDistributeParallelForDirective(
389 cast<OMPTeamsDistributeParallelForDirective>(*S));
390 break;
391 case Stmt::OMPTargetTeamsDirectiveClass:
392 EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
393 break;
394 case Stmt::OMPTargetTeamsDistributeDirectiveClass:
395 EmitOMPTargetTeamsDistributeDirective(
396 cast<OMPTargetTeamsDistributeDirective>(*S));
397 break;
398 case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
399 EmitOMPTargetTeamsDistributeParallelForDirective(
400 cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
401 break;
402 case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
403 EmitOMPTargetTeamsDistributeParallelForSimdDirective(
404 cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
405 break;
406 case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
407 EmitOMPTargetTeamsDistributeSimdDirective(
408 cast<OMPTargetTeamsDistributeSimdDirective>(*S));
409 break;
410 case Stmt::OMPInteropDirectiveClass:
411 EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
412 break;
413 case Stmt::OMPDispatchDirectiveClass:
414 CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
415 break;
416 case Stmt::OMPScopeDirectiveClass:
417 llvm_unreachable("scope not supported with FE outlining");
418 case Stmt::OMPMaskedDirectiveClass:
419 EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
420 break;
421 case Stmt::OMPGenericLoopDirectiveClass:
422 EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
423 break;
424 case Stmt::OMPTeamsGenericLoopDirectiveClass:
425 EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
426 break;
427 case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
428 EmitOMPTargetTeamsGenericLoopDirective(
429 cast<OMPTargetTeamsGenericLoopDirective>(*S));
430 break;
431 case Stmt::OMPParallelGenericLoopDirectiveClass:
432 EmitOMPParallelGenericLoopDirective(
433 cast<OMPParallelGenericLoopDirective>(*S));
434 break;
435 case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
436 EmitOMPTargetParallelGenericLoopDirective(
437 cast<OMPTargetParallelGenericLoopDirective>(*S));
438 break;
439 case Stmt::OMPParallelMaskedDirectiveClass:
440 EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
441 break;
442 case Stmt::OpenACCComputeConstructClass:
443 EmitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*S));
444 break;
445 }
446}
447
448bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
449 ArrayRef<const Attr *> Attrs) {
450 switch (S->getStmtClass()) {
451 default:
452 return false;
453 case Stmt::NullStmtClass:
454 break;
455 case Stmt::CompoundStmtClass:
456 EmitCompoundStmt(cast<CompoundStmt>(*S));
457 break;
458 case Stmt::DeclStmtClass:
459 EmitDeclStmt(cast<DeclStmt>(*S));
460 break;
461 case Stmt::LabelStmtClass:
462 EmitLabelStmt(cast<LabelStmt>(*S));
463 break;
464 case Stmt::AttributedStmtClass:
465 EmitAttributedStmt(cast<AttributedStmt>(*S));
466 break;
467 case Stmt::GotoStmtClass:
468 EmitGotoStmt(cast<GotoStmt>(*S));
469 break;
470 case Stmt::BreakStmtClass:
471 EmitBreakStmt(cast<BreakStmt>(*S));
472 break;
473 case Stmt::ContinueStmtClass:
474 EmitContinueStmt(cast<ContinueStmt>(*S));
475 break;
476 case Stmt::DefaultStmtClass:
477 EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
478 break;
479 case Stmt::CaseStmtClass:
480 EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
481 break;
482 case Stmt::SEHLeaveStmtClass:
483 EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
484 break;
485 }
486 return true;
487}
488
489/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
490/// this captures the expression result of the last sub-statement and returns it
491/// (for use by the statement expression extension).
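/// For example, in the GNU statement expression
///   int x = ({ int t = f(); t + 1; });
/// the value of the trailing sub-statement "t + 1" becomes the value of the
/// whole expression.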
492Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
493 AggValueSlot AggSlot) {
494 PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
495 "LLVM IR generation of compound statement ('{}')");
496
497 // Keep track of the current cleanup stack depth, including debug scopes.
498 LexicalScope Scope(*this, S.getSourceRange());
499
500 return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
501}
502
503Address
504CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
505 bool GetLast,
506 AggValueSlot AggSlot) {
507
508 const Stmt *ExprResult = S.getStmtExprResult();
509 assert((!GetLast || (GetLast && ExprResult)) &&
510 "If GetLast is true then the CompoundStmt must have a StmtExprResult");
511
512 Address RetAlloca = Address::invalid();
513
514 for (auto *CurStmt : S.body()) {
515 if (GetLast && ExprResult == CurStmt) {
516 // We have to special case labels here. They are statements, but when put
517 // at the end of a statement expression, they yield the value of their
518 // subexpression. Handle this by walking through all labels we encounter,
519 // emitting them before we evaluate the subexpr.
520 // Similar issues arise for attributed statements.
521 while (!isa<Expr>(ExprResult)) {
522 if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
523 EmitLabel(LS->getDecl());
524 ExprResult = LS->getSubStmt();
525 } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
526 // FIXME: Update this if we ever have attributes that affect the
527 // semantics of an expression.
528 ExprResult = AS->getSubStmt();
529 } else {
530 llvm_unreachable("unknown value statement");
531 }
532 }
533
534 EnsureInsertPoint();
535
536 const Expr *E = cast<Expr>(ExprResult);
537 QualType ExprTy = E->getType();
538 if (hasAggregateEvaluationKind(ExprTy)) {
539 EmitAggExpr(E, AggSlot);
540 } else {
541 // We can't return an RValue here because there might be cleanups at
542 // the end of the StmtExpr. Because of that, we have to emit the result
543 // here into a temporary alloca.
544 RetAlloca = CreateMemTemp(ExprTy);
545 EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
546 /*IsInit*/ false);
547 }
548 } else {
549 EmitStmt(CurStmt);
550 }
551 }
552
553 return RetAlloca;
554}
555
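// SimplifyForwardingBlocks - fold a block that contains nothing but an
// unconditional branch (for example a "while.cond" header left behind once the
// loop condition folded to a constant) by forwarding its uses to the successor
// and deleting it.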
556void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
557 llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
558
559 // If there is a cleanup stack, then it isn't worth trying to
560 // simplify this block (we would need to remove it from the scope map
561 // and cleanup entry).
562 if (!EHStack.empty())
563 return;
564
565 // Can only simplify direct branches.
566 if (!BI || !BI->isUnconditional())
567 return;
568
569 // Can only simplify empty blocks.
570 if (BI->getIterator() != BB->begin())
571 return;
572
573 BB->replaceAllUsesWith(BI->getSuccessor(0));
574 BI->eraseFromParent();
575 BB->eraseFromParent();
576}
577
578void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
579 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
580
581 // Fall out of the current block (if necessary).
582 EmitBranch(BB);
583
584 if (IsFinished && BB->use_empty()) {
585 delete BB;
586 return;
587 }
588
589 // Place the block after the current block, if possible, or else at
590 // the end of the function.
591 if (CurBB && CurBB->getParent())
592 CurFn->insert(std::next(CurBB->getIterator()), BB);
593 else
594 CurFn->insert(CurFn->end(), BB);
595 Builder.SetInsertPoint(BB);
596}
597
598void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
599 // Emit a branch from the current block to the target one if this
600 // was a real block. If this was just a fall-through block after a
601 // terminator, don't emit it.
602 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
603
604 if (!CurBB || CurBB->getTerminator()) {
605 // If there is no insert point or the previous block is already
606 // terminated, don't touch it.
607 } else {
608 // Otherwise, create a fall-through branch.
609 Builder.CreateBr(Target);
610 }
611
612 Builder.ClearInsertionPoint();
613}
614
615void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
616 bool inserted = false;
617 for (llvm::User *u : block->users()) {
618 if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
619 CurFn->insert(std::next(insn->getParent()->getIterator()), block);
620 inserted = true;
621 break;
622 }
623 }
624
625 if (!inserted)
626 CurFn->insert(CurFn->end(), block);
627
628 Builder.SetInsertPoint(block);
629}
630
631CodeGenFunction::JumpDest
632CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
633 JumpDest &Dest = LabelMap[D];
634 if (Dest.isValid()) return Dest;
635
636 // Create, but don't insert, the new block.
637 Dest = JumpDest(createBasicBlock(D->getName()),
638 EHScopeStack::stable_iterator::invalid(),
639 NextCleanupDestIndex++);
640 return Dest;
641}
642
643void CodeGenFunction::EmitLabel(const LabelDecl *D) {
644 // Add this label to the current lexical scope if we're within any
645 // normal cleanups. Jumps "in" to this label --- when permitted by
646 // the language --- may need to be routed around such cleanups.
647 if (EHStack.hasNormalCleanups() && CurLexicalScope)
648 CurLexicalScope->addLabel(D);
649
650 JumpDest &Dest = LabelMap[D];
651
652 // If we didn't need a forward reference to this label, just go
653 // ahead and create a destination at the current scope.
654 if (!Dest.isValid()) {
655 Dest = getJumpDestInCurrentScope(D->getName());
656
657 // Otherwise, we need to give this label a target depth and remove
658 // it from the branch-fixups list.
659 } else {
660 assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
661 Dest.setScopeDepth(EHStack.stable_begin());
662 ResolveBranchFixups(Dest.getBlock());
663 }
664
665 EmitBlock(Dest.getBlock());
666
667 // Emit debug info for labels.
668 if (CGDebugInfo *DI = getDebugInfo()) {
669 if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
670 DI->setLocation(D->getLocation());
671 DI->EmitLabel(D, Builder);
672 }
673 }
674
675 incrementProfileCounter(D->getStmt());
676}
677
678/// Change the cleanup scope of the labels in this lexical scope to
679/// match the scope of the enclosing context.
680void CodeGenFunction::LexicalScope::rescopeLabels() {
681 assert(!Labels.empty());
682 EHScopeStack::stable_iterator innermostScope
683 = CGF.EHStack.getInnermostNormalCleanup();
684
685 // Change the scope depth of all the labels.
686 for (SmallVectorImpl<const LabelDecl*>::const_iterator
687 i = Labels.begin(), e = Labels.end(); i != e; ++i) {
688 assert(CGF.LabelMap.count(*i));
689 JumpDest &dest = CGF.LabelMap.find(*i)->second;
690 assert(dest.getScopeDepth().isValid());
691 assert(innermostScope.encloses(dest.getScopeDepth()));
692 dest.setScopeDepth(innermostScope);
693 }
694
695 // Reparent the labels if the new scope also has cleanups.
696 if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
697 ParentScope->Labels.append(Labels.begin(), Labels.end());
698 }
699}
700
701
702void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
703 EmitLabel(S.getDecl());
704
705 // IsEHa - emit eha.scope.begin if it's a side entry of a scope
706 if (getLangOpts().EHAsynch && S.isSideEntry())
707 EmitSehCppScopeBegin();
708
709 EmitStmt(S.getSubStmt());
710}
711
712void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
713 bool nomerge = false;
714 bool noinline = false;
715 bool alwaysinline = false;
716 const CallExpr *musttail = nullptr;
717
718 for (const auto *A : S.getAttrs()) {
719 switch (A->getKind()) {
720 default:
721 break;
722 case attr::NoMerge:
723 nomerge = true;
724 break;
725 case attr::NoInline:
726 noinline = true;
727 break;
728 case attr::AlwaysInline:
729 alwaysinline = true;
730 break;
731 case attr::MustTail: {
732 const Stmt *Sub = S.getSubStmt();
733 const ReturnStmt *R = cast<ReturnStmt>(Sub);
734 musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
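      // For example: [[clang::musttail]] return f(a); -- the attributed
      // sub-statement is the return, and the call "f(a)" is recorded here for
      // the call emitter.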
735 } break;
736 case attr::CXXAssume: {
737 const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
738 if (getLangOpts().CXXAssumptions &&
739 !Assumption->HasSideEffects(getContext())) {
740 llvm::Value *AssumptionVal = EvaluateExprAsBool(Assumption);
741 Builder.CreateAssumption(AssumptionVal);
742 }
743 } break;
744 }
745 }
746 SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
747 SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
748 SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
749 SaveAndRestore save_musttail(MustTailCall, musttail);
750 EmitStmt(S.getSubStmt(), S.getAttrs());
751}
752
753void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
754 // If this code is reachable then emit a stop point (if generating
755 // debug info). We have to do this ourselves because we are on the
756 // "simple" statement path.
757 if (HaveInsertPoint())
758 EmitStopPoint(&S);
759
760 EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
761}
762
763
764void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
765 if (const LabelDecl *Target = S.getConstantTarget()) {
766 EmitBranchThroughCleanup(getJumpDestForLabel(Target));
767 return;
768 }
769
770 // Ensure that we have an i8* for our PHI node.
771 llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
772 Int8PtrTy, "addr");
773 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
774
775 // Get the basic block for the indirect goto.
776 llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
777
778 // The first instruction in the block has to be the PHI for the switch dest,
779 // add an entry for this branch.
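 // For example (GNU computed goto):
 //   void *p = &&lbl; goto *p;
 // each such indirect goto adds an (address value, predecessor block) pair to
 // that PHI node.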
780 cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
781
782 EmitBranch(IndGotoBB);
783}
784
785void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
786 // The else branch of a consteval if statement is always the only branch that
787 // can be runtime evaluated.
788 if (S.isConsteval()) {
789 const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
790 if (Executed) {
791 RunCleanupsScope ExecutedScope(*this);
792 EmitStmt(Executed);
793 }
794 return;
795 }
796
797 // C99 6.8.4.1: The first substatement is executed if the expression compares
798 // unequal to 0. The condition must be a scalar type.
799 LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
800
801 if (S.getInit())
802 EmitStmt(S.getInit());
803
804 if (S.getConditionVariable())
805 EmitDecl(*S.getConditionVariable());
806
807 // If the condition constant folds and can be elided, try to avoid emitting
808 // the condition and the dead arm of the if/else.
809 bool CondConstant;
810 if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
811 S.isConstexpr())) {
812 // Figure out which block (then or else) is executed.
813 const Stmt *Executed = S.getThen();
814 const Stmt *Skipped = S.getElse();
815 if (!CondConstant) // Condition false?
816 std::swap(Executed, Skipped);
817
818 // If the skipped block has no labels in it, just emit the executed block.
819 // This avoids emitting dead code and simplifies the CFG substantially.
820 if (S.isConstexpr() || !ContainsLabel(Skipped)) {
821 if (CondConstant)
822 incrementProfileCounter(&S);
823 if (Executed) {
824 RunCleanupsScope ExecutedScope(*this);
825 EmitStmt(Executed);
826 }
827 return;
828 }
829 }
830
831 // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
832 // the conditional branch.
833 llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
834 llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
835 llvm::BasicBlock *ElseBlock = ContBlock;
836 if (S.getElse())
837 ElseBlock = createBasicBlock("if.else");
838
839 // Prefer the PGO based weights over the likelihood attribute.
840 // When the build isn't optimized the metadata isn't used, so don't generate
841 // it.
842 // Also, differentiate between disabled PGO and a never executed branch with
843 // PGO. Assuming PGO is in use:
844 // - we want to ignore the [[likely]] attribute if the branch is never
845 // executed,
846 // - assuming the profile is poor, preserving the attribute may still be
847 // beneficial.
848 // As an approximation, preserve the attribute only if both the branch and the
849 // parent context were not executed.
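 // For example, "if (x) [[likely]] { ... }" keeps its hint under PGO only when
 // neither this branch nor its enclosing region has recorded executions;
 // otherwise the profile-based weights are used instead.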
850 Stmt::Likelihood LH = Stmt::LH_None;
851 uint64_t ThenCount = getProfileCount(S.getThen());
852 if (!ThenCount && !getCurrentProfileCount() &&
853 CGM.getCodeGenOpts().OptimizationLevel)
854 LH = Stmt::getLikelihood(S.getThen(), S.getElse());
855
856 // When measuring MC/DC, always fully evaluate the condition up front using
857 // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
858 // executing the body of the if.then or if.else. This is useful for when
859 // there is a 'return' within the body, but this is particularly beneficial
860 // when one if-stmt is nested within another if-stmt so that all of the MC/DC
861 // updates are kept linear and consistent.
862 if (!CGM.getCodeGenOpts().MCDCCoverage)
863 EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH);
864 else {
865 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
866 Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
867 }
868
869 // Emit the 'then' code.
870 EmitBlock(ThenBlock);
871 if (llvm::EnableSingleByteCoverage)
872 incrementProfileCounter(S.getThen());
873 else
874 incrementProfileCounter(&S);
875 {
876 RunCleanupsScope ThenScope(*this);
877 EmitStmt(S.getThen());
878 }
879 EmitBranch(ContBlock);
880
881 // Emit the 'else' code if present.
882 if (const Stmt *Else = S.getElse()) {
883 {
884 // There is no need to emit line number for an unconditional branch.
885 auto NL = ApplyDebugLocation::CreateEmpty(*this);
886 EmitBlock(ElseBlock);
887 }
888 // When single byte coverage mode is enabled, add a counter to else block.
889 if (llvm::EnableSingleByteCoverage)
890 incrementProfileCounter(Else);
891 {
892 RunCleanupsScope ElseScope(*this);
893 EmitStmt(Else);
894 }
895 {
896 // There is no need to emit line number for an unconditional branch.
897 auto NL = ApplyDebugLocation::CreateEmpty(*this);
898 EmitBranch(ContBlock);
899 }
900 }
901
902 // Emit the continuation block for code after the if.
903 EmitBlock(ContBlock, true);
904
905 // When single byte coverage mode is enabled, add a counter to continuation
906 // block.
907 if (llvm::EnableSingleByteCoverage)
908 incrementProfileCounter(&S);
909}
910
911void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
912 ArrayRef<const Attr *> WhileAttrs) {
913 // Emit the header for the loop, which will also become
914 // the continue target.
915 JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
916 EmitBlock(LoopHeader.getBlock());
917
918 // Create an exit block for when the condition fails, which will
919 // also become the break target.
920 JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
921
922 // Store the blocks to use for break and continue.
923 BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
924
925 // C++ [stmt.while]p2:
926 // When the condition of a while statement is a declaration, the
927 // scope of the variable that is declared extends from its point
928 // of declaration (3.3.2) to the end of the while statement.
929 // [...]
930 // The object created in a condition is destroyed and created
931 // with each iteration of the loop.
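 // For example: while (Node *N = next()) { ... } -- "N" is in scope in both
 // the condition and the body, and is destroyed and re-created on every
 // iteration.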
932 RunCleanupsScope ConditionScope(*this);
933
934 if (S.getConditionVariable())
935 EmitDecl(*S.getConditionVariable());
936
937 // Evaluate the conditional in the while header. C99 6.8.5.1: The
938 // evaluation of the controlling expression takes place before each
939 // execution of the loop body.
940 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
941
942 // while(1) is common, avoid extra exit blocks. Be sure
943 // to correctly handle break/continue though.
944 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
945 bool CondIsConstInt = C != nullptr;
946 bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne();
947 const SourceRange &R = S.getSourceRange();
948 LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
949 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
950 SourceLocToDebugLoc(R.getEnd()),
951 checkIfLoopMustProgress(CondIsConstInt));
952
953 // When single byte coverage mode is enabled, add a counter to loop condition.
954 if (llvm::EnableSingleByteCoverage)
955 incrementProfileCounter(S.getCond());
956
957 // As long as the condition is true, go to the loop body.
958 llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
959 if (EmitBoolCondBranch) {
960 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
961 if (ConditionScope.requiresCleanups())
962 ExitBlock = createBasicBlock("while.exit");
963 llvm::MDNode *Weights =
964 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
965 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
966 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
967 BoolCondVal, Stmt::getLikelihood(S.getBody()));
968 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);
969
970 if (ExitBlock != LoopExit.getBlock()) {
971 EmitBlock(ExitBlock);
972 EmitBranchThroughCleanup(LoopExit);
973 }
974 } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
975 CGM.getDiags().Report(A->getLocation(),
976 diag::warn_attribute_has_no_effect_on_infinite_loop)
977 << A << A->getRange();
978 CGM.getDiags().Report(
979 S.getWhileLoc(),
980 diag::note_attribute_has_no_effect_on_infinite_loop_here)
981 << SourceRange(S.getWhileLoc(), S.getRParenLoc());
982 }
983
984 // Emit the loop body. We have to emit this in a cleanup scope
985 // because it might be a singleton DeclStmt.
986 {
987 RunCleanupsScope BodyScope(*this);
988 EmitBlock(LoopBody);
989 // When single byte coverage mode is enabled, add a counter to the body.
990 if (llvm::EnableSingleByteCoverage)
991 incrementProfileCounter(S.getBody());
992 else
993 incrementProfileCounter(&S);
994 EmitStmt(S.getBody());
995 }
996
997 BreakContinueStack.pop_back();
998
999 // Immediately force cleanup.
1000 ConditionScope.ForceCleanup();
1001
1002 EmitStopPoint(&S);
1003 // Branch to the loop header again.
1004 EmitBranch(LoopHeader.getBlock());
1005
1006 LoopStack.pop();
1007
1008 // Emit the exit block.
1009 EmitBlock(LoopExit.getBlock(), true);
1010
1011 // The LoopHeader typically is just a branch if we skipped emitting
1012 // a branch, try to erase it.
1013 if (!EmitBoolCondBranch)
1014 SimplifyForwardingBlocks(LoopHeader.getBlock());
1015
1016 // When single byte coverage mode is enabled, add a counter to continuation
1017 // block.
1018 if (llvm::EnableSingleByteCoverage)
1019 incrementProfileCounter(&S);
1020}
1021
1022void CodeGenFunction::EmitDoStmt(const DoStmt &S,
1023 ArrayRef<const Attr *> DoAttrs) {
1024 JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
1025 JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
1026
1027 uint64_t ParentCount = getCurrentProfileCount();
1028
1029 // Store the blocks to use for break and continue.
1030 BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
1031
1032 // Emit the body of the loop.
1033 llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
1034
1035 if (llvm::EnableSingleByteCoverage)
1036 EmitBlockWithFallThrough(LoopBody, S.getBody());
1037 else
1038 EmitBlockWithFallThrough(LoopBody, &S);
1039 {
1040 RunCleanupsScope BodyScope(*this);
1041 EmitStmt(S.getBody());
1042 }
1043
1044 EmitBlock(LoopCond.getBlock());
1045 // When single byte coverage mode is enabled, add a counter to loop condition.
1046 if (llvm::EnableSingleByteCoverage)
1047 incrementProfileCounter(S.getCond());
1048
1049 // C99 6.8.5.2: "The evaluation of the controlling expression takes place
1050 // after each execution of the loop body."
1051
1052 // Evaluate the conditional in the while header.
1053 // C99 6.8.5p2/p4: The first substatement is executed if the expression
1054 // compares unequal to 0. The condition must be a scalar type.
1055 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1056
1057 BreakContinueStack.pop_back();
1058
1059 // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
1060 // to correctly handle break/continue though.
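 // For example, "#define SAFE(x) do { x; } while (0)" expands to this pattern;
 // no conditional backedge is emitted for the constant-false condition.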
1061 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
1062 bool CondIsConstInt = C;
1063 bool EmitBoolCondBranch = !C || !C->isZero();
1064
1065 const SourceRange &R = S.getSourceRange();
1066 LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
1067 SourceLocToDebugLoc(R.getBegin()),
1068 SourceLocToDebugLoc(R.getEnd()),
1069 checkIfLoopMustProgress(CondIsConstInt));
1070
1071 // As long as the condition is true, iterate the loop.
1072 if (EmitBoolCondBranch) {
1073 uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
1074 Builder.CreateCondBr(
1075 BoolCondVal, LoopBody, LoopExit.getBlock(),
1076 createProfileWeightsForLoop(S.getCond(), BackedgeCount));
1077 }
1078
1079 LoopStack.pop();
1080
1081 // Emit the exit block.
1082 EmitBlock(LoopExit.getBlock());
1083
1084 // The DoCond block typically is just a branch if we skipped
1085 // emitting a branch, try to erase it.
1086 if (!EmitBoolCondBranch)
1087 SimplifyForwardingBlocks(LoopCond.getBlock());
1088
1089 // When single byte coverage mode is enabled, add a counter to continuation
1090 // block.
1091 if (llvm::EnableSingleByteCoverage)
1092 incrementProfileCounter(&S);
1093}
1094
1095void CodeGenFunction::EmitForStmt(const ForStmt &S,
1096 ArrayRef<const Attr *> ForAttrs) {
1097 JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
1098
1099 LexicalScope ForScope(*this, S.getSourceRange());
1100
1101 // Evaluate the first part before the loop.
1102 if (S.getInit())
1103 EmitStmt(S.getInit());
1104
1105 // Start the loop with a block that tests the condition.
1106 // If there's an increment, the continue scope will be overwritten
1107 // later.
1108 JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
1109 llvm::BasicBlock *CondBlock = CondDest.getBlock();
1110 EmitBlock(CondBlock);
1111
1112 Expr::EvalResult Result;
1113 bool CondIsConstInt =
1114 !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext());
1115
1116 const SourceRange &R = S.getSourceRange();
1117 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
1118 SourceLocToDebugLoc(R.getBegin()),
1119 SourceLocToDebugLoc(R.getEnd()),
1120 checkIfLoopMustProgress(CondIsConstInt));
1121
1122 // Create a cleanup scope for the condition variable cleanups.
1123 LexicalScope ConditionScope(*this, S.getSourceRange());
1124
1125 // If the for loop doesn't have an increment we can just use the condition as
1126 // the continue block. Otherwise, if there is no condition variable, we can
1127 // form the continue block now. If there is a condition variable, we can't
1128 // form the continue block until after we've emitted the condition, because
1129 // the condition is in scope in the increment, but Sema's jump diagnostics
1130 // ensure that there are no continues from the condition variable that jump
1131 // to the loop increment.
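 // For example: for (init(); int c = cond(); step()) { if (c) continue; }
 // "continue" must branch to "for.inc", which can only be created once the
 // condition variable "c" is in scope.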
1132 JumpDest Continue;
1133 if (!S.getInc())
1134 Continue = CondDest;
1135 else if (!S.getConditionVariable())
1136 Continue = getJumpDestInCurrentScope("for.inc");
1137 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1138
1139 if (S.getCond()) {
1140 // If the for statement has a condition scope, emit the local variable
1141 // declaration.
1142 if (S.getConditionVariable()) {
1143 EmitDecl(*S.getConditionVariable());
1144
1145 // We have entered the condition variable's scope, so we're now able to
1146 // jump to the continue block.
1147 Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
1148 BreakContinueStack.back().ContinueBlock = Continue;
1149 }
1150
1151 // When single byte coverage mode is enabled, add a counter to loop
1152 // condition.
1153 if (llvm::EnableSingleByteCoverage)
1154 incrementProfileCounter(S.getCond());
1155
1156 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1157 // If there are any cleanups between here and the loop-exit scope,
1158 // create a block to stage a loop exit along.
1159 if (ForScope.requiresCleanups())
1160 ExitBlock = createBasicBlock("for.cond.cleanup");
1161
1162 // As long as the condition is true, iterate the loop.
1163 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
1164
1165 // C99 6.8.5p2/p4: The first substatement is executed if the expression
1166 // compares unequal to 0. The condition must be a scalar type.
1167 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1168 llvm::MDNode *Weights =
1169 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1170 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1171 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1172 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1173
1174 Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
1175
1176 if (ExitBlock != LoopExit.getBlock()) {
1177 EmitBlock(ExitBlock);
1178 EmitBranchThroughCleanup(LoopExit);
1179 }
1180
1181 EmitBlock(ForBody);
1182 } else {
1183 // Treat it as a non-zero constant. Don't even create a new block for the
1184 // body, just fall into it.
1185 }
1186
1187 // When single byte coverage mode is enabled, add a counter to the body.
1188 if (llvm::EnableSingleByteCoverage)
1189 incrementProfileCounter(S.getBody());
1190 else
1191 incrementProfileCounter(&S);
1192 {
1193 // Create a separate cleanup scope for the body, in case it is not
1194 // a compound statement.
1195 RunCleanupsScope BodyScope(*this);
1196 EmitStmt(S.getBody());
1197 }
1198
1199 // If there is an increment, emit it next.
1200 if (S.getInc()) {
1201 EmitBlock(Continue.getBlock());
1202 EmitStmt(S.getInc());
1203 if (llvm::EnableSingleByteCoverage)
1204 incrementProfileCounter(S.getInc());
1205 }
1206
1207 BreakContinueStack.pop_back();
1208
1209 ConditionScope.ForceCleanup();
1210
1211 EmitStopPoint(&S);
1212 EmitBranch(CondBlock);
1213
1214 ForScope.ForceCleanup();
1215
1216 LoopStack.pop();
1217
1218 // Emit the fall-through block.
1219 EmitBlock(LoopExit.getBlock(), true);
1220
1221 // When single byte coverage mode is enabled, add a counter to continuation
1222 // block.
1223 if (llvm::EnableSingleByteCoverage)
1224 incrementProfileCounter(&S);
1225}
1226
1227void
1228CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
1229 ArrayRef<const Attr *> ForAttrs) {
1230 JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
1231
1232 LexicalScope ForScope(*this, S.getSourceRange());
1233
1234 // Evaluate the first pieces before the loop.
1235 if (S.getInit())
1236 EmitStmt(S.getInit());
1237 EmitStmt(S.getRangeStmt());
1238 EmitStmt(S.getBeginStmt());
1239 EmitStmt(S.getEndStmt());
1240
1241 // Start the loop with a block that tests the condition.
1242 // If there's an increment, the continue scope will be overwritten
1243 // later.
1244 llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
1245 EmitBlock(CondBlock);
1246
1247 const SourceRange &R = S.getSourceRange();
1248 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
1249 SourceLocToDebugLoc(R.getBegin()),
1250 SourceLocToDebugLoc(R.getEnd()));
1251
1252 // If there are any cleanups between here and the loop-exit scope,
1253 // create a block to stage a loop exit along.
1254 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1255 if (ForScope.requiresCleanups())
1256 ExitBlock = createBasicBlock("for.cond.cleanup");
1257
1258 // The loop body, consisting of the specified body and the loop variable.
1259 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
1260
1261 // The body is executed if the expression, contextually converted
1262 // to bool, is true.
1263 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1264 llvm::MDNode *Weights =
1265 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1266 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1267 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1268 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1269 Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
1270
1271 if (ExitBlock != LoopExit.getBlock()) {
1272 EmitBlock(ExitBlock);
1273 EmitBranchThroughCleanup(LoopExit);
1274 }
1275
1276 EmitBlock(ForBody);
1277 if (llvm::EnableSingleByteCoverage)
1278 incrementProfileCounter(S.getBody());
1279 else
1280 incrementProfileCounter(&S);
1281
1282 // Create a block for the increment. In case of a 'continue', we jump there.
1283 JumpDest Continue = getJumpDestInCurrentScope("for.inc");
1284
1285 // Store the blocks to use for break and continue.
1286 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1287
1288 {
1289 // Create a separate cleanup scope for the loop variable and body.
1290 LexicalScope BodyScope(*this, S.getSourceRange());
1291 EmitStmt(S.getLoopVarStmt());
1292 EmitStmt(S.getBody());
1293 }
1294
1295 EmitStopPoint(&S);
1296 // If there is an increment, emit it next.
1297 EmitBlock(Continue.getBlock());
1298 EmitStmt(S.getInc());
1299
1300 BreakContinueStack.pop_back();
1301
1302 EmitBranch(CondBlock);
1303
1304 ForScope.ForceCleanup();
1305
1306 LoopStack.pop();
1307
1308 // Emit the fall-through block.
1309 EmitBlock(LoopExit.getBlock(), true);
1310
1311 // When single byte coverage mode is enabled, add a counter to continuation
1312 // block.
1313 if (llvm::EnableSingleByteCoverage)
1314 incrementProfileCounter(&S);
1315}
1316
1317void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
1318 if (RV.isScalar()) {
1319 Builder.CreateStore(RV.getScalarVal(), ReturnValue);
1320 } else if (RV.isAggregate()) {
1321 LValue Dest = MakeAddrLValue(ReturnValue, Ty);
1322 LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
1323 EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
1324 } else {
1325 EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
1326 /*init*/ true);
1327 }
1328 EmitBranchThroughCleanup(ReturnBlock);
1329}
1330
1331namespace {
1332// RAII struct used to save and restore a return statement's result expression.
1333struct SaveRetExprRAII {
1334 SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
1335 : OldRetExpr(CGF.RetExpr), CGF(CGF) {
1336 CGF.RetExpr = RetExpr;
1337 }
1338 ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
1339 const Expr *OldRetExpr;
1340 CodeGenFunction &CGF;
1341};
1342} // namespace
1343
1344/// Determine if the given call uses the swiftasync calling convention.
1345static bool isSwiftAsyncCallee(const CallExpr *CE) {
1346 auto calleeQualType = CE->getCallee()->getType();
1347 const FunctionType *calleeType = nullptr;
1348 if (calleeQualType->isFunctionPointerType() ||
1349 calleeQualType->isFunctionReferenceType() ||
1350 calleeQualType->isBlockPointerType() ||
1351 calleeQualType->isMemberFunctionPointerType()) {
1352 calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
1353 } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
1354 calleeType = ty;
1355 } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
1356 if (auto methodDecl = CMCE->getMethodDecl()) {
1357 // getMethodDecl() doesn't handle member pointers at the moment.
1358 calleeType = methodDecl->getType()->castAs<FunctionType>();
1359 } else {
1360 return false;
1361 }
1362 } else {
1363 return false;
1364 }
1365 return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
1366}
1367
1368/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
1369/// if the function returns void, or may be missing one if the function returns
1370/// non-void. Fun stuff :).
1371void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
1372 if (requiresReturnValueCheck()) {
1373 llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
1374 auto *SLocPtr =
1375 new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
1376 llvm::GlobalVariable::PrivateLinkage, SLoc);
1377 SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1378 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
1379 assert(ReturnLocation.isValid() && "No valid return location");
1380 Builder.CreateStore(SLocPtr, ReturnLocation);
1381 }
1382
1383 // Returning from an outlined SEH helper is UB, and we already warn on it.
1384 if (IsOutlinedSEHHelper) {
1385 Builder.CreateUnreachable();
1386 Builder.ClearInsertionPoint();
1387 }
1388
1389 // Emit the result value, even if unused, to evaluate the side effects.
1390 const Expr *RV = S.getRetValue();
1391
1392 // Record the result expression of the return statement. The recorded
1393 // expression is used to determine whether a block capture's lifetime should
1394 // end at the end of the full expression as opposed to the end of the scope
1395 // enclosing the block expression.
1396 //
1397 // This permits a small, easily-implemented exception to our over-conservative
1398 // rules about not jumping to statements following block literals with
1399 // non-trivial cleanups.
1400 SaveRetExprRAII SaveRetExpr(RV, *this);
1401
1402 RunCleanupsScope cleanupScope(*this);
1403 if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
1404 RV = EWC->getSubExpr();
1405
1406 // If we're in a swiftasynccall function, and the return expression is a
1407 // call to a swiftasynccall function, mark the call as the musttail call.
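 // For example, inside a swiftasynccall function, "return other_async(a);" is
 // lowered as a musttail call when "other_async" also uses swiftasynccall.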
1408 std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
1409 if (RV && CurFnInfo &&
1410 CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
1411 if (auto CE = dyn_cast<CallExpr>(RV)) {
1412 if (isSwiftAsyncCallee(CE)) {
1413 SaveMustTail.emplace(MustTailCall, CE);
1414 }
1415 }
1416 }
1417
1418 // FIXME: Clean this up by using an LValue for ReturnTemp,
1419 // EmitStoreThroughLValue, and EmitAnyExpr.
1420 // Check if the NRVO candidate was not globalized in OpenMP mode.
1421 if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
1422 S.getNRVOCandidate()->isNRVOVariable() &&
1423 (!getLangOpts().OpenMP ||
1424 !CGM.getOpenMPRuntime()
1425 .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
1426 .isValid())) {
1427 // Apply the named return value optimization for this return statement,
1428 // which means doing nothing: the appropriate result has already been
1429 // constructed into the NRVO variable.
1430
1431 // If there is an NRVO flag for this variable, set it to 1 into indicate
1432 // that the cleanup code should not destroy the variable.
1433 if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
1434 Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
1435 } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
1436 // Make sure not to return anything, but evaluate the expression
1437 // for side effects.
1438 if (RV) {
1439 EmitAnyExpr(RV);
1440 }
1441 } else if (!RV) {
1442 // Do nothing (return value is left uninitialized)
1443 } else if (FnRetTy->isReferenceType()) {
1444 // If this function returns a reference, take the address of the expression
1445 // rather than the value.
1446 RValue Result = EmitReferenceBindingToExpr(RV);
1447 Builder.CreateStore(Result.getScalarVal(), ReturnValue);
1448 } else {
1449 switch (getEvaluationKind(RV->getType())) {
1450 case TEK_Scalar:
1451 Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
1452 break;
1453 case TEK_Complex:
1454 EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
1455 /*isInit*/ true);
1456 break;
1457 case TEK_Aggregate:
1458 EmitAggExpr(RV, AggValueSlot::forAddr(
1459 ReturnValue, Qualifiers(),
1460 AggValueSlot::IsDestructed,
1461 AggValueSlot::DoesNotNeedGCBarriers,
1462 AggValueSlot::IsNotAliased,
1463 getOverlapForReturnValue()));
1464 break;
1465 }
1466 }
1467
1468 ++NumReturnExprs;
1469 if (!RV || RV->isEvaluatable(getContext()))
1470 ++NumSimpleReturnExprs;
1471
1472 cleanupScope.ForceCleanup();
1473 EmitBranchThroughCleanup(ReturnBlock);
1474}
1475
1476void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
1477 // As long as debug info is modeled with instructions, we have to ensure we
1478 // have a place to insert here and write the stop point here.
1479 if (HaveInsertPoint())
1480 EmitStopPoint(&S);
1481
1482 for (const auto *I : S.decls())
1483 EmitDecl(*I);
1484}
1485
1486void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
1487 assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
1488
1489 // If this code is reachable then emit a stop point (if generating
1490 // debug info). We have to do this ourselves because we are on the
1491 // "simple" statement path.
1492 if (HaveInsertPoint())
1493 EmitStopPoint(&S);
1494
1495 EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
1496}
1497
1498void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
1499 assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1500
1501 // If this code is reachable then emit a stop point (if generating
1502 // debug info). We have to do this ourselves because we are on the
1503 // "simple" statement path.
1504 if (HaveInsertPoint())
1505 EmitStopPoint(&S);
1506
1507 EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
1508}
1509
1510/// EmitCaseStmtRange - If case statement range is not too big then
1511/// add multiple cases to switch instruction, one for each value within
1512/// the range. If range is too big then emit "if" condition check.
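/// For example (GNU case ranges): "case 1 ... 3:" becomes three individual
/// switch cases, while "case 1 ... 1000:" is emitted as a subtract-and-compare
/// range check chained in front of the switch default.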
1513void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
1514 ArrayRef<const Attr *> Attrs) {
1515 assert(S.getRHS() && "Expected RHS value in CaseStmt");
1516
1517 llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1518 llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1519
1520 // Emit the code for this case. We do this first to make sure it is
1521 // properly chained from our predecessor before generating the
1522 // switch machinery to enter this block.
1523 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1524 EmitBlockWithFallThrough(CaseDest, &S);
1525 EmitStmt(S.getSubStmt());
1526
1527 // If range is empty, do nothing.
1528 if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1529 return;
1530
1531 Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
1532 llvm::APInt Range = RHS - LHS;
1533 // FIXME: parameters such as this should not be hardcoded.
1534 if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1535 // Range is small enough to add multiple switch instruction cases.
1536 uint64_t Total = getProfileCount(&S);
1537 unsigned NCases = Range.getZExtValue() + 1;
1538 // We only have one region counter for the entire set of cases here, so we
1539 // need to divide the weights evenly between the generated cases, ensuring
1540 // that the total weight is preserved. E.g., a weight of 5 over three cases
1541 // will be distributed as weights of 2, 2, and 1.
1542 uint64_t Weight = Total / NCases, Rem = Total % NCases;
1543 for (unsigned I = 0; I != NCases; ++I) {
1544 if (SwitchWeights)
1545 SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1546 else if (SwitchLikelihood)
1547 SwitchLikelihood->push_back(LH);
1548
1549 if (Rem)
1550 Rem--;
1551 SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1552 ++LHS;
1553 }
1554 return;
1555 }
1556
1557 // The range is too big. Emit "if" condition into a new block,
1558 // making sure to save and restore the current insertion point.
1559 llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1560
1561 // Push this test onto the chain of range checks (which terminates
1562 // in the default basic block). The switch's default will be changed
1563 // to the top of this chain after switch emission is complete.
1564 llvm::BasicBlock *FalseDest = CaseRangeBlock;
1565 CaseRangeBlock = createBasicBlock("sw.caserange");
1566
1567 CurFn->insert(CurFn->end(), CaseRangeBlock);
1568 Builder.SetInsertPoint(CaseRangeBlock);
1569
1570 // Emit range check.
1571 llvm::Value *Diff =
1572 Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1573 llvm::Value *Cond =
1574 Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1575
1576 llvm::MDNode *Weights = nullptr;
1577 if (SwitchWeights) {
1578 uint64_t ThisCount = getProfileCount(&S);
1579 uint64_t DefaultCount = (*SwitchWeights)[0];
1580 Weights = createProfileWeights(ThisCount, DefaultCount);
1581
1582 // Since we're chaining the switch default through each large case range, we
1583 // need to update the weight for the default, i.e., the first case, to include
1584 // this case.
1585 (*SwitchWeights)[0] += ThisCount;
1586 } else if (SwitchLikelihood)
1587 Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1588
1589 Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1590
1591 // Restore the appropriate insertion point.
1592 if (RestoreBB)
1593 Builder.SetInsertPoint(RestoreBB);
1594 else
1595 Builder.ClearInsertionPoint();
1596}
1597
1598void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1599 ArrayRef<const Attr *> Attrs) {
1600 // If there is no enclosing switch instance that we're aware of, then this
1601 // case statement and its block can be elided. This situation only happens
1602 // when we've constant-folded the switch, are emitting the constant case,
1603 // and part of the constant case includes another case statement. For
1604 // instance: switch (4) { case 4: do { case 5: } while (1); }
1605 if (!SwitchInsn) {
1606 EmitStmt(S.getSubStmt());
1607 return;
1608 }
1609
1610 // Handle case ranges.
1611 if (S.getRHS()) {
1612 EmitCaseStmtRange(S, Attrs);
1613 return;
1614 }
1615
1616 llvm::ConstantInt *CaseVal =
1617 Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1618
1619 // Emit debuginfo for the case value if it is an enum value.
1620 const ConstantExpr *CE;
1621 if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1622 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1623 else
1624 CE = dyn_cast<ConstantExpr>(S.getLHS());
1625 if (CE) {
1626 if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1627 if (CGDebugInfo *Dbg = getDebugInfo())
1628 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1629 Dbg->EmitGlobalVariable(DE->getDecl(),
1630 APValue(llvm::APSInt(CaseVal->getValue())));
1631 }
1632
1633 if (SwitchLikelihood)
1634 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1635
1636 // If the body of the case is just a 'break', try to not emit an empty block.
1637 // If we're profiling or we're not optimizing, leave the block in for better
1638 // debug and coverage analysis.
1639 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1640 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1641 isa<BreakStmt>(S.getSubStmt())) {
1642 JumpDest Block = BreakContinueStack.back().BreakBlock;
1643
1644 // Only do this optimization if there are no cleanups that need emitting.
1645 if (isObviouslyBranchWithoutCleanups(Block)) {
1646 if (SwitchWeights)
1647 SwitchWeights->push_back(getProfileCount(&S));
1648 SwitchInsn->addCase(CaseVal, Block.getBlock());
1649
1650 // If there was a fallthrough into this case, make sure to redirect it to
1651 // the end of the switch as well.
1652 if (Builder.GetInsertBlock()) {
1653 Builder.CreateBr(Block.getBlock());
1654 Builder.ClearInsertionPoint();
1655 }
1656 return;
1657 }
1658 }
1659
1660 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1661 EmitBlockWithFallThrough(CaseDest, &S);
1662 if (SwitchWeights)
1663 SwitchWeights->push_back(getProfileCount(&S));
1664 SwitchInsn->addCase(CaseVal, CaseDest);
1665
1666 // Recursively emitting the statement is acceptable, but is not wonderful for
1667 // code where we have many case statements nested together, i.e.:
1668 // case 1:
1669 // case 2:
1670 // case 3: etc.
1671 // Handling this recursively will create a new block for each case statement
1672 // that falls through to the next case which is IR intensive. It also causes
1673 // deep recursion which can run into stack depth limitations. Handle
1674 // sequential non-range case statements specially.
1675 //
1676 // TODO When the next case has a likelihood attribute the code returns to the
1677 // recursive algorithm. Maybe improve this case if it becomes common practice
1678 // to use a lot of attributes.
1679 const CaseStmt *CurCase = &S;
1680 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1681
1682 // Otherwise, iteratively add consecutive cases to this switch stmt.
1683 while (NextCase && NextCase->getRHS() == nullptr) {
1684 CurCase = NextCase;
1685 llvm::ConstantInt *CaseVal =
1686 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1687
1688 if (SwitchWeights)
1689 SwitchWeights->push_back(getProfileCount(NextCase));
1690 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1691 CaseDest = createBasicBlock("sw.bb");
1692 EmitBlockWithFallThrough(CaseDest, CurCase);
1693 }
1694 // Since this loop is only executed when the CaseStmt has no attributes
1695 // use a hard-coded value.
1696 if (SwitchLikelihood)
1697 SwitchLikelihood->push_back(Stmt::LH_None);
1698
1699 SwitchInsn->addCase(CaseVal, CaseDest);
1700 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1701 }
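// Illustrative sketch (hypothetical source, assuming no profile
// instrumentation): for
//   case 1: case 2: case 3: f(); break;
// the loop above adds the constants 1, 2 and 3 to the SwitchInst one by one,
// all targeting the same "sw.bb" block, instead of recursing once per case.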
1702
1703 // Generate a stop point for debug info if the case statement is
1704 // followed by a default statement. A fallthrough case before a
1705 // default case gets its own branch target.
1706 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1707 EmitStopPoint(CurCase);
1708
1709 // Normal default recursion for non-cases.
1710 EmitStmt(CurCase->getSubStmt());
1711}
1712
1713void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1714 ArrayRef<const Attr *> Attrs) {
1715 // If there is no enclosing switch instance that we're aware of, then this
1716 // default statement can be elided. This situation only happens when we've
1717 // constant-folded the switch.
1718 if (!SwitchInsn) {
1719 EmitStmt(S.getSubStmt());
1720 return;
1721 }
1722
1723 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1724 assert(DefaultBlock->empty() &&
1725 "EmitDefaultStmt: Default block already defined?");
1726
1727 if (SwitchLikelihood)
1728 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1729
1730 EmitBlockWithFallThrough(DefaultBlock, &S);
1731
1732 EmitStmt(S.getSubStmt());
1733}
1734
1735/// CollectStatementsForCase - Given the body of a 'switch' statement and a
1736/// constant value that is being switched on, see if we can dead code eliminate
1737/// the body of the switch to a simple series of statements to emit. Basically,
1738/// on a switch (5) we want to find these statements:
1739/// case 5:
1740/// printf(...); <--
1741/// ++i; <--
1742/// break;
1743///
1744/// and add them to the ResultStmts vector. If it is unsafe to do this
1745/// transformation (for example, one of the elided statements contains a label
1746/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1747/// should include statements after it (e.g. the printf() line is a substmt of
1748/// the case) then return CSFC_FallThrough. If we handled it and found a break
1749/// statement, then return CSFC_Success.
1750///
1751/// If Case is non-null, then we are looking for the specified case, checking
1752/// that nothing we jump over contains labels. If Case is null, then we found
1753/// the case and are looking for the break.
1754///
1755/// If the recursive walk actually finds our Case, then we set FoundCase to
1756/// true.
1757///
1758enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1759static CSFC_Result CollectStatementsForCase(const Stmt *S,
1760 const SwitchCase *Case,
1761 bool &FoundCase,
1762 SmallVectorImpl<const Stmt*> &ResultStmts) {
1763 // If this is a null statement, just succeed.
1764 if (!S)
1765 return Case ? CSFC_Success : CSFC_FallThrough;
1766
1767 // If this is the switchcase (case 4: or default) that we're looking for, then
1768 // we're in business. Just add the substatement.
1769 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1770 if (S == Case) {
1771 FoundCase = true;
1772 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1773 ResultStmts);
1774 }
1775
1776 // Otherwise, this is some other case or default statement, just ignore it.
1777 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1778 ResultStmts);
1779 }
1780
1781 // If we are in the live part of the code and we found our break statement,
1782 // return a success!
1783 if (!Case && isa<BreakStmt>(S))
1784 return CSFC_Success;
1785
1786 // If this is a compound statement, then it might contain the SwitchCase,
1787 // the break, or neither.
1788 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1789 // Handle this as two cases: we might be looking for the SwitchCase (if so
1790 // the skipped statements must be skippable) or we might already have it.
1791 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1792 bool StartedInLiveCode = FoundCase;
1793 unsigned StartSize = ResultStmts.size();
1794
1795 // If we've not found the case yet, scan through looking for it.
1796 if (Case) {
1797 // Keep track of whether we see a skipped declaration. The code could be
1798 // using the declaration even if it is skipped, so we can't optimize out
1799 // the decl if the kept statements might refer to it.
1800 bool HadSkippedDecl = false;
1801
1802 // If we're looking for the case, just see if we can skip each of the
1803 // substatements.
1804 for (; Case && I != E; ++I) {
1805 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1806
1807 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1808 case CSFC_Failure: return CSFC_Failure;
1809 case CSFC_Success:
1810 // A successful result means that either 1) the statement doesn't have
1811 // the case and is skippable, or 2) it does contain the case value and
1812 // also contains the break to exit the switch. In the latter case, we
1813 // just verify the rest of the statements are elidable.
1814 if (FoundCase) {
1815 // If we found the case and skipped declarations, we can't do the
1816 // optimization.
1817 if (HadSkippedDecl)
1818 return CSFC_Failure;
1819
1820 for (++I; I != E; ++I)
1821 if (CodeGenFunction::ContainsLabel(*I, true))
1822 return CSFC_Failure;
1823 return CSFC_Success;
1824 }
1825 break;
1826 case CSFC_FallThrough:
1827 // If we have a fallthrough condition, then we must have found the case
1828 // and started to include statements. Consider the rest of the
1829 // statements in the compound statement as candidates for inclusion.
1830 assert(FoundCase && "Didn't find case but returned fallthrough?");
1831 // We recursively found Case, so we're not looking for it anymore.
1832 Case = nullptr;
1833
1834 // If we found the case and skipped declarations, we can't do the
1835 // optimization.
1836 if (HadSkippedDecl)
1837 return CSFC_Failure;
1838 break;
1839 }
1840 }
1841
1842 if (!FoundCase)
1843 return CSFC_Success;
1844
1845 assert(!HadSkippedDecl && "fallthrough after skipping decl");
1846 }
1847
1848 // If we have statements in our range, then we know that the statements are
1849 // live and need to be added to the set of statements we're tracking.
1850 bool AnyDecls = false;
1851 for (; I != E; ++I) {
1852 AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1853
1854 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1855 case CSFC_Failure: return CSFC_Failure;
1856 case CSFC_FallThrough:
1857 // A fallthrough result means that the statement was simple and just
1858 // included in ResultStmt, keep adding them afterwards.
1859 break;
1860 case CSFC_Success:
1861 // A successful result means that we found the break statement and
1862 // stopped statement inclusion. We just ensure that any leftover stmts
1863 // are skippable and return success ourselves.
1864 for (++I; I != E; ++I)
1865 if (CodeGenFunction::ContainsLabel(*I, true))
1866 return CSFC_Failure;
1867 return CSFC_Success;
1868 }
1869 }
1870
1871 // If we're about to fall out of a scope without hitting a 'break;', we
1872 // can't perform the optimization if there were any decls in that scope
1873 // (we'd lose their end-of-lifetime).
1874 if (AnyDecls) {
1875 // If the entire compound statement was live, there's one more thing we
1876 // can try before giving up: emit the whole thing as a single statement.
1877 // We can do that unless the statement contains a 'break;'.
1878 // FIXME: Such a break must be at the end of a construct within this one.
1879 // We could emit this by just ignoring the BreakStmts entirely.
1880 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1881 ResultStmts.resize(StartSize);
1882 ResultStmts.push_back(S);
1883 } else {
1884 return CSFC_Failure;
1885 }
1886 }
1887
1888 return CSFC_FallThrough;
1889 }
1890
1891 // Okay, this is some other statement that we don't handle explicitly, like a
1892 // for statement or increment etc. If we are skipping over this statement,
1893 // just verify it doesn't have labels, which would make it invalid to elide.
1894 if (Case) {
1895 if (CodeGenFunction::ContainsLabel(S, true))
1896 return CSFC_Failure;
1897 return CSFC_Success;
1898 }
1899
1900 // Otherwise, we want to include this statement. Everything is cool with that
1901 // so long as it doesn't contain a break out of the switch we're in.
1902 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1903
1904 // Otherwise, everything is great. Include the statement and tell the caller
1905 // that we fall through and include the next statement as well.
1906 ResultStmts.push_back(S);
1907 return CSFC_FallThrough;
1908}
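// Illustration (hypothetical source): when constant-folding
//   switch (0) { case 1: lbl: f(); break; case 0: g(); break; }
// the walk has to skip over the body of 'case 1' to reach 'case 0'; because
// the skipped region contains the label 'lbl', ContainsLabel() fires and the
// whole fold is abandoned with CSFC_Failure.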
1909
1910/// FindCaseStatementsForValue - Find the case statement being jumped to and
1911/// then invoke CollectStatementsForCase to find the list of statements to emit
1912/// for a switch on constant. See the comment above CollectStatementsForCase
1913/// for more details.
1914static bool FindCaseStatementsForValue(const SwitchStmt &S,
1915 const llvm::APSInt &ConstantCondValue,
1916 SmallVectorImpl<const Stmt*> &ResultStmts,
1917 ASTContext &C,
1918 const SwitchCase *&ResultCase) {
1919 // First step, find the switch case that is being branched to. We can do this
1920 // efficiently by scanning the SwitchCase list.
1921 const SwitchCase *Case = S.getSwitchCaseList();
1922 const DefaultStmt *DefaultCase = nullptr;
1923
1924 for (; Case; Case = Case->getNextSwitchCase()) {
1925 // It's either a default or case. Just remember the default statement in
1926 // case we're not jumping to any numbered cases.
1927 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1928 DefaultCase = DS;
1929 continue;
1930 }
1931
1932 // Check to see if this case is the one we're looking for.
1933 const CaseStmt *CS = cast<CaseStmt>(Case);
1934 // Don't handle case ranges yet.
1935 if (CS->getRHS()) return false;
1936
1937 // If we found our case, remember it as 'case'.
1938 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1939 break;
1940 }
1941
1942 // If we didn't find a matching case, we use a default if it exists, or we
1943 // elide the whole switch body!
1944 if (!Case) {
1945 // It is safe to elide the body of the switch if it doesn't contain labels
1946 // etc. If it is safe, return successfully with an empty ResultStmts list.
1947 if (!DefaultCase)
1948 return !CodeGenFunction::ContainsLabel(&S);
1949 Case = DefaultCase;
1950 }
1951
1952 // Ok, we know which case is being jumped to, try to collect all the
1953 // statements that follow it. This can fail for a variety of reasons. Also,
1954 // check to see that the recursive walk actually found our case statement.
1955 // Insane cases like this can fail to find it in the recursive walk since we
1956 // don't handle every stmt kind:
1957 // switch (4) {
1958 // while (1) {
1959 // case 4: ...
1960 bool FoundCase = false;
1961 ResultCase = Case;
1962 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1963 ResultStmts) != CSFC_Failure &&
1964 FoundCase;
1965}
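// Illustration (hypothetical source): for
//   switch (2) { case 1: a(); break; case 2: b(); c(); break; default: d(); }
// the scan above finds 'case 2', and CollectStatementsForCase then returns
// ResultStmts = { b(); c(); }, so the whole switch lowers to those two calls.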
1966
1967static std::optional<SmallVector<uint64_t, 16>>
1968getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
1969 // Are there enough branches to weight them?
1970 if (Likelihoods.size() <= 1)
1971 return std::nullopt;
1972
1973 uint64_t NumUnlikely = 0;
1974 uint64_t NumNone = 0;
1975 uint64_t NumLikely = 0;
1976 for (const auto LH : Likelihoods) {
1977 switch (LH) {
1978 case Stmt::LH_Unlikely:
1979 ++NumUnlikely;
1980 break;
1981 case Stmt::LH_None:
1982 ++NumNone;
1983 break;
1984 case Stmt::LH_Likely:
1985 ++NumLikely;
1986 break;
1987 }
1988 }
1989
1990 // Is there a likelihood attribute used?
1991 if (NumUnlikely == 0 && NumLikely == 0)
1992 return std::nullopt;
1993
1994 // When multiple cases share the same code they can be combined during
1995 // optimization. In that case the weights of the branch will be the sum of
1996 // the individual weights. Make sure the combined sum of all neutral cases
1997 // doesn't exceed the value of a single likely attribute.
1998 // The additions both avoid divisions by 0 and make sure the weights of None
1999 // don't exceed the weight of Likely.
2000 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2001 const uint64_t None = Likely / (NumNone + 1);
2002 const uint64_t Unlikely = 0;
2003
2004 SmallVector<uint64_t, 16> Result;
2005 Result.reserve(Likelihoods.size());
2006 for (const auto LH : Likelihoods) {
2007 switch (LH) {
2008 case Stmt::LH_Unlikely:
2009 Result.push_back(Unlikely);
2010 break;
2011 case Stmt::LH_None:
2012 Result.push_back(None);
2013 break;
2014 case Stmt::LH_Likely:
2015 Result.push_back(Likely);
2016 break;
2017 }
2018 }
2019
2020 return Result;
2021}
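// Worked example (hypothetical input): for Likelihoods = { LH_None (default),
// LH_Likely, LH_None, LH_Unlikely } we get NumLikely = 1, NumNone = 2 and
// NumUnlikely = 1, so
//   Likely   = INT32_MAX / 3 = 715827882
//   None     = Likely / 3    = 238609294
//   Unlikely = 0
// and the returned weights are { 238609294, 715827882, 238609294, 0 }.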
2022
2023void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
2024 // Handle nested switch statements.
2025 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2026 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2027 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2028 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2029
2030 // See if we can constant fold the condition of the switch and therefore only
2031 // emit the live case statement (if any) of the switch.
2032 llvm::APSInt ConstantCondValue;
2033 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
2034 SmallVector<const Stmt*, 4> CaseStmts;
2035 const SwitchCase *Case = nullptr;
2036 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2037 getContext(), Case)) {
2038 if (Case)
2039 incrementProfileCounter(Case);
2040 RunCleanupsScope ExecutedScope(*this);
2041
2042 if (S.getInit())
2043 EmitStmt(S.getInit());
2044
2045 // Emit the condition variable if needed inside the entire cleanup scope
2046 // used by this special case for constant folded switches.
2047 if (S.getConditionVariable())
2048 EmitDecl(*S.getConditionVariable());
2049
2050 // At this point, we are no longer "within" a switch instance, so
2051 // we can temporarily enforce this to ensure that any embedded case
2052 // statements are not emitted.
2053 SwitchInsn = nullptr;
2054
2055 // Okay, we can dead code eliminate everything except this case. Emit the
2056 // specified series of statements and we're good.
2057 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
2058 EmitStmt(CaseStmts[i]);
2059 incrementProfileCounter(&S);
2060
2061 // Now we want to restore the saved switch instance so that nested
2062 // switches continue to function properly
2063 SwitchInsn = SavedSwitchInsn;
2064
2065 return;
2066 }
2067 }
2068
2069 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2070
2071 RunCleanupsScope ConditionScope(*this);
2072
2073 if (S.getInit())
2074 EmitStmt(S.getInit());
2075
2076 if (S.getConditionVariable())
2077 EmitDecl(*S.getConditionVariable());
2078 llvm::Value *CondV = EmitScalarExpr(S.getCond());
2079
2080 // Create basic block to hold stuff that comes after switch
2081 // statement. We also need to create a default block now so that
2082 // explicit case ranges tests can have a place to jump to on
2083 // failure.
2084 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2085 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
2086 if (PGO.haveRegionCounts()) {
2087 // Walk the SwitchCase list to find how many there are.
2088 uint64_t DefaultCount = 0;
2089 unsigned NumCases = 0;
2090 for (const SwitchCase *Case = S.getSwitchCaseList();
2091 Case;
2092 Case = Case->getNextSwitchCase()) {
2093 if (isa<DefaultStmt>(Case))
2094 DefaultCount = getProfileCount(Case);
2095 NumCases += 1;
2096 }
2097 SwitchWeights = new SmallVector<uint64_t, 16>();
2098 SwitchWeights->reserve(NumCases);
2099 // The default needs to be first. We store the edge count, so we already
2100 // know the right weight.
2101 SwitchWeights->push_back(DefaultCount);
2102 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2103 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2104 // Initialize the default case.
2105 SwitchLikelihood->push_back(Stmt::LH_None);
2106 }
2107
2108 CaseRangeBlock = DefaultBlock;
2109
2110 // Clear the insertion point to indicate we are in unreachable code.
2111 Builder.ClearInsertionPoint();
2112
2113 // All break statements jump to NextBlock. If BreakContinueStack is non-empty
2114 // then reuse last ContinueBlock.
2115 JumpDest OuterContinue;
2116 if (!BreakContinueStack.empty())
2117 OuterContinue = BreakContinueStack.back().ContinueBlock;
2118
2119 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
2120
2121 // Emit switch body.
2122 EmitStmt(S.getBody());
2123
2124 BreakContinueStack.pop_back();
2125
2126 // Update the default block in case explicit case range tests have
2127 // been chained on top.
2128 SwitchInsn->setDefaultDest(CaseRangeBlock);
2129
2130 // If a default was never emitted:
2131 if (!DefaultBlock->getParent()) {
2132 // If we have cleanups, emit the default block so that there's a
2133 // place to jump through the cleanups from.
2134 if (ConditionScope.requiresCleanups()) {
2135 EmitBlock(DefaultBlock);
2136
2137 // Otherwise, just forward the default block to the switch end.
2138 } else {
2139 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2140 delete DefaultBlock;
2141 }
2142 }
2143
2144 ConditionScope.ForceCleanup();
2145
2146 // Emit continuation.
2147 EmitBlock(SwitchExit.getBlock(), true);
2148 incrementProfileCounter(&S);
2149
2150 // If the switch has a condition wrapped by __builtin_unpredictable,
2151 // create metadata that specifies that the switch is unpredictable.
2152 // Don't bother if not optimizing because that metadata would not be used.
2153 auto *Call = dyn_cast<CallExpr>(S.getCond());
2154 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2155 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2156 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2157 llvm::MDBuilder MDHelper(getLLVMContext());
2158 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2159 MDHelper.createUnpredictable());
2160 }
2161 }
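// Illustration (hypothetical source, assuming optimization is enabled):
//   switch (__builtin_unpredictable(x)) { ... }
// attaches "unpredictable" metadata to the emitted switch instruction.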
2162
2163 if (SwitchWeights) {
2164 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2165 "switch weights do not match switch cases");
2166 // If there's only one jump destination there's no sense weighting it.
2167 if (SwitchWeights->size() > 1)
2168 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2169 createProfileWeights(*SwitchWeights));
2170 delete SwitchWeights;
2171 } else if (SwitchLikelihood) {
2172 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2173 "switch likelihoods do not match switch cases");
2174 std::optional<SmallVector<uint64_t, 16>> LHW =
2175 getLikelihoodWeights(*SwitchLikelihood);
2176 if (LHW) {
2177 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2178 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2179 createProfileWeights(*LHW));
2180 }
2181 delete SwitchLikelihood;
2182 }
2183 SwitchInsn = SavedSwitchInsn;
2184 SwitchWeights = SavedSwitchWeights;
2185 SwitchLikelihood = SavedSwitchLikelihood;
2186 CaseRangeBlock = SavedCRBlock;
2187}
2188
2189static std::string
2190SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2191 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
2192 std::string Result;
2193
2194 while (*Constraint) {
2195 switch (*Constraint) {
2196 default:
2197 Result += Target.convertConstraint(Constraint);
2198 break;
2199 // Ignore these
2200 case '*':
2201 case '?':
2202 case '!':
2203 case '=': // Will see this and the following in mult-alt constraints.
2204 case '+':
2205 break;
2206 case '#': // Ignore the rest of the constraint alternative.
2207 while (Constraint[1] && Constraint[1] != ',')
2208 Constraint++;
2209 break;
2210 case '&':
2211 case '%':
2212 Result += *Constraint;
2213 while (Constraint[1] && Constraint[1] == *Constraint)
2214 Constraint++;
2215 break;
2216 case ',':
2217 Result += "|";
2218 break;
2219 case 'g':
2220 Result += "imr";
2221 break;
2222 case '[': {
2223 assert(OutCons &&
2224 "Must pass output names to constraints with a symbolic name");
2225 unsigned Index;
2226 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2227 assert(result && "Could not resolve symbolic name"); (void)result;
2228 Result += llvm::utostr(Index);
2229 break;
2230 }
2231 }
2232
2233 Constraint++;
2234 }
2235
2236 return Result;
2237}
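// Illustrative examples (hypothetical constraints): "g" becomes "imr", the
// multi-alternative constraint "r,m" becomes "r|m", and a symbolic operand
// reference such as "[val]" is rewritten to the index of the matching output,
// e.g. "0". Modifiers like '*', '?', '=' and '+' are simply dropped.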
2238
2239/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
2240/// as using a particular register add that as a constraint that will be used
2241/// in this asm stmt.
2242static std::string
2243AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2244 const TargetInfo &Target, CodeGenModule &CGM,
2245 const AsmStmt &Stmt, const bool EarlyClobber,
2246 std::string *GCCReg = nullptr) {
2247 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2248 if (!AsmDeclRef)
2249 return Constraint;
2250 const ValueDecl &Value = *AsmDeclRef->getDecl();
2251 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2252 if (!Variable)
2253 return Constraint;
2254 if (Variable->getStorageClass() != SC_Register)
2255 return Constraint;
2256 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2257 if (!Attr)
2258 return Constraint;
2259 StringRef Register = Attr->getLabel();
2260 assert(Target.isValidGCCRegisterName(Register));
2261 // We're using validateOutputConstraint here because we only care if
2262 // this is a register constraint.
2263 TargetInfo::ConstraintInfo Info(Constraint, "");
2264 if (Target.validateOutputConstraint(Info) &&
2265 !Info.allowsRegister()) {
2266 CGM.ErrorUnsupported(&Stmt, "__asm__");
2267 return Constraint;
2268 }
2269 // Canonicalize the register here before returning it.
2270 Register = Target.getNormalizedGCCRegisterName(Register);
2271 if (GCCReg != nullptr)
2272 *GCCReg = Register.str();
2273 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2274}
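// Illustration (hypothetical source):
//   register int rv asm("eax");
//   asm("..." : "=r"(rv));
// rewrites the output constraint to "{eax}" (or "&{eax}" for an earlyclobber
// output), pinning the operand to the named register.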
2275
2276std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2277 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2278 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2279 if (Info.allowsRegister() || !Info.allowsMemory()) {
2280 if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2281 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2282
2283 llvm::Type *Ty = ConvertType(InputType);
2284 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2285 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2286 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2287 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2288
2289 return {
2290 Builder.CreateLoad(InputValue.getAddress(*this).withElementType(Ty)),
2291 nullptr};
2292 }
2293 }
2294
2295 Address Addr = InputValue.getAddress(*this);
2296 ConstraintStr += '*';
2297 return {InputValue.getPointer(*this), Addr.getElementType()};
2298}
2299
2300std::pair<llvm::Value *, llvm::Type *>
2301CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2302 const Expr *InputExpr,
2303 std::string &ConstraintStr) {
2304 // If this can't be a register or memory, i.e., has to be a constant
2305 // (immediate or symbolic), try to emit it as such.
2306 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2307 if (Info.requiresImmediateConstant()) {
2308 Expr::EvalResult EVResult;
2309 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2310
2311 llvm::APSInt IntResult;
2312 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2313 getContext()))
2314 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2315 }
2316
2317 Expr::EvalResult Result;
2318 if (InputExpr->EvaluateAsInt(Result, getContext()))
2319 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2320 nullptr};
2321 }
2322
2323 if (Info.allowsRegister() || !Info.allowsMemory())
2324 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2325 return {EmitScalarExpr(InputExpr), nullptr};
2326 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2327 return {EmitScalarExpr(InputExpr), nullptr};
2328 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2329 LValue Dest = EmitLValue(InputExpr);
2330 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2331 InputExpr->getExprLoc());
2332}
2333
2334/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2335/// asm call instruction. The !srcloc MDNode contains a list of constant
2336/// integers which are the source locations of the start of each line in the
2337/// asm.
2338static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2339 CodeGenFunction &CGF) {
2340 SmallVector<llvm::Metadata *, 8> Locs;
2341 // Add the location of the first line to the MDNode.
2342 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2343 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2344 StringRef StrVal = Str->getString();
2345 if (!StrVal.empty()) {
2346 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2347 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2348 unsigned StartToken = 0;
2349 unsigned ByteOffset = 0;
2350
2351 // Add the location of the start of each subsequent line of the asm to the
2352 // MDNode.
2353 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2354 if (StrVal[i] != '\n') continue;
2355 SourceLocation LineLoc = Str->getLocationOfByte(
2356 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2357 Locs.push_back(llvm::ConstantAsMetadata::get(
2358 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2359 }
2360 }
2361
2362 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2363}
2364
2365static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2366 bool HasUnwindClobber, bool ReadOnly,
2367 bool ReadNone, bool NoMerge, const AsmStmt &S,
2368 const std::vector<llvm::Type *> &ResultRegTypes,
2369 const std::vector<llvm::Type *> &ArgElemTypes,
2370 CodeGenFunction &CGF,
2371 std::vector<llvm::Value *> &RegResults) {
2372 if (!HasUnwindClobber)
2373 Result.addFnAttr(llvm::Attribute::NoUnwind);
2374
2375 if (NoMerge)
2376 Result.addFnAttr(llvm::Attribute::NoMerge);
2377 // Attach readnone and readonly attributes.
2378 if (!HasSideEffect) {
2379 if (ReadNone)
2380 Result.setDoesNotAccessMemory();
2381 else if (ReadOnly)
2382 Result.setOnlyReadsMemory();
2383 }
2384
2385 // Add elementtype attribute for indirect constraints.
2386 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2387 if (Pair.value()) {
2388 auto Attr = llvm::Attribute::get(
2389 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2390 Result.addParamAttr(Pair.index(), Attr);
2391 }
2392 }
2393
2394 // Slap the source location of the inline asm into a !srcloc metadata on the
2395 // call.
2396 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2397 Result.setMetadata("srcloc",
2398 getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2399 else {
2400 // At least put the line number on MS inline asm blobs.
2401 llvm::Constant *Loc =
2402 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2403 Result.setMetadata("srcloc",
2404 llvm::MDNode::get(CGF.getLLVMContext(),
2405 llvm::ConstantAsMetadata::get(Loc)));
2406 }
2407
2408 if (CGF.getLangOpts().assumeFunctionsAreConvergent())
2409 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2410 // convergent (meaning, they may call an intrinsically convergent op, such
2411 // as bar.sync, and so can't have certain optimizations applied around
2412 // them).
2413 Result.addFnAttr(llvm::Attribute::Convergent);
2414 // Extract all of the register value results from the asm.
2415 if (ResultRegTypes.size() == 1) {
2416 RegResults.push_back(&Result);
2417 } else {
2418 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2419 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2420 RegResults.push_back(Tmp);
2421 }
2422 }
2423}
2424
2425static void
2426EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
2427 const llvm::ArrayRef<llvm::Value *> RegResults,
2428 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2429 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2430 const llvm::ArrayRef<LValue> ResultRegDests,
2431 const llvm::ArrayRef<QualType> ResultRegQualTys,
2432 const llvm::BitVector &ResultTypeRequiresCast,
2433 const llvm::BitVector &ResultRegIsFlagReg) {
2434 CGBuilderTy &Builder = CGF.Builder;
2435 CodeGenModule &CGM = CGF.CGM;
2436 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2437
2438 assert(RegResults.size() == ResultRegTypes.size());
2439 assert(RegResults.size() == ResultTruncRegTypes.size());
2440 assert(RegResults.size() == ResultRegDests.size());
2441 // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2442 // in which case its size may grow.
2443 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2444 assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2445
2446 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2447 llvm::Value *Tmp = RegResults[i];
2448 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2449
2450 if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2451 // Target must guarantee the Value `Tmp` here is lowered to a boolean
2452 // value.
2453 llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
2454 llvm::Value *IsBooleanValue =
2455 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
2456 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2457 Builder.CreateCall(FnAssume, IsBooleanValue);
2458 }
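// Illustration (hypothetical x86 source): for an output constraint such as
//   asm("..." : "=@ccz"(ok));
// the target defines the result to be 0 or 1, and the llvm.assume emitted
// above tells the optimizer that the value is boolean.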
2459
2460 // If the result type of the LLVM IR asm doesn't match the result type of
2461 // the expression, do the conversion.
2462 if (ResultRegTypes[i] != TruncTy) {
2463
2464 // Truncate the integer result to the right size, note that TruncTy can be
2465 // a pointer.
2466 if (TruncTy->isFloatingPointTy())
2467 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2468 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2469 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2470 Tmp = Builder.CreateTrunc(
2471 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2472 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2473 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2474 uint64_t TmpSize =
2475 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2476 Tmp = Builder.CreatePtrToInt(
2477 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2478 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2479 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2480 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2481 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2482 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2483 }
2484 }
2485
2486 LValue Dest = ResultRegDests[i];
2487 // ResultTypeRequiresCast elements correspond to the first
2488 // ResultTypeRequiresCast.size() elements of RegResults.
2489 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2490 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2491 Address A = Dest.getAddress(CGF).withElementType(ResultRegTypes[i]);
2492 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2493 Builder.CreateStore(Tmp, A);
2494 continue;
2495 }
2496
2497 QualType Ty =
2498 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2499 if (Ty.isNull()) {
2500 const Expr *OutExpr = S.getOutputExpr(i);
2501 CGM.getDiags().Report(OutExpr->getExprLoc(),
2502 diag::err_store_value_to_reg);
2503 return;
2504 }
2505 Dest = CGF.MakeAddrLValue(A, Ty);
2506 }
2507 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2508 }
2509}
2510
2511static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
2512 const AsmStmt &S) {
2513 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2514
2515 StringRef Asm;
2516 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2517 Asm = GCCAsm->getAsmString()->getString();
2518
2519 auto &Ctx = CGF->CGM.getLLVMContext();
2520
2521 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2522 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2523 {StrTy->getType()}, false);
2524 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2525
2526 CGF->Builder.CreateCall(UBF, {StrTy});
2527}
2528
2529void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2530 // Pop all cleanup blocks at the end of the asm statement.
2531 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2532
2533 // Assemble the final asm string.
2534 std::string AsmString = S.generateAsmString(getContext());
2535
2536 // Get all the output and input constraints together.
2537 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2538 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2539
2540 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2541 bool IsValidTargetAsm = true;
2542 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2543 StringRef Name;
2544 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2545 Name = GAS->getOutputName(i);
2546 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2547 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2548 if (IsHipStdPar && !IsValid)
2549 IsValidTargetAsm = false;
2550 else
2551 assert(IsValid && "Failed to parse output constraint");
2552 OutputConstraintInfos.push_back(Info);
2553 }
2554
2555 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2556 StringRef Name;
2557 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2558 Name = GAS->getInputName(i);
2559 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2560 bool IsValid =
2561 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2562 if (IsHipStdPar && !IsValid)
2563 IsValidTargetAsm = false;
2564 else
2565 assert(IsValid && "Failed to parse input constraint");
2566 InputConstraintInfos.push_back(Info);
2567 }
2568
2569 if (!IsValidTargetAsm)
2570 return EmitHipStdParUnsupportedAsm(this, S);
2571
2572 std::string Constraints;
2573
2574 std::vector<LValue> ResultRegDests;
2575 std::vector<QualType> ResultRegQualTys;
2576 std::vector<llvm::Type *> ResultRegTypes;
2577 std::vector<llvm::Type *> ResultTruncRegTypes;
2578 std::vector<llvm::Type *> ArgTypes;
2579 std::vector<llvm::Type *> ArgElemTypes;
2580 std::vector<llvm::Value*> Args;
2581 llvm::BitVector ResultTypeRequiresCast;
2582 llvm::BitVector ResultRegIsFlagReg;
2583
2584 // Keep track of inout constraints.
2585 std::string InOutConstraints;
2586 std::vector<llvm::Value*> InOutArgs;
2587 std::vector<llvm::Type*> InOutArgTypes;
2588 std::vector<llvm::Type*> InOutArgElemTypes;
2589
2590 // Keep track of out constraints for tied input operand.
2591 std::vector<std::string> OutputConstraints;
2592
2593 // Keep track of defined physregs.
2594 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2595
2596 // An inline asm can be marked readonly if it meets the following conditions:
2597 // - it doesn't have any side effects
2598 // - it doesn't clobber memory
2599 // - it doesn't return a value by-reference
2600 // It can be marked readnone if it doesn't have any input memory constraints
2601 // in addition to meeting the conditions listed above.
2602 bool ReadOnly = true, ReadNone = true;
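// Illustration (hypothetical source): asm("..." : "=r"(x) : "r"(y)) with no
// "memory" clobber and no memory operands can stay both readonly and readnone,
// whereas a "memory" clobber, a memory output, or a memory input clears the
// corresponding flag below.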
2603
2604 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2605 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2606
2607 // Simplify the output constraint.
2608 std::string OutputConstraint(S.getOutputConstraint(i));
2609 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2610 getTarget(), &OutputConstraintInfos);
2611
2612 const Expr *OutExpr = S.getOutputExpr(i);
2613 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2614
2615 std::string GCCReg;
2616 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2617 getTarget(), CGM, S,
2618 Info.earlyClobber(),
2619 &GCCReg);
2620 // Give an error on multiple outputs to same physreg.
2621 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2622 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2623
2624 OutputConstraints.push_back(OutputConstraint);
2625 LValue Dest = EmitLValue(OutExpr);
2626 if (!Constraints.empty())
2627 Constraints += ',';
2628
2629 // If this is a register output, then make the inline asm return it
2630 // by-value. If this is a memory result, return the value by-reference.
2631 QualType QTy = OutExpr->getType();
2632 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2633 hasAggregateEvaluationKind(QTy);
2634 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2635
2636 Constraints += "=" + OutputConstraint;
2637 ResultRegQualTys.push_back(QTy);
2638 ResultRegDests.push_back(Dest);
2639
2640 bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc");
2641 ResultRegIsFlagReg.push_back(IsFlagReg);
2642
2643 llvm::Type *Ty = ConvertTypeForMem(QTy);
2644 const bool RequiresCast = Info.allowsRegister() &&
2645 (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2646 Ty->isAggregateType());
2647
2648 ResultTruncRegTypes.push_back(Ty);
2649 ResultTypeRequiresCast.push_back(RequiresCast);
2650
2651 if (RequiresCast) {
2652 unsigned Size = getContext().getTypeSize(QTy);
2653 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2654 }
2655 ResultRegTypes.push_back(Ty);
2656 // If this output is tied to an input, and if the input is larger, then
2657 // we need to set the actual result type of the inline asm node to be the
2658 // same as the input type.
2659 if (Info.hasMatchingInput()) {
2660 unsigned InputNo;
2661 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2662 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2663 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2664 break;
2665 }
2666 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2667
2668 QualType InputTy = S.getInputExpr(InputNo)->getType();
2669 QualType OutputType = OutExpr->getType();
2670
2671 uint64_t InputSize = getContext().getTypeSize(InputTy);
2672 if (getContext().getTypeSize(OutputType) < InputSize) {
2673 // Form the asm to return the value as a larger integer or fp type.
2674 ResultRegTypes.back() = ConvertType(InputTy);
2675 }
2676 }
2677 if (llvm::Type* AdjTy =
2678 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2679 ResultRegTypes.back()))
2680 ResultRegTypes.back() = AdjTy;
2681 else {
2682 CGM.getDiags().Report(S.getAsmLoc(),
2683 diag::err_asm_invalid_type_in_input)
2684 << OutExpr->getType() << OutputConstraint;
2685 }
2686
2687 // Update largest vector width for any vector types.
2688 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2689 LargestVectorWidth =
2690 std::max((uint64_t)LargestVectorWidth,
2691 VT->getPrimitiveSizeInBits().getKnownMinValue());
2692 } else {
2693 Address DestAddr = Dest.getAddress(*this);
2694 // Matrix types in memory are represented by arrays, but accessed through
2695 // vector pointers, with the alignment specified on the access operation.
2696 // For inline assembly, update pointer arguments to use vector pointers.
2697 // Otherwise there will be a mis-match if the matrix is also an
2698 // input-argument which is represented as vector.
2699 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2700 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2701
2702 ArgTypes.push_back(DestAddr.getType());
2703 ArgElemTypes.push_back(DestAddr.getElementType());
2704 Args.push_back(DestAddr.emitRawPointer(*this));
2705 Constraints += "=*";
2706 Constraints += OutputConstraint;
2707 ReadOnly = ReadNone = false;
2708 }
2709
2710 if (Info.isReadWrite()) {
2711 InOutConstraints += ',';
2712
2713 const Expr *InputExpr = S.getOutputExpr(i);
2714 llvm::Value *Arg;
2715 llvm::Type *ArgElemType;
2716 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2717 Info, Dest, InputExpr->getType(), InOutConstraints,
2718 InputExpr->getExprLoc());
2719
2720 if (llvm::Type* AdjTy =
2721 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2722 Arg->getType()))
2723 Arg = Builder.CreateBitCast(Arg, AdjTy);
2724
2725 // Update largest vector width for any vector types.
2726 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2727 LargestVectorWidth =
2728 std::max((uint64_t)LargestVectorWidth,
2729 VT->getPrimitiveSizeInBits().getKnownMinValue());
2730 // Only tie earlyclobber physregs.
2731 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2732 InOutConstraints += llvm::utostr(i);
2733 else
2734 InOutConstraints += OutputConstraint;
2735
2736 InOutArgTypes.push_back(Arg->getType());
2737 InOutArgElemTypes.push_back(ArgElemType);
2738 InOutArgs.push_back(Arg);
2739 }
2740 }
2741
2742 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2743 // to the return value slot. Only do this when returning in registers.
2744 if (isa<MSAsmStmt>(&S)) {
2745 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2746 if (RetAI.isDirect() || RetAI.isExtend()) {
2747 // Make a fake lvalue for the return value slot.
2748 LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy);
2749 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2750 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2751 ResultRegDests, AsmString, S.getNumOutputs());
2752 SawAsmBlock = true;
2753 }
2754 }
2755
2756 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2757 const Expr *InputExpr = S.getInputExpr(i);
2758
2759 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2760
2761 if (Info.allowsMemory())
2762 ReadNone = false;
2763
2764 if (!Constraints.empty())
2765 Constraints += ',';
2766
2767 // Simplify the input constraint.
2768 std::string InputConstraint(S.getInputConstraint(i));
2769 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2770 &OutputConstraintInfos);
2771
2772 InputConstraint = AddVariableConstraints(
2773 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2774 getTarget(), CGM, S, false /* No EarlyClobber */);
2775
2776 std::string ReplaceConstraint (InputConstraint);
2777 llvm::Value *Arg;
2778 llvm::Type *ArgElemType;
2779 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
2780
2781 // If this input argument is tied to a larger output result, extend the
2782 // input to be the same size as the output. The LLVM backend wants to see
2783 // the input and output of a matching constraint be the same size. Note
2784 // that GCC does not define what the top bits are here. We use zext because
2785 // that is usually cheaper, but LLVM IR should really get an anyext someday.
2786 if (Info.hasTiedOperand()) {
2787 unsigned Output = Info.getTiedOperand();
2788 QualType OutputType = S.getOutputExpr(Output)->getType();
2789 QualType InputTy = InputExpr->getType();
2790
2791 if (getContext().getTypeSize(OutputType) >
2792 getContext().getTypeSize(InputTy)) {
2793 // Use ptrtoint as appropriate so that we can do our extension.
2794 if (isa<llvm::PointerType>(Arg->getType()))
2795 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2796 llvm::Type *OutputTy = ConvertType(OutputType);
2797 if (isa<llvm::IntegerType>(OutputTy))
2798 Arg = Builder.CreateZExt(Arg, OutputTy);
2799 else if (isa<llvm::PointerType>(OutputTy))
2800 Arg = Builder.CreateZExt(Arg, IntPtrTy);
2801 else if (OutputTy->isFloatingPointTy())
2802 Arg = Builder.CreateFPExt(Arg, OutputTy);
2803 }
2804 // Deal with the tied operands' constraint code in adjustInlineAsmType.
2805 ReplaceConstraint = OutputConstraints[Output];
2806 }
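// Illustration (hypothetical source, assuming a target where long is 64 bits):
//   long out; int in;
//   asm("..." : "=r"(out) : "0"(in));
// the input is tied to a wider output, so 'in' is zero-extended from i32 to
// i64 here so that both sides of the tied constraint have matching types.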
2807 if (llvm::Type* AdjTy =
2808 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2809 Arg->getType()))
2810 Arg = Builder.CreateBitCast(Arg, AdjTy);
2811 else
2812 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2813 << InputExpr->getType() << InputConstraint;
2814
2815 // Update largest vector width for any vector types.
2816 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2817 LargestVectorWidth =
2818 std::max((uint64_t)LargestVectorWidth,
2819 VT->getPrimitiveSizeInBits().getKnownMinValue());
2820
2821 ArgTypes.push_back(Arg->getType());
2822 ArgElemTypes.push_back(ArgElemType);
2823 Args.push_back(Arg);
2824 Constraints += InputConstraint;
2825 }
2826
2827 // Append the "input" part of inout constraints.
2828 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2829 ArgTypes.push_back(InOutArgTypes[i]);
2830 ArgElemTypes.push_back(InOutArgElemTypes[i]);
2831 Args.push_back(InOutArgs[i]);
2832 }
2833 Constraints += InOutConstraints;
2834
2835 // Labels
2836 SmallVector<llvm::BasicBlock *, 16> Transfer;
2837 llvm::BasicBlock *Fallthrough = nullptr;
2838 bool IsGCCAsmGoto = false;
2839 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2840 IsGCCAsmGoto = GS->isAsmGoto();
2841 if (IsGCCAsmGoto) {
2842 for (const auto *E : GS->labels()) {
2843 JumpDest Dest = getJumpDestForLabel(E->getLabel());
2844 Transfer.push_back(Dest.getBlock());
2845 if (!Constraints.empty())
2846 Constraints += ',';
2847 Constraints += "!i";
2848 }
2849 Fallthrough = createBasicBlock("asm.fallthrough");
2850 }
2851 }
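// Illustration (hypothetical source): for
//   asm goto("..." : : : : err);
// each label adds a "!i" constraint and its block to Transfer, and the
// "asm.fallthrough" block receives control when no indirect branch is taken.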
2852
2853 bool HasUnwindClobber = false;
2854
2855 // Clobbers
2856 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2857 StringRef Clobber = S.getClobber(i);
2858
2859 if (Clobber == "memory")
2860 ReadOnly = ReadNone = false;
2861 else if (Clobber == "unwind") {
2862 HasUnwindClobber = true;
2863 continue;
2864 } else if (Clobber != "cc") {
2865 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2866 if (CGM.getCodeGenOpts().StackClashProtector &&
2867 getTarget().isSPRegName(Clobber)) {
2868 CGM.getDiags().Report(S.getAsmLoc(),
2869 diag::warn_stack_clash_protection_inline_asm);
2870 }
2871 }
2872
2873 if (isa<MSAsmStmt>(&S)) {
2874 if (Clobber == "eax" || Clobber == "edx") {
2875 if (Constraints.find("=&A") != std::string::npos)
2876 continue;
2877 std::string::size_type position1 =
2878 Constraints.find("={" + Clobber.str() + "}");
2879 if (position1 != std::string::npos) {
2880 Constraints.insert(position1 + 1, "&");
2881 continue;
2882 }
2883 std::string::size_type position2 = Constraints.find("=A");
2884 if (position2 != std::string::npos) {
2885 Constraints.insert(position2 + 1, "&");
2886 continue;
2887 }
2888 }
2889 }
2890 if (!Constraints.empty())
2891 Constraints += ',';
2892
2893 Constraints += "~{";
2894 Constraints += Clobber;
2895 Constraints += '}';
2896 }
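// Illustration (hypothetical clobber list): a clobber list of "memory", "eax"
// appends roughly "~{memory},~{eax}" to the constraint string; the "memory"
// clobber also prevents the call from being marked readonly or readnone.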
2897
2898 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
2899 "unwind clobber can't be used with asm goto");
2900
2901 // Add machine specific clobbers
2902 std::string_view MachineClobbers = getTarget().getClobbers();
2903 if (!MachineClobbers.empty()) {
2904 if (!Constraints.empty())
2905 Constraints += ',';
2906 Constraints += MachineClobbers;
2907 }
2908
2909 llvm::Type *ResultType;
2910 if (ResultRegTypes.empty())
2911 ResultType = VoidTy;
2912 else if (ResultRegTypes.size() == 1)
2913 ResultType = ResultRegTypes[0];
2914 else
2915 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
2916
2917 llvm::FunctionType *FTy =
2918 llvm::FunctionType::get(ResultType, ArgTypes, false);
2919
2920 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
2921
2922 llvm::InlineAsm::AsmDialect GnuAsmDialect =
2923 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
2924 ? llvm::InlineAsm::AD_ATT
2925 : llvm::InlineAsm::AD_Intel;
2926 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
2927 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
2928
2929 llvm::InlineAsm *IA = llvm::InlineAsm::get(
2930 FTy, AsmString, Constraints, HasSideEffect,
2931 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
2932 std::vector<llvm::Value*> RegResults;
2933 llvm::CallBrInst *CBR;
2934 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
2935 CBRRegResults;
2936 if (IsGCCAsmGoto) {
2937 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
2938 EmitBlock(Fallthrough);
2939 UpdateAsmCallInst(*CBR, HasSideEffect, false, ReadOnly, ReadNone,
2940 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2941 *this, RegResults);
2942 // Because we are emitting code top to bottom, we don't have enough
2943 // information at this point to know precisely whether we have a critical
2944 // edge. If we have outputs, split all indirect destinations.
2945 if (!RegResults.empty()) {
2946 unsigned i = 0;
2947 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
2948 llvm::Twine SynthName = Dest->getName() + ".split";
2949 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
2950 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
2951 Builder.SetInsertPoint(SynthBB);
2952
2953 if (ResultRegTypes.size() == 1) {
2954 CBRRegResults[SynthBB].push_back(CBR);
2955 } else {
2956 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
2957 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
2958 CBRRegResults[SynthBB].push_back(Tmp);
2959 }
2960 }
2961
2962 EmitBranch(Dest);
2963 EmitBlock(SynthBB);
2964 CBR->setIndirectDest(i++, SynthBB);
2965 }
2966 }
2967 } else if (HasUnwindClobber) {
2968 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
2969 UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
2970 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2971 *this, RegResults);
2972 } else {
2973 llvm::CallInst *Result =
2974 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
2975 UpdateAsmCallInst(*Result, HasSideEffect, false, ReadOnly, ReadNone,
2976 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2977 *this, RegResults);
2978 }
2979
2980 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
2981 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
2982 ResultRegIsFlagReg);
2983
2984 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
2985 // different insertion point; one for each indirect destination and with
2986 // CBRRegResults rather than RegResults.
2987 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
2988 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
2989 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
2990 Builder.SetInsertPoint(Succ, --(Succ->end()));
2991 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
2992 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
2993 ResultTypeRequiresCast, ResultRegIsFlagReg);
2994 }
2995 }
2996}
2997
2998LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2999 const RecordDecl *RD = S.getCapturedRecordDecl();
3000 QualType RecordTy = getContext().getRecordType(RD);
3001
3002 // Initialize the captured struct.
3003 LValue SlotLV =
3004 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
3005
3006 RecordDecl::field_iterator CurField = RD->field_begin();
3007 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
3008 E = S.capture_init_end();
3009 I != E; ++I, ++CurField) {
3010 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
3011 if (CurField->hasCapturedVLAType()) {
3012 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
3013 } else {
3014 EmitInitializerForField(*CurField, LV, *I);
3015 }
3016 }
3017
3018 return SlotLV;
3019}
3020
3021/// Generate an outlined function for the body of a CapturedStmt, store any
3022/// captured variables into the captured struct, and call the outlined function.
3023llvm::Function *
3024CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
3025 LValue CapStruct = InitCapturedStruct(S);
3026
3027 // Emit the CapturedDecl
3028 CodeGenFunction CGF(CGM, true);
3029 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3030 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3031 delete CGF.CapturedStmtInfo;
3032
3033 // Emit call to the helper function.
3034 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
3035
3036 return F;
3037}
3038
3039Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
3040 LValue CapStruct = InitCapturedStruct(S);
3041 return CapStruct.getAddress(*this);
3042}
3043
3044/// Creates the outlined function for a CapturedStmt.
3045llvm::Function *
3046CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
3047 assert(CapturedStmtInfo &&
3048 "CapturedStmtInfo should be set when generating the captured function");
3049 const CapturedDecl *CD = S.getCapturedDecl();
3050 const RecordDecl *RD = S.getCapturedRecordDecl();
3051 SourceLocation Loc = S.getBeginLoc();
3052 assert(CD->hasBody() && "missing CapturedDecl body");
3053
3054 // Build the argument list.
3055 ASTContext &Ctx = CGM.getContext();
3056 FunctionArgList Args;
3057 Args.append(CD->param_begin(), CD->param_end());
3058
3059 // Create the function declaration.
3060 const CGFunctionInfo &FuncInfo =
3061 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
3062 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
3063
3064 llvm::Function *F =
3065 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3066 CapturedStmtInfo->getHelperName(), &CGM.getModule());
3067 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
3068 if (CD->isNothrow())
3069 F->addFnAttr(llvm::Attribute::NoUnwind);
3070
3071 // Generate the function.
3072 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
3073 CD->getBody()->getBeginLoc());
3074 // Set the context parameter in CapturedStmtInfo.
3075 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
3076 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
3077
3078 // Initialize variable-length arrays.
3079 LValue Base = MakeNaturalAlignRawAddrLValue(CapturedStmtInfo->getContextValue(),
3080 Ctx.getTagDeclType(RD));
3081 for (auto *FD : RD->fields()) {
3082 if (FD->hasCapturedVLAType()) {
3083 auto *ExprArg =
3084 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
3085 .getScalarVal();
3086 auto VAT = FD->getCapturedVLAType();
3087 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3088 }
3089 }
3090
3091 // If 'this' is captured, load it into CXXThisValue.
3092 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3093 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3094 LValue ThisLValue = EmitLValueForField(Base, FD);
3095 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3096 }
3097
3098 PGO.assignRegionCounters(GlobalDecl(CD), F);
3099 CapturedStmtInfo->EmitBody(*this, CD->getBody());
3100 FinishFunction(CD->getBodyRBrace());
3101
3102 return F;
3103}
#define V(N, I)
Definition: ASTContext.h:3273
#define SM(sm)
Definition: Cuda.cpp:82
Defines enum values for all the target-independent builtin functions.
static std::string AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, const TargetInfo &Target, CodeGenModule &CGM, const AsmStmt &Stmt, const bool EarlyClobber, std::string *GCCReg=nullptr)
AddVariableConstraints - Look at AsmExpr and if it is a variable declared as using a particular regis...
Definition: CGStmt.cpp:2243
static bool FindCaseStatementsForValue(const SwitchStmt &S, const llvm::APSInt &ConstantCondValue, SmallVectorImpl< const Stmt * > &ResultStmts, ASTContext &C, const SwitchCase *&ResultCase)
FindCaseStatementsForValue - Find the case statement being jumped to and then invoke CollectStatement...
Definition: CGStmt.cpp:1914
static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF, const AsmStmt &S)
Definition: CGStmt.cpp:2511
static std::optional< SmallVector< uint64_t, 16 > > getLikelihoodWeights(ArrayRef< Stmt::Likelihood > Likelihoods)
Definition: CGStmt.cpp:1968
static llvm::MDNode * getAsmSrcLocInfo(const StringLiteral *Str, CodeGenFunction &CGF)
getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline asm call instruction.
Definition: CGStmt.cpp:2338
static std::string SimplifyConstraint(const char *Constraint, const TargetInfo &Target, SmallVectorImpl< TargetInfo::ConstraintInfo > *OutCons=nullptr)
Definition: CGStmt.cpp:2190
static bool isSwiftAsyncCallee(const CallExpr *CE)
Determine if the given call uses the swiftasync calling convention.
Definition: CGStmt.cpp:1345
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect, bool HasUnwindClobber, bool ReadOnly, bool ReadNone, bool NoMerge, const AsmStmt &S, const std::vector< llvm::Type * > &ResultRegTypes, const std::vector< llvm::Type * > &ArgElemTypes, CodeGenFunction &CGF, std::vector< llvm::Value * > &RegResults)
Definition: CGStmt.cpp:2365
static CSFC_Result CollectStatementsForCase(const Stmt *S, const SwitchCase *Case, bool &FoundCase, SmallVectorImpl< const Stmt * > &ResultStmts)
Definition: CGStmt.cpp:1759
static void EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S, const llvm::ArrayRef< llvm::Value * > RegResults, const llvm::ArrayRef< llvm::Type * > ResultRegTypes, const llvm::ArrayRef< llvm::Type * > ResultTruncRegTypes, const llvm::ArrayRef< LValue > ResultRegDests, const llvm::ArrayRef< QualType > ResultRegQualTys, const llvm::BitVector &ResultTypeRequiresCast, const llvm::BitVector &ResultRegIsFlagReg)
Definition: CGStmt.cpp:2426
CSFC_Result
CollectStatementsForCase - Given the body of a 'switch' statement and a constant value that is being ...
Definition: CGStmt.cpp:1758
@ CSFC_Failure
Definition: CGStmt.cpp:1758
@ CSFC_Success
Definition: CGStmt.cpp:1758
@ CSFC_FallThrough
Definition: CGStmt.cpp:1758
llvm::MachO::Target Target
Definition: MachO.h:48
Defines the PrettyStackTraceEntry class, which is used to make crashes give more contextual informati...
Defines the SourceManager interface.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
bool toIntegralConstant(APSInt &Result, QualType SrcTy, const ASTContext &Ctx) const
Try to convert this value to an integral constant.
Definition: APValue.cpp:954
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:182
SourceManager & getSourceManager()
Definition: ASTContext.h:705
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
QualType getRecordType(const RecordDecl *Decl) const
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth,...
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2329
CanQualType VoidTy
Definition: ASTContext.h:1091
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition: Stmt.h:3100
Attr - This represents one attribute.
Definition: Attr.h:42
Represents an attribute applied to a statement.
Definition: Stmt.h:2080
BreakStmt - This represents a break.
Definition: Stmt.h:2980
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition: StmtCXX.h:135
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2820
Expr * getCallee()
Definition: Expr.h:2970
Represents the body of a CapturedStmt, and serves as its DeclContext.
Definition: Decl.h:4687
ImplicitParamDecl * getContextParam() const
Retrieve the parameter containing captured variables.
Definition: Decl.h:4749
bool isNothrow() const
Definition: Decl.cpp:5444
param_iterator param_end() const
Retrieve an iterator one past the last parameter decl.
Definition: Decl.h:4766
param_iterator param_begin() const
Retrieve an iterator pointing to the first parameter decl.
Definition: Decl.h:4764
Stmt * getBody() const override
getBody - If this Decl represents a declaration for a body of code, such as a function or method defi...
Definition: Decl.cpp:5441
This captures a statement into a function.
Definition: Stmt.h:3757
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition: Stmt.h:3921
CapturedRegionKind getCapturedRegionKind() const
Retrieve the captured region kind.
Definition: Stmt.cpp:1422
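A hedged sketch of what produces a CapturedStmt: directives such as the OpenMP ones wrap their associated statement in one, and CodeGen outlines the body into a helper function whose context parameter carries the captured variables (the example is only meaningful when compiled with -fopenmp; names invented):

  void scale(float *a, int n, float s) {
  #pragma omp parallel for
    for (int i = 0; i < n; ++i)
      a[i] *= s;   // 'a', 'n', and 's' are captured into the helper's context
  }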
CaseStmt - Represent a case statement.
Definition: Stmt.h:1801
Stmt * getSubStmt()
Definition: Stmt.h:1918
Expr * getLHS()
Definition: Stmt.h:1888
Expr * getRHS()
Definition: Stmt.h:1900
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
bool hasReducedDebugInfo() const
Check if type and variable info should be emitted.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:111
static Address invalid()
Definition: Address.h:153
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:220
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:184
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:241
bool isValid() const
Definition: Address.h:154
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:176
An aggregate value slot.
Definition: CGValue.h:512
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:595
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
Definition: CGDebugInfo.h:881
llvm::StoreInst * CreateFlagStore(bool Value, llvm::Value *Addr)
Emit a store to an i1 flag variable.
Definition: CGBuilder.h:164
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:136
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:108
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:55
CGFunctionInfo - Class to encapsulate the information about a function definition.
CallingConv getASTCallingConvention() const
getASTCallingConvention() - Return the AST-specified calling convention.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD)
Gets the OpenMP-specific address of the local variable.
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S)
Emit the captured statement body.
virtual StringRef getHelperName() const
Get the name of the capture helper.
void rescopeLabels()
Change the cleanup scope of the labels in this lexical scope to match the scope of the enclosing cont...
Definition: CGStmt.cpp:680
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitForStmt(const ForStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EmitGotoStmt(const GotoStmt &S)
void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S)
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
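A hedged illustration of the three evaluation kinds this returns, using Clang's _Complex extension in C++ (names invented):

  struct Pair { int a, b; };

  int            f_scalar(int x)             { return x + 1; }  // scalar kind
  _Complex float f_complex(_Complex float z) { return z + z; }  // complex kind
  Pair           f_aggregate(Pair p)         { return p; }      // aggregate kind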
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
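A hedged example of why this check is needed when the insertion point is unreachable: a statement that follows a return may still contain a label that reachable code jumps to, so it cannot simply be skipped (names invented):

  int count_down(int n) {
    int total = 0;
    if (n > 0)
      goto body;       // reachable jump into the loop below
    return total;
    while (n > 0) {    // unreachable loop header, but its body holds a label,
    body:              // so the whole statement must still be emitted
      ++total;
      --n;
    }
    return total;
  }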
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
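A hedged illustration: conditions are lowered straight into branches to the given true/false blocks, so short-circuit operators become chains of conditional branches rather than a materialized bool (names invented):

  void notify();

  void maybe_notify(int level, bool subscribed) {
    if (level > 3 && subscribed)   // '&&' lowers to two conditional branches
      notify();                    // that share the same false destination
  }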
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
void EmitIfStmt(const IfStmt &S)
void EmitOMPOrderedDirective(const OMPOrderedDirective &S)
void EmitOMPTargetDirective(const OMPTargetDirective &S)
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
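A hedged C++ sketch of when such a flag is needed: the NRVO variable is constructed directly in the return slot, so its destructor must be skipped on the path that returns it but still run on paths that leave the function another way (names invented):

  #include <stdexcept>

  struct Widget { ~Widget() {} int id = 0; };

  Widget make(bool ok) {
    Widget w;                                  // constructed in the return slot
    if (!ok)
      throw std::runtime_error("bad state");   // not returned: ~Widget() must run
    return w;                                  // returned: the flag suppresses it
  }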
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
void EmitOMPAtomicDirective(const OMPAtomicDirective &S)
void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S)
void EmitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &S)
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
static bool hasScalarEvaluationKind(QualType T)
CGCapturedStmtInfo * CapturedStmtInfo
void EmitIndirectGotoStmt(const IndirectGotoStmt &S)
void EmitDecl(const Decl &D)
EmitDecl - Emit a declaration.
void EmitCXXTryStmt(const CXXTryStmt &S)
bool EmitSimpleStmt(const Stmt *S, ArrayRef< const Attr * > Attrs)
EmitSimpleStmt - Try to emit a "simple" statement which does not necessarily require an insertion poi...
void EmitLabel(const LabelDecl *D)
EmitLabel - Emit the block for the given label.
void EmitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &S)
void EmitOMPTaskDirective(const OMPTaskDirective &S)
void EmitOMPScanDirective(const OMPScanDirective &S)
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S)
void EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitCaseStmt(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitDefaultStmt(const DefaultStmt &S, ArrayRef< const Attr * > Attrs)
const LangOptions & getLangOpts() const
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S)
void EmitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &S)
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S)
void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S)
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
void SimplifyForwardingBlocks(llvm::BasicBlock *BB)
SimplifyForwardingBlocks - If the given basic block is only a branch to another basic block,...
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
void EmitOMPParallelDirective(const OMPParallelDirective &S)
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
void EmitBlockAfterUses(llvm::BasicBlock *BB)
EmitBlockAfterUses - Emit the given block somewhere hopefully near its uses, and leave the insertion ...
void EmitContinueStmt(const ContinueStmt &S)
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
llvm::Type * ConvertTypeForMem(QualType T)
void EmitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &S)
LValue MakeAddrLValueWithoutTBAA(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitOMPForSimdDirective(const OMPForSimdDirective &S)
JumpDest ReturnBlock
ReturnBlock - Unified return block.
bool checkIfLoopMustProgress(bool HasConstantCond)
Returns true if a loop must make progress, which means the mustprogress attribute can be added.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
void EmitOMPFlushDirective(const OMPFlushDirective &S)
void EmitSEHLeaveStmt(const SEHLeaveStmt &S)
void EmitAttributedStmt(const AttributedStmt &S)
void EmitOMPCancelDirective(const OMPCancelDirective &S)
void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S)
void EmitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &S)
void EmitCaseStmtRange(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
const TargetInfo & getTarget() const
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S)
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitLabelStmt(const LabelStmt &S)
void EmitOMPDepobjDirective(const OMPDepobjDirective &S)
const Expr * RetExpr
If a return statement is being visited, this holds the return statement's result expression.
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init)
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
void EmitOMPSingleDirective(const OMPSingleDirective &S)
void EmitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &S)
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D)
Emit simple code for OpenMP directives in Simd-only mode.
void EmitOMPDistributeDirective(const OMPDistributeDirective &S)
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
void EmitOMPParallelForDirective(const OMPParallelForDirective &S)
void EmitOMPTeamsDirective(const OMPTeamsDirective &S)
uint64_t getCurrentProfileCount()
Get the profiler's current count.
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitOMPUnrollDirective(const OMPUnrollDirective &S)
void EmitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &S)
void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void EmitSwitchStmt(const SwitchStmt &S)
void EmitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &S)
void EmitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &S)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
llvm::Function * GenerateCapturedStmtFunction(const CapturedStmt &S)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
void EmitSEHTryStmt(const SEHTryStmt &S)
void EmitOMPInteropDirective(const OMPInteropDirective &S)
const TargetCodeGenInfo & getTargetHooks() const
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
void EmitDeclStmt(const DeclStmt &S)
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S)
void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S)
void EmitCoroutineBody(const CoroutineBodyStmt &S)
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitDoStmt(const DoStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S)
void EmitCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
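A hedged example of a branch that must be routed through cleanups: a break that leaves the scope of a local with a destructor cannot jump straight to its target (names invented):

  struct Lock { Lock() {} ~Lock() {} };

  void worker(int n) {
    for (int i = 0; i < n; ++i) {
      Lock guard;   // pending cleanup: ~Lock()
      if (i == 7)
        break;      // ~Lock() must run before control leaves the loop, so the
    }               // branch is emitted through the cleanup, not directly
  }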
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
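A hedged example of a condition that folds to a simple integer, letting CodeGen emit only the live arm of the if (names invented):

  int pointer_bits() {
    if (sizeof(void *) == 8)   // folds to a constant at compile time
      return 64;               // only the selected arm needs to be emitted
    else
      return 32;
  }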
void EmitOMPTileDirective(const OMPTileDirective &S)
JumpDest getJumpDestForLabel(const LabelDecl *S)
getJumpDestForLabel - Return the JumpDest (LLVM basic block) that the specified label maps to.
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S)
llvm::Function * EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K)
llvm::Type * ConvertType(QualType T)
void EmitOMPSectionsDirective(const OMPSectionsDirective &S)
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S)
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S)
void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S)
void EmitOMPSimdDirective(const OMPSimdDirective &S)
void EmitOMPCriticalDirective(const OMPCriticalDirective &S)
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S)
void EmitOMPForDirective(const OMPForDirective &S)
void EmitOMPMetaDirective(const OMPMetaDirective &S)
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S)
LValue InitCapturedStruct(const CapturedStmt &S)
void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S)
void EmitReturnStmt(const ReturnStmt &S)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
void EmitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &S)
void EmitOMPMasterDirective(const OMPMasterDirective &S)
void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S)
void EmitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &S)
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
void EmitOMPMaskedDirective(const OMPMaskedDirective &S)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
static bool hasAggregateEvaluationKind(QualType T)
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S)
void EmitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &S)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
void EmitBreakStmt(const BreakStmt &S)
Address GenerateCapturedStmtArgument(const CapturedStmt &S)
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
const CGFunctionInfo * CurFnInfo
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const
isObviouslyBranchWithoutCleanups - Return true if a branch to the specified destination obviously has...
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitCoreturnStmt(const CoreturnStmt &S)
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S)
void EmitOMPErrorDirective(const OMPErrorDirective &S)
void EmitOMPSectionDirective(const OMPSectionDirective &S)
void EmitOMPBarrierDirective(const OMPBarrierDirective &S)
void EmitStopPoint(const Stmt *S)
EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S)
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs=std::nullopt)
EmitStmt - Emit the code for the statement.
void EmitWhileStmt(const WhileStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S)
llvm::LLVMContext & getLLVMContext()
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S)
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void ResolveBranchFixups(llvm::BasicBlock *Target)
void EmitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &S)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S)
void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S)
Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S)
llvm::BasicBlock * GetIndirectGotoBlock()
void EmitAsmStmt(const AsmStmt &S)
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S)
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
This class organizes the cross-function state that is used while generating LLVM code.
void SetInternalFunctionAttributes(GlobalDecl GD, llvm::Function *F, const CGFunctionInfo &FI)
Set the attributes on the LLVM function for the given decl and function info.
llvm::Module & getModule() const
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
const llvm::DataLayout & getDataLayout() const
void Error(SourceLocation loc, StringRef error)
Emit a general error that something can't be done.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
SanitizerMetadata * getSanitizerMetadata()
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys=std::nullopt)
void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn)
Assign counters to regions and configure them for PGO of a given function.
void setCurrentStmt(const Stmt *S)
If the execution count for the current statement is known, record that as the current count.
Definition: CodeGenPGO.h:76
bool haveRegionCounts() const
Whether or not we have PGO region data for the current function.
Definition: CodeGenPGO.h:53
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given function info.
Definition: CGCall.cpp:1627
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:680
A saved depth on the scope stack.
Definition: EHScopeStack.h:101
bool encloses(stable_iterator I) const
Returns true if this scope encloses I.
Definition: EHScopeStack.h:118
stable_iterator getInnermostNormalCleanup() const
Returns the innermost normal cleanup on the stack, or stable_end() if there are no normal cleanups.
Definition: EHScopeStack.h:370
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:393
bool empty() const
Determines whether the exception-scopes stack is empty.
Definition: EHScopeStack.h:359
bool hasNormalCleanups() const
Determines whether there are any normal cleanups on the stack.
Definition: EHScopeStack.h:364
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
Definition: EHScopeStack.h:398
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:352
LValue - This represents a reference to an lvalue.
Definition: CGValue.h:181
Address getAddress(CodeGenFunction &CGF) const
Definition: CGValue.h:370
llvm::Value * getPointer(CodeGenFunction &CGF) const
Definition: CGValue.h:361
void pop()
End the current loop.
Definition: CGLoopInfo.cpp:825
void push(llvm::BasicBlock *Header, const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc)
Begin a new structured loop.
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:41
bool isScalar() const
Definition: CGValue.h:63
static RValue get(llvm::Value *V)
Definition: CGValue.h:97
bool isAggregate() const
Definition: CGValue.h:65
Address getAggregateAddress() const
getAggregateAddress() - Return the Address of the aggregate.
Definition: CGValue.h:82
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:70
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:77
void disableSanitizerForGlobal(llvm::GlobalVariable *GV)
virtual void addReturnRegisterOutputs(CodeGen::CodeGenFunction &CGF, CodeGen::LValue ReturnValue, std::string &Constraints, std::vector< llvm::Type * > &ResultRegTypes, std::vector< llvm::Type * > &ResultTruncRegTypes, std::vector< CodeGen::LValue > &ResultRegDests, std::string &AsmString, unsigned NumOutputs) const
Adds constraints and types for result registers.
Definition: TargetInfo.h:184
virtual bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, llvm::Type *Ty) const
Target hook to decide whether an inline asm operand can be passed by value.
Definition: TargetInfo.h:178
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1606
Stmt *const * const_body_iterator
Definition: Stmt.h:1673
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1072
ContinueStmt - This represents a continue.
Definition: Stmt.h:2950
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Definition: DeclBase.h:2344
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1260
ValueDecl * getDecl()
Definition: Expr.h:1328
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition: Stmt.h:1497
SourceLocation getBodyRBrace() const
getBodyRBrace - Gets the right brace of the body, if a body exists.
Definition: DeclBase.cpp:1029
virtual bool hasBody() const
Returns true if this Decl represents a declaration for a body of code, such as a function or method d...
Definition: DeclBase.h:1085
SourceLocation getLocation() const
Definition: DeclBase.h:447
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1547
DoStmt - This represents a 'do/while' stmt.
Definition: Stmt.h:2725
This represents one expression.
Definition: Expr.h:110
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition: Expr.cpp:3086
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx, SmallVectorImpl< PartialDiagnosticAt > *Diag=nullptr) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3055
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition: Expr.cpp:3556
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:277
QualType getType() const
Definition: Expr.h:142
Represents a member of a struct/union/class.
Definition: Decl.h:3058
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition: Stmt.h:2781
const Expr * getSubExpr() const
Definition: Expr.h:1052
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition: Type.h:4046
CallingConv getCallConv() const
Definition: Type.h:4374
This represents a GCC inline-assembly statement extension.
Definition: Stmt.h:3259
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
GotoStmt - This represents a direct goto.
Definition: Stmt.h:2862
IfStmt - This represents an if/then/else.
Definition: Stmt.h:2138
IndirectGotoStmt - This represents an indirect goto.
Definition: Stmt.h:2901
Represents the declaration of a label.
Definition: Decl.h:499
LabelStmt * getStmt() const
Definition: Decl.h:523
LabelStmt - Represents a label, which has a substatement.
Definition: Stmt.h:2031
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:454
bool assumeFunctionsAreConvergent() const
Definition: LangOptions.h:635
Represents a point when we exit a loop.
Definition: ProgramPoint.h:711
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition: Decl.h:276
If a crash happens while one of these objects is live, the message is printed out along with the spe...
A (possibly-)qualified type.
Definition: Type.h:738
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:805
QualType getCanonicalType() const
Definition: Type.h:7201
The collection of all-type qualifiers we support.
Definition: Type.h:148
Represents a struct/union/class.
Definition: Decl.h:4169
field_range fields() const
Definition: Decl.h:4375
field_iterator field_begin() const
Definition: Decl.cpp:5070
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition: Stmt.h:3019
Expr * getRetValue()
Definition: Stmt.h:3050
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
This class handles loading and caching of source files into memory.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition: Stmt.h:84
@ NoStmtClass
Definition: Stmt.h:87
StmtClass getStmtClass() const
Definition: Stmt.h:1358
Likelihood
The likelihood of a branch being taken.
Definition: Stmt.h:1301
@ LH_Unlikely
Branch has the [[unlikely]] attribute.
Definition: Stmt.h:1302
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition: Stmt.h:1303
@ LH_Likely
Branch has the [[likely]] attribute.
Definition: Stmt.h:1305
static const Attr * getLikelihoodAttr(const Stmt *S)
Definition: Stmt.cpp:163
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.cpp:338
static Likelihood getLikelihood(ArrayRef< const Attr * > Attrs)
Definition: Stmt.cpp:155
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1773
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.h:1954
SourceLocation getLocationOfByte(unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, const TargetInfo &Target, unsigned *StartToken=nullptr, unsigned *StartTokenByteOffset=nullptr) const
getLocationOfByte - Return a source location that points to the specified byte of this string literal...
Definition: Expr.cpp:1329
StringRef getString() const
Definition: Expr.h:1850
const SwitchCase * getNextSwitchCase() const
Definition: Stmt.h:1774
SwitchStmt - This represents a 'switch' stmt.
Definition: Stmt.h:2388
Exposes information about the current target.
Definition: TargetInfo.h:213
bool validateInputConstraint(MutableArrayRef< ConstraintInfo > OutputConstraints, ConstraintInfo &info) const
Definition: TargetInfo.cpp:812
StringRef getNormalizedGCCRegisterName(StringRef Name, bool ReturnCanonical=false) const
Returns the "normalized" GCC register name.
Definition: TargetInfo.cpp:674
bool validateOutputConstraint(ConstraintInfo &Info) const
Definition: TargetInfo.cpp:715
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
bool isVoidType() const
Definition: Type.h:7695
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:7980
bool isReferenceType() const
Definition: Type.h:7414
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:694
Represent the declaration of a variable (in which case it is an lvalue), a function (in which case it ...
Definition: Decl.h:706
Represents a variable declaration or definition.
Definition: Decl.h:918
WhileStmt - This represents a 'while' stmt.
Definition: Stmt.h:2584
Defines the clang::TargetInfo interface.
bool Rem(InterpState &S, CodePtr OpPC)
1) Pops the RHS from the stack.
Definition: Interp.h:419
The JSON file list parser is used to communicate input to InstallAPI.
CapturedRegionKind
The different kinds of captured statement.
Definition: CapturedStmt.h:16
@ SC_Register
Definition: Specifiers.h:254
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
@ CC_SwiftAsync
Definition: Specifiers.h:291
unsigned long uint64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition: Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void setScopeDepth(EHScopeStack::stable_iterator depth)
EHScopeStack::stable_iterator getScopeDepth() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:642
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:644
bool hasMatchingInput() const
Return true if this output operand has a matching (tied) input operand.
Definition: TargetInfo.h:1104
bool hasTiedOperand() const
Return true if this input operand is a matching constraint that ties it to an output operand.
Definition: TargetInfo.h:1111