//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"
#include <optional>
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm
void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set. To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors. We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();". Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function. This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:     EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass:  EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:     EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:    EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S));      break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass:  EmitAsmStmt(cast<AsmStmt>(*S));            break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
  }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPErrorDirectiveClass:
    EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
    llvm_unreachable("masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable("masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
    llvm_unreachable("parallel masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable(
        "parallel masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
    break;
  case Stmt::OMPDispatchDirectiveClass:
    CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
    break;
  case Stmt::OMPScopeDirectiveClass:
    llvm_unreachable("scope not supported with FE outlining");
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
    EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
    EmitOMPTargetTeamsGenericLoopDirective(
        cast<OMPTargetTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelGenericLoopDirectiveClass:
    EmitOMPParallelGenericLoopDirective(
        cast<OMPParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
    EmitOMPTargetParallelGenericLoopDirective(
        cast<OMPTargetParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedDirectiveClass:
    EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
    break;
  case Stmt::OpenACCComputeConstructClass:
    EmitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}
488
/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
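/// For example, a GNU statement expression such as "({ int y = f(); y; })"
/// yields the value of its last sub-statement.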
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here. They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression. Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr. Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->insert(std::next(CurBB->getIterator()), BB);
  else
    CurFn->insert(CurFn->end(), BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block. If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->insert(std::next(insn->getParent()->getIterator()), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->insert(CurFn->end(), block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups. Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  bool noinline = false;
  bool alwaysinline = false;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    switch (A->getKind()) {
    default:
      break;
    case attr::NoMerge:
      nomerge = true;
      break;
    case attr::NoInline:
      noinline = true;
      break;
    case attr::AlwaysInline:
      alwaysinline = true;
      break;
    case attr::MustTail: {
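      // Set for, e.g., "[[clang::musttail]] return f(x);": the attributed
      // sub-statement is a return whose operand is a call expression.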
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
    } break;
    case attr::CXXAssume: {
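      // Set for, e.g., "[[assume(x > 0)]];": emitted as an assumption when it
      // is side-effect free.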
      const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
      if (getLangOpts().CXXAssumptions &&
          !Assumption->HasSideEffects(getContext())) {
        llvm::Value *AssumptionVal = EvaluateExprAsBool(Assumption);
        Builder.CreateAssumption(AssumptionVal);
      }
    } break;
    }
  }
  SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
  SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
  SaveAndRestore save_musttail(MustTailCall, musttail);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

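  // Reached for GNU computed goto, e.g. "void *addr = &&label; goto *addr;".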
  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // The else branch of a consteval if statement is always the only branch that
  // can be runtime evaluated.
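  // e.g. in "if consteval { ... } else { ... }", only the else branch can
  // produce runtime code.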
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0. The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  // Also, differentiate between disabled PGO and a never executed branch with
  // PGO. Assuming PGO is in use:
  // - we want to ignore the [[likely]] attribute if the branch is never
  // executed,
  // - assuming the profile is poor, preserving the attribute may still be
  // beneficial.
  // As an approximation, preserve the attribute only if both the branch and the
  // parent context were not executed.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t ThenCount = getProfileCount(S.getThen());
  if (!ThenCount && !getCurrentProfileCount() &&
      CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), S.getElse());

  // When measuring MC/DC, always fully evaluate the condition up front using
  // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
  // executing the body of the if.then or if.else. This is useful for when
  // there is a 'return' within the body, but this is particularly beneficial
  // when one if-stmt is nested within another if-stmt so that all of the MC/DC
  // updates are kept linear and consistent.
  if (!CGM.getCodeGenOpts().MCDCCoverage)
    EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH);
  else {
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
  }

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getThen());
  else
    incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    // When single byte coverage mode is enabled, add a counter to else block.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(Else);
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header. C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C != nullptr;
  bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // When single byte coverage mode is enabled, add a counter to loop condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body. We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    // When single byte coverage mode is enabled, add a counter to the body.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getBody());
    else
      incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  if (llvm::EnableSingleByteCoverage)
    EmitBlockWithFallThrough(LoopBody, S.getBody());
  else
    EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());
  // When single byte coverage mode is enabled, add a counter to loop condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C;
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  Expr::EvalResult Result;
  bool CondIsConstInt =
      !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block. Otherwise, if there is no condition variable, we can
  // form the continue block now. If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    // When single byte coverage mode is enabled, add a counter to loop
    // condition.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getCond());

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0. The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant. Don't even create a new block for the
    // body, just fall into it.
  }

  // When single byte coverage mode is enabled, add a counter to the body.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);
  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}
1330
namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// If we have 'return f(...);', where both caller and callee are SwiftAsync,
/// codegen it as 'tail call ...; ret void;'.
static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder,
                                     const CGFunctionInfo *CurFnInfo) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return;
    }
  } else {
    return;
  }
  if (calleeType->getCallConv() == CallingConv::CC_SwiftAsync &&
      (CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync)) {
    auto CI = cast<llvm::CallInst>(&Builder.GetInsertBlock()->back());
    CI->setTailCallKind(llvm::CallInst::TCK_MustTail);
    Builder.CreateRetVoid();
    Builder.ClearInsertionPoint();
  }
}

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(SLocPtr, ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();
  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 into indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
      if (auto *CE = dyn_cast<CallExpr>(RV))
        makeTailCallIfSwiftAsync(CE, Builder, CurFnInfo);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

/// EmitCaseStmtRange - If case statement range is not too big then
/// add multiple cases to switch instruction, one for each value within
/// the range. If range is too big then emit "if" condition check.
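/// Case ranges are a GNU extension, e.g. "case 1 ... 64:".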
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
                                        ArrayRef<const Attr *> Attrs) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      else if (SwitchLikelihood)
        SwitchLikelihood->push_back(LH);

      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->insert(CurFn->end(), CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range, we
    // need to update the weight for the default, ie, the first case, to include
    // this case.
    (*SwitchWeights)[0] += ThisCount;
  } else if (SwitchLikelihood)
    Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);

  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}
1594
1596 ArrayRef<const Attr *> Attrs) {
1597 // If there is no enclosing switch instance that we're aware of, then this
1598 // case statement and its block can be elided. This situation only happens
1599 // when we've constant-folded the switch, are emitting the constant case,
1600 // and part of the constant case includes another case statement. For
1601 // instance: switch (4) { case 4: do { case 5: } while (1); }
1602 if (!SwitchInsn) {
1603 EmitStmt(S.getSubStmt());
1604 return;
1605 }
1606
1607 // Handle case ranges.
1608 if (S.getRHS()) {
1609 EmitCaseStmtRange(S, Attrs);
1610 return;
1611 }
1612
1613 llvm::ConstantInt *CaseVal =
1614 Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1615
1616 // Emit debuginfo for the case value if it is an enum value.
1617 const ConstantExpr *CE;
1618 if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1619 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1620 else
1621 CE = dyn_cast<ConstantExpr>(S.getLHS());
1622 if (CE) {
1623 if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1624 if (CGDebugInfo *Dbg = getDebugInfo())
1626 Dbg->EmitGlobalVariable(DE->getDecl(),
1627 APValue(llvm::APSInt(CaseVal->getValue())));
1628 }
1629
1630 if (SwitchLikelihood)
1631 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1632
1633 // If the body of the case is just a 'break', try to not emit an empty block.
1634 // If we're profiling or we're not optimizing, leave the block in for better
1635 // debug and coverage analysis.
1637 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1638 isa<BreakStmt>(S.getSubStmt())) {
1639 JumpDest Block = BreakContinueStack.back().BreakBlock;
1640
1641 // Only do this optimization if there are no cleanups that need emitting.
1643 if (SwitchWeights)
1644 SwitchWeights->push_back(getProfileCount(&S));
1645 SwitchInsn->addCase(CaseVal, Block.getBlock());
1646
1647 // If there was a fallthrough into this case, make sure to redirect it to
1648 // the end of the switch as well.
1649 if (Builder.GetInsertBlock()) {
1650 Builder.CreateBr(Block.getBlock());
1651 Builder.ClearInsertionPoint();
1652 }
1653 return;
1654 }
1655 }
1656
1657 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1658 EmitBlockWithFallThrough(CaseDest, &S);
1659 if (SwitchWeights)
1660 SwitchWeights->push_back(getProfileCount(&S));
1661 SwitchInsn->addCase(CaseVal, CaseDest);
1662
1663 // Recursively emitting the statement is acceptable, but is not wonderful for
1664 // code where we have many case statements nested together, i.e.:
1665 // case 1:
1666 // case 2:
1667 // case 3: etc.
1668 // Handling this recursively will create a new block for each case statement
1669 // that falls through to the next case which is IR intensive. It also causes
1670 // deep recursion which can run into stack depth limitations. Handle
1671 // sequential non-range case statements specially.
1672 //
1673 // TODO When the next case has a likelihood attribute the code returns to the
1674 // recursive algorithm. Maybe improve this case if it becomes common practice
1675 // to use a lot of attributes.
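// Illustrative sketch (editor's note, not in the source): for
//   case 1: case 2: case 3: foo();
// the loop below adds the values 1, 2, and 3 to the existing SwitchInsn, and
// (absent Clang profile instrumentation) all three share one sw.bb destination
// rather than producing a chain of fallthrough blocks:
//   switch i32 %n, label %sw.default [ i32 1, label %sw.bb
//                                      i32 2, label %sw.bb
//                                      i32 3, label %sw.bb ]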
1676 const CaseStmt *CurCase = &S;
1677 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1678
1679 // Otherwise, iteratively add consecutive cases to this switch stmt.
1680 while (NextCase && NextCase->getRHS() == nullptr) {
1681 CurCase = NextCase;
1682 llvm::ConstantInt *CaseVal =
1683 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1684
1685 if (SwitchWeights)
1686 SwitchWeights->push_back(getProfileCount(NextCase));
1687     if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1688       CaseDest = createBasicBlock("sw.bb");
1689 EmitBlockWithFallThrough(CaseDest, CurCase);
1690 }
1691 // Since this loop is only executed when the CaseStmt has no attributes
1692 // use a hard-coded value.
1693 if (SwitchLikelihood)
1694 SwitchLikelihood->push_back(Stmt::LH_None);
1695
1696 SwitchInsn->addCase(CaseVal, CaseDest);
1697 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1698 }
1699
1700 // Generate a stop point for debug info if the case statement is
1701 // followed by a default statement. A fallthrough case before a
1702 // default case gets its own branch target.
1703 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1704 EmitStopPoint(CurCase);
1705
1706 // Normal default recursion for non-cases.
1707 EmitStmt(CurCase->getSubStmt());
1708}
1709
1710 void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1711                                       ArrayRef<const Attr *> Attrs) {
1712 // If there is no enclosing switch instance that we're aware of, then this
1713 // default statement can be elided. This situation only happens when we've
1714 // constant-folded the switch.
1715 if (!SwitchInsn) {
1716 EmitStmt(S.getSubStmt());
1717 return;
1718 }
1719
1720 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1721 assert(DefaultBlock->empty() &&
1722 "EmitDefaultStmt: Default block already defined?");
1723
1724 if (SwitchLikelihood)
1725 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1726
1727 EmitBlockWithFallThrough(DefaultBlock, &S);
1728
1729 EmitStmt(S.getSubStmt());
1730}
1731
1732/// CollectStatementsForCase - Given the body of a 'switch' statement and a
1733/// constant value that is being switched on, see if we can dead code eliminate
1734/// the body of the switch to a simple series of statements to emit. Basically,
1735/// on a switch (5) we want to find these statements:
1736/// case 5:
1737/// printf(...); <--
1738/// ++i; <--
1739/// break;
1740///
1741/// and add them to the ResultStmts vector. If it is unsafe to do this
1742/// transformation (for example, one of the elided statements contains a label
1743/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1744/// should include statements after it (e.g. the printf() line is a substmt of
1745/// the case) then return CSFC_FallThrough. If we handled it and found a break
1746/// statement, then return CSFC_Success.
1747///
1748/// If Case is non-null, then we are looking for the specified case, checking
1749/// that nothing we jump over contains labels. If Case is null, then we found
1750/// the case and are looking for the break.
1751///
1752/// If the recursive walk actually finds our Case, then we set FoundCase to
1753/// true.
1754///
1755 enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1756 static CSFC_Result CollectStatementsForCase(const Stmt *S,
1757                                             const SwitchCase *Case,
1758 bool &FoundCase,
1759 SmallVectorImpl<const Stmt*> &ResultStmts) {
1760 // If this is a null statement, just succeed.
1761 if (!S)
1762 return Case ? CSFC_Success : CSFC_FallThrough;
1763
1764 // If this is the switchcase (case 4: or default) that we're looking for, then
1765 // we're in business. Just add the substatement.
1766 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1767 if (S == Case) {
1768 FoundCase = true;
1769 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1770 ResultStmts);
1771 }
1772
1773 // Otherwise, this is some other case or default statement, just ignore it.
1774 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1775 ResultStmts);
1776 }
1777
1778 // If we are in the live part of the code and we found our break statement,
1779 // return a success!
1780 if (!Case && isa<BreakStmt>(S))
1781 return CSFC_Success;
1782
1783   // If this is a compound statement, then it might contain the SwitchCase,
1784   // the break, or neither.
1785 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1786 // Handle this as two cases: we might be looking for the SwitchCase (if so
1787 // the skipped statements must be skippable) or we might already have it.
1788 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1789 bool StartedInLiveCode = FoundCase;
1790 unsigned StartSize = ResultStmts.size();
1791
1792 // If we've not found the case yet, scan through looking for it.
1793 if (Case) {
1794 // Keep track of whether we see a skipped declaration. The code could be
1795 // using the declaration even if it is skipped, so we can't optimize out
1796 // the decl if the kept statements might refer to it.
1797 bool HadSkippedDecl = false;
1798
1799 // If we're looking for the case, just see if we can skip each of the
1800 // substatements.
1801 for (; Case && I != E; ++I) {
1802 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1803
1804 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1805 case CSFC_Failure: return CSFC_Failure;
1806 case CSFC_Success:
1807         // A successful result means that either 1) the statement doesn't
1808         // have the case and is skippable, or 2) it does contain the case value
1809         // and also contains the break to exit the switch. In the latter case,
1810         // we just verify the rest of the statements are elidable.
1811 if (FoundCase) {
1812 // If we found the case and skipped declarations, we can't do the
1813 // optimization.
1814 if (HadSkippedDecl)
1815 return CSFC_Failure;
1816
1817 for (++I; I != E; ++I)
1818 if (CodeGenFunction::ContainsLabel(*I, true))
1819 return CSFC_Failure;
1820 return CSFC_Success;
1821 }
1822 break;
1823 case CSFC_FallThrough:
1824         // If we have a fallthrough condition, then we must have found the
1825         // case and started to include statements. Consider the rest of the
1826 // statements in the compound statement as candidates for inclusion.
1827 assert(FoundCase && "Didn't find case but returned fallthrough?");
1828 // We recursively found Case, so we're not looking for it anymore.
1829 Case = nullptr;
1830
1831 // If we found the case and skipped declarations, we can't do the
1832 // optimization.
1833 if (HadSkippedDecl)
1834 return CSFC_Failure;
1835 break;
1836 }
1837 }
1838
1839 if (!FoundCase)
1840 return CSFC_Success;
1841
1842 assert(!HadSkippedDecl && "fallthrough after skipping decl");
1843 }
1844
1845 // If we have statements in our range, then we know that the statements are
1846 // live and need to be added to the set of statements we're tracking.
1847 bool AnyDecls = false;
1848 for (; I != E; ++I) {
1849       AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1850
1851 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1852 case CSFC_Failure: return CSFC_Failure;
1853 case CSFC_FallThrough:
1854 // A fallthrough result means that the statement was simple and just
1855 // included in ResultStmt, keep adding them afterwards.
1856 break;
1857 case CSFC_Success:
1858 // A successful result means that we found the break statement and
1859 // stopped statement inclusion. We just ensure that any leftover stmts
1860 // are skippable and return success ourselves.
1861 for (++I; I != E; ++I)
1862 if (CodeGenFunction::ContainsLabel(*I, true))
1863 return CSFC_Failure;
1864 return CSFC_Success;
1865 }
1866 }
1867
1868 // If we're about to fall out of a scope without hitting a 'break;', we
1869 // can't perform the optimization if there were any decls in that scope
1870 // (we'd lose their end-of-lifetime).
1871 if (AnyDecls) {
1872 // If the entire compound statement was live, there's one more thing we
1873 // can try before giving up: emit the whole thing as a single statement.
1874 // We can do that unless the statement contains a 'break;'.
1875 // FIXME: Such a break must be at the end of a construct within this one.
1876 // We could emit this by just ignoring the BreakStmts entirely.
1877 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1878 ResultStmts.resize(StartSize);
1879 ResultStmts.push_back(S);
1880 } else {
1881 return CSFC_Failure;
1882 }
1883 }
1884
1885 return CSFC_FallThrough;
1886 }
1887
1888 // Okay, this is some other statement that we don't handle explicitly, like a
1889 // for statement or increment etc. If we are skipping over this statement,
1890 // just verify it doesn't have labels, which would make it invalid to elide.
1891 if (Case) {
1892 if (CodeGenFunction::ContainsLabel(S, true))
1893 return CSFC_Failure;
1894 return CSFC_Success;
1895 }
1896
1897 // Otherwise, we want to include this statement. Everything is cool with that
1898 // so long as it doesn't contain a break out of the switch we're in.
1899   if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1900
1901 // Otherwise, everything is great. Include the statement and tell the caller
1902 // that we fall through and include the next statement as well.
1903 ResultStmts.push_back(S);
1904 return CSFC_FallThrough;
1905}
1906
1907/// FindCaseStatementsForValue - Find the case statement being jumped to and
1908/// then invoke CollectStatementsForCase to find the list of statements to emit
1909/// for a switch on constant. See the comment above CollectStatementsForCase
1910/// for more details.
1911 static bool FindCaseStatementsForValue(const SwitchStmt &S,
1912                                        const llvm::APSInt &ConstantCondValue,
1913 SmallVectorImpl<const Stmt*> &ResultStmts,
1914 ASTContext &C,
1915 const SwitchCase *&ResultCase) {
1916 // First step, find the switch case that is being branched to. We can do this
1917 // efficiently by scanning the SwitchCase list.
1918 const SwitchCase *Case = S.getSwitchCaseList();
1919 const DefaultStmt *DefaultCase = nullptr;
1920
1921 for (; Case; Case = Case->getNextSwitchCase()) {
1922 // It's either a default or case. Just remember the default statement in
1923 // case we're not jumping to any numbered cases.
1924 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1925 DefaultCase = DS;
1926 continue;
1927 }
1928
1929 // Check to see if this case is the one we're looking for.
1930 const CaseStmt *CS = cast<CaseStmt>(Case);
1931 // Don't handle case ranges yet.
1932 if (CS->getRHS()) return false;
1933
1934 // If we found our case, remember it as 'case'.
1935 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1936 break;
1937 }
1938
1939 // If we didn't find a matching case, we use a default if it exists, or we
1940 // elide the whole switch body!
1941 if (!Case) {
1942 // It is safe to elide the body of the switch if it doesn't contain labels
1943 // etc. If it is safe, return successfully with an empty ResultStmts list.
1944 if (!DefaultCase)
1945       return !CodeGenFunction::ContainsLabel(&S);
1946     Case = DefaultCase;
1947 }
1948
1949 // Ok, we know which case is being jumped to, try to collect all the
1950 // statements that follow it. This can fail for a variety of reasons. Also,
1951 // check to see that the recursive walk actually found our case statement.
1952 // Insane cases like this can fail to find it in the recursive walk since we
1953 // don't handle every stmt kind:
1954 // switch (4) {
1955 // while (1) {
1956 // case 4: ...
1957 bool FoundCase = false;
1958 ResultCase = Case;
1959 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1960 ResultStmts) != CSFC_Failure &&
1961 FoundCase;
1962}
1963
1964static std::optional<SmallVector<uint64_t, 16>>
1965 getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
1966   // Are there enough branches to weight them?
1967 if (Likelihoods.size() <= 1)
1968 return std::nullopt;
1969
1970 uint64_t NumUnlikely = 0;
1971 uint64_t NumNone = 0;
1972 uint64_t NumLikely = 0;
1973 for (const auto LH : Likelihoods) {
1974 switch (LH) {
1975 case Stmt::LH_Unlikely:
1976 ++NumUnlikely;
1977 break;
1978 case Stmt::LH_None:
1979 ++NumNone;
1980 break;
1981 case Stmt::LH_Likely:
1982 ++NumLikely;
1983 break;
1984 }
1985 }
1986
1987 // Is there a likelihood attribute used?
1988 if (NumUnlikely == 0 && NumLikely == 0)
1989 return std::nullopt;
1990
1991 // When multiple cases share the same code they can be combined during
1992 // optimization. In that case the weights of the branch will be the sum of
1993 // the individual weights. Make sure the combined sum of all neutral cases
1994 // doesn't exceed the value of a single likely attribute.
1995 // The additions both avoid divisions by 0 and make sure the weights of None
1996 // don't exceed the weight of Likely.
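// Worked example (editor's note, not in the source): with one likely case and
// two plain cases, Likely = INT32_MAX / 3 = 715827882 and None = Likely / 3 =
// 238609294, so even if both plain cases are merged, their combined weight
// (477218588) stays below that of the single likely case.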
1997 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
1998 const uint64_t None = Likely / (NumNone + 1);
1999 const uint64_t Unlikely = 0;
2000
2001   SmallVector<uint64_t, 16> Result;
2002   Result.reserve(Likelihoods.size());
2003 for (const auto LH : Likelihoods) {
2004 switch (LH) {
2005 case Stmt::LH_Unlikely:
2006 Result.push_back(Unlikely);
2007 break;
2008 case Stmt::LH_None:
2009 Result.push_back(None);
2010 break;
2011 case Stmt::LH_Likely:
2012 Result.push_back(Likely);
2013 break;
2014 }
2015 }
2016
2017 return Result;
2018}
2019
2020 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
2021   // Handle nested switch statements.
2022 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2023 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2024 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2025 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2026
2027 // See if we can constant fold the condition of the switch and therefore only
2028 // emit the live case statement (if any) of the switch.
2029 llvm::APSInt ConstantCondValue;
2030 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
2031     SmallVector<const Stmt*, 4> CaseStmts;
2032     const SwitchCase *Case = nullptr;
2033 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2034 getContext(), Case)) {
2035 if (Case)
2036         incrementProfileCounter(Case);
2037       RunCleanupsScope ExecutedScope(*this);
2038
2039 if (S.getInit())
2040 EmitStmt(S.getInit());
2041
2042 // Emit the condition variable if needed inside the entire cleanup scope
2043 // used by this special case for constant folded switches.
2044 if (S.getConditionVariable())
2045 EmitDecl(*S.getConditionVariable());
2046
2047 // At this point, we are no longer "within" a switch instance, so
2048 // we can temporarily enforce this to ensure that any embedded case
2049 // statements are not emitted.
2050 SwitchInsn = nullptr;
2051
2052 // Okay, we can dead code eliminate everything except this case. Emit the
2053 // specified series of statements and we're good.
2054 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
2055 EmitStmt(CaseStmts[i]);
2056       incrementProfileCounter(&S);
2057
2058 // Now we want to restore the saved switch instance so that nested
2059     // switches continue to function properly.
2060 SwitchInsn = SavedSwitchInsn;
2061
2062 return;
2063 }
2064 }
2065
2066 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2067
2068 RunCleanupsScope ConditionScope(*this);
2069
2070 if (S.getInit())
2071 EmitStmt(S.getInit());
2072
2073 if (S.getConditionVariable())
2074 EmitDecl(*S.getConditionVariable());
2075 llvm::Value *CondV = EmitScalarExpr(S.getCond());
2076
2077 // Create basic block to hold stuff that comes after switch
2078 // statement. We also need to create a default block now so that
2079   // explicit case range tests can have a place to jump to on
2080 // failure.
2081 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2082 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
2083 if (PGO.haveRegionCounts()) {
2084 // Walk the SwitchCase list to find how many there are.
2085 uint64_t DefaultCount = 0;
2086 unsigned NumCases = 0;
2087 for (const SwitchCase *Case = S.getSwitchCaseList();
2088 Case;
2089 Case = Case->getNextSwitchCase()) {
2090 if (isa<DefaultStmt>(Case))
2091 DefaultCount = getProfileCount(Case);
2092 NumCases += 1;
2093 }
2094 SwitchWeights = new SmallVector<uint64_t, 16>();
2095 SwitchWeights->reserve(NumCases);
2096 // The default needs to be first. We store the edge count, so we already
2097 // know the right weight.
2098 SwitchWeights->push_back(DefaultCount);
2099 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2100 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2101 // Initialize the default case.
2102 SwitchLikelihood->push_back(Stmt::LH_None);
2103 }
2104
2105 CaseRangeBlock = DefaultBlock;
2106
2107 // Clear the insertion point to indicate we are in unreachable code.
2108 Builder.ClearInsertionPoint();
2109
2110 // All break statements jump to NextBlock. If BreakContinueStack is non-empty
2111 // then reuse last ContinueBlock.
2112 JumpDest OuterContinue;
2113 if (!BreakContinueStack.empty())
2114 OuterContinue = BreakContinueStack.back().ContinueBlock;
2115
2116 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
2117
2118 // Emit switch body.
2119 EmitStmt(S.getBody());
2120
2121 BreakContinueStack.pop_back();
2122
2123 // Update the default block in case explicit case range tests have
2124 // been chained on top.
2125 SwitchInsn->setDefaultDest(CaseRangeBlock);
2126
2127 // If a default was never emitted:
2128 if (!DefaultBlock->getParent()) {
2129 // If we have cleanups, emit the default block so that there's a
2130 // place to jump through the cleanups from.
2131 if (ConditionScope.requiresCleanups()) {
2132 EmitBlock(DefaultBlock);
2133
2134 // Otherwise, just forward the default block to the switch end.
2135 } else {
2136 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2137 delete DefaultBlock;
2138 }
2139 }
2140
2141 ConditionScope.ForceCleanup();
2142
2143 // Emit continuation.
2144 EmitBlock(SwitchExit.getBlock(), true);
2145   incrementProfileCounter(&S);
2146
2147 // If the switch has a condition wrapped by __builtin_unpredictable,
2148 // create metadata that specifies that the switch is unpredictable.
2149 // Don't bother if not optimizing because that metadata would not be used.
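  // Illustrative usage (editor's note, not in the source):
  //   switch (__builtin_unpredictable(x)) { ... }
  // takes this path and gets !unpredictable metadata on the emitted switch.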
2150 auto *Call = dyn_cast<CallExpr>(S.getCond());
2151 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2152 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2153 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2154 llvm::MDBuilder MDHelper(getLLVMContext());
2155 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2156 MDHelper.createUnpredictable());
2157 }
2158 }
2159
2160 if (SwitchWeights) {
2161 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2162 "switch weights do not match switch cases");
2163 // If there's only one jump destination there's no sense weighting it.
2164 if (SwitchWeights->size() > 1)
2165 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2166 createProfileWeights(*SwitchWeights));
2167 delete SwitchWeights;
2168 } else if (SwitchLikelihood) {
2169 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2170 "switch likelihoods do not match switch cases");
2171 std::optional<SmallVector<uint64_t, 16>> LHW =
2172 getLikelihoodWeights(*SwitchLikelihood);
2173 if (LHW) {
2174 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2175 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2176 createProfileWeights(*LHW));
2177 }
2178 delete SwitchLikelihood;
2179 }
2180 SwitchInsn = SavedSwitchInsn;
2181 SwitchWeights = SavedSwitchWeights;
2182 SwitchLikelihood = SavedSwitchLikelihood;
2183 CaseRangeBlock = SavedCRBlock;
2184}
2185
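// A sketch of the rewrites below (editor's note, not in the source): the GCC
// constraint "g" becomes "imr", the alternative separator ',' becomes '|',
// and "#..." alternatives are dropped, so on typical targets "g,r" is
// rewritten to "imr|r" for LLVM's constraint syntax.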
2186static std::string
2187SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2188                    SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
2189   std::string Result;
2190
2191 while (*Constraint) {
2192 switch (*Constraint) {
2193 default:
2194 Result += Target.convertConstraint(Constraint);
2195 break;
2196 // Ignore these
2197 case '*':
2198 case '?':
2199 case '!':
2200     case '=': // Will see this and the following in multi-alt constraints.
2201 case '+':
2202 break;
2203 case '#': // Ignore the rest of the constraint alternative.
2204 while (Constraint[1] && Constraint[1] != ',')
2205 Constraint++;
2206 break;
2207 case '&':
2208 case '%':
2209 Result += *Constraint;
2210 while (Constraint[1] && Constraint[1] == *Constraint)
2211 Constraint++;
2212 break;
2213 case ',':
2214 Result += "|";
2215 break;
2216 case 'g':
2217 Result += "imr";
2218 break;
2219 case '[': {
2220 assert(OutCons &&
2221 "Must pass output names to constraints with a symbolic name");
2222 unsigned Index;
2223 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2224 assert(result && "Could not resolve symbolic name"); (void)result;
2225 Result += llvm::utostr(Index);
2226 break;
2227 }
2228 }
2229
2230 Constraint++;
2231 }
2232
2233 return Result;
2234}
2235
2236 /// AddVariableConstraints - Look at AsmExpr and, if it is a variable declared
2237 /// as using a particular register, add that register as a constraint that will
2238 /// be used in this asm stmt.
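/// For example (editor's note, not in the source): given
///   register int x asm("eax");
///   asm("..." : "=r"(x));
/// the "r" constraint is replaced with "{eax}" ("&{eax}" if early-clobber),
/// pinning the operand to that register.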
2239static std::string
2240AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2241                        const TargetInfo &Target, CodeGenModule &CGM,
2242                        const AsmStmt &Stmt, const bool EarlyClobber,
2243 std::string *GCCReg = nullptr) {
2244 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2245 if (!AsmDeclRef)
2246 return Constraint;
2247 const ValueDecl &Value = *AsmDeclRef->getDecl();
2248 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2249 if (!Variable)
2250 return Constraint;
2251 if (Variable->getStorageClass() != SC_Register)
2252 return Constraint;
2253 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2254 if (!Attr)
2255 return Constraint;
2256 StringRef Register = Attr->getLabel();
2257 assert(Target.isValidGCCRegisterName(Register));
2258 // We're using validateOutputConstraint here because we only care if
2259 // this is a register constraint.
2260 TargetInfo::ConstraintInfo Info(Constraint, "");
2261 if (Target.validateOutputConstraint(Info) &&
2262 !Info.allowsRegister()) {
2263 CGM.ErrorUnsupported(&Stmt, "__asm__");
2264 return Constraint;
2265 }
2266 // Canonicalize the register here before returning it.
2267 Register = Target.getNormalizedGCCRegisterName(Register);
2268 if (GCCReg != nullptr)
2269 *GCCReg = Register.str();
2270 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2271}
2272
2273std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2274 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2275 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2276 if (Info.allowsRegister() || !Info.allowsMemory()) {
2277     if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2278       return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2279
2280 llvm::Type *Ty = ConvertType(InputType);
2281 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2282 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2283 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2284 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2285
2286 return {
2287 Builder.CreateLoad(InputValue.getAddress(*this).withElementType(Ty)),
2288 nullptr};
2289 }
2290 }
2291
2292 Address Addr = InputValue.getAddress(*this);
2293 ConstraintStr += '*';
2294 return {Addr.getPointer(), Addr.getElementType()};
2295}
2296
2297std::pair<llvm::Value *, llvm::Type *>
2298CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2299 const Expr *InputExpr,
2300 std::string &ConstraintStr) {
2301 // If this can't be a register or memory, i.e., has to be a constant
2302 // (immediate or symbolic), try to emit it as such.
2303 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2304 if (Info.requiresImmediateConstant()) {
2305 Expr::EvalResult EVResult;
2306 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2307
2308 llvm::APSInt IntResult;
2309 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2310 getContext()))
2311 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2312 }
2313
2314     Expr::EvalResult Result;
2315     if (InputExpr->EvaluateAsInt(Result, getContext()))
2316 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2317 nullptr};
2318 }
2319
2320 if (Info.allowsRegister() || !Info.allowsMemory())
2321     if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2322       return {EmitScalarExpr(InputExpr), nullptr};
2323 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2324 return {EmitScalarExpr(InputExpr), nullptr};
2325 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2326 LValue Dest = EmitLValue(InputExpr);
2327 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2328 InputExpr->getExprLoc());
2329}
2330
2331/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2332/// asm call instruction. The !srcloc MDNode contains a list of constant
2333/// integers which are the source locations of the start of each line in the
2334/// asm.
2335static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2336 CodeGenFunction &CGF) {
2337   SmallVector<llvm::Metadata *, 8> Locs;
2338   // Add the location of the first line to the MDNode.
2339 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2340 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2341 StringRef StrVal = Str->getString();
2342 if (!StrVal.empty()) {
2343     const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2344     const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2345 unsigned StartToken = 0;
2346 unsigned ByteOffset = 0;
2347
2348 // Add the location of the start of each subsequent line of the asm to the
2349 // MDNode.
2350 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2351 if (StrVal[i] != '\n') continue;
2352 SourceLocation LineLoc = Str->getLocationOfByte(
2353 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2354 Locs.push_back(llvm::ConstantAsMetadata::get(
2355 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2356 }
2357 }
2358
2359 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2360}
2361
2362static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2363 bool HasUnwindClobber, bool ReadOnly,
2364 bool ReadNone, bool NoMerge, const AsmStmt &S,
2365 const std::vector<llvm::Type *> &ResultRegTypes,
2366 const std::vector<llvm::Type *> &ArgElemTypes,
2367 CodeGenFunction &CGF,
2368 std::vector<llvm::Value *> &RegResults) {
2369 if (!HasUnwindClobber)
2370 Result.addFnAttr(llvm::Attribute::NoUnwind);
2371
2372 if (NoMerge)
2373 Result.addFnAttr(llvm::Attribute::NoMerge);
2374 // Attach readnone and readonly attributes.
2375 if (!HasSideEffect) {
2376 if (ReadNone)
2377 Result.setDoesNotAccessMemory();
2378 else if (ReadOnly)
2379 Result.setOnlyReadsMemory();
2380 }
2381
2382 // Add elementtype attribute for indirect constraints.
2383 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2384 if (Pair.value()) {
2385 auto Attr = llvm::Attribute::get(
2386 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2387 Result.addParamAttr(Pair.index(), Attr);
2388 }
2389 }
2390
2391 // Slap the source location of the inline asm into a !srcloc metadata on the
2392 // call.
2393 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2394 Result.setMetadata("srcloc",
2395 getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2396 else {
2397 // At least put the line number on MS inline asm blobs.
2398 llvm::Constant *Loc =
2399 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2400 Result.setMetadata("srcloc",
2401 llvm::MDNode::get(CGF.getLLVMContext(),
2402 llvm::ConstantAsMetadata::get(Loc)));
2403 }
2404
2405   if (CGF.getLangOpts().assumeFunctionsAreConvergent())
2406     // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2407 // convergent (meaning, they may call an intrinsically convergent op, such
2408 // as bar.sync, and so can't have certain optimizations applied around
2409 // them).
2410 Result.addFnAttr(llvm::Attribute::Convergent);
2411 // Extract all of the register value results from the asm.
2412 if (ResultRegTypes.size() == 1) {
2413 RegResults.push_back(&Result);
2414 } else {
2415 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2416 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2417 RegResults.push_back(Tmp);
2418 }
2419 }
2420}
2421
2422static void
2423 EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
2424               const llvm::ArrayRef<llvm::Value *> RegResults,
2425 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2426 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2427 const llvm::ArrayRef<LValue> ResultRegDests,
2428 const llvm::ArrayRef<QualType> ResultRegQualTys,
2429 const llvm::BitVector &ResultTypeRequiresCast,
2430 const llvm::BitVector &ResultRegIsFlagReg) {
2431   CGBuilderTy &Builder = CGF.Builder;
2432   CodeGenModule &CGM = CGF.CGM;
2433 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2434
2435 assert(RegResults.size() == ResultRegTypes.size());
2436 assert(RegResults.size() == ResultTruncRegTypes.size());
2437 assert(RegResults.size() == ResultRegDests.size());
2438 // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2439 // in which case its size may grow.
2440 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2441 assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2442
2443 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2444 llvm::Value *Tmp = RegResults[i];
2445 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2446
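    // Illustrative sketch (editor's note, not in the source): an output
    // constrained with "=@ccz" on x86 is known to be 0 or 1, so the
    // icmp-ult-2 fed to llvm.assume below lets optimizers exploit that range.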
2447 if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2448 // Target must guarantee the Value `Tmp` here is lowered to a boolean
2449 // value.
2450 llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
2451 llvm::Value *IsBooleanValue =
2452 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
2453 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2454 Builder.CreateCall(FnAssume, IsBooleanValue);
2455 }
2456
2457 // If the result type of the LLVM IR asm doesn't match the result type of
2458 // the expression, do the conversion.
2459 if (ResultRegTypes[i] != TruncTy) {
2460
2461 // Truncate the integer result to the right size, note that TruncTy can be
2462 // a pointer.
2463 if (TruncTy->isFloatingPointTy())
2464 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2465 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2466 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2467 Tmp = Builder.CreateTrunc(
2468 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2469 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2470 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2471 uint64_t TmpSize =
2472 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2473 Tmp = Builder.CreatePtrToInt(
2474 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2475 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2476 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2477 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2478 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2479 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2480 }
2481 }
2482
2483 LValue Dest = ResultRegDests[i];
2484 // ResultTypeRequiresCast elements correspond to the first
2485 // ResultTypeRequiresCast.size() elements of RegResults.
2486 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2487 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2488 Address A = Dest.getAddress(CGF).withElementType(ResultRegTypes[i]);
2489 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2490 Builder.CreateStore(Tmp, A);
2491 continue;
2492 }
2493
2494 QualType Ty =
2495 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2496 if (Ty.isNull()) {
2497 const Expr *OutExpr = S.getOutputExpr(i);
2498 CGM.getDiags().Report(OutExpr->getExprLoc(),
2499 diag::err_store_value_to_reg);
2500 return;
2501 }
2502 Dest = CGF.MakeAddrLValue(A, Ty);
2503 }
2504 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2505 }
2506}
2507
2508 static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
2509                                         const AsmStmt &S) {
2510 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2511
2512 StringRef Asm;
2513 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2514 Asm = GCCAsm->getAsmString()->getString();
2515
2516 auto &Ctx = CGF->CGM.getLLVMContext();
2517
2518 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2519 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2520 {StrTy->getType()}, false);
2521 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2522
2523 CGF->Builder.CreateCall(UBF, {StrTy});
2524}
2525
2526 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2527   // Pop all cleanup blocks at the end of the asm statement.
2528 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2529
2530 // Assemble the final asm string.
2531 std::string AsmString = S.generateAsmString(getContext());
2532
2533 // Get all the output and input constraints together.
2534 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2535 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2536
2537 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2538 bool IsValidTargetAsm = true;
2539 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2540 StringRef Name;
2541 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2542 Name = GAS->getOutputName(i);
2543 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2544 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2545 if (IsHipStdPar && !IsValid)
2546 IsValidTargetAsm = false;
2547 else
2548 assert(IsValid && "Failed to parse output constraint");
2549 OutputConstraintInfos.push_back(Info);
2550 }
2551
2552 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2553 StringRef Name;
2554 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2555 Name = GAS->getInputName(i);
2556 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2557 bool IsValid =
2558 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2559 if (IsHipStdPar && !IsValid)
2560 IsValidTargetAsm = false;
2561 else
2562 assert(IsValid && "Failed to parse input constraint");
2563 InputConstraintInfos.push_back(Info);
2564 }
2565
2566 if (!IsValidTargetAsm)
2567 return EmitHipStdParUnsupportedAsm(this, S);
2568
2569 std::string Constraints;
2570
2571 std::vector<LValue> ResultRegDests;
2572 std::vector<QualType> ResultRegQualTys;
2573 std::vector<llvm::Type *> ResultRegTypes;
2574 std::vector<llvm::Type *> ResultTruncRegTypes;
2575 std::vector<llvm::Type *> ArgTypes;
2576 std::vector<llvm::Type *> ArgElemTypes;
2577 std::vector<llvm::Value*> Args;
2578 llvm::BitVector ResultTypeRequiresCast;
2579 llvm::BitVector ResultRegIsFlagReg;
2580
2581 // Keep track of inout constraints.
2582 std::string InOutConstraints;
2583 std::vector<llvm::Value*> InOutArgs;
2584 std::vector<llvm::Type*> InOutArgTypes;
2585 std::vector<llvm::Type*> InOutArgElemTypes;
2586
2587 // Keep track of out constraints for tied input operand.
2588 std::vector<std::string> OutputConstraints;
2589
2590 // Keep track of defined physregs.
2591 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2592
2593 // An inline asm can be marked readonly if it meets the following conditions:
2594   // - it doesn't have any side effects
2595 // - it doesn't clobber memory
2596 // - it doesn't return a value by-reference
2597 // It can be marked readnone if it doesn't have any input memory constraints
2598 // in addition to meeting the conditions listed above.
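  // For example (editor's note, not in the source):
  //   asm("" : "=r"(x) : "r"(y))  -> may be readnone
  //   asm("" : "=r"(x) : "m"(y))  -> at most readonly
  // while a "memory" clobber or a by-reference output rules out both.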
2599 bool ReadOnly = true, ReadNone = true;
2600
2601 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2602 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2603
2604 // Simplify the output constraint.
2605 std::string OutputConstraint(S.getOutputConstraint(i));
2606 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2607 getTarget(), &OutputConstraintInfos);
2608
2609 const Expr *OutExpr = S.getOutputExpr(i);
2610 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2611
2612 std::string GCCReg;
2613 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2614 getTarget(), CGM, S,
2615 Info.earlyClobber(),
2616 &GCCReg);
2617 // Give an error on multiple outputs to same physreg.
2618 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2619 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2620
2621 OutputConstraints.push_back(OutputConstraint);
2622 LValue Dest = EmitLValue(OutExpr);
2623 if (!Constraints.empty())
2624 Constraints += ',';
2625
2626 // If this is a register output, then make the inline asm return it
2627 // by-value. If this is a memory result, return the value by-reference.
2628 QualType QTy = OutExpr->getType();
2629 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2630                                      hasAggregateEvaluationKind(QTy);
2631     if (!Info.allowsMemory() && IsScalarOrAggregate) {
2632
2633 Constraints += "=" + OutputConstraint;
2634 ResultRegQualTys.push_back(QTy);
2635 ResultRegDests.push_back(Dest);
2636
2637 bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc");
2638 ResultRegIsFlagReg.push_back(IsFlagReg);
2639
2640 llvm::Type *Ty = ConvertTypeForMem(QTy);
2641 const bool RequiresCast = Info.allowsRegister() &&
2642         (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2643          Ty->isAggregateType());
2644
2645 ResultTruncRegTypes.push_back(Ty);
2646 ResultTypeRequiresCast.push_back(RequiresCast);
2647
2648 if (RequiresCast) {
2649 unsigned Size = getContext().getTypeSize(QTy);
2650 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2651 }
2652 ResultRegTypes.push_back(Ty);
2653 // If this output is tied to an input, and if the input is larger, then
2654 // we need to set the actual result type of the inline asm node to be the
2655 // same as the input type.
2656 if (Info.hasMatchingInput()) {
2657 unsigned InputNo;
2658 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2659 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2660 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2661 break;
2662 }
2663 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2664
2665 QualType InputTy = S.getInputExpr(InputNo)->getType();
2666 QualType OutputType = OutExpr->getType();
2667
2668 uint64_t InputSize = getContext().getTypeSize(InputTy);
2669 if (getContext().getTypeSize(OutputType) < InputSize) {
2670 // Form the asm to return the value as a larger integer or fp type.
2671 ResultRegTypes.back() = ConvertType(InputTy);
2672 }
2673 }
2674 if (llvm::Type* AdjTy =
2675 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2676 ResultRegTypes.back()))
2677 ResultRegTypes.back() = AdjTy;
2678 else {
2679 CGM.getDiags().Report(S.getAsmLoc(),
2680 diag::err_asm_invalid_type_in_input)
2681 << OutExpr->getType() << OutputConstraint;
2682 }
2683
2684 // Update largest vector width for any vector types.
2685 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2686 LargestVectorWidth =
2687 std::max((uint64_t)LargestVectorWidth,
2688 VT->getPrimitiveSizeInBits().getKnownMinValue());
2689 } else {
2690 Address DestAddr = Dest.getAddress(*this);
2691 // Matrix types in memory are represented by arrays, but accessed through
2692 // vector pointers, with the alignment specified on the access operation.
2693 // For inline assembly, update pointer arguments to use vector pointers.
2694       // Otherwise there will be a mismatch if the matrix is also an
2695       // input argument, which is represented as a vector.
2696 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2697 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2698
2699 ArgTypes.push_back(DestAddr.getType());
2700 ArgElemTypes.push_back(DestAddr.getElementType());
2701 Args.push_back(DestAddr.getPointer());
2702 Constraints += "=*";
2703 Constraints += OutputConstraint;
2704 ReadOnly = ReadNone = false;
2705 }
2706
2707 if (Info.isReadWrite()) {
2708 InOutConstraints += ',';
2709
2710 const Expr *InputExpr = S.getOutputExpr(i);
2711 llvm::Value *Arg;
2712 llvm::Type *ArgElemType;
2713 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2714 Info, Dest, InputExpr->getType(), InOutConstraints,
2715 InputExpr->getExprLoc());
2716
2717 if (llvm::Type* AdjTy =
2718 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2719 Arg->getType()))
2720 Arg = Builder.CreateBitCast(Arg, AdjTy);
2721
2722 // Update largest vector width for any vector types.
2723 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2724 LargestVectorWidth =
2725 std::max((uint64_t)LargestVectorWidth,
2726 VT->getPrimitiveSizeInBits().getKnownMinValue());
2727 // Only tie earlyclobber physregs.
2728 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2729 InOutConstraints += llvm::utostr(i);
2730 else
2731 InOutConstraints += OutputConstraint;
2732
2733 InOutArgTypes.push_back(Arg->getType());
2734 InOutArgElemTypes.push_back(ArgElemType);
2735 InOutArgs.push_back(Arg);
2736 }
2737 }
2738
2739 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2740 // to the return value slot. Only do this when returning in registers.
2741 if (isa<MSAsmStmt>(&S)) {
2742 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2743 if (RetAI.isDirect() || RetAI.isExtend()) {
2744 // Make a fake lvalue for the return value slot.
2745       LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
2746       CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2747           *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2748 ResultRegDests, AsmString, S.getNumOutputs());
2749 SawAsmBlock = true;
2750 }
2751 }
2752
2753 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2754 const Expr *InputExpr = S.getInputExpr(i);
2755
2756 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2757
2758 if (Info.allowsMemory())
2759 ReadNone = false;
2760
2761 if (!Constraints.empty())
2762 Constraints += ',';
2763
2764 // Simplify the input constraint.
2765 std::string InputConstraint(S.getInputConstraint(i));
2766 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2767 &OutputConstraintInfos);
2768
2769 InputConstraint = AddVariableConstraints(
2770 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2771 getTarget(), CGM, S, false /* No EarlyClobber */);
2772
2773 std::string ReplaceConstraint (InputConstraint);
2774 llvm::Value *Arg;
2775 llvm::Type *ArgElemType;
2776 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
2777
2778 // If this input argument is tied to a larger output result, extend the
2779 // input to be the same size as the output. The LLVM backend wants to see
2780 // the input and output of a matching constraint be the same size. Note
2781 // that GCC does not define what the top bits are here. We use zext because
2782 // that is usually cheaper, but LLVM IR should really get an anyext someday.
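    // Illustrative sketch (editor's note, not in the source): for
    //   asm("..." : "=r"(long_out) : "0"(int_in));
    // on a 64-bit target, the i32 input is zero-extended below to i64 so both
    // sides of the tied constraint have the same type.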
2783 if (Info.hasTiedOperand()) {
2784 unsigned Output = Info.getTiedOperand();
2785 QualType OutputType = S.getOutputExpr(Output)->getType();
2786 QualType InputTy = InputExpr->getType();
2787
2788 if (getContext().getTypeSize(OutputType) >
2789 getContext().getTypeSize(InputTy)) {
2790 // Use ptrtoint as appropriate so that we can do our extension.
2791 if (isa<llvm::PointerType>(Arg->getType()))
2792 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2793 llvm::Type *OutputTy = ConvertType(OutputType);
2794 if (isa<llvm::IntegerType>(OutputTy))
2795 Arg = Builder.CreateZExt(Arg, OutputTy);
2796 else if (isa<llvm::PointerType>(OutputTy))
2797 Arg = Builder.CreateZExt(Arg, IntPtrTy);
2798 else if (OutputTy->isFloatingPointTy())
2799 Arg = Builder.CreateFPExt(Arg, OutputTy);
2800 }
2801 // Deal with the tied operands' constraint code in adjustInlineAsmType.
2802 ReplaceConstraint = OutputConstraints[Output];
2803 }
2804 if (llvm::Type* AdjTy =
2805 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2806 Arg->getType()))
2807 Arg = Builder.CreateBitCast(Arg, AdjTy);
2808 else
2809 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2810 << InputExpr->getType() << InputConstraint;
2811
2812 // Update largest vector width for any vector types.
2813 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2814 LargestVectorWidth =
2815 std::max((uint64_t)LargestVectorWidth,
2816 VT->getPrimitiveSizeInBits().getKnownMinValue());
2817
2818 ArgTypes.push_back(Arg->getType());
2819 ArgElemTypes.push_back(ArgElemType);
2820 Args.push_back(Arg);
2821 Constraints += InputConstraint;
2822 }
2823
2824 // Append the "input" part of inout constraints.
2825 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2826 ArgTypes.push_back(InOutArgTypes[i]);
2827 ArgElemTypes.push_back(InOutArgElemTypes[i]);
2828 Args.push_back(InOutArgs[i]);
2829 }
2830 Constraints += InOutConstraints;
2831
2832 // Labels
2833   SmallVector<llvm::BasicBlock *, 16> Transfer;
2834   llvm::BasicBlock *Fallthrough = nullptr;
2835 bool IsGCCAsmGoto = false;
2836 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2837 IsGCCAsmGoto = GS->isAsmGoto();
2838 if (IsGCCAsmGoto) {
2839 for (const auto *E : GS->labels()) {
2840 JumpDest Dest = getJumpDestForLabel(E->getLabel());
2841 Transfer.push_back(Dest.getBlock());
2842 if (!Constraints.empty())
2843 Constraints += ',';
2844 Constraints += "!i";
2845 }
2846 Fallthrough = createBasicBlock("asm.fallthrough");
2847 }
2848 }
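  // Illustrative usage (editor's note, not in the source): for
  //   asm goto("..." : : : : err);
  // the label contributes an "!i" constraint and a transfer block above, and
  // the statement is later emitted as a callbr whose indirect destinations
  // include "err", with asm.fallthrough as the normal successor.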
2849
2850 bool HasUnwindClobber = false;
2851
2852 // Clobbers
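  // For example (editor's note, not in the source): clobbers "cc" and
  // "memory" are appended to the constraint string below as "~{cc},~{memory}".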
2853 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2854 StringRef Clobber = S.getClobber(i);
2855
2856 if (Clobber == "memory")
2857 ReadOnly = ReadNone = false;
2858 else if (Clobber == "unwind") {
2859 HasUnwindClobber = true;
2860 continue;
2861 } else if (Clobber != "cc") {
2862 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2863 if (CGM.getCodeGenOpts().StackClashProtector &&
2864 getTarget().isSPRegName(Clobber)) {
2865 CGM.getDiags().Report(S.getAsmLoc(),
2866 diag::warn_stack_clash_protection_inline_asm);
2867 }
2868 }
2869
2870 if (isa<MSAsmStmt>(&S)) {
2871 if (Clobber == "eax" || Clobber == "edx") {
2872 if (Constraints.find("=&A") != std::string::npos)
2873 continue;
2874 std::string::size_type position1 =
2875 Constraints.find("={" + Clobber.str() + "}");
2876 if (position1 != std::string::npos) {
2877 Constraints.insert(position1 + 1, "&");
2878 continue;
2879 }
2880 std::string::size_type position2 = Constraints.find("=A");
2881 if (position2 != std::string::npos) {
2882 Constraints.insert(position2 + 1, "&");
2883 continue;
2884 }
2885 }
2886 }
2887 if (!Constraints.empty())
2888 Constraints += ',';
2889
2890 Constraints += "~{";
2891 Constraints += Clobber;
2892 Constraints += '}';
2893 }
2894
2895 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
2896 "unwind clobber can't be used with asm goto");
2897
2898 // Add machine specific clobbers
2899 std::string_view MachineClobbers = getTarget().getClobbers();
2900 if (!MachineClobbers.empty()) {
2901 if (!Constraints.empty())
2902 Constraints += ',';
2903 Constraints += MachineClobbers;
2904 }
2905
2906 llvm::Type *ResultType;
2907 if (ResultRegTypes.empty())
2908 ResultType = VoidTy;
2909 else if (ResultRegTypes.size() == 1)
2910 ResultType = ResultRegTypes[0];
2911 else
2912 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
2913
2914 llvm::FunctionType *FTy =
2915 llvm::FunctionType::get(ResultType, ArgTypes, false);
2916
2917 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
2918
2919 llvm::InlineAsm::AsmDialect GnuAsmDialect =
2920 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
2921 ? llvm::InlineAsm::AD_ATT
2922 : llvm::InlineAsm::AD_Intel;
2923 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
2924 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
2925
2926 llvm::InlineAsm *IA = llvm::InlineAsm::get(
2927 FTy, AsmString, Constraints, HasSideEffect,
2928 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
2929 std::vector<llvm::Value*> RegResults;
2930 llvm::CallBrInst *CBR;
2931 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
2932 CBRRegResults;
2933 if (IsGCCAsmGoto) {
2934 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
2935 EmitBlock(Fallthrough);
2936 UpdateAsmCallInst(*CBR, HasSideEffect, false, ReadOnly, ReadNone,
2937 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2938 *this, RegResults);
2939 // Because we are emitting code top to bottom, we don't have enough
2940 // information at this point to know precisely whether we have a critical
2941 // edge. If we have outputs, split all indirect destinations.
2942 if (!RegResults.empty()) {
2943 unsigned i = 0;
2944 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
2945 llvm::Twine SynthName = Dest->getName() + ".split";
2946 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
2947 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
2948 Builder.SetInsertPoint(SynthBB);
2949
2950 if (ResultRegTypes.size() == 1) {
2951 CBRRegResults[SynthBB].push_back(CBR);
2952 } else {
2953 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
2954 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
2955 CBRRegResults[SynthBB].push_back(Tmp);
2956 }
2957 }
2958
2959 EmitBranch(Dest);
2960 EmitBlock(SynthBB);
2961 CBR->setIndirectDest(i++, SynthBB);
2962 }
2963 }
2964 } else if (HasUnwindClobber) {
2965 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
2966 UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
2967 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2968 *this, RegResults);
2969 } else {
2970 llvm::CallInst *Result =
2971 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
2972 UpdateAsmCallInst(*Result, HasSideEffect, false, ReadOnly, ReadNone,
2973 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2974 *this, RegResults);
2975 }
2976
2977 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
2978 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
2979 ResultRegIsFlagReg);
2980
2981 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
2982 // different insertion point; one for each indirect destination and with
2983 // CBRRegResults rather than RegResults.
2984 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
2985 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
2986 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
2987 Builder.SetInsertPoint(Succ, --(Succ->end()));
2988 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
2989 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
2990 ResultTypeRequiresCast, ResultRegIsFlagReg);
2991 }
2992 }
2993}
2994
2995 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2996   const RecordDecl *RD = S.getCapturedRecordDecl();
2997 QualType RecordTy = getContext().getRecordType(RD);
2998
2999 // Initialize the captured struct.
3000 LValue SlotLV =
3001 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
3002
3003 RecordDecl::field_iterator CurField = RD->field_begin();
3004 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
3005 E = S.capture_init_end();
3006 I != E; ++I, ++CurField) {
3007 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
3008 if (CurField->hasCapturedVLAType()) {
3009 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
3010 } else {
3011 EmitInitializerForField(*CurField, LV, *I);
3012 }
3013 }
3014
3015 return SlotLV;
3016}
3017
3018/// Generate an outlined function for the body of a CapturedStmt, store any
3019/// captured variables into the captured struct, and call the outlined function.
3020llvm::Function *
3021 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
3022   LValue CapStruct = InitCapturedStruct(S);
3023
3024 // Emit the CapturedDecl
3025 CodeGenFunction CGF(CGM, true);
3026 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3027 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3028 delete CGF.CapturedStmtInfo;
3029
3030 // Emit call to the helper function.
3031 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
3032
3033 return F;
3034}
3035
3036 Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
3037   LValue CapStruct = InitCapturedStruct(S);
3038 return CapStruct.getAddress(*this);
3039}
3040
3041/// Creates the outlined function for a CapturedStmt.
3042llvm::Function *
3043 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
3044   assert(CapturedStmtInfo &&
3045 "CapturedStmtInfo should be set when generating the captured function");
3046 const CapturedDecl *CD = S.getCapturedDecl();
3047 const RecordDecl *RD = S.getCapturedRecordDecl();
3048 SourceLocation Loc = S.getBeginLoc();
3049 assert(CD->hasBody() && "missing CapturedDecl body");
3050
3051 // Build the argument list.
3052 ASTContext &Ctx = CGM.getContext();
3053 FunctionArgList Args;
3054 Args.append(CD->param_begin(), CD->param_end());
3055
3056 // Create the function declaration.
3057 const CGFunctionInfo &FuncInfo =
3058       CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
3059   llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
3060
3061 llvm::Function *F =
3062 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3063                            CapturedStmtInfo->getHelperName(), &CGM.getModule());
3064   CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
3065 if (CD->isNothrow())
3066 F->addFnAttr(llvm::Attribute::NoUnwind);
3067
3068 // Generate the function.
3069 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
3070 CD->getBody()->getBeginLoc());
3071 // Set the context parameter in CapturedStmtInfo.
3072 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
3073   CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
3074
3075 // Initialize variable-length arrays.
3076   LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
3077                                            Ctx.getTagDeclType(RD));
3078 for (auto *FD : RD->fields()) {
3079 if (FD->hasCapturedVLAType()) {
3080 auto *ExprArg =
3081 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
3082 .getScalarVal();
3083 auto VAT = FD->getCapturedVLAType();
3084 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3085 }
3086 }
3087
3088 // If 'this' is captured, load it into CXXThisValue.
3089   if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3090     FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3091     LValue ThisLValue = EmitLValueForField(Base, FD);
3092 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3093 }
3094
3095 PGO.assignRegionCounters(GlobalDecl(CD), F);
3096 CapturedStmtInfo->EmitBody(*this, CD->getBody());
3097   FinishFunction(CD->getBodyRBrace());
3098
3099 return F;
3100}
#define V(N, I)
Definition: ASTContext.h:3259
#define SM(sm)
Definition: Cuda.cpp:82
Defines enum values for all the target-independent builtin functions.
static std::string AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, const TargetInfo &Target, CodeGenModule &CGM, const AsmStmt &Stmt, const bool EarlyClobber, std::string *GCCReg=nullptr)
AddVariableConstraints - Look at AsmExpr and if it is a variable declared as using a particular regis...
Definition: CGStmt.cpp:2240
static bool FindCaseStatementsForValue(const SwitchStmt &S, const llvm::APSInt &ConstantCondValue, SmallVectorImpl< const Stmt * > &ResultStmts, ASTContext &C, const SwitchCase *&ResultCase)
FindCaseStatementsForValue - Find the case statement being jumped to and then invoke CollectStatement...
Definition: CGStmt.cpp:1911
static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF, const AsmStmt &S)
Definition: CGStmt.cpp:2508
static std::optional< SmallVector< uint64_t, 16 > > getLikelihoodWeights(ArrayRef< Stmt::Likelihood > Likelihoods)
Definition: CGStmt.cpp:1965
static llvm::MDNode * getAsmSrcLocInfo(const StringLiteral *Str, CodeGenFunction &CGF)
getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline asm call instruction.
Definition: CGStmt.cpp:2335
static std::string SimplifyConstraint(const char *Constraint, const TargetInfo &Target, SmallVectorImpl< TargetInfo::ConstraintInfo > *OutCons=nullptr)
Definition: CGStmt.cpp:2187
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect, bool HasUnwindClobber, bool ReadOnly, bool ReadNone, bool NoMerge, const AsmStmt &S, const std::vector< llvm::Type * > &ResultRegTypes, const std::vector< llvm::Type * > &ArgElemTypes, CodeGenFunction &CGF, std::vector< llvm::Value * > &RegResults)
Definition: CGStmt.cpp:2362
static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder, const CGFunctionInfo *CurFnInfo)
If we have 'return f(...);', where both caller and callee are SwiftAsync, codegen it as 'tail call ....
Definition: CGStmt.cpp:1346
static CSFC_Result CollectStatementsForCase(const Stmt *S, const SwitchCase *Case, bool &FoundCase, SmallVectorImpl< const Stmt * > &ResultStmts)
Definition: CGStmt.cpp:1756
static void EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S, const llvm::ArrayRef< llvm::Value * > RegResults, const llvm::ArrayRef< llvm::Type * > ResultRegTypes, const llvm::ArrayRef< llvm::Type * > ResultTruncRegTypes, const llvm::ArrayRef< LValue > ResultRegDests, const llvm::ArrayRef< QualType > ResultRegQualTys, const llvm::BitVector &ResultTypeRequiresCast, const llvm::BitVector &ResultRegIsFlagReg)
Definition: CGStmt.cpp:2423
CSFC_Result
CollectStatementsForCase - Given the body of a 'switch' statement and a constant value that is being ...
Definition: CGStmt.cpp:1755
@ CSFC_Failure
Definition: CGStmt.cpp:1755
@ CSFC_Success
Definition: CGStmt.cpp:1755
@ CSFC_FallThrough
Definition: CGStmt.cpp:1755
llvm::MachO::Target Target
Definition: MachO.h:40
Defines the PrettyStackTraceEntry class, which is used to make crashes give more contextual informati...
Defines the SourceManager interface.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
bool toIntegralConstant(APSInt &Result, QualType SrcTy, const ASTContext &Ctx) const
Try to convert this value to an integral constant.
Definition: APValue.cpp:950
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:182
SourceManager & getSourceManager()
Definition: ASTContext.h:700
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
QualType getRecordType(const RecordDecl *Decl) const
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth,...
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2315
CanQualType VoidTy
Definition: ASTContext.h:1086
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition: Stmt.h:3098
Attr - This represents one attribute.
Definition: Attr.h:42
Represents an attribute applied to a statement.
Definition: Stmt.h:2078
BreakStmt - This represents a break.
Definition: Stmt.h:2978
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition: StmtCXX.h:135
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2819
Expr * getCallee()
Definition: Expr.h:2969
Represents the body of a CapturedStmt, and serves as its DeclContext.
Definition: Decl.h:4651
ImplicitParamDecl * getContextParam() const
Retrieve the parameter containing captured variables.
Definition: Decl.h:4713
bool isNothrow() const
Definition: Decl.cpp:5402
param_iterator param_end() const
Retrieve an iterator one past the last parameter decl.
Definition: Decl.h:4730
param_iterator param_begin() const
Retrieve an iterator pointing to the first parameter decl.
Definition: Decl.h:4728
Stmt * getBody() const override
getBody - If this Decl represents a declaration for a body of code, such as a function or method defi...
Definition: Decl.cpp:5399
This captures a statement into a function.
Definition: Stmt.h:3755
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition: Stmt.h:3919
CapturedRegionKind getCapturedRegionKind() const
Retrieve the captured region kind.
Definition: Stmt.cpp:1422
CaseStmt - Represent a case statement.
Definition: Stmt.h:1799
Stmt * getSubStmt()
Definition: Stmt.h:1916
Expr * getLHS()
Definition: Stmt.h:1886
Expr * getRHS()
Definition: Stmt.h:1898
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
bool hasReducedDebugInfo() const
Check if type and variable info should be emitted.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
An aligned address.
Definition: Address.h:29
static Address invalid()
Definition: Address.h:46
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:62
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:100
llvm::Value * getPointer() const
Definition: Address.h:51
bool isValid() const
Definition: Address.h:47
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:57
An aggregate value slot.
Definition: CGValue.h:512
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:595
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
Definition: CGDebugInfo.h:880
llvm::StoreInst * CreateFlagStore(bool Value, llvm::Value *Addr)
Emit a store to an i1 flag variable.
Definition: CGBuilder.h:125
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:97
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:71
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:55
CGFunctionInfo - Class to encapsulate the information about a function definition.
CallingConv getASTCallingConvention() const
getASTCallingConvention() - Return the AST-specified calling convention.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD)
Gets the OpenMP-specific address of the local variable.
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S)
Emit the captured statement body.
virtual StringRef getHelperName() const
Get the name of the capture helper.
void rescopeLabels()
Change the cleanup scope of the labels in this lexical scope to match the scope of the enclosing cont...
Definition: CGStmt.cpp:680
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitForStmt(const ForStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EmitGotoStmt(const GotoStmt &S)
void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S)
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
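For illustration, the case this check exists for: a statement that follows unreachable code can still be reached if it contains a label, so codegen must start a fresh block for it rather than skip it.

// Hypothetical example. 'later: return 1;' sits after an
// unconditional return, but the label makes it reachable via the
// goto, so a new basic block must be created for it.
int pick(int x) {
  if (x)
    goto later;
  return 0;
later:
  return 1;
}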
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
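For illustration, short-circuit operators are the main clients: rather than materializing the boolean, the condition is lowered into chained conditional branches that jump straight to the true/false destination blocks.

// Hypothetical example. 'a && b' is emitted as a branch on 'a' to an
// intermediate block that then branches on 'b'; no i1 value for the
// '&&' itself is materialized.
int both(int a, int b) {
  if (a && b)
    return 1;
  return 0;
}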
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
void EmitIfStmt(const IfStmt &S)
void EmitOMPOrderedDirective(const OMPOrderedDirective &S)
void EmitOMPTargetDirective(const OMPTargetDirective &S)
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
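For illustration, the flag matters when the named return value optimization applies only on some paths: at runtime it records whether the NRVO variable was constructed directly in the return slot, so its destructor can be skipped exactly when that happened.

// Hypothetical example. 'w' is an NRVO candidate but is returned on
// only one path, so codegen keeps a flag telling the cleanup whether
// 'w' still needs to be destroyed.
#include <string>
std::string pick(bool cond) {
  std::string w = "nrvo";
  if (cond)
    return w;        // constructed directly in the return slot
  return "other";    // 'w' was not NRVO'd here and must be destroyed
}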
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
void EmitOMPAtomicDirective(const OMPAtomicDirective &S)
void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S)
void EmitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &S)
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
static bool hasScalarEvaluationKind(QualType T)
CGCapturedStmtInfo * CapturedStmtInfo
void EmitIndirectGotoStmt(const IndirectGotoStmt &S)
void EmitDecl(const Decl &D)
EmitDecl - Emit a declaration.
void EmitCXXTryStmt(const CXXTryStmt &S)
bool EmitSimpleStmt(const Stmt *S, ArrayRef< const Attr * > Attrs)
EmitSimpleStmt - Try to emit a "simple" statement which does not necessarily require an insertion poi...
void EmitLabel(const LabelDecl *D)
EmitLabel - Emit the block for the given label.
void EmitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &S)
void EmitOMPTaskDirective(const OMPTaskDirective &S)
void EmitOMPScanDirective(const OMPScanDirective &S)
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S)
void EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitCaseStmt(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitDefaultStmt(const DefaultStmt &S, ArrayRef< const Attr * > Attrs)
const LangOptions & getLangOpts() const
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S)
void EmitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &S)
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S)
void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S)
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
void SimplifyForwardingBlocks(llvm::BasicBlock *BB)
SimplifyForwardingBlocks - If the given basic block is only a branch to another basic block,...
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
void EmitOMPParallelDirective(const OMPParallelDirective &S)
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
void EmitBlockAfterUses(llvm::BasicBlock *BB)
EmitBlockAfterUses - Emit the given block somewhere hopefully near its uses, and leave the insertion ...
void EmitContinueStmt(const ContinueStmt &S)
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
llvm::Type * ConvertTypeForMem(QualType T)
void EmitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &S)
LValue MakeAddrLValueWithoutTBAA(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitOMPForSimdDirective(const OMPForSimdDirective &S)
JumpDest ReturnBlock
ReturnBlock - Unified return block.
bool checkIfLoopMustProgress(bool HasConstantCond)
Returns true if a loop must make progress, which means the mustprogress attribute can be added.
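A sketch of the distinction this helper draws (C++ assumed, where the forward-progress guarantee applies): a loop whose controlling expression is not a constant may be marked mustprogress, letting the optimizer assume it eventually terminates; a constant-true condition is the case the HasConstantCond parameter singles out.

// Hypothetical example. The condition is not constant, so in C++ the
// loop is emitted with the mustprogress attribute.
void countDown(int n) {
  while (n > 0)
    --n;
}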
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
void EmitOMPFlushDirective(const OMPFlushDirective &S)
void EmitSEHLeaveStmt(const SEHLeaveStmt &S)
void EmitAttributedStmt(const AttributedStmt &S)
void EmitOMPCancelDirective(const OMPCancelDirective &S)
void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S)
void EmitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &S)
void EmitCaseStmtRange(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
const TargetInfo & getTarget() const
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S)
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitLabelStmt(const LabelStmt &S)
void EmitOMPDepobjDirective(const OMPDepobjDirective &S)
const Expr * RetExpr
If a return statement is being visited, this holds the return statement's result expression.
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init)
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
void EmitOMPSingleDirective(const OMPSingleDirective &S)
void EmitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &S)
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D)
Emit simple code for OpenMP directives in Simd-only mode.
void EmitOMPDistributeDirective(const OMPDistributeDirective &S)
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
void EmitOMPParallelForDirective(const OMPParallelForDirective &S)
void EmitOMPTeamsDirective(const OMPTeamsDirective &S)
uint64_t getCurrentProfileCount()
Get the profiler's current count.
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitOMPUnrollDirective(const OMPUnrollDirective &S)
void EmitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &S)
void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void EmitSwitchStmt(const SwitchStmt &S)
void EmitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &S)
void EmitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &S)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
llvm::Function * GenerateCapturedStmtFunction(const CapturedStmt &S)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
void EmitSEHTryStmt(const SEHTryStmt &S)
void EmitOMPInteropDirective(const OMPInteropDirective &S)
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
void EmitDeclStmt(const DeclStmt &S)
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S)
void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S)
void EmitCoroutineBody(const CoroutineBodyStmt &S)
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitDoStmt(const DoStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S)
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
void EmitCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
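For illustration, conditions that fold at compile time let codegen drop the untaken branch entirely:

// Hypothetical example. 'sizeof(void *) == 8' folds to a constant,
// so only one of the two returns is emitted and no compare or branch
// appears in the IR.
int pointerBytes(void) {
  if (sizeof(void *) == 8)
    return 8;
  return 4;
}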
void EmitOMPTileDirective(const OMPTileDirective &S)
JumpDest getJumpDestForLabel(const LabelDecl *S)
getJumpDestForLabel - Return the JumpDest (wrapping the LLVM basic block) that the specified label maps to.
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S)
llvm::Function * EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K)
llvm::Type * ConvertType(QualType T)
void EmitOMPSectionsDirective(const OMPSectionsDirective &S)
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S)
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S)
void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S)
void EmitOMPSimdDirective(const OMPSimdDirective &S)
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
void EmitOMPCriticalDirective(const OMPCriticalDirective &S)
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S)
void EmitOMPForDirective(const OMPForDirective &S)
void EmitOMPMetaDirective(const OMPMetaDirective &S)
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S)
LValue InitCapturedStruct(const CapturedStmt &S)
void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S)
void EmitReturnStmt(const ReturnStmt &S)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
void EmitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &S)
void EmitOMPMasterDirective(const OMPMasterDirective &S)
void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S)
void EmitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &S)
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
void EmitOMPMaskedDirective(const OMPMaskedDirective &S)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
static bool hasAggregateEvaluationKind(QualType T)
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S)
void EmitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &S)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
void EmitBreakStmt(const BreakStmt &S)
Address GenerateCapturedStmtArgument(const CapturedStmt &S)
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
const CGFunctionInfo * CurFnInfo
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const
isObviouslyBranchWithoutCleanups - Return true if a branch to the specified destination obviously has...
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitCoreturnStmt(const CoreturnStmt &S)
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S)
void EmitOMPErrorDirective(const OMPErrorDirective &S)
void EmitOMPSectionDirective(const OMPSectionDirective &S)
void EmitOMPBarrierDirective(const OMPBarrierDirective &S)
void EmitStopPoint(const Stmt *S)
EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S)
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs=std::nullopt)
EmitStmt - Emit the code for the statement.
void EmitWhileStmt(const WhileStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S)
llvm::LLVMContext & getLLVMContext()
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S)
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void ResolveBranchFixups(llvm::BasicBlock *Target)
void EmitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &S)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S)
void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S)
Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S)
llvm::BasicBlock * GetIndirectGotoBlock()
void EmitAsmStmt(const AsmStmt &S)
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S)
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
This class organizes the cross-function state that is used while generating LLVM code.
void SetInternalFunctionAttributes(GlobalDecl GD, llvm::Function *F, const CGFunctionInfo &FI)
Set the attributes on the LLVM function for the given decl and function info.
llvm::Module & getModule() const
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
const llvm::DataLayout & getDataLayout() const
void Error(SourceLocation loc, StringRef error)
Emit a general error that something can't be done.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
SanitizerMetadata * getSanitizerMetadata()
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys=std::nullopt)
void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn)
Assign counters to regions and configure them for PGO of a given function.
void setCurrentStmt(const Stmt *S)
If the execution count for the current statement is known, record that as the current count.
Definition: CodeGenPGO.h:76
bool haveRegionCounts() const
Whether or not we have PGO region data for the current function.
Definition: CodeGenPGO.h:53
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given CGFunctionInfo.
Definition: CGCall.cpp:1625
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:674
A saved depth on the scope stack.
Definition: EHScopeStack.h:101
bool encloses(stable_iterator I) const
Returns true if this scope encloses I.
Definition: EHScopeStack.h:118
stable_iterator getInnermostNormalCleanup() const
Returns the innermost normal cleanup on the stack, or stable_end() if there are no normal cleanups.
Definition: EHScopeStack.h:370
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:393
bool empty() const
Determines whether the exception-scopes stack is empty.
Definition: EHScopeStack.h:359
bool hasNormalCleanups() const
Determines whether there are any normal cleanups on the stack.
Definition: EHScopeStack.h:364
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
Definition: EHScopeStack.h:398
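A minimal model (not clang's implementation) of the stable_iterator idea: identify a position by its depth from the bottom of the stack rather than by pointer, so the handle stays valid while cleanups are pushed and popped above it.

#include <cstddef>
#include <string>
#include <vector>

struct CleanupStack {
  std::vector<std::string> cleanups;
  using stable_iterator = std::size_t;            // depth from the bottom

  stable_iterator stable_begin() const { return cleanups.size(); }
  static stable_iterator stable_end() { return 0; }

  void push(std::string s) { cleanups.push_back(std::move(s)); }
  void popTo(stable_iterator depth) { cleanups.resize(depth); }
};

// Usage: the saved depth remains valid no matter how many entries are
// pushed above it before the scope is unwound.
void demo() {
  CleanupStack stack;
  stack.push("outer");
  CleanupStack::stable_iterator depth = stack.stable_begin();
  stack.push("inner1");
  stack.push("inner2");
  stack.popTo(depth);   // both inner cleanups popped; "outer" survives
}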
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:352
LValue - This represents an lvalue reference.
Definition: CGValue.h:171
Address getAddress(CodeGenFunction &CGF) const
Definition: CGValue.h:350
llvm::Value * getPointer(CodeGenFunction &CGF) const
Definition: CGValue.h:346
void pop()
End the current loop.
Definition: CGLoopInfo.cpp:823
void push(llvm::BasicBlock *Header, const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc)
Begin a new structured loop.
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:39
bool isScalar() const
Definition: CGValue.h:54
static RValue get(llvm::Value *V)
Definition: CGValue.h:89
bool isAggregate() const
Definition: CGValue.h:56
Address getAggregateAddress() const
getAggregateAddress() - Return the address of the aggregate.
Definition: CGValue.h:73
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:61
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:68
void disableSanitizerForGlobal(llvm::GlobalVariable *GV)
virtual void addReturnRegisterOutputs(CodeGen::CodeGenFunction &CGF, CodeGen::LValue ReturnValue, std::string &Constraints, std::vector< llvm::Type * > &ResultRegTypes, std::vector< llvm::Type * > &ResultTruncRegTypes, std::vector< CodeGen::LValue > &ResultRegDests, std::string &AsmString, unsigned NumOutputs) const
Adds constraints and types for result registers.
Definition: TargetInfo.h:179
virtual bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, llvm::Type *Ty) const
Target hook to decide whether an inline asm operand can be passed by value.
Definition: TargetInfo.h:173
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1604
Stmt *const * const_body_iterator
Definition: Stmt.h:1671
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1072
ContinueStmt - This represents a continue.
Definition: Stmt.h:2948
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Definition: DeclBase.h:2352
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1260
ValueDecl * getDecl()
Definition: Expr.h:1328
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition: Stmt.h:1495
SourceLocation getBodyRBrace() const
getBodyRBrace - Gets the right brace of the body, if a body exists.
Definition: DeclBase.cpp:1029
virtual bool hasBody() const
Returns true if this Decl represents a declaration for a body of code, such as a function or method d...
Definition: DeclBase.h:1093
SourceLocation getLocation() const
Definition: DeclBase.h:444
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1547
DoStmt - This represents a 'do/while' stmt.
Definition: Stmt.h:2723
This represents one expression.
Definition: Expr.h:110
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition: Expr.cpp:3072
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx, SmallVectorImpl< PartialDiagnosticAt > *Diag=nullptr) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3041
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition: Expr.cpp:3542
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:277
QualType getType() const
Definition: Expr.h:142
Represents a member of a struct/union/class.
Definition: Decl.h:3025
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition: Stmt.h:2779
const Expr * getSubExpr() const
Definition: Expr.h:1052
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition: Type.h:3799
CallingConv getCallConv() const
Definition: Type.h:4127
This represents a GCC inline-assembly statement extension.
Definition: Stmt.h:3257
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
GotoStmt - This represents a direct goto.
Definition: Stmt.h:2860
IfStmt - This represents an if/then/else.
Definition: Stmt.h:2136
IndirectGotoStmt - This represents an indirect goto.
Definition: Stmt.h:2899
Represents the declaration of a label.
Definition: Decl.h:499
LabelStmt * getStmt() const
Definition: Decl.h:523
LabelStmt - Represents a label, which has a substatement.
Definition: Stmt.h:2029
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:418
bool assumeFunctionsAreConvergent() const
Definition: LangOptions.h:599
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition: Decl.h:276
If a crash happens while one of these objects are live, the message is printed out along with the spe...
A (possibly-)qualified type.
Definition: Type.h:737
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:804
QualType getCanonicalType() const
Definition: Type.h:6954
The collection of all-type qualifiers we support.
Definition: Type.h:147
Represents a struct/union/class.
Definition: Decl.h:4133
field_range fields() const
Definition: Decl.h:4339
field_iterator field_begin() const
Definition: Decl.cpp:5035
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition: Stmt.h:3017
Expr * getRetValue()
Definition: Stmt.h:3048
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
This class handles loading and caching of source files into memory.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition: Stmt.h:84
@ NoStmtClass
Definition: Stmt.h:87
StmtClass getStmtClass() const
Definition: Stmt.h:1356
Likelihood
The likelihood of a branch being taken.
Definition: Stmt.h:1299
@ LH_Unlikely
Branch has the [[unlikely]] attribute.
Definition: Stmt.h:1300
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition: Stmt.h:1301
@ LH_Likely
Branch has the [[likely]] attribute.
Definition: Stmt.h:1303
static const Attr * getLikelihoodAttr(const Stmt *S)
Definition: Stmt.cpp:163
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.cpp:338
static Likelihood getLikelihood(ArrayRef< const Attr * > Attrs)
Definition: Stmt.cpp:155
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1773
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.h:1954
SourceLocation getLocationOfByte(unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, const TargetInfo &Target, unsigned *StartToken=nullptr, unsigned *StartTokenByteOffset=nullptr) const
getLocationOfByte - Return a source location that points to the specified byte of this string literal...
Definition: Expr.cpp:1309
StringRef getString() const
Definition: Expr.h:1850
const SwitchCase * getNextSwitchCase() const
Definition: Stmt.h:1772
SwitchStmt - This represents a 'switch' stmt.
Definition: Stmt.h:2386
Exposes information about the current target.
Definition: TargetInfo.h:213
bool validateInputConstraint(MutableArrayRef< ConstraintInfo > OutputConstraints, ConstraintInfo &info) const
Definition: TargetInfo.cpp:811
StringRef getNormalizedGCCRegisterName(StringRef Name, bool ReturnCanonical=false) const
Returns the "normalized" GCC register name.
Definition: TargetInfo.cpp:673
bool validateOutputConstraint(ConstraintInfo &Info) const
Definition: TargetInfo.cpp:714
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
bool isVoidType() const
Definition: Type.h:7443
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:7724
bool isReferenceType() const
Definition: Type.h:7166
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:651
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition: Decl.h:706
Represents a variable declaration or definition.
Definition: Decl.h:918
WhileStmt - This represents a 'while' stmt.
Definition: Stmt.h:2582
Defines the clang::TargetInfo interface.
CapturedRegionKind
The different kinds of captured statement.
Definition: CapturedStmt.h:16
@ SC_Register
Definition: Specifiers.h:254
@ CC_SwiftAsync
Definition: Specifiers.h:291
cl::opt< bool > EnableSingleByteCoverage
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void setScopeDepth(EHScopeStack::stable_iterator depth)
EHScopeStack::stable_iterator getScopeDepth() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:642
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:644
bool hasMatchingInput() const
Return true if this output operand has a matching (tied) input operand.
Definition: TargetInfo.h:1089
bool hasTiedOperand() const
Return true if this input operand is a matching constraint that ties it to an output operand.
Definition: TargetInfo.h:1096
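To close with a concrete case of a tied operand (hypothetical example; x86 assumed): the "0" input constraint ties the input to output operand 0, so constraint validation records hasTiedOperand() on the input and hasMatchingInput() on the output, forcing both into the same register.

// Hypothetical example. Input and output share one register because
// the input constraint "0" names output operand 0.
int increment(int x) {
  asm("addl $1, %0" : "=r"(x) : "0"(x));
  return x;
}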