//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set. To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors. We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();". Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function. This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:      EmitIfStmt(cast<IfStmt>(*S));                break;
  case Stmt::WhileStmtClass:   EmitWhileStmt(cast<WhileStmt>(*S), Attrs);   break;
  case Stmt::DoStmtClass:      EmitDoStmt(cast<DoStmt>(*S), Attrs);         break;
  case Stmt::ForStmtClass:     EmitForStmt(cast<ForStmt>(*S), Attrs);       break;

  case Stmt::ReturnStmtClass:  EmitReturnStmt(cast<ReturnStmt>(*S));        break;

  case Stmt::SwitchStmtClass:  EmitSwitchStmt(cast<SwitchStmt>(*S));        break;
  case Stmt::GCCAsmStmtClass:  // Intentional fall-through.
  case Stmt::MSAsmStmtClass:   EmitAsmStmt(cast<AsmStmt>(*S));              break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPErrorDirectiveClass:
    EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
    llvm_unreachable("masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable("masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
    llvm_unreachable("parallel masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable(
        "parallel masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
    break;
  case Stmt::OMPDispatchDirectiveClass:
    CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
    break;
  case Stmt::OMPScopeDirectiveClass:
    llvm_unreachable("scope not supported with FE outlining");
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
    EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
    EmitOMPTargetTeamsGenericLoopDirective(
        cast<OMPTargetTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelGenericLoopDirectiveClass:
    EmitOMPParallelGenericLoopDirective(
        cast<OMPParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
    EmitOMPTargetParallelGenericLoopDirective(
        cast<OMPTargetParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedDirectiveClass:
    EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
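///
/// For example, in the GNU statement expression
///   int y = ({ int x = f(); x + 1; });
/// the last sub-statement 'x + 1' is the result captured when GetLast is true.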
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here. They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression. Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
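      // For example, in
      //   ({ /* ... */ out: x; })
      // the trailing labeled statement yields the value of 'x', so the label
      // 'out' is emitted first and 'x' is then evaluated as the result.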
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr. Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->insert(std::next(CurBB->getIterator()), BB);
  else
    CurFn->insert(CurFn->end(), BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block. If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->insert(std::next(insn->getParent()->getIterator()), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->insert(CurFn->end(), block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups. Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  bool noinline = false;
  bool alwaysinline = false;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    switch (A->getKind()) {
    default:
      break;
    case attr::NoMerge:
      nomerge = true;
      break;
    case attr::NoInline:
      noinline = true;
      break;
    case attr::AlwaysInline:
      alwaysinline = true;
      break;
    case attr::MustTail:
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
      break;
    }
  }
  SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
  SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
  SaveAndRestore save_musttail(MustTailCall, musttail);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}

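// For illustration, the GNU "labels as values" extension that reaches this
// path looks roughly like (identifiers are illustrative):
//   void *targets[] = { &&l1, &&l2 };
//   goto *targets[i];
// All such gotos share one dispatch block (see GetIndirectGotoBlock) whose
// leading PHI node merges the target address computed at each branch site.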
void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // The else branch of a consteval if statement is always the only branch that
  // can be runtime evaluated.
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0. The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  // Also, differentiate between disabled PGO and a never executed branch with
  // PGO. Assuming PGO is in use:
  // - we want to ignore the [[likely]] attribute if the branch is never
  //   executed,
  // - assuming the profile is poor, preserving the attribute may still be
  //   beneficial.
  // As an approximation, preserve the attribute only if both the branch and the
  // parent context were not executed.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t ThenCount = getProfileCount(S.getThen());
  if (!ThenCount && !getCurrentProfileCount() &&
      CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), S.getElse());
  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH);

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
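  //
  // For example, in
  //   while (T t = next()) { ... }
  // 't' is destroyed and re-created each time the condition is evaluated.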
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header. C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C != nullptr;
  bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body. We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C;
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  Expr::EvalResult Result;
  bool CondIsConstInt =
      !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block. Otherwise, if there is no condition variable, we can
  // form the continue block now. If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0. The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant. Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// If we have 'return f(...);', where both caller and callee are SwiftAsync,
/// codegen it as 'tail call ...; ret void;'.
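///
/// For example (a sketch; assumes the Swift async calling convention on both
/// declarations):
///   __attribute__((swiftasynccall)) void callee(void);
///   __attribute__((swiftasynccall)) void caller(void) { return callee(); }
/// Here the call to callee() is emitted as a musttail call followed
/// immediately by 'ret void'.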
static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder,
                                     const CGFunctionInfo *CurFnInfo) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return;
    }
  } else {
    return;
  }
  if (calleeType->getCallConv() == CallingConv::CC_SwiftAsync &&
      (CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync)) {
    auto CI = cast<llvm::CallInst>(&Builder.GetInsertBlock()->back());
    CI->setTailCallKind(llvm::CallInst::TCK_MustTail);
    Builder.CreateRetVoid();
    Builder.ClearInsertionPoint();
  }
}

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
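///
/// For example, GCC accepts in C:
///   void g(void);
///   void f(void) { return g(); }  // return with a void-typed operand
/// so the emission below cannot assume the operand's presence matches the
/// declared return type.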
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(SLocPtr, ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();
  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
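    //
    // For example, for
    //   S f() { S s; /* ... */ return s; }
    // 's' already lives in the return slot, so no copy is emitted here; only
    // the NRVO flag (when present) is updated.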
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
      if (auto *CE = dyn_cast<CallExpr>(RV))
        makeTailCallIfSwiftAsync(CE, Builder, CurFnInfo);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

/// EmitCaseStmtRange - If case statement range is not too big then
/// add multiple cases to switch instruction, one for each value within
/// the range. If range is too big then emit "if" condition check.
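///
/// For example, the GNU range extension "case 1 ... 4:" becomes four separate
/// switch cases, while a wide range like "case 1 ... 1000:" is lowered to a
/// subtract-and-compare check chained off the switch default.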
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
                                        ArrayRef<const Attr *> Attrs) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      else if (SwitchLikelihood)
        SwitchLikelihood->push_back(LH);

      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->insert(CurFn->end(), CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, i.e., the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  } else if (SwitchLikelihood)
    Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);

  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
                                   ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided. This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement. For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S, Attrs);
    return;
  }

  llvm::ConstantInt *CaseVal =
      Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));

  // Emit debuginfo for the case value if it is an enum value.
  const ConstantExpr *CE;
  if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
    CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
  else
    CE = dyn_cast<ConstantExpr>(S.getLHS());
  if (CE) {
    if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
      if (CGDebugInfo *Dbg = getDebugInfo())
        if (CGM.getCodeGenOpts().hasReducedDebugInfo())
          Dbg->EmitGlobalVariable(DE->getDecl(),
              APValue(llvm::APSInt(CaseVal->getValue())));
  }

  if (SwitchLikelihood)
    SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));

  // If the body of the case is just a 'break', try to not emit an empty block.
  // If we're profiling or we're not optimizing, leave the block in for better
  // debug and coverage analysis.
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
    if (isObviouslyBranchWithoutCleanups(Block)) {
      if (SwitchWeights)
        SwitchWeights->push_back(getProfileCount(&S));
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it to
      // the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  if (SwitchWeights)
    SwitchWeights->push_back(getProfileCount(&S));
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful for
  // code where we have many case statements nested together, i.e.:
  //  case 1:
  //    case 2:
  //      case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case which is IR intensive. It also causes
  // deep recursion which can run into stack depth limitations. Handle
  // sequential non-range case statements specially.
  //
  // TODO When the next case has a likelihood attribute the code returns to the
  // recursive algorithm. Maybe improve this case if it becomes common practice
  // to use a lot of attributes.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());

  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == nullptr) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
        Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));

    if (SwitchWeights)
      SwitchWeights->push_back(getProfileCount(NextCase));
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
      CaseDest = createBasicBlock("sw.bb");
      EmitBlockWithFallThrough(CaseDest, CurCase);
    }
    // Since this loop is only executed when the CaseStmt has no attributes
    // use a hard-coded value.
    if (SwitchLikelihood)
      SwitchLikelihood->push_back(Stmt::LH_None);

    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Generate a stop point for debug info if the case statement is
  // followed by a default statement. A fallthrough case before a
  // default case gets its own branch target.
  if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
    EmitStopPoint(CurCase);

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}

void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
                                      ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // default statement can be elided. This situation only happens when we've
  // constant-folded the switch.
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
  assert(DefaultBlock->empty() &&
         "EmitDefaultStmt: Default block already defined?");

  if (SwitchLikelihood)
    SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);

  EmitBlockWithFallThrough(DefaultBlock, &S);

  EmitStmt(S.getSubStmt());
}
1645
1646/// CollectStatementsForCase - Given the body of a 'switch' statement and a
1647/// constant value that is being switched on, see if we can dead code eliminate
1648/// the body of the switch to a simple series of statements to emit. Basically,
1649/// on a switch (5) we want to find these statements:
1650/// case 5:
1651/// printf(...); <--
1652/// ++i; <--
1653/// break;
1654///
1655/// and add them to the ResultStmts vector. If it is unsafe to do this
1656/// transformation (for example, one of the elided statements contains a label
1657/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1658/// should include statements after it (e.g. the printf() line is a substmt of
1659/// the case) then return CSFC_FallThrough. If we handled it and found a break
1660/// statement, then return CSFC_Success.
1661///
1662/// If Case is non-null, then we are looking for the specified case, checking
1663/// that nothing we jump over contains labels. If Case is null, then we found
1664/// the case and are looking for the break.
1665///
1666/// If the recursive walk actually finds our Case, then we set FoundCase to
1667/// true.
1668///
1671 const SwitchCase *Case,
1672 bool &FoundCase,
1673 SmallVectorImpl<const Stmt*> &ResultStmts) {
1674 // If this is a null statement, just succeed.
1675 if (!S)
1676 return Case ? CSFC_Success : CSFC_FallThrough;
1677
1678 // If this is the switchcase (case 4: or default) that we're looking for, then
1679 // we're in business. Just add the substatement.
1680 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1681 if (S == Case) {
1682 FoundCase = true;
1683 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1684 ResultStmts);
1685 }
1686
1687 // Otherwise, this is some other case or default statement, just ignore it.
1688 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1689 ResultStmts);
1690 }
1691
1692 // If we are in the live part of the code and we found our break statement,
1693 // return a success!
1694 if (!Case && isa<BreakStmt>(S))
1695 return CSFC_Success;
1696
1697 // If this is a compound statement, then it might contain the SwitchCase, the
1698 // break, or neither.
1699 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1700 // Handle this as two cases: we might be looking for the SwitchCase (if so
1701 // the skipped statements must be skippable) or we might already have it.
1702 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1703 bool StartedInLiveCode = FoundCase;
1704 unsigned StartSize = ResultStmts.size();
1705
1706 // If we've not found the case yet, scan through looking for it.
1707 if (Case) {
1708 // Keep track of whether we see a skipped declaration. The code could be
1709 // using the declaration even if it is skipped, so we can't optimize out
1710 // the decl if the kept statements might refer to it.
1711 bool HadSkippedDecl = false;
1712
1713 // If we're looking for the case, just see if we can skip each of the
1714 // substatements.
1715 for (; Case && I != E; ++I) {
1716 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1717
1718 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1719 case CSFC_Failure: return CSFC_Failure;
1720 case CSFC_Success:
1721 // A successful result means that either 1) the statement doesn't
1722 // have the case and is skippable, or 2) it does contain the case value
1723 // and also contains the break to exit the switch. In the latter case,
1724 // we just verify the rest of the statements are elidable.
1725 if (FoundCase) {
1726 // If we found the case and skipped declarations, we can't do the
1727 // optimization.
1728 if (HadSkippedDecl)
1729 return CSFC_Failure;
1730
1731 for (++I; I != E; ++I)
1732 if (CodeGenFunction::ContainsLabel(*I, true))
1733 return CSFC_Failure;
1734 return CSFC_Success;
1735 }
1736 break;
1737 case CSFC_FallThrough:
1738 // If we have a fallthrough result, then we must have found the
1739 // case and started to include statements. Consider the rest of the
1740 // statements in the compound statement as candidates for inclusion.
1741 assert(FoundCase && "Didn't find case but returned fallthrough?");
1742 // We recursively found Case, so we're not looking for it anymore.
1743 Case = nullptr;
1744
1745 // If we found the case and skipped declarations, we can't do the
1746 // optimization.
1747 if (HadSkippedDecl)
1748 return CSFC_Failure;
1749 break;
1750 }
1751 }
1752
1753 if (!FoundCase)
1754 return CSFC_Success;
1755
1756 assert(!HadSkippedDecl && "fallthrough after skipping decl");
1757 }
1758
1759 // If we have statements in our range, then we know that the statements are
1760 // live and need to be added to the set of statements we're tracking.
1761 bool AnyDecls = false;
1762 for (; I != E; ++I) {
1763 AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1764
1765 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1766 case CSFC_Failure: return CSFC_Failure;
1767 case CSFC_FallThrough:
1768 // A fallthrough result means that the statement was simple and just
1769 // included in ResultStmts; keep adding statements afterwards.
1770 break;
1771 case CSFC_Success:
1772 // A successful result means that we found the break statement and
1773 // stopped statement inclusion. We just ensure that any leftover stmts
1774 // are skippable and return success ourselves.
1775 for (++I; I != E; ++I)
1776 if (CodeGenFunction::ContainsLabel(*I, true))
1777 return CSFC_Failure;
1778 return CSFC_Success;
1779 }
1780 }
1781
1782 // If we're about to fall out of a scope without hitting a 'break;', we
1783 // can't perform the optimization if there were any decls in that scope
1784 // (we'd lose their end-of-lifetime).
1785 if (AnyDecls) {
1786 // If the entire compound statement was live, there's one more thing we
1787 // can try before giving up: emit the whole thing as a single statement.
1788 // We can do that unless the statement contains a 'break;'.
1789 // FIXME: Such a break must be at the end of a construct within this one.
1790 // We could emit this by just ignoring the BreakStmts entirely.
1791 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1792 ResultStmts.resize(StartSize);
1793 ResultStmts.push_back(S);
1794 } else {
1795 return CSFC_Failure;
1796 }
1797 }
1798
1799 return CSFC_FallThrough;
1800 }
1801
1802 // Okay, this is some other statement that we don't handle explicitly, like a
1803 // for statement or increment etc. If we are skipping over this statement,
1804 // just verify it doesn't have labels, which would make it invalid to elide.
1805 if (Case) {
1806 if (CodeGenFunction::ContainsLabel(S, true))
1807 return CSFC_Failure;
1808 return CSFC_Success;
1809 }
1810
1811 // Otherwise, we want to include this statement. Everything is cool with that
1812 // so long as it doesn't contain a break out of the switch we're in.
1813 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1814
1815 // Otherwise, everything is great. Include the statement and tell the caller
1816 // that we fall through and include the next statement as well.
1817 ResultStmts.push_back(S);
1818 return CSFC_FallThrough;
1819}
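// A shape the collection above folds (illustrative; foo, bar, and i are
// made-up names): invoked with Case pointing at 'case 5', the two marked
// statements land in ResultStmts and the walk returns CSFC_Success.
//
//   switch (5) {
//   case 1: foo(); break;
//   case 5:
//     printf("hit\n");  // collected
//     ++i;              // collected
//     break;            // CSFC_Success
//   default: bar();     // skipped; contains no labels
//   }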
1820
1821/// FindCaseStatementsForValue - Find the case statement being jumped to and
1822/// then invoke CollectStatementsForCase to find the list of statements to emit
1823/// for a switch on constant. See the comment above CollectStatementsForCase
1824/// for more details.
1825 static bool FindCaseStatementsForValue(const SwitchStmt &S,
1826 const llvm::APSInt &ConstantCondValue,
1827 SmallVectorImpl<const Stmt*> &ResultStmts,
1828 ASTContext &C,
1829 const SwitchCase *&ResultCase) {
1830 // First step, find the switch case that is being branched to. We can do this
1831 // efficiently by scanning the SwitchCase list.
1832 const SwitchCase *Case = S.getSwitchCaseList();
1833 const DefaultStmt *DefaultCase = nullptr;
1834
1835 for (; Case; Case = Case->getNextSwitchCase()) {
1836 // It's either a default or case. Just remember the default statement in
1837 // case we're not jumping to any numbered cases.
1838 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1839 DefaultCase = DS;
1840 continue;
1841 }
1842
1843 // Check to see if this case is the one we're looking for.
1844 const CaseStmt *CS = cast<CaseStmt>(Case);
1845 // Don't handle case ranges yet.
1846 if (CS->getRHS()) return false;
1847
1848 // If we found our case, remember it as 'case'.
1849 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1850 break;
1851 }
1852
1853 // If we didn't find a matching case, we use a default if it exists, or we
1854 // elide the whole switch body!
1855 if (!Case) {
1856 // It is safe to elide the body of the switch if it doesn't contain labels
1857 // etc. If it is safe, return successfully with an empty ResultStmts list.
1858 if (!DefaultCase)
1859 return !CodeGenFunction::ContainsLabel(&S);
1860 Case = DefaultCase;
1861 }
1862
1863 // Ok, we know which case is being jumped to, try to collect all the
1864 // statements that follow it. This can fail for a variety of reasons. Also,
1865 // check to see that the recursive walk actually found our case statement.
1866 // Insane cases like this can fail to find it in the recursive walk since we
1867 // don't handle every stmt kind:
1868 // switch (4) {
1869 // while (1) {
1870 // case 4: ...
1871 bool FoundCase = false;
1872 ResultCase = Case;
1873 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1874 ResultStmts) != CSFC_Failure &&
1875 FoundCase;
1876}
1877
1878static std::optional<SmallVector<uint64_t, 16>>
1879 getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
1880 // Are there enough branches to weight them?
1881 if (Likelihoods.size() <= 1)
1882 return std::nullopt;
1883
1884 uint64_t NumUnlikely = 0;
1885 uint64_t NumNone = 0;
1886 uint64_t NumLikely = 0;
1887 for (const auto LH : Likelihoods) {
1888 switch (LH) {
1889 case Stmt::LH_Unlikely:
1890 ++NumUnlikely;
1891 break;
1892 case Stmt::LH_None:
1893 ++NumNone;
1894 break;
1895 case Stmt::LH_Likely:
1896 ++NumLikely;
1897 break;
1898 }
1899 }
1900
1901 // Is there a likelihood attribute used?
1902 if (NumUnlikely == 0 && NumLikely == 0)
1903 return std::nullopt;
1904
1905 // When multiple cases share the same code they can be combined during
1906 // optimization. In that case the weights of the branch will be the sum of
1907 // the individual weights. Make sure the combined sum of all neutral cases
1908 // doesn't exceed the value of a single likely attribute.
1909 // The additions both avoid divisions by 0 and make sure the weights of None
1910 // don't exceed the weight of Likely.
1911 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
1912 const uint64_t None = Likely / (NumNone + 1);
1913 const uint64_t Unlikely = 0;
1914
1916 Result.reserve(Likelihoods.size());
1917 for (const auto LH : Likelihoods) {
1918 switch (LH) {
1919 case Stmt::LH_Unlikely:
1920 Result.push_back(Unlikely);
1921 break;
1922 case Stmt::LH_None:
1923 Result.push_back(None);
1924 break;
1925 case Stmt::LH_Likely:
1926 Result.push_back(Likely);
1927 break;
1928 }
1929 }
1930
1931 return Result;
1932}
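// Worked example for the weights above (the numbers follow directly from the
// formulas): for case likelihoods {LH_Likely, LH_None, LH_None}, NumLikely is
// 1 and NumNone is 2, so
//
//   Likely = INT32_MAX / 3 == 715827882
//   None   = Likely / 3    == 238609294
//
// and the result is {715827882, 238609294, 238609294}: even combined, the two
// neutral cases (477218588) stay below the single likely case.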
1933
1934 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1935 // Handle nested switch statements.
1936 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1937 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
1938 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
1939 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1940
1941 // See if we can constant fold the condition of the switch and therefore only
1942 // emit the live case statement (if any) of the switch.
1943 llvm::APSInt ConstantCondValue;
1944 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1945 SmallVector<const Stmt *, 4> CaseStmts;
1946 const SwitchCase *Case = nullptr;
1947 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1948 getContext(), Case)) {
1949 if (Case)
1950 incrementProfileCounter(Case);
1951 RunCleanupsScope ExecutedScope(*this);
1952
1953 if (S.getInit())
1954 EmitStmt(S.getInit());
1955
1956 // Emit the condition variable if needed inside the entire cleanup scope
1957 // used by this special case for constant folded switches.
1958 if (S.getConditionVariable())
1959 EmitDecl(*S.getConditionVariable());
1960
1961 // At this point, we are no longer "within" a switch instance, so
1962 // we can temporarily enforce this to ensure that any embedded case
1963 // statements are not emitted.
1964 SwitchInsn = nullptr;
1965
1966 // Okay, we can dead code eliminate everything except this case. Emit the
1967 // specified series of statements and we're good.
1968 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1969 EmitStmt(CaseStmts[i]);
1970 incrementProfileCounter(&S);
1971
1972 // Now we want to restore the saved switch instance so that nested
1973 // switches continue to function properly
1974 SwitchInsn = SavedSwitchInsn;
1975
1976 return;
1977 }
1978 }
1979
1980 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1981
1982 RunCleanupsScope ConditionScope(*this);
1983
1984 if (S.getInit())
1985 EmitStmt(S.getInit());
1986
1987 if (S.getConditionVariable())
1988 EmitDecl(*S.getConditionVariable());
1989 llvm::Value *CondV = EmitScalarExpr(S.getCond());
1990
1991 // Create basic block to hold stuff that comes after switch
1992 // statement. We also need to create a default block now so that
1993 // explicit case range tests can have a place to jump to on
1994 // failure.
1995 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1996 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1997 if (PGO.haveRegionCounts()) {
1998 // Walk the SwitchCase list to find how many there are.
1999 uint64_t DefaultCount = 0;
2000 unsigned NumCases = 0;
2001 for (const SwitchCase *Case = S.getSwitchCaseList();
2002 Case;
2003 Case = Case->getNextSwitchCase()) {
2004 if (isa<DefaultStmt>(Case))
2005 DefaultCount = getProfileCount(Case);
2006 NumCases += 1;
2007 }
2008 SwitchWeights = new SmallVector<uint64_t, 16>();
2009 SwitchWeights->reserve(NumCases);
2010 // The default needs to be first. We store the edge count, so we already
2011 // know the right weight.
2012 SwitchWeights->push_back(DefaultCount);
2013 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2014 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2015 // Initialize the default case.
2016 SwitchLikelihood->push_back(Stmt::LH_None);
2017 }
2018
2019 CaseRangeBlock = DefaultBlock;
2020
2021 // Clear the insertion point to indicate we are in unreachable code.
2022 Builder.ClearInsertionPoint();
2023
2024 // All break statements jump to SwitchExit. If BreakContinueStack is
2025 // non-empty, then reuse the last ContinueBlock.
2026 JumpDest OuterContinue;
2027 if (!BreakContinueStack.empty())
2028 OuterContinue = BreakContinueStack.back().ContinueBlock;
2029
2030 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
2031
2032 // Emit switch body.
2033 EmitStmt(S.getBody());
2034
2035 BreakContinueStack.pop_back();
2036
2037 // Update the default block in case explicit case range tests have
2038 // been chained on top.
2039 SwitchInsn->setDefaultDest(CaseRangeBlock);
2040
2041 // If a default was never emitted:
2042 if (!DefaultBlock->getParent()) {
2043 // If we have cleanups, emit the default block so that there's a
2044 // place to jump through the cleanups from.
2045 if (ConditionScope.requiresCleanups()) {
2046 EmitBlock(DefaultBlock);
2047
2048 // Otherwise, just forward the default block to the switch end.
2049 } else {
2050 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2051 delete DefaultBlock;
2052 }
2053 }
2054
2055 ConditionScope.ForceCleanup();
2056
2057 // Emit continuation.
2058 EmitBlock(SwitchExit.getBlock(), true);
2059 incrementProfileCounter(&S);
2060
2061 // If the switch has a condition wrapped by __builtin_unpredictable,
2062 // create metadata that specifies that the switch is unpredictable.
2063 // Don't bother if not optimizing because that metadata would not be used.
2064 auto *Call = dyn_cast<CallExpr>(S.getCond());
2065 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2066 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2067 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2068 llvm::MDBuilder MDHelper(getLLVMContext());
2069 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2070 MDHelper.createUnpredictable());
2071 }
2072 }
2073
2074 if (SwitchWeights) {
2075 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2076 "switch weights do not match switch cases");
2077 // If there's only one jump destination there's no sense weighting it.
2078 if (SwitchWeights->size() > 1)
2079 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2080 createProfileWeights(*SwitchWeights));
2081 delete SwitchWeights;
2082 } else if (SwitchLikelihood) {
2083 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2084 "switch likelihoods do not match switch cases");
2085 std::optional<SmallVector<uint64_t, 16>> LHW =
2086 getLikelihoodWeights(*SwitchLikelihood);
2087 if (LHW) {
2088 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2089 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2090 createProfileWeights(*LHW));
2091 }
2092 delete SwitchLikelihood;
2093 }
2094 SwitchInsn = SavedSwitchInsn;
2095 SwitchWeights = SavedSwitchWeights;
2096 SwitchLikelihood = SavedSwitchLikelihood;
2097 CaseRangeBlock = SavedCRBlock;
2098}
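// A source pattern that reaches the unpredictable-metadata path above (an
// illustrative sketch; 'x' and the handlers are made-up names, and
// optimization must be enabled for the metadata to be attached):
//
//   switch (__builtin_unpredictable(x & 3)) {
//   case 0: f0(); break;
//   case 1: f1(); break;
//   default: fd(); break;
//   }
//
// The emitted 'switch' instruction carries !unpredictable metadata, telling
// later passes not to bet on any one successor.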
2099
2100static std::string
2101SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2102 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
2103 std::string Result;
2104
2105 while (*Constraint) {
2106 switch (*Constraint) {
2107 default:
2108 Result += Target.convertConstraint(Constraint);
2109 break;
2110 // Ignore these
2111 case '*':
2112 case '?':
2113 case '!':
2114 case '=': // Will see this and the following in multi-alt constraints.
2115 case '+':
2116 break;
2117 case '#': // Ignore the rest of the constraint alternative.
2118 while (Constraint[1] && Constraint[1] != ',')
2119 Constraint++;
2120 break;
2121 case '&':
2122 case '%':
2123 Result += *Constraint;
2124 while (Constraint[1] && Constraint[1] == *Constraint)
2125 Constraint++;
2126 break;
2127 case ',':
2128 Result += "|";
2129 break;
2130 case 'g':
2131 Result += "imr";
2132 break;
2133 case '[': {
2134 assert(OutCons &&
2135 "Must pass output names to constraints with a symbolic name");
2136 unsigned Index;
2137 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2138 assert(result && "Could not resolve symbolic name"); (void)result;
2139 Result += llvm::utostr(Index);
2140 break;
2141 }
2142 }
2143
2144 Constraint++;
2145 }
2146
2147 return Result;
2148}
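// Worked examples of the rewriting above (assuming a target whose
// convertConstraint passes 'r' and 'm' through unchanged, the common case):
//
//   SimplifyConstraint("g,r", Target)  -> "imr|r"  // 'g' -> "imr", ',' -> '|'
//   SimplifyConstraint("=&rm", Target) -> "&rm"    // '=' dropped, '&' kept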
2149
2150/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
2151 /// as using a particular register, add that as a constraint that will be used
2152/// in this asm stmt.
2153static std::string
2154AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2155 const TargetInfo &Target, CodeGenModule &CGM,
2156 const AsmStmt &Stmt, const bool EarlyClobber,
2157 std::string *GCCReg = nullptr) {
2158 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2159 if (!AsmDeclRef)
2160 return Constraint;
2161 const ValueDecl &Value = *AsmDeclRef->getDecl();
2162 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2163 if (!Variable)
2164 return Constraint;
2165 if (Variable->getStorageClass() != SC_Register)
2166 return Constraint;
2167 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2168 if (!Attr)
2169 return Constraint;
2170 StringRef Register = Attr->getLabel();
2171 assert(Target.isValidGCCRegisterName(Register));
2172 // We're using validateOutputConstraint here because we only care if
2173 // this is a register constraint.
2174 TargetInfo::ConstraintInfo Info(Constraint, "");
2175 if (Target.validateOutputConstraint(Info) &&
2176 !Info.allowsRegister()) {
2177 CGM.ErrorUnsupported(&Stmt, "__asm__");
2178 return Constraint;
2179 }
2180 // Canonicalize the register here before returning it.
2181 Register = Target.getNormalizedGCCRegisterName(Register);
2182 if (GCCReg != nullptr)
2183 *GCCReg = Register.str();
2184 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2185}
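// The source pattern this recognizes (an illustrative sketch):
//
//   register unsigned long sp asm("rsp");
//   asm volatile("" : "=r"(sp));
//
// For that output, the caller strips the leading '=', this routine rewrites
// "r" to "{rsp}" ("&{rsp}" for an earlyclobber output), and the caller
// re-adds '=', pinning the operand to the named register as "={rsp}".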
2186
2187std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2188 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2189 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2190 if (Info.allowsRegister() || !Info.allowsMemory()) {
2191 if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2192 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2193
2194 llvm::Type *Ty = ConvertType(InputType);
2195 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2196 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2197 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2198 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2199
2200 return {
2201 Builder.CreateLoad(InputValue.getAddress(*this).withElementType(Ty)),
2202 nullptr};
2203 }
2204 }
2205
2206 Address Addr = InputValue.getAddress(*this);
2207 ConstraintStr += '*';
2208 return {Addr.getPointer(), Addr.getElementType()};
2209}
2210
2211std::pair<llvm::Value *, llvm::Type *>
2212CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2213 const Expr *InputExpr,
2214 std::string &ConstraintStr) {
2215 // If this can't be a register or memory, i.e., has to be a constant
2216 // (immediate or symbolic), try to emit it as such.
2217 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2218 if (Info.requiresImmediateConstant()) {
2219 Expr::EvalResult EVResult;
2220 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2221
2222 llvm::APSInt IntResult;
2223 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2224 getContext()))
2225 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2226 }
2227
2228 Expr::EvalResult Result;
2229 if (InputExpr->EvaluateAsInt(Result, getContext()))
2230 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2231 nullptr};
2232 }
2233
2234 if (Info.allowsRegister() || !Info.allowsMemory())
2235 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2236 return {EmitScalarExpr(InputExpr), nullptr};
2237 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2238 return {EmitScalarExpr(InputExpr), nullptr};
2239 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2240 LValue Dest = EmitLValue(InputExpr);
2241 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2242 InputExpr->getExprLoc());
2243}
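// The constant paths above serve operands that allow neither a register nor
// memory, e.g. an immediate "i" constraint (illustrative):
//
//   asm volatile("" : : "i"(42));
//
// Here allowsRegister() and allowsMemory() are both false, so the operand is
// emitted as a ConstantInt rather than through EmitScalarExpr or an lvalue.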
2244
2245/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2246/// asm call instruction. The !srcloc MDNode contains a list of constant
2247/// integers which are the source locations of the start of each line in the
2248/// asm.
2249static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2250 CodeGenFunction &CGF) {
2251 SmallVector<llvm::Metadata *, 8> Locs;
2252 // Add the location of the first line to the MDNode.
2253 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2254 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2255 StringRef StrVal = Str->getString();
2256 if (!StrVal.empty()) {
2257 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2258 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2259 unsigned StartToken = 0;
2260 unsigned ByteOffset = 0;
2261
2262 // Add the location of the start of each subsequent line of the asm to the
2263 // MDNode.
2264 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2265 if (StrVal[i] != '\n') continue;
2266 SourceLocation LineLoc = Str->getLocationOfByte(
2267 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2268 Locs.push_back(llvm::ConstantAsMetadata::get(
2269 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2270 }
2271 }
2272
2273 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2274}
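// For a two-line GNU asm string (an illustrative sketch; x and y are made-up
// ints), the returned !srcloc node holds two encoded locations, one per line,
// so backend diagnostics can point at the offending line:
//
//   asm("movl %1, %0\n\t"
//       "addl $1, %0" : "=r"(y) : "r"(x));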
2275
2276static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2277 bool HasUnwindClobber, bool ReadOnly,
2278 bool ReadNone, bool NoMerge, const AsmStmt &S,
2279 const std::vector<llvm::Type *> &ResultRegTypes,
2280 const std::vector<llvm::Type *> &ArgElemTypes,
2281 CodeGenFunction &CGF,
2282 std::vector<llvm::Value *> &RegResults) {
2283 if (!HasUnwindClobber)
2284 Result.addFnAttr(llvm::Attribute::NoUnwind);
2285
2286 if (NoMerge)
2287 Result.addFnAttr(llvm::Attribute::NoMerge);
2288 // Attach readnone and readonly attributes.
2289 if (!HasSideEffect) {
2290 if (ReadNone)
2291 Result.setDoesNotAccessMemory();
2292 else if (ReadOnly)
2293 Result.setOnlyReadsMemory();
2294 }
2295
2296 // Add elementtype attribute for indirect constraints.
2297 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2298 if (Pair.value()) {
2299 auto Attr = llvm::Attribute::get(
2300 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2301 Result.addParamAttr(Pair.index(), Attr);
2302 }
2303 }
2304
2305 // Slap the source location of the inline asm into a !srcloc metadata on the
2306 // call.
2307 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2308 Result.setMetadata("srcloc",
2309 getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2310 else {
2311 // At least put the line number on MS inline asm blobs.
2312 llvm::Constant *Loc =
2313 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2314 Result.setMetadata("srcloc",
2315 llvm::MDNode::get(CGF.getLLVMContext(),
2316 llvm::ConstantAsMetadata::get(Loc)));
2317 }
2318
2319 if (CGF.getLangOpts().assumeFunctionsAreConvergent())
2320 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2321 // convergent (meaning, they may call an intrinsically convergent op, such
2322 // as bar.sync, and so can't have certain optimizations applied around
2323 // them).
2324 Result.addFnAttr(llvm::Attribute::Convergent);
2325 // Extract all of the register value results from the asm.
2326 if (ResultRegTypes.size() == 1) {
2327 RegResults.push_back(&Result);
2328 } else {
2329 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2330 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2331 RegResults.push_back(Tmp);
2332 }
2333 }
2334}
2335
2336static void
2337 EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
2338 const llvm::ArrayRef<llvm::Value *> RegResults,
2339 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2340 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2341 const llvm::ArrayRef<LValue> ResultRegDests,
2342 const llvm::ArrayRef<QualType> ResultRegQualTys,
2343 const llvm::BitVector &ResultTypeRequiresCast,
2344 const llvm::BitVector &ResultRegIsFlagReg) {
2345 CGBuilderTy &Builder = CGF.Builder;
2346 CodeGenModule &CGM = CGF.CGM;
2347 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2348
2349 assert(RegResults.size() == ResultRegTypes.size());
2350 assert(RegResults.size() == ResultTruncRegTypes.size());
2351 assert(RegResults.size() == ResultRegDests.size());
2352 // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2353 // in which case its size may grow.
2354 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2355 assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2356
2357 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2358 llvm::Value *Tmp = RegResults[i];
2359 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2360
2361 if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2362 // Target must guarantee the Value `Tmp` here is lowered to a boolean
2363 // value.
2364 llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
2365 llvm::Value *IsBooleanValue =
2366 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
2367 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2368 Builder.CreateCall(FnAssume, IsBooleanValue);
2369 }
2370
2371 // If the result type of the LLVM IR asm doesn't match the result type of
2372 // the expression, do the conversion.
2373 if (ResultRegTypes[i] != TruncTy) {
2374
2375 // Truncate the integer result to the right size; note that TruncTy can be
2376 // a pointer.
2377 if (TruncTy->isFloatingPointTy())
2378 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2379 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2380 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2381 Tmp = Builder.CreateTrunc(
2382 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2383 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2384 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2385 uint64_t TmpSize =
2386 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2387 Tmp = Builder.CreatePtrToInt(
2388 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2389 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2390 } else if (TruncTy->isIntegerTy()) {
2391 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2392 } else if (TruncTy->isVectorTy()) {
2393 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2394 }
2395 }
2396
2397 LValue Dest = ResultRegDests[i];
2398 // ResultTypeRequiresCast elements correspond to the first
2399 // ResultTypeRequiresCast.size() elements of RegResults.
2400 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2401 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2402 Address A = Dest.getAddress(CGF).withElementType(ResultRegTypes[i]);
2403 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2404 Builder.CreateStore(Tmp, A);
2405 continue;
2406 }
2407
2408 QualType Ty =
2409 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2410 if (Ty.isNull()) {
2411 const Expr *OutExpr = S.getOutputExpr(i);
2412 CGM.getDiags().Report(OutExpr->getExprLoc(),
2413 diag::err_store_value_to_reg);
2414 return;
2415 }
2416 Dest = CGF.MakeAddrLValue(A, Ty);
2417 }
2418 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2419 }
2420}
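// The range assume above is what makes flag-register outputs cheap to
// consume; on x86 (a sketch, with made-up ints a and b):
//
//   int eq;
//   asm("cmpl %2, %1" : "=@ccz"(eq) : "r"(a), "r"(b));
//
// The target lowers 'eq' to 0 or 1, and the generated
// llvm.assume(icmp ult %eq, 2) lets later passes rely on exactly that.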
2421
2422 static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
2423 const AsmStmt &S) {
2424 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2425
2426 StringRef Asm;
2427 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2428 Asm = GCCAsm->getAsmString()->getString();
2429
2430 auto &Ctx = CGF->CGM.getLLVMContext();
2431
2432 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2433 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2434 {StrTy->getType()}, false);
2435 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2436
2437 CGF->Builder.CreateCall(UBF, {StrTy});
2438}
2439
2440 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2441 // Pop all cleanup blocks at the end of the asm statement.
2442 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2443
2444 // Assemble the final asm string.
2445 std::string AsmString = S.generateAsmString(getContext());
2446
2447 // Get all the output and input constraints together.
2448 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2449 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2450
2451 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2452 bool IsValidTargetAsm = true;
2453 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2454 StringRef Name;
2455 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2456 Name = GAS->getOutputName(i);
2457 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2458 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2459 if (IsHipStdPar && !IsValid)
2460 IsValidTargetAsm = false;
2461 else
2462 assert(IsValid && "Failed to parse output constraint");
2463 OutputConstraintInfos.push_back(Info);
2464 }
2465
2466 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2467 StringRef Name;
2468 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2469 Name = GAS->getInputName(i);
2470 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2471 bool IsValid =
2472 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2473 if (IsHipStdPar && !IsValid)
2474 IsValidTargetAsm = false;
2475 else
2476 assert(IsValid && "Failed to parse input constraint");
2477 InputConstraintInfos.push_back(Info);
2478 }
2479
2480 if (!IsValidTargetAsm)
2481 return EmitHipStdParUnsupportedAsm(this, S);
2482
2483 std::string Constraints;
2484
2485 std::vector<LValue> ResultRegDests;
2486 std::vector<QualType> ResultRegQualTys;
2487 std::vector<llvm::Type *> ResultRegTypes;
2488 std::vector<llvm::Type *> ResultTruncRegTypes;
2489 std::vector<llvm::Type *> ArgTypes;
2490 std::vector<llvm::Type *> ArgElemTypes;
2491 std::vector<llvm::Value*> Args;
2492 llvm::BitVector ResultTypeRequiresCast;
2493 llvm::BitVector ResultRegIsFlagReg;
2494
2495 // Keep track of inout constraints.
2496 std::string InOutConstraints;
2497 std::vector<llvm::Value*> InOutArgs;
2498 std::vector<llvm::Type*> InOutArgTypes;
2499 std::vector<llvm::Type*> InOutArgElemTypes;
2500
2501 // Keep track of out constraints for tied input operand.
2502 std::vector<std::string> OutputConstraints;
2503
2504 // Keep track of defined physregs.
2505 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2506
2507 // An inline asm can be marked readonly if it meets the following conditions:
2508 // - it doesn't have any side effects
2509 // - it doesn't clobber memory
2510 // - it doesn't return a value by-reference
2511 // It can be marked readnone if it doesn't have any input memory constraints
2512 // in addition to meeting the conditions listed above.
2513 bool ReadOnly = true, ReadNone = true;
2514
2515 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2516 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2517
2518 // Simplify the output constraint.
2519 std::string OutputConstraint(S.getOutputConstraint(i));
2520 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2521 getTarget(), &OutputConstraintInfos);
2522
2523 const Expr *OutExpr = S.getOutputExpr(i);
2524 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2525
2526 std::string GCCReg;
2527 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2528 getTarget(), CGM, S,
2529 Info.earlyClobber(),
2530 &GCCReg);
2531 // Give an error on multiple outputs to same physreg.
2532 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2533 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2534
2535 OutputConstraints.push_back(OutputConstraint);
2536 LValue Dest = EmitLValue(OutExpr);
2537 if (!Constraints.empty())
2538 Constraints += ',';
2539
2540 // If this is a register output, then make the inline asm return it
2541 // by-value. If this is a memory result, return the value by-reference.
2542 QualType QTy = OutExpr->getType();
2543 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2544 hasAggregateEvaluationKind(QTy);
2545 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2546
2547 Constraints += "=" + OutputConstraint;
2548 ResultRegQualTys.push_back(QTy);
2549 ResultRegDests.push_back(Dest);
2550
2551 bool IsFlagReg = llvm::StringRef(OutputConstraint).startswith("{@cc");
2552 ResultRegIsFlagReg.push_back(IsFlagReg);
2553
2554 llvm::Type *Ty = ConvertTypeForMem(QTy);
2555 const bool RequiresCast = Info.allowsRegister() &&
2556 (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2557 Ty->isAggregateType());
2558
2559 ResultTruncRegTypes.push_back(Ty);
2560 ResultTypeRequiresCast.push_back(RequiresCast);
2561
2562 if (RequiresCast) {
2563 unsigned Size = getContext().getTypeSize(QTy);
2564 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2565 }
2566 ResultRegTypes.push_back(Ty);
2567 // If this output is tied to an input, and if the input is larger, then
2568 // we need to set the actual result type of the inline asm node to be the
2569 // same as the input type.
2570 if (Info.hasMatchingInput()) {
2571 unsigned InputNo;
2572 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2573 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2574 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2575 break;
2576 }
2577 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2578
2579 QualType InputTy = S.getInputExpr(InputNo)->getType();
2580 QualType OutputType = OutExpr->getType();
2581
2582 uint64_t InputSize = getContext().getTypeSize(InputTy);
2583 if (getContext().getTypeSize(OutputType) < InputSize) {
2584 // Form the asm to return the value as a larger integer or fp type.
2585 ResultRegTypes.back() = ConvertType(InputTy);
2586 }
2587 }
2588 if (llvm::Type* AdjTy =
2589 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2590 ResultRegTypes.back()))
2591 ResultRegTypes.back() = AdjTy;
2592 else {
2593 CGM.getDiags().Report(S.getAsmLoc(),
2594 diag::err_asm_invalid_type_in_input)
2595 << OutExpr->getType() << OutputConstraint;
2596 }
2597
2598 // Update largest vector width for any vector types.
2599 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2600 LargestVectorWidth =
2601 std::max((uint64_t)LargestVectorWidth,
2602 VT->getPrimitiveSizeInBits().getKnownMinValue());
2603 } else {
2604 Address DestAddr = Dest.getAddress(*this);
2605 // Matrix types in memory are represented by arrays, but accessed through
2606 // vector pointers, with the alignment specified on the access operation.
2607 // For inline assembly, update pointer arguments to use vector pointers.
2608 // Otherwise there will be a mismatch if the matrix is also an
2609 // input argument, which is represented as a vector.
2610 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2611 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2612
2613 ArgTypes.push_back(DestAddr.getType());
2614 ArgElemTypes.push_back(DestAddr.getElementType());
2615 Args.push_back(DestAddr.getPointer());
2616 Constraints += "=*";
2617 Constraints += OutputConstraint;
2618 ReadOnly = ReadNone = false;
2619 }
2620
2621 if (Info.isReadWrite()) {
2622 InOutConstraints += ',';
2623
2624 const Expr *InputExpr = S.getOutputExpr(i);
2625 llvm::Value *Arg;
2626 llvm::Type *ArgElemType;
2627 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2628 Info, Dest, InputExpr->getType(), InOutConstraints,
2629 InputExpr->getExprLoc());
2630
2631 if (llvm::Type* AdjTy =
2632 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2633 Arg->getType()))
2634 Arg = Builder.CreateBitCast(Arg, AdjTy);
2635
2636 // Update largest vector width for any vector types.
2637 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2638 LargestVectorWidth =
2639 std::max((uint64_t)LargestVectorWidth,
2640 VT->getPrimitiveSizeInBits().getKnownMinValue());
2641 // Only tie earlyclobber physregs.
2642 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2643 InOutConstraints += llvm::utostr(i);
2644 else
2645 InOutConstraints += OutputConstraint;
2646
2647 InOutArgTypes.push_back(Arg->getType());
2648 InOutArgElemTypes.push_back(ArgElemType);
2649 InOutArgs.push_back(Arg);
2650 }
2651 }
2652
2653 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2654 // to the return value slot. Only do this when returning in registers.
2655 if (isa<MSAsmStmt>(&S)) {
2656 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2657 if (RetAI.isDirect() || RetAI.isExtend()) {
2658 // Make a fake lvalue for the return value slot.
2659 LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy);
2660 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2661 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2662 ResultRegDests, AsmString, S.getNumOutputs());
2663 SawAsmBlock = true;
2664 }
2665 }
2666
2667 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2668 const Expr *InputExpr = S.getInputExpr(i);
2669
2670 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2671
2672 if (Info.allowsMemory())
2673 ReadNone = false;
2674
2675 if (!Constraints.empty())
2676 Constraints += ',';
2677
2678 // Simplify the input constraint.
2679 std::string InputConstraint(S.getInputConstraint(i));
2680 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2681 &OutputConstraintInfos);
2682
2683 InputConstraint = AddVariableConstraints(
2684 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2685 getTarget(), CGM, S, false /* No EarlyClobber */);
2686
2687 std::string ReplaceConstraint (InputConstraint);
2688 llvm::Value *Arg;
2689 llvm::Type *ArgElemType;
2690 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
2691
2692 // If this input argument is tied to a larger output result, extend the
2693 // input to be the same size as the output. The LLVM backend wants to see
2694 // the input and output of a matching constraint be the same size. Note
2695 // that GCC does not define what the top bits are here. We use zext because
2696 // that is usually cheaper, but LLVM IR should really get an anyext someday.
2697 if (Info.hasTiedOperand()) {
2698 unsigned Output = Info.getTiedOperand();
2699 QualType OutputType = S.getOutputExpr(Output)->getType();
2700 QualType InputTy = InputExpr->getType();
2701
2702 if (getContext().getTypeSize(OutputType) >
2703 getContext().getTypeSize(InputTy)) {
2704 // Use ptrtoint as appropriate so that we can do our extension.
2705 if (isa<llvm::PointerType>(Arg->getType()))
2706 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2707 llvm::Type *OutputTy = ConvertType(OutputType);
2708 if (isa<llvm::IntegerType>(OutputTy))
2709 Arg = Builder.CreateZExt(Arg, OutputTy);
2710 else if (isa<llvm::PointerType>(OutputTy))
2711 Arg = Builder.CreateZExt(Arg, IntPtrTy);
2712 else if (OutputTy->isFloatingPointTy())
2713 Arg = Builder.CreateFPExt(Arg, OutputTy);
2714 }
2715 // Deal with the tied operands' constraint code in adjustInlineAsmType.
2716 ReplaceConstraint = OutputConstraints[Output];
2717 }
2718 if (llvm::Type* AdjTy =
2719 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2720 Arg->getType()))
2721 Arg = Builder.CreateBitCast(Arg, AdjTy);
2722 else
2723 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2724 << InputExpr->getType() << InputConstraint;
2725
2726 // Update largest vector width for any vector types.
2727 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2728 LargestVectorWidth =
2729 std::max((uint64_t)LargestVectorWidth,
2730 VT->getPrimitiveSizeInBits().getKnownMinValue());
2731
2732 ArgTypes.push_back(Arg->getType());
2733 ArgElemTypes.push_back(ArgElemType);
2734 Args.push_back(Arg);
2735 Constraints += InputConstraint;
2736 }
2737
2738 // Append the "input" part of inout constraints.
2739 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2740 ArgTypes.push_back(InOutArgTypes[i]);
2741 ArgElemTypes.push_back(InOutArgElemTypes[i]);
2742 Args.push_back(InOutArgs[i]);
2743 }
2744 Constraints += InOutConstraints;
2745
2746 // Labels
2747 SmallVector<llvm::BasicBlock *, 16> Transfer;
2748 llvm::BasicBlock *Fallthrough = nullptr;
2749 bool IsGCCAsmGoto = false;
2750 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2751 IsGCCAsmGoto = GS->isAsmGoto();
2752 if (IsGCCAsmGoto) {
2753 for (const auto *E : GS->labels()) {
2754 JumpDest Dest = getJumpDestForLabel(E->getLabel());
2755 Transfer.push_back(Dest.getBlock());
2756 if (!Constraints.empty())
2757 Constraints += ',';
2758 Constraints += "!i";
2759 }
2760 Fallthrough = createBasicBlock("asm.fallthrough");
2761 }
2762 }
2763
2764 bool HasUnwindClobber = false;
2765
2766 // Clobbers
2767 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2768 StringRef Clobber = S.getClobber(i);
2769
2770 if (Clobber == "memory")
2771 ReadOnly = ReadNone = false;
2772 else if (Clobber == "unwind") {
2773 HasUnwindClobber = true;
2774 continue;
2775 } else if (Clobber != "cc") {
2776 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2777 if (CGM.getCodeGenOpts().StackClashProtector &&
2778 getTarget().isSPRegName(Clobber)) {
2779 CGM.getDiags().Report(S.getAsmLoc(),
2780 diag::warn_stack_clash_protection_inline_asm);
2781 }
2782 }
2783
2784 if (isa<MSAsmStmt>(&S)) {
2785 if (Clobber == "eax" || Clobber == "edx") {
2786 if (Constraints.find("=&A") != std::string::npos)
2787 continue;
2788 std::string::size_type position1 =
2789 Constraints.find("={" + Clobber.str() + "}");
2790 if (position1 != std::string::npos) {
2791 Constraints.insert(position1 + 1, "&");
2792 continue;
2793 }
2794 std::string::size_type position2 = Constraints.find("=A");
2795 if (position2 != std::string::npos) {
2796 Constraints.insert(position2 + 1, "&");
2797 continue;
2798 }
2799 }
2800 }
2801 if (!Constraints.empty())
2802 Constraints += ',';
2803
2804 Constraints += "~{";
2805 Constraints += Clobber;
2806 Constraints += '}';
2807 }
2808
2809 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
2810 "unwind clobber can't be used with asm goto");
2811
2812 // Add machine specific clobbers
2813 std::string_view MachineClobbers = getTarget().getClobbers();
2814 if (!MachineClobbers.empty()) {
2815 if (!Constraints.empty())
2816 Constraints += ',';
2817 Constraints += MachineClobbers;
2818 }
2819
2820 llvm::Type *ResultType;
2821 if (ResultRegTypes.empty())
2822 ResultType = VoidTy;
2823 else if (ResultRegTypes.size() == 1)
2824 ResultType = ResultRegTypes[0];
2825 else
2826 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
2827
2828 llvm::FunctionType *FTy =
2829 llvm::FunctionType::get(ResultType, ArgTypes, false);
2830
2831 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
2832
2833 llvm::InlineAsm::AsmDialect GnuAsmDialect =
2834 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
2835 ? llvm::InlineAsm::AD_ATT
2836 : llvm::InlineAsm::AD_Intel;
2837 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
2838 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
2839
2840 llvm::InlineAsm *IA = llvm::InlineAsm::get(
2841 FTy, AsmString, Constraints, HasSideEffect,
2842 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
2843 std::vector<llvm::Value*> RegResults;
2844 llvm::CallBrInst *CBR;
2845 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
2846 CBRRegResults;
2847 if (IsGCCAsmGoto) {
2848 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
2849 EmitBlock(Fallthrough);
2850 UpdateAsmCallInst(*CBR, HasSideEffect, false, ReadOnly, ReadNone,
2851 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2852 *this, RegResults);
2853 // Because we are emitting code top to bottom, we don't have enough
2854 // information at this point to know precisely whether we have a critical
2855 // edge. If we have outputs, split all indirect destinations.
2856 if (!RegResults.empty()) {
2857 unsigned i = 0;
2858 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
2859 llvm::Twine SynthName = Dest->getName() + ".split";
2860 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
2861 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
2862 Builder.SetInsertPoint(SynthBB);
2863
2864 if (ResultRegTypes.size() == 1) {
2865 CBRRegResults[SynthBB].push_back(CBR);
2866 } else {
2867 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
2868 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
2869 CBRRegResults[SynthBB].push_back(Tmp);
2870 }
2871 }
2872
2873 EmitBranch(Dest);
2874 EmitBlock(SynthBB);
2875 CBR->setIndirectDest(i++, SynthBB);
2876 }
2877 }
2878 } else if (HasUnwindClobber) {
2879 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
2880 UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
2881 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2882 *this, RegResults);
2883 } else {
2884 llvm::CallInst *Result =
2885 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
2886 UpdateAsmCallInst(*Result, HasSideEffect, false, ReadOnly, ReadNone,
2887 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2888 *this, RegResults);
2889 }
2890
2891 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
2892 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
2893 ResultRegIsFlagReg);
2894
2895 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
2896 // different insertion point; one for each indirect destination and with
2897 // CBRRegResults rather than RegResults.
2898 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
2899 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
2900 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
2901 Builder.SetInsertPoint(Succ, --(Succ->end()));
2902 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
2903 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
2904 ResultTypeRequiresCast, ResultRegIsFlagReg);
2905 }
2906 }
2907}
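// The indirect-destination splitting above exists for 'asm goto' with
// outputs (an illustrative sketch):
//
//   int foo(int x) {
//     int y;
//     asm goto("" : "=r"(y) : "r"(x) : : err);
//     return y;
//   err:
//     return -1;
//   }
//
// The callbr's 'err' destination is routed through a synthesized ".split"
// block where the output store is re-emitted, keeping that edge non-critical.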
2908
2909 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2910 const RecordDecl *RD = S.getCapturedRecordDecl();
2911 QualType RecordTy = getContext().getRecordType(RD);
2912
2913 // Initialize the captured struct.
2914 LValue SlotLV =
2915 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2916
2917 RecordDecl::field_iterator CurField = RD->field_begin();
2918 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
2919 E = S.capture_init_end();
2920 I != E; ++I, ++CurField) {
2921 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2922 if (CurField->hasCapturedVLAType()) {
2923 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
2924 } else {
2925 EmitInitializerForField(*CurField, LV, *I);
2926 }
2927 }
2928
2929 return SlotLV;
2930}
2931
2932/// Generate an outlined function for the body of a CapturedStmt, store any
2933/// captured variables into the captured struct, and call the outlined function.
2934llvm::Function *
2935 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2936 LValue CapStruct = InitCapturedStruct(S);
2937
2938 // Emit the CapturedDecl
2939 CodeGenFunction CGF(CGM, true);
2940 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
2941 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2942 delete CGF.CapturedStmtInfo;
2943
2944 // Emit call to the helper function.
2945 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
2946
2947 return F;
2948}
2949
2950 Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2951 LValue CapStruct = InitCapturedStruct(S);
2952 return CapStruct.getAddress(*this);
2953}
2954
2955/// Creates the outlined function for a CapturedStmt.
2956llvm::Function *
2957 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2958 assert(CapturedStmtInfo &&
2959 "CapturedStmtInfo should be set when generating the captured function");
2960 const CapturedDecl *CD = S.getCapturedDecl();
2961 const RecordDecl *RD = S.getCapturedRecordDecl();
2962 SourceLocation Loc = S.getBeginLoc();
2963 assert(CD->hasBody() && "missing CapturedDecl body");
2964
2965 // Build the argument list.
2966 ASTContext &Ctx = CGM.getContext();
2967 FunctionArgList Args;
2968 Args.append(CD->param_begin(), CD->param_end());
2969
2970 // Create the function declaration.
2971 const CGFunctionInfo &FuncInfo =
2972 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
2973 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2974
2975 llvm::Function *F =
2976 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2978 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2979 if (CD->isNothrow())
2980 F->addFnAttr(llvm::Attribute::NoUnwind);
2981
2982 // Generate the function.
2983 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
2984 CD->getBody()->getBeginLoc());
2985 // Set the context parameter in CapturedStmtInfo.
2986 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
2987 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2988
2989 // Initialize variable-length arrays.
2990 LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2991 Ctx.getTagDeclType(RD));
2992 for (auto *FD : RD->fields()) {
2993 if (FD->hasCapturedVLAType()) {
2994 auto *ExprArg =
2995 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
2996 .getScalarVal();
2997 auto VAT = FD->getCapturedVLAType();
2998 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2999 }
3000 }
3001
3002 // If 'this' is captured, load it into CXXThisValue.
3003 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3004 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3005 LValue ThisLValue = EmitLValueForField(Base, FD);
3006 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3007 }
3008
3009 PGO.assignRegionCounters(GlobalDecl(CD), F);
3010 CapturedStmtInfo->EmitBody(*this, CD->getBody());
3011 FinishFunction(CD->getBodyRBrace());
3012
3013 return F;
3014}
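// Conceptually (a sketch, not literal output; the OpenMP runtime layers its
// own parameters and calling conventions on top): for a captured region such
// as
//
//   #pragma omp parallel
//   { use(a, b); }
//
// the body is outlined into an internal helper whose argument is the capture
// record built by InitCapturedStruct; the prologue above reloads the context
// parameter and re-materializes captured VLA bounds and 'this' from its
// fields before emitting the body.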
#define V(N, I)
Definition: ASTContext.h:3241
#define SM(sm)
Definition: Cuda.cpp:80
Defines enum values for all the target-independent builtin functions.
static std::string AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, const TargetInfo &Target, CodeGenModule &CGM, const AsmStmt &Stmt, const bool EarlyClobber, std::string *GCCReg=nullptr)
AddVariableConstraints - Look at AsmExpr and if it is a variable declared as using a particular regis...
Definition: CGStmt.cpp:2154
static bool FindCaseStatementsForValue(const SwitchStmt &S, const llvm::APSInt &ConstantCondValue, SmallVectorImpl< const Stmt * > &ResultStmts, ASTContext &C, const SwitchCase *&ResultCase)
FindCaseStatementsForValue - Find the case statement being jumped to and then invoke CollectStatement...
Definition: CGStmt.cpp:1825
static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF, const AsmStmt &S)
Definition: CGStmt.cpp:2422
static std::optional< SmallVector< uint64_t, 16 > > getLikelihoodWeights(ArrayRef< Stmt::Likelihood > Likelihoods)
Definition: CGStmt.cpp:1879
static llvm::MDNode * getAsmSrcLocInfo(const StringLiteral *Str, CodeGenFunction &CGF)
getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline asm call instruction.
Definition: CGStmt.cpp:2249
static std::string SimplifyConstraint(const char *Constraint, const TargetInfo &Target, SmallVectorImpl< TargetInfo::ConstraintInfo > *OutCons=nullptr)
Definition: CGStmt.cpp:2101
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect, bool HasUnwindClobber, bool ReadOnly, bool ReadNone, bool NoMerge, const AsmStmt &S, const std::vector< llvm::Type * > &ResultRegTypes, const std::vector< llvm::Type * > &ArgElemTypes, CodeGenFunction &CGF, std::vector< llvm::Value * > &RegResults)
Definition: CGStmt.cpp:2276
static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder, const CGFunctionInfo *CurFnInfo)
If we have 'return f(...);', where both caller and callee are SwiftAsync, codegen it as 'tail call ....
Definition: CGStmt.cpp:1260
static CSFC_Result CollectStatementsForCase(const Stmt *S, const SwitchCase *Case, bool &FoundCase, SmallVectorImpl< const Stmt * > &ResultStmts)
Definition: CGStmt.cpp:1670
static void EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S, const llvm::ArrayRef< llvm::Value * > RegResults, const llvm::ArrayRef< llvm::Type * > ResultRegTypes, const llvm::ArrayRef< llvm::Type * > ResultTruncRegTypes, const llvm::ArrayRef< LValue > ResultRegDests, const llvm::ArrayRef< QualType > ResultRegQualTys, const llvm::BitVector &ResultTypeRequiresCast, const llvm::BitVector &ResultRegIsFlagReg)
Definition: CGStmt.cpp:2337
CSFC_Result
CollectStatementsForCase - Given the body of a 'switch' statement and a constant value that is being ...
Definition: CGStmt.cpp:1669
@ CSFC_Failure
Definition: CGStmt.cpp:1669
@ CSFC_Success
Definition: CGStmt.cpp:1669
@ CSFC_FallThrough
Definition: CGStmt.cpp:1669
Defines the PrettyStackTraceEntry class, which is used to make crashes give more contextual informati...
Defines the SourceManager interface.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
bool toIntegralConstant(APSInt &Result, QualType SrcTy, const ASTContext &Ctx) const
Try to convert this value to an integral constant.
Definition: APValue.cpp:950
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:182
SourceManager & getSourceManager()
Definition: ASTContext.h:697
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
QualType getRecordType(const RecordDecl *Decl) const
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth,...
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2307
CanQualType VoidTy
Definition: ASTContext.h:1083
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition: Stmt.h:3094
Attr - This represents one attribute.
Definition: Attr.h:41
Represents an attribute applied to a statement.
Definition: Stmt.h:2074
BreakStmt - This represents a break.
Definition: Stmt.h:2974
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition: StmtCXX.h:135
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2847
Expr * getCallee()
Definition: Expr.h:2997
Represents the body of a CapturedStmt, and serves as its DeclContext.
Definition: Decl.h:4655
ImplicitParamDecl * getContextParam() const
Retrieve the parameter containing captured variables.
Definition: Decl.h:4717
bool isNothrow() const
Definition: Decl.cpp:5367
param_iterator param_end() const
Retrieve an iterator one past the last parameter decl.
Definition: Decl.h:4734
param_iterator param_begin() const
Retrieve an iterator pointing to the first parameter decl.
Definition: Decl.h:4732
Stmt * getBody() const override
getBody - If this Decl represents a declaration for a body of code, such as a function or method defi...
Definition: Decl.cpp:5364
This captures a statement into a function.
Definition: Stmt.h:3751
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition: Stmt.h:3915
CapturedRegionKind getCapturedRegionKind() const
Retrieve the captured region kind.
Definition: Stmt.cpp:1420
CaseStmt - Represent a case statement.
Definition: Stmt.h:1795
Stmt * getSubStmt()
Definition: Stmt.h:1912
Expr * getLHS()
Definition: Stmt.h:1882
Expr * getRHS()
Definition: Stmt.h:1894
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
bool hasReducedDebugInfo() const
Check if type and variable info should be emitted.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
An aligned address.
Definition: Address.h:29
static Address invalid()
Definition: Address.h:46
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:62
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:100
llvm::Value * getPointer() const
Definition: Address.h:51
bool isValid() const
Definition: Address.h:47
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:57
An aggregate value slot.
Definition: CGValue.h:512
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:595
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
Definition: CGDebugInfo.h:880
llvm::StoreInst * CreateFlagStore(bool Value, llvm::Value *Addr)
Emit a store to an i1 flag variable.
Definition: CGBuilder.h:125
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:97
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:71
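A hedged sketch of the three builder helpers listed above, assuming a CodeGenFunction &CGF with a live insertion point, an Address Tmp, and an llvm::Value *FlagAddr pointing at an i1 slot:
  llvm::Value *V = CGF.Builder.CreateLoad(Tmp, "reload");
  CGF.Builder.CreateStore(V, Tmp);             // plain, non-volatile store
  CGF.Builder.CreateFlagStore(true, FlagAddr); // i1 flag, e.g. an NRVO flag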
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:55
CGFunctionInfo - Class to encapsulate the information about a function definition.
CallingConv getASTCallingConvention() const
getASTCallingConvention() - Return the AST-specified calling convention.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD)
Gets the OpenMP-specific address of the local variable.
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S)
Emit the captured statement body.
virtual StringRef getHelperName() const
Get the name of the capture helper.
void rescopeLabels()
Change the cleanup scope of the labels in this lexical scope to match the scope of the enclosing cont...
Definition: CGStmt.cpp:673
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitForStmt(const ForStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EmitGotoStmt(const GotoStmt &S)
void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S)
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
void EmitIfStmt(const IfStmt &S)
void EmitOMPOrderedDirective(const OMPOrderedDirective &S)
void EmitOMPTargetDirective(const OMPTargetDirective &S)
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
void EmitOMPAtomicDirective(const OMPAtomicDirective &S)
void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S)
void EmitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &S)
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
static bool hasScalarEvaluationKind(QualType T)
CGCapturedStmtInfo * CapturedStmtInfo
void EmitIndirectGotoStmt(const IndirectGotoStmt &S)
void EmitDecl(const Decl &D)
EmitDecl - Emit a declaration.
void EmitCXXTryStmt(const CXXTryStmt &S)
bool EmitSimpleStmt(const Stmt *S, ArrayRef< const Attr * > Attrs)
EmitSimpleStmt - Try to emit a "simple" statement which does not necessarily require an insertion poi...
void EmitLabel(const LabelDecl *D)
EmitLabel - Emit the block for the given label.
void EmitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &S)
void EmitOMPTaskDirective(const OMPTaskDirective &S)
void EmitOMPScanDirective(const OMPScanDirective &S)
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S)
void EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitCaseStmt(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitDefaultStmt(const DefaultStmt &S, ArrayRef< const Attr * > Attrs)
const LangOptions & getLangOpts() const
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S)
void EmitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &S)
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S)
void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S)
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
void SimplifyForwardingBlocks(llvm::BasicBlock *BB)
SimplifyForwardingBlocks - If the given basic block is only a branch to another basic block,...
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
void EmitOMPParallelDirective(const OMPParallelDirective &S)
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
void EmitBlockAfterUses(llvm::BasicBlock *BB)
EmitBlockAfterUses - Emit the given block somewhere hopefully near its uses, and leave the insertion ...
void EmitContinueStmt(const ContinueStmt &S)
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
llvm::Type * ConvertTypeForMem(QualType T)
void EmitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &S)
LValue MakeAddrLValueWithoutTBAA(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitOMPForSimdDirective(const OMPForSimdDirective &S)
JumpDest ReturnBlock
ReturnBlock - Unified return block.
bool checkIfLoopMustProgress(bool HasConstantCond)
Returns true if a loop must make progress, which means the mustprogress attribute can be added.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
void EmitOMPFlushDirective(const OMPFlushDirective &S)
void EmitSEHLeaveStmt(const SEHLeaveStmt &S)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
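The typical shape of a conditional lowering built from createBasicBlock, EmitBranchOnBoolExpr, and EmitBlock is sketched below; CGF, Cond, and TrueCount are assumptions, and this is not the exact EmitIfStmt body:
  llvm::BasicBlock *ThenBB = CGF.createBasicBlock("if.then");
  llvm::BasicBlock *ElseBB = CGF.createBasicBlock("if.else");
  llvm::BasicBlock *ContBB = CGF.createBasicBlock("if.end");
  CGF.EmitBranchOnBoolExpr(Cond, ThenBB, ElseBB, TrueCount);
  CGF.EmitBlock(ThenBB);
  // ... emit the then-body here ...
  CGF.EmitBranch(ContBB);
  CGF.EmitBlock(ElseBB);
  // ... emit the else-body here ...
  CGF.EmitBlock(ContBB, /*IsFinished=*/true);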
void EmitAttributedStmt(const AttributedStmt &S)
void EmitOMPCancelDirective(const OMPCancelDirective &S)
void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S)
void EmitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &S)
void EmitCaseStmtRange(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
const TargetInfo & getTarget() const
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S)
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitLabelStmt(const LabelStmt &S)
void EmitOMPDepobjDirective(const OMPDepobjDirective &S)
const Expr * RetExpr
If a return statement is being visited, this holds the return statement's result expression.
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init)
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
void EmitOMPSingleDirective(const OMPSingleDirective &S)
void EmitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &S)
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D)
Emit simple code for OpenMP directives in Simd-only mode.
void EmitOMPDistributeDirective(const OMPDistributeDirective &S)
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
void EmitOMPParallelForDirective(const OMPParallelForDirective &S)
void EmitOMPTeamsDirective(const OMPTeamsDirective &S)
uint64_t getCurrentProfileCount()
Get the profiler's current count.
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitOMPUnrollDirective(const OMPUnrollDirective &S)
void EmitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &S)
void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void EmitSwitchStmt(const SwitchStmt &S)
void EmitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &S)
void EmitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &S)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
llvm::Function * GenerateCapturedStmtFunction(const CapturedStmt &S)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
void EmitSEHTryStmt(const SEHTryStmt &S)
void EmitOMPInteropDirective(const OMPInteropDirective &S)
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
void EmitDeclStmt(const DeclStmt &S)
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S)
void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S)
void EmitCoroutineBody(const CoroutineBodyStmt &S)
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitDoStmt(const DoStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S)
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
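As a sketch of how CreateMemTemp combines with MakeAddrLValue and EmitStoreThroughLValue (both also listed on this page); CGF, a QualType Ty, and an llvm::Value *V are assumptions:
  Address Tmp = CGF.CreateMemTemp(Ty, "sketch.tmp");
  LValue TmpLV = CGF.MakeAddrLValue(Tmp, Ty);
  CGF.EmitStoreThroughLValue(RValue::get(V), TmpLV, /*isInit=*/true);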
void EmitCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
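One plausible use, sketched under the assumption of a CodeGenFunction &CGF and an IfStmt &S: skip emitting a statically dead arm, provided it cannot be entered through a label (cf. ContainsLabel above):
  bool CondConstant;
  if (CGF.ConstantFoldsToSimpleInteger(S.getCond(), CondConstant)) {
    const Stmt *Live = CondConstant ? S.getThen() : S.getElse();
    const Stmt *Dead = CondConstant ? S.getElse() : S.getThen();
    if (!Dead || !CodeGenFunction::ContainsLabel(Dead)) {
      if (Live)
        CGF.EmitStmt(Live);
    }
  }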
void EmitOMPTileDirective(const OMPTileDirective &S)
JumpDest getJumpDestForLabel(const LabelDecl *S)
getJumpDestForLabel - Return the jump destination (LLVM basic block) that the specified label maps to.
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S)
llvm::Function * EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K)
llvm::Type * ConvertType(QualType T)
void EmitOMPSectionsDirective(const OMPSectionsDirective &S)
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S)
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S)
void EmitOMPSimdDirective(const OMPSimdDirective &S)
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
void EmitOMPCriticalDirective(const OMPCriticalDirective &S)
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S)
void EmitOMPForDirective(const OMPForDirective &S)
void EmitOMPMetaDirective(const OMPMetaDirective &S)
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S)
LValue InitCapturedStruct(const CapturedStmt &S)
void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S)
void EmitReturnStmt(const ReturnStmt &S)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
void EmitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &S)
void EmitOMPMasterDirective(const OMPMasterDirective &S)
void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S)
void EmitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &S)
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
void EmitOMPMaskedDirective(const OMPMaskedDirective &S)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
static bool hasAggregateEvaluationKind(QualType T)
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S)
void EmitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &S)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
void EmitBreakStmt(const BreakStmt &S)
Address GenerateCapturedStmtArgument(const CapturedStmt &S)
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
const CGFunctionInfo * CurFnInfo
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const
isObviouslyBranchWithoutCleanups - Return true if a branch to the specified destination obviously has...
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitCoreturnStmt(const CoreturnStmt &S)
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S)
void EmitOMPErrorDirective(const OMPErrorDirective &S)
void EmitOMPSectionDirective(const OMPSectionDirective &S)
void EmitOMPBarrierDirective(const OMPBarrierDirective &S)
void EmitStopPoint(const Stmt *S)
EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S)
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs=std::nullopt)
EmitStmt - Emit the code for the statement.
void EmitWhileStmt(const WhileStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S)
llvm::LLVMContext & getLLVMContext()
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S)
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void ResolveBranchFixups(llvm::BasicBlock *Target)
void EmitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &S)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S)
void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S)
Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S)
llvm::BasicBlock * GetIndirectGotoBlock()
void EmitAsmStmt(const AsmStmt &S)
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S)
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
This class organizes the cross-function state that is used while generating LLVM code.
void SetInternalFunctionAttributes(GlobalDecl GD, llvm::Function *F, const CGFunctionInfo &FI)
Set the attributes on the LLVM function for the given decl and function info.
llvm::Module & getModule() const
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
const llvm::DataLayout & getDataLayout() const
void Error(SourceLocation loc, StringRef error)
Emit a general error that something can't be done.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
SanitizerMetadata * getSanitizerMetadata()
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys=std::nullopt)
void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn)
Assign counters to regions and configure them for PGO of a given function.
Definition: CodeGenPGO.cpp:797
void setCurrentStmt(const Stmt *S)
If the execution count for the current statement is known, record that as the current count.
Definition: CodeGenPGO.h:74
bool haveRegionCounts() const
Whether or not we have PGO region data for the current function.
Definition: CodeGenPGO.h:51
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given function info.
Definition: CGCall.cpp:1623
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:670
A saved depth on the scope stack.
Definition: EHScopeStack.h:101
bool encloses(stable_iterator I) const
Returns true if this scope encloses I.
Definition: EHScopeStack.h:118
stable_iterator getInnermostNormalCleanup() const
Returns the innermost normal cleanup on the stack, or stable_end() if there are no normal cleanups.
Definition: EHScopeStack.h:370
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:393
bool empty() const
Determines whether the exception-scopes stack is empty.
Definition: EHScopeStack.h:359
bool hasNormalCleanups() const
Determines whether there are any normal cleanups on the stack.
Definition: EHScopeStack.h:364
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
Definition: EHScopeStack.h:398
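A hedged sketch of the stable_iterator queries above, roughly in the spirit of isObviouslyBranchWithoutCleanups; EHStack and Dest are assumed to be the function's EHScopeStack and a JumpDest:
  EHScopeStack::stable_iterator Top = EHStack.getInnermostNormalCleanup();
  if (Top == EHScopeStack::stable_end() ||
      Top.encloses(Dest.getScopeDepth())) {
    // No normal cleanup intervenes: a plain EmitBranch would suffice.
  }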
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:351
LValue - This represents an lvalue reference.
Definition: CGValue.h:171
Address getAddress(CodeGenFunction &CGF) const
Definition: CGValue.h:350
llvm::Value * getPointer(CodeGenFunction &CGF) const
Definition: CGValue.h:346
void pop()
End the current loop.
Definition: CGLoopInfo.cpp:823
void push(llvm::BasicBlock *Header, const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc)
Begin a new structured loop.
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:39
bool isScalar() const
Definition: CGValue.h:54
static RValue get(llvm::Value *V)
Definition: CGValue.h:89
bool isAggregate() const
Definition: CGValue.h:56
Address getAggregateAddress() const
getAggregateAddress() - Return the Address of the aggregate.
Definition: CGValue.h:73
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:61
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:68
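The three shapes an RValue can take, sketched with the accessors above; RV is assumed to come from something like EmitAnyExpr:
  if (RV.isScalar()) {
    llvm::Value *V = RV.getScalarVal();   // a single LLVM value
    (void)V;
  } else if (RV.isAggregate()) {
    Address A = RV.getAggregateAddress(); // the value lives in memory
    (void)A;
  } else {
    std::pair<llvm::Value *, llvm::Value *> C = RV.getComplexVal(); // real/imag
    (void)C;
  }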
void disableSanitizerForGlobal(llvm::GlobalVariable *GV)
virtual void addReturnRegisterOutputs(CodeGen::CodeGenFunction &CGF, CodeGen::LValue ReturnValue, std::string &Constraints, std::vector< llvm::Type * > &ResultRegTypes, std::vector< llvm::Type * > &ResultTruncRegTypes, std::vector< CodeGen::LValue > &ResultRegDests, std::string &AsmString, unsigned NumOutputs) const
Adds constraints and types for result registers.
Definition: TargetInfo.h:179
virtual bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, llvm::Type *Ty) const
Target hook to decide whether an inline asm operand can be passed by value.
Definition: TargetInfo.h:173
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1602
Stmt *const * const_body_iterator
Definition: Stmt.h:1667
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1059
ContinueStmt - This represents a continue.
Definition: Stmt.h:2944
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Definition: DeclBase.h:2340
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1248
ValueDecl * getDecl()
Definition: Expr.h:1316
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition: Stmt.h:1493
SourceLocation getBodyRBrace() const
getBodyRBrace - Gets the right brace of the body, if a body exists.
Definition: DeclBase.cpp:1041
virtual bool hasBody() const
Returns true if this Decl represents a declaration for a body of code, such as a function or method d...
Definition: DeclBase.h:1082
SourceLocation getLocation() const
Definition: DeclBase.h:444
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1547
DoStmt - This represents a 'do/while' stmt.
Definition: Stmt.h:2719
This represents one expression.
Definition: Expr.h:110
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition: Expr.cpp:3058
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx, SmallVectorImpl< PartialDiagnosticAt > *Diag=nullptr) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3027
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:267
QualType getType() const
Definition: Expr.h:142
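A sketch combining the folding helpers above; an Expr *E and an ASTContext &Ctx are assumed:
  Expr *Stripped = E->IgnoreParenNoopCasts(Ctx);
  if (Stripped->isEvaluatable(Ctx)) {
    // isEvaluatable guarantees the fold below succeeds without side effects.
    llvm::APSInt Value = Stripped->EvaluateKnownConstInt(Ctx);
    (void)Value;
  }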
Represents a member of a struct/union/class.
Definition: Decl.h:3015
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition: Stmt.h:2775
const Expr * getSubExpr() const
Definition: Expr.h:1039
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition: Type.h:3794
CallingConv getCallConv() const
Definition: Type.h:4089
This represents a GCC inline-assembly statement extension.
Definition: Stmt.h:3253
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
GotoStmt - This represents a direct goto.
Definition: Stmt.h:2856
IfStmt - This represents an if/then/else.
Definition: Stmt.h:2132
IndirectGotoStmt - This represents an indirect goto.
Definition: Stmt.h:2895
Represents the declaration of a label.
Definition: Decl.h:497
LabelStmt * getStmt() const
Definition: Decl.h:521
LabelStmt - Represents a label, which has a substatement.
Definition: Stmt.h:2025
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:83
bool assumeFunctionsAreConvergent() const
Definition: LangOptions.h:579
Represents a point when we exit a loop.
Definition: ProgramPoint.h:711
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition: Decl.h:275
If a crash happens while one of these objects are live, the message is printed out along with the spe...
A (possibly-)qualified type.
Definition: Type.h:736
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:803
QualType getCanonicalType() const
Definition: Type.h:6833
The collection of all-type qualifiers we support.
Definition: Type.h:146
Represents a struct/union/class.
Definition: Decl.h:4117
field_range fields() const
Definition: Decl.h:4323
field_iterator field_begin() const
Definition: Decl.cpp:5006
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition: Stmt.h:3013
Expr * getRetValue()
Definition: Stmt.h:3044
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
This class handles loading and caching of source files into memory.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition: Stmt.h:84
@ NoStmtClass
Definition: Stmt.h:87
StmtClass getStmtClass() const
Definition: Stmt.h:1354
Likelihood
The likelihood of a branch being taken.
Definition: Stmt.h:1297
@ LH_Unlikely
Branch has the [[unlikely]] attribute.
Definition: Stmt.h:1298
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition: Stmt.h:1299
@ LH_Likely
Branch has the [[likely]] attribute.
Definition: Stmt.h:1301
static const Attr * getLikelihoodAttr(const Stmt *S)
Definition: Stmt.cpp:162
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.cpp:337
static Likelihood getLikelihood(ArrayRef< const Attr * > Attrs)
Definition: Stmt.cpp:154
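A sketch of reading the likelihood attributes listed above; Attrs is assumed to be the ArrayRef<const Attr *> handed to EmitStmt:
  Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
  if (LH == Stmt::LH_Likely) {
    // e.g. bias the weights passed to EmitBranchOnBoolExpr toward the true edge
  } else if (LH == Stmt::LH_Unlikely) {
    // e.g. bias them the other way
  }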
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1812
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.h:1982
SourceLocation getLocationOfByte(unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, const TargetInfo &Target, unsigned *StartToken=nullptr, unsigned *StartTokenByteOffset=nullptr) const
getLocationOfByte - Return a source location that points to the specified byte of this string literal...
Definition: Expr.cpp:1299
StringRef getString() const
Definition: Expr.h:1889
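A hedged sketch of mapping a byte offset in an asm string back to a source location for a diagnostic, as the inline-asm emitter does; CGF, a const StringLiteral *Str, an unsigned ByteNo, and DiagID are assumptions:
  SourceLocation Loc = Str->getLocationOfByte(
      ByteNo, CGF.getContext().getSourceManager(), CGF.getLangOpts(),
      CGF.getTarget());
  CGF.CGM.getDiags().Report(Loc, DiagID); // DiagID is a hypothetical diagnostic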
const SwitchCase * getNextSwitchCase() const
Definition: Stmt.h:1768
SwitchStmt - This represents a 'switch' stmt.
Definition: Stmt.h:2382
Exposes information about the current target.
Definition: TargetInfo.h:212
bool validateInputConstraint(MutableArrayRef< ConstraintInfo > OutputConstraints, ConstraintInfo &info) const
Definition: TargetInfo.cpp:811
bool resolveSymbolicName(const char *&Name, ArrayRef< ConstraintInfo > OutputConstraints, unsigned &Index) const
Definition: TargetInfo.cpp:788
StringRef getNormalizedGCCRegisterName(StringRef Name, bool ReturnCanonical=false) const
Returns the "normalized" GCC register name.
Definition: TargetInfo.cpp:673
bool validateOutputConstraint(ConstraintInfo &Info) const
Definition: TargetInfo.cpp:714
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
virtual std::string convertConstraint(const char *&Constraint) const
Definition: TargetInfo.h:1195
virtual bool isValidGCCRegisterName(StringRef Name) const
Returns whether the passed in string is a valid register name according to GCC.
Definition: TargetInfo.cpp:628
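A sketch of validating a single output constraint with the hooks above, assuming a const TargetInfo &Target; the (constraint, operand-name) constructor shape for ConstraintInfo is an assumption:
  TargetInfo::ConstraintInfo Info("=r", "out"); // ctor shape assumed
  if (!Target.validateOutputConstraint(Info)) {
    // an invalid-constraint diagnostic would be reported here
  } else if (Info.hasMatchingInput()) {
    // this output is tied ("matched") to one of the asm's inputs
  }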
bool isVoidType() const
Definition: Type.h:7352
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:7625
bool isReferenceType() const
Definition: Type.h:7045
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:651
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition: Decl.h:704
Represents a variable declaration or definition.
Definition: Decl.h:916
WhileStmt - This represents a 'while' stmt.
Definition: Stmt.h:2578
Defines the clang::TargetInfo interface.
bool Rem(InterpState &S, CodePtr OpPC)
1) Pops the RHS from the stack.
Definition: Interp.h:410
CapturedRegionKind
The different kinds of captured statement.
Definition: CapturedStmt.h:16
@ SC_Register
Definition: Specifiers.h:252
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
@ CC_SwiftAsync
Definition: Specifiers.h:289
unsigned long uint64_t
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void setScopeDepth(EHScopeStack::stable_iterator depth)
EHScopeStack::stable_iterator getScopeDepth() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:629
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:631
bool hasMatchingInput() const
Return true if this output operand has a matching (tied) input operand.
Definition: TargetInfo.h:1087
bool hasTiedOperand() const
Return true if this input operand is a matching constraint that ties it to an output operand.
Definition: TargetInfo.h:1094