//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
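  // Note: the X-macro include above expands EXPR(Type, Base) into a
  // "case Stmt::TypeClass:" label for every concrete expression node, so
  // all expression statements funnel into the block below.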
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set.  To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors.  We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();".  Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function.  This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:      EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass:   EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:      EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:     EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass:  EmitReturnStmt(cast<ReturnStmt>(*S));      break;

  case Stmt::SwitchStmtClass:  EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  case Stmt::GCCAsmStmtClass:  // Intentional fall-through.
  case Stmt::MSAsmStmtClass:   EmitAsmStmt(cast<AsmStmt>(*S));            break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPErrorDirectiveClass:
    EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
    llvm_unreachable("masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable("masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
    llvm_unreachable("parallel masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable(
        "parallel masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
    break;
  case Stmt::OMPDispatchDirectiveClass:
    llvm_unreachable("Dispatch directive not supported yet.");
    break;
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
    llvm_unreachable("teams loop directive not supported yet.");
    break;
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
    llvm_unreachable("target teams loop directive not supported yet.");
    break;
  case Stmt::OMPParallelGenericLoopDirectiveClass:
    llvm_unreachable("parallel loop directive not supported yet.");
    break;
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
    llvm_unreachable("target parallel loop directive not supported yet.");
    break;
  case Stmt::OMPParallelMaskedDirectiveClass:
    llvm_unreachable("parallel masked directive not supported yet.");
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is
/// true, this captures the expression result of the last sub-statement and
/// returns it (for use by the statement expression extension).
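/// For example, with the GNU statement-expression extension the value of
///   ({ f(); g(); })
/// is the result of its last sub-statement, the call to g().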
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here.  They are statements, but when
      // put at the end of a statement expression, they yield the value of
      // their subexpression.  Handle this by walking through all labels we
      // encounter, emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr.  Because of that, we have to emit the
        // result here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}

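/// Try to fold an "empty" basic block whose only content is an unconditional
/// branch into its single successor, deleting the block entirely. Bails out
/// when cleanups are active, since the block may be referenced by scope maps
/// and cleanup entries.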
void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->insert(std::next(CurBB->getIterator()), BB);
  else
    CurFn->insert(CurFn->end(), BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block.  If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

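/// Place 'block' right after the parent block of its first instruction user
/// (ignoring non-instruction users such as blockaddress constants), or at the
/// end of the function if no such user exists yet, then make it the current
/// insertion point.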
void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->insert(std::next(insn->getParent()->getIterator()), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->insert(CurFn->end(), block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  bool noinline = false;
  bool alwaysinline = false;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    switch (A->getKind()) {
    default:
      break;
    case attr::NoMerge:
      nomerge = true;
      break;
    case attr::NoInline:
      noinline = true;
      break;
    case attr::AlwaysInline:
      alwaysinline = true;
      break;
    case attr::MustTail:
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
      break;
    }
  }
  SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
  SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
  SaveAndRestore save_musttail(MustTailCall, musttail);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // The else branch of a consteval if statement is always the only branch
  // that can be runtime evaluated.
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just
  // emit the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  // Also, differentiate between disabled PGO and a never executed branch with
  // PGO. Assuming PGO is in use:
  // - we want to ignore the [[likely]] attribute if the branch is never
  //   executed,
  // - assuming the profile is poor, preserving the attribute may still be
  //   beneficial.
  // As an approximation, preserve the attribute only if both the branch and
  // the parent context were not executed.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t ThenCount = getProfileCount(S.getThen());
  if (!ThenCount && !getCurrentProfileCount() &&
      CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), S.getElse());
  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH);

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C != nullptr;
  bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C;
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  Expr::EvalResult Result;
  bool CondIsConstInt =
      !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition
  // as the continue block.  Otherwise, if there is no condition variable, we
  // can form the continue block now.  If there is a condition variable, we
  // can't form the continue block until after we've emitted the condition,
  // because the condition is in scope in the increment, but Sema's jump
  // diagnostics ensure that there are no continues from the condition
  // variable that jump to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for
    // the body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// If we have 'return f(...);', where both caller and callee are SwiftAsync,
/// codegen it as 'tail call ...; ret void;'.
static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder,
                                     const CGFunctionInfo *CurFnInfo) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return;
    }
  } else {
    return;
  }
  if (calleeType->getCallConv() == CallingConv::CC_SwiftAsync &&
      (CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync)) {
    auto CI = cast<llvm::CallInst>(&Builder.GetInsertBlock()->back());
    CI->setTailCallKind(llvm::CallInst::TCK_MustTail);
    Builder.CreateRetVoid();
    Builder.ClearInsertionPoint();
  }
}

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function
/// returns non-void.  Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
                        ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our
  // over-conservative rules about not jumping to statements following block
  // literals with non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();
  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
      if (auto *CE = dyn_cast<CallExpr>(RV))
        makeTailCallIfSwiftAsync(CE, Builder, CurFnInfo);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the
    // expression rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

/// EmitCaseStmtRange - If case statement range is not too big then
/// add multiple cases to switch instruction, one for each value within
/// the range. If range is too big then emit "if" condition check.
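/// For example, "case 1 ... 3:" is emitted as three separate switch cases,
/// while a wide range like "case 1 ... 1000:" is lowered to the unsigned
/// bounds check "(cond - 1) <= 999" branching to the case body.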
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
                                        ArrayRef<const Attr *> Attrs) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      else if (SwitchLikelihood)
        SwitchLikelihood->push_back(LH);

      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->insert(CurFn->end(), CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, i.e., the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  } else if (SwitchLikelihood)
    Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);

  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
                                   ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided.  This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement.  For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S, Attrs);
    return;
  }

  llvm::ConstantInt *CaseVal =
    Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));

  // Emit debuginfo for the case value if it is an enum value.
  const ConstantExpr *CE;
  if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
    CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
  else
    CE = dyn_cast<ConstantExpr>(S.getLHS());
  if (CE) {
    if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
      if (CGDebugInfo *Dbg = getDebugInfo())
        if (CGM.getCodeGenOpts().hasReducedDebugInfo())
          Dbg->EmitGlobalVariable(DE->getDecl(),
              APValue(llvm::APSInt(CaseVal->getValue())));
  }

  if (SwitchLikelihood)
    SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));

  // If the body of the case is just a 'break', try to not emit an empty block.
  // If we're profiling or we're not optimizing, leave the block in for better
  // debug and coverage analysis.
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
    if (isObviouslyBranchWithoutCleanups(Block)) {
      if (SwitchWeights)
        SwitchWeights->push_back(getProfileCount(&S));
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it
      // to the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  if (SwitchWeights)
    SwitchWeights->push_back(getProfileCount(&S));
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful
  // for code where we have many case statements nested together, i.e.:
  //  case 1:
  //    case 2:
  //      case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case which is IR intensive.  It also
  // causes deep recursion which can run into stack depth limitations.  Handle
  // sequential non-range case statements specially.
  //
  // TODO When the next case has a likelihood attribute the code returns to
  // the recursive algorithm. Maybe improve this case if it becomes common
  // practice to use a lot of attributes.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());

  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == nullptr) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
      Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));

    if (SwitchWeights)
      SwitchWeights->push_back(getProfileCount(NextCase));
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
      CaseDest = createBasicBlock("sw.bb");
      EmitBlockWithFallThrough(CaseDest, CurCase);
    }
    // Since this loop is only executed when the CaseStmt has no attributes
    // use a hard-coded value.
    if (SwitchLikelihood)
      SwitchLikelihood->push_back(Stmt::LH_None);

    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Generate a stop point for debug info if the case statement is
  // followed by a default statement. A fallthrough case before a
  // default case gets its own branch target.
  if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
    EmitStopPoint(CurCase);

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}

void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
                                      ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // default statement can be elided. This situation only happens when we've
  // constant-folded the switch.
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
  assert(DefaultBlock->empty() &&
         "EmitDefaultStmt: Default block already defined?");

  if (SwitchLikelihood)
    SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);

  EmitBlockWithFallThrough(DefaultBlock, &S);

  EmitStmt(S.getSubStmt());
}

/// CollectStatementsForCase - Given the body of a 'switch' statement and a
/// constant value that is being switched on, see if we can dead code eliminate
/// the body of the switch to a simple series of statements to emit.  Basically,
/// on a switch (5) we want to find these statements:
///    case 5:
///      printf(...);    <--
///      ++i;            <--
///      break;
///
/// and add them to the ResultStmts vector.  If it is unsafe to do this
/// transformation (for example, one of the elided statements contains a label
/// that might be jumped to), return CSFC_Failure.  If we handled it and 'S'
/// should include statements after it (e.g. the printf() line is a substmt of
/// the case) then return CSFC_FallThrough.  If we handled it and found a break
/// statement, then return CSFC_Success.
///
/// If Case is non-null, then we are looking for the specified case, checking
/// that nothing we jump over contains labels.  If Case is null, then we found
/// the case and are looking for the break.
///
/// If the recursive walk actually finds our Case, then we set FoundCase to
/// true.
1664///
1665enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1666static CSFC_Result CollectStatementsForCase(const Stmt *S,
1667 const SwitchCase *Case,
1668 bool &FoundCase,
1669 SmallVectorImpl<const Stmt*> &ResultStmts) {
1670 // If this is a null statement, just succeed.
1671 if (!S)
1672 return Case ? CSFC_Success : CSFC_FallThrough;
1673
1674 // If this is the switchcase (case 4: or default) that we're looking for, then
1675 // we're in business. Just add the substatement.
1676 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1677 if (S == Case) {
1678 FoundCase = true;
1679 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1680 ResultStmts);
1681 }
1682
1683 // Otherwise, this is some other case or default statement, just ignore it.
1684 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1685 ResultStmts);
1686 }
1687
1688 // If we are in the live part of the code and we found our break statement,
1689 // return a success!
1690 if (!Case && isa<BreakStmt>(S))
1691 return CSFC_Success;
1692
1693 // If this is a compound statement, then it might contain the SwitchCase, the
1694 // break, or neither.
1695 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1696 // Handle this as two cases: we might be looking for the SwitchCase (if so
1697 // the skipped statements must be skippable) or we might already have it.
1698 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1699 bool StartedInLiveCode = FoundCase;
1700 unsigned StartSize = ResultStmts.size();
1701
1702 // If we've not found the case yet, scan through looking for it.
1703 if (Case) {
1704 // Keep track of whether we see a skipped declaration. The code could be
1705 // using the declaration even if it is skipped, so we can't optimize out
1706 // the decl if the kept statements might refer to it.
1707 bool HadSkippedDecl = false;
1708
1709 // If we're looking for the case, just see if we can skip each of the
1710 // substatements.
1711 for (; Case && I != E; ++I) {
1712 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1713
1714 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1715 case CSFC_Failure: return CSFC_Failure;
1716 case CSFC_Success:
1717 // A successful result means that either (1) the statement doesn't
1718 // have the case and is skippable, or (2) it does contain the case value
1719 // and also contains the break to exit the switch. In the latter case,
1720 // we just verify the rest of the statements are elidable.
1721 if (FoundCase) {
1722 // If we found the case and skipped declarations, we can't do the
1723 // optimization.
1724 if (HadSkippedDecl)
1725 return CSFC_Failure;
1726
1727 for (++I; I != E; ++I)
1728 if (CodeGenFunction::ContainsLabel(*I, true))
1729 return CSFC_Failure;
1730 return CSFC_Success;
1731 }
1732 break;
1733 case CSFC_FallThrough:
1734 // If we have a fallthrough condition, then we must have found the
1735 // case and started to include statements. Consider the rest of the
1736 // statements in the compound statement as candidates for inclusion.
1737 assert(FoundCase && "Didn't find case but returned fallthrough?");
1738 // We recursively found Case, so we're not looking for it anymore.
1739 Case = nullptr;
1740
1741 // If we found the case and skipped declarations, we can't do the
1742 // optimization.
1743 if (HadSkippedDecl)
1744 return CSFC_Failure;
1745 break;
1746 }
1747 }
1748
1749 if (!FoundCase)
1750 return CSFC_Success;
1751
1752 assert(!HadSkippedDecl && "fallthrough after skipping decl");
1753 }
1754
1755 // If we have statements in our range, then we know that the statements are
1756 // live and need to be added to the set of statements we're tracking.
1757 bool AnyDecls = false;
1758 for (; I != E; ++I) {
1759 AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1760
1761 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1762 case CSFC_Failure: return CSFC_Failure;
1763 case CSFC_FallThrough:
1764 // A fallthrough result means that the statement was simple and was just
1765 // included in ResultStmts; keep adding the statements that follow.
1766 break;
1767 case CSFC_Success:
1768 // A successful result means that we found the break statement and
1769 // stopped statement inclusion. We just ensure that any leftover stmts
1770 // are skippable and return success ourselves.
1771 for (++I; I != E; ++I)
1772 if (CodeGenFunction::ContainsLabel(*I, true))
1773 return CSFC_Failure;
1774 return CSFC_Success;
1775 }
1776 }
1777
1778 // If we're about to fall out of a scope without hitting a 'break;', we
1779 // can't perform the optimization if there were any decls in that scope
1780 // (we'd lose their end-of-lifetime).
1781 if (AnyDecls) {
1782 // If the entire compound statement was live, there's one more thing we
1783 // can try before giving up: emit the whole thing as a single statement.
1784 // We can do that unless the statement contains a 'break;'.
1785 // FIXME: Such a break must be at the end of a construct within this one.
1786 // We could emit this by just ignoring the BreakStmts entirely.
1787 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1788 ResultStmts.resize(StartSize);
1789 ResultStmts.push_back(S);
1790 } else {
1791 return CSFC_Failure;
1792 }
1793 }
1794
1795 return CSFC_FallThrough;
1796 }
1797
1798 // Okay, this is some other statement that we don't handle explicitly, like a
1799 // for statement or increment etc. If we are skipping over this statement,
1800 // just verify it doesn't have labels, which would make it invalid to elide.
1801 if (Case) {
1802 if (CodeGenFunction::ContainsLabel(S, true))
1803 return CSFC_Failure;
1804 return CSFC_Success;
1805 }
1806
1807 // Otherwise, we want to include this statement. Everything is cool with that
1808 // so long as it doesn't contain a break out of the switch we're in.
1809 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1810
1811 // Otherwise, everything is great. Include the statement and tell the caller
1812 // that we fall through and include the next statement as well.
1813 ResultStmts.push_back(S);
1814 return CSFC_FallThrough;
1815}
1816
1817/// FindCaseStatementsForValue - Find the case statement being jumped to and
1818/// then invoke CollectStatementsForCase to find the list of statements to emit
1819/// for a switch on constant. See the comment above CollectStatementsForCase
1820/// for more details.
1821static bool FindCaseStatementsForValue(const SwitchStmt &S,
1822 const llvm::APSInt &ConstantCondValue,
1823 SmallVectorImpl<const Stmt*> &ResultStmts,
1824 ASTContext &C,
1825 const SwitchCase *&ResultCase) {
1826 // First step, find the switch case that is being branched to. We can do this
1827 // efficiently by scanning the SwitchCase list.
1828 const SwitchCase *Case = S.getSwitchCaseList();
1829 const DefaultStmt *DefaultCase = nullptr;
1830
1831 for (; Case; Case = Case->getNextSwitchCase()) {
1832 // It's either a default or case. Just remember the default statement in
1833 // case we're not jumping to any numbered cases.
1834 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1835 DefaultCase = DS;
1836 continue;
1837 }
1838
1839 // Check to see if this case is the one we're looking for.
1840 const CaseStmt *CS = cast<CaseStmt>(Case);
1841 // Don't handle case ranges yet.
1842 if (CS->getRHS()) return false;
1843
1844 // If we found our case, remember it as 'case'.
1845 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1846 break;
1847 }
1848
1849 // If we didn't find a matching case, we use a default if it exists, or we
1850 // elide the whole switch body!
1851 if (!Case) {
1852 // It is safe to elide the body of the switch if it doesn't contain labels
1853 // etc. If it is safe, return successfully with an empty ResultStmts list.
1854 if (!DefaultCase)
1855 return !CodeGenFunction::ContainsLabel(&S);
1856 Case = DefaultCase;
1857 }
1858
1859 // Ok, we know which case is being jumped to, try to collect all the
1860 // statements that follow it. This can fail for a variety of reasons. Also,
1861 // check to see that the recursive walk actually found our case statement.
1862 // Insane cases like this can fail to find it in the recursive walk since we
1863 // don't handle every stmt kind:
1864 // switch (4) {
1865 // while (1) {
1866 // case 4: ...
1867 bool FoundCase = false;
1868 ResultCase = Case;
1869 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1870 ResultStmts) != CSFC_Failure &&
1871 FoundCase;
1872}
1873
1874static std::optional<SmallVector<uint64_t, 16>>
1875getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
1876 // Are there enough branches to weight them?
1877 if (Likelihoods.size() <= 1)
1878 return std::nullopt;
1879
1880 uint64_t NumUnlikely = 0;
1881 uint64_t NumNone = 0;
1882 uint64_t NumLikely = 0;
1883 for (const auto LH : Likelihoods) {
1884 switch (LH) {
1885 case Stmt::LH_Unlikely:
1886 ++NumUnlikely;
1887 break;
1888 case Stmt::LH_None:
1889 ++NumNone;
1890 break;
1891 case Stmt::LH_Likely:
1892 ++NumLikely;
1893 break;
1894 }
1895 }
1896
1897 // Is there a likelihood attribute used?
1898 if (NumUnlikely == 0 && NumLikely == 0)
1899 return std::nullopt;
1900
1901 // When multiple cases share the same code they can be combined during
1902 // optimization. In that case the weights of the branch will be the sum of
1903 // the individual weights. Make sure the combined sum of all neutral cases
1904 // doesn't exceed the value of a single likely attribute.
1905 // The additions both avoid divisions by 0 and make sure the weights of None
1906 // don't exceed the weight of Likely.
1907 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
1908 const uint64_t None = Likely / (NumNone + 1);
1909 const uint64_t Unlikely = 0;
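 // Worked example (editorial note, not in the original source): with one
 // likely case and two plain cases, NumLikely = 1 and NumNone = 2, so
 //   Likely = 2147483647 / 3 = 715827882
 //   None   = 715827882 / 3  = 238609294
 // and the two neutral cases combined (477218588) still weigh less than the
 // single likely case, as the comment above requires.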
1910
1911 SmallVector<uint64_t, 16> Result;
1912 Result.reserve(Likelihoods.size());
1913 for (const auto LH : Likelihoods) {
1914 switch (LH) {
1915 case Stmt::LH_Unlikely:
1916 Result.push_back(Unlikely);
1917 break;
1918 case Stmt::LH_None:
1919 Result.push_back(None);
1920 break;
1921 case Stmt::LH_Likely:
1922 Result.push_back(Likely);
1923 break;
1924 }
1925 }
1926
1927 return Result;
1928}
1929
1930void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1931 // Handle nested switch statements.
1932 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1933 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
1934 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
1935 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1936
1937 // See if we can constant fold the condition of the switch and therefore only
1938 // emit the live case statement (if any) of the switch.
1939 llvm::APSInt ConstantCondValue;
1940 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1941 SmallVector<const Stmt*, 4> CaseStmts;
1942 const SwitchCase *Case = nullptr;
1943 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1944 getContext(), Case)) {
1945 if (Case)
1946 incrementProfileCounter(Case);
1947 RunCleanupsScope ExecutedScope(*this);
1948
1949 if (S.getInit())
1950 EmitStmt(S.getInit());
1951
1952 // Emit the condition variable if needed inside the entire cleanup scope
1953 // used by this special case for constant folded switches.
1954 if (S.getConditionVariable())
1955 EmitDecl(*S.getConditionVariable());
1956
1957 // At this point, we are no longer "within" a switch instance, so
1958 // we can temporarily enforce this to ensure that any embedded case
1959 // statements are not emitted.
1960 SwitchInsn = nullptr;
1961
1962 // Okay, we can dead code eliminate everything except this case. Emit the
1963 // specified series of statements and we're good.
1964 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1965 EmitStmt(CaseStmts[i]);
1966 incrementProfileCounter(&S);
1967
1968 // Now we want to restore the saved switch instance so that nested
1969 // switches continue to function properly
1970 SwitchInsn = SavedSwitchInsn;
1971
1972 return;
1973 }
1974 }
1975
1976 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1977
1978 RunCleanupsScope ConditionScope(*this);
1979
1980 if (S.getInit())
1981 EmitStmt(S.getInit());
1982
1983 if (S.getConditionVariable())
1984 EmitDecl(*S.getConditionVariable());
1985 llvm::Value *CondV = EmitScalarExpr(S.getCond());
1986
1987 // Create basic block to hold stuff that comes after switch
1988 // statement. We also need to create a default block now so that
1989 // explicit case ranges tests can have a place to jump to on
1990 // failure.
1991 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1992 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1993 if (PGO.haveRegionCounts()) {
1994 // Walk the SwitchCase list to find how many there are.
1995 uint64_t DefaultCount = 0;
1996 unsigned NumCases = 0;
1997 for (const SwitchCase *Case = S.getSwitchCaseList();
1998 Case;
1999 Case = Case->getNextSwitchCase()) {
2000 if (isa<DefaultStmt>(Case))
2001 DefaultCount = getProfileCount(Case);
2002 NumCases += 1;
2003 }
2004 SwitchWeights = new SmallVector<uint64_t, 16>();
2005 SwitchWeights->reserve(NumCases);
2006 // The default needs to be first. We store the edge count, so we already
2007 // know the right weight.
2008 SwitchWeights->push_back(DefaultCount);
2009 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2010 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2011 // Initialize the default case.
2012 SwitchLikelihood->push_back(Stmt::LH_None);
2013 }
2014
2015 CaseRangeBlock = DefaultBlock;
2016
2017 // Clear the insertion point to indicate we are in unreachable code.
2018 Builder.ClearInsertionPoint();
2019
2020 // All break statements jump to SwitchExit. If BreakContinueStack is
2021 // non-empty, reuse the enclosing loop's ContinueBlock.
2022 JumpDest OuterContinue;
2023 if (!BreakContinueStack.empty())
2024 OuterContinue = BreakContinueStack.back().ContinueBlock;
2025
2026 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
2027
2028 // Emit switch body.
2029 EmitStmt(S.getBody());
2030
2031 BreakContinueStack.pop_back();
2032
2033 // Update the default block in case explicit case range tests have
2034 // been chained on top.
2035 SwitchInsn->setDefaultDest(CaseRangeBlock);
2036
2037 // If a default was never emitted:
2038 if (!DefaultBlock->getParent()) {
2039 // If we have cleanups, emit the default block so that there's a
2040 // place to jump through the cleanups from.
2041 if (ConditionScope.requiresCleanups()) {
2042 EmitBlock(DefaultBlock);
2043
2044 // Otherwise, just forward the default block to the switch end.
2045 } else {
2046 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2047 delete DefaultBlock;
2048 }
2049 }
2050
2051 ConditionScope.ForceCleanup();
2052
2053 // Emit continuation.
2054 EmitBlock(SwitchExit.getBlock(), true);
2055 incrementProfileCounter(&S);
2056
2057 // If the switch has a condition wrapped by __builtin_unpredictable,
2058 // create metadata that specifies that the switch is unpredictable.
2059 // Don't bother if not optimizing because that metadata would not be used.
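 // For illustration (editorial note, not in the original source): source
 // such as
 //   switch (__builtin_unpredictable(x)) { ... }
 // reaches this path; when optimizing, the SwitchInst gets !unpredictable
 // metadata so the backend avoids prediction-friendly lowerings.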
2060 auto *Call = dyn_cast<CallExpr>(S.getCond());
2061 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2062 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2063 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2064 llvm::MDBuilder MDHelper(getLLVMContext());
2065 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2066 MDHelper.createUnpredictable());
2067 }
2068 }
2069
2070 if (SwitchWeights) {
2071 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2072 "switch weights do not match switch cases");
2073 // If there's only one jump destination there's no sense weighting it.
2074 if (SwitchWeights->size() > 1)
2075 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2076 createProfileWeights(*SwitchWeights));
2077 delete SwitchWeights;
2078 } else if (SwitchLikelihood) {
2079 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2080 "switch likelihoods do not match switch cases");
2081 std::optional<SmallVector<uint64_t, 16>> LHW =
2082 getLikelihoodWeights(*SwitchLikelihood);
2083 if (LHW) {
2084 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2085 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2086 createProfileWeights(*LHW));
2087 }
2088 delete SwitchLikelihood;
2089 }
2090 SwitchInsn = SavedSwitchInsn;
2091 SwitchWeights = SavedSwitchWeights;
2092 SwitchLikelihood = SavedSwitchLikelihood;
2093 CaseRangeBlock = SavedCRBlock;
2094}
2095
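// For illustration (editorial note, not in the original source):
// SimplifyConstraint below rewrites GCC constraint strings into LLVM's
// syntax. For example, "g" expands to "imr", the alternative separator ','
// becomes '|' (so "r,m" becomes "r|m"), and a '#' alternative is dropped up
// to the next ','.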
2096static std::string
2097SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2098 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
2099 std::string Result;
2100
2101 while (*Constraint) {
2102 switch (*Constraint) {
2103 default:
2104 Result += Target.convertConstraint(Constraint);
2105 break;
2106 // Ignore these
2107 case '*':
2108 case '?':
2109 case '!':
2110 case '=': // Will see this and the following in multi-alternative constraints.
2111 case '+':
2112 break;
2113 case '#': // Ignore the rest of the constraint alternative.
2114 while (Constraint[1] && Constraint[1] != ',')
2115 Constraint++;
2116 break;
2117 case '&':
2118 case '%':
2119 Result += *Constraint;
2120 while (Constraint[1] && Constraint[1] == *Constraint)
2121 Constraint++;
2122 break;
2123 case ',':
2124 Result += "|";
2125 break;
2126 case 'g':
2127 Result += "imr";
2128 break;
2129 case '[': {
2130 assert(OutCons &&
2131 "Must pass output names to constraints with a symbolic name");
2132 unsigned Index;
2133 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2134 assert(result && "Could not resolve symbolic name"); (void)result;
2135 Result += llvm::utostr(Index);
2136 break;
2137 }
2138 }
2139
2140 Constraint++;
2141 }
2142
2143 return Result;
2144}
2145
2146/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
2147/// as using a particular register, add that as a constraint that will be used
2148/// in this asm stmt.
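///
/// For illustration (editorial note, not in the original source): given
///   register int r asm("eax");
///   asm("..." : "=r"(r));
/// the "r" constraint is replaced by "{eax}" ("&{eax}" for an earlyclobber
/// output), pinning the operand to that register.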
2149static std::string
2150AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2151 const TargetInfo &Target, CodeGenModule &CGM,
2152 const AsmStmt &Stmt, const bool EarlyClobber,
2153 std::string *GCCReg = nullptr) {
2154 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2155 if (!AsmDeclRef)
2156 return Constraint;
2157 const ValueDecl &Value = *AsmDeclRef->getDecl();
2158 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2159 if (!Variable)
2160 return Constraint;
2161 if (Variable->getStorageClass() != SC_Register)
2162 return Constraint;
2163 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2164 if (!Attr)
2165 return Constraint;
2166 StringRef Register = Attr->getLabel();
2167 assert(Target.isValidGCCRegisterName(Register));
2168 // We're using validateOutputConstraint here because we only care if
2169 // this is a register constraint.
2170 TargetInfo::ConstraintInfo Info(Constraint, "");
2171 if (Target.validateOutputConstraint(Info) &&
2172 !Info.allowsRegister()) {
2173 CGM.ErrorUnsupported(&Stmt, "__asm__");
2174 return Constraint;
2175 }
2176 // Canonicalize the register here before returning it.
2177 Register = Target.getNormalizedGCCRegisterName(Register);
2178 if (GCCReg != nullptr)
2179 *GCCReg = Register.str();
2180 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2181}
2182
2183std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2184 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2185 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2186 if (Info.allowsRegister() || !Info.allowsMemory()) {
2187 if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2188 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2189
2190 llvm::Type *Ty = ConvertType(InputType);
2191 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2192 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2193 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2194 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2195
2196 return {Builder.CreateLoad(Builder.CreateElementBitCast(
2197 InputValue.getAddress(*this), Ty)),
2198 nullptr};
2199 }
2200 }
2201
2202 Address Addr = InputValue.getAddress(*this);
2203 ConstraintStr += '*';
2204 return {Addr.getPointer(), Addr.getElementType()};
2205}
2206
2207std::pair<llvm::Value *, llvm::Type *>
2208CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2209 const Expr *InputExpr,
2210 std::string &ConstraintStr) {
2211 // If this can't be a register or memory, i.e., has to be a constant
2212 // (immediate or symbolic), try to emit it as such.
2213 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2214 if (Info.requiresImmediateConstant()) {
2215 Expr::EvalResult EVResult;
2216 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2217
2218 llvm::APSInt IntResult;
2219 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2220 getContext()))
2221 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2222 }
2223
2224 Expr::EvalResult Result;
2225 if (InputExpr->EvaluateAsInt(Result, getContext()))
2226 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2227 nullptr};
2228 }
2229
2230 if (Info.allowsRegister() || !Info.allowsMemory())
2231 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2232 return {EmitScalarExpr(InputExpr), nullptr};
2233 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2234 return {EmitScalarExpr(InputExpr), nullptr};
2235 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2236 LValue Dest = EmitLValue(InputExpr);
2237 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2238 InputExpr->getExprLoc());
2239}
2240
2241/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2242/// asm call instruction. The !srcloc MDNode contains a list of constant
2243/// integers which are the source locations of the start of each line in the
2244/// asm.
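///
/// For illustration (editorial note, not in the original source): a two-line
/// string such as asm("movl %eax, %ebx\n\tcpuid") yields a !srcloc node with
/// two encoded locations, one per line, so backend diagnostics can point at
/// the offending line of the asm.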
2245static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2246 CodeGenFunction &CGF) {
2247 SmallVector<llvm::Metadata *, 8> Locs;
2248 // Add the location of the first line to the MDNode.
2249 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2250 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2251 StringRef StrVal = Str->getString();
2252 if (!StrVal.empty()) {
2253 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2254 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2255 unsigned StartToken = 0;
2256 unsigned ByteOffset = 0;
2257
2258 // Add the location of the start of each subsequent line of the asm to the
2259 // MDNode.
2260 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2261 if (StrVal[i] != '\n') continue;
2262 SourceLocation LineLoc = Str->getLocationOfByte(
2263 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2264 Locs.push_back(llvm::ConstantAsMetadata::get(
2265 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2266 }
2267 }
2268
2269 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2270}
2271
2272static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2273 bool HasUnwindClobber, bool ReadOnly,
2274 bool ReadNone, bool NoMerge, const AsmStmt &S,
2275 const std::vector<llvm::Type *> &ResultRegTypes,
2276 const std::vector<llvm::Type *> &ArgElemTypes,
2277 CodeGenFunction &CGF,
2278 std::vector<llvm::Value *> &RegResults) {
2279 if (!HasUnwindClobber)
2280 Result.addFnAttr(llvm::Attribute::NoUnwind);
2281
2282 if (NoMerge)
2283 Result.addFnAttr(llvm::Attribute::NoMerge);
2284 // Attach readnone and readonly attributes.
2285 if (!HasSideEffect) {
2286 if (ReadNone)
2287 Result.setDoesNotAccessMemory();
2288 else if (ReadOnly)
2289 Result.setOnlyReadsMemory();
2290 }
2291
2292 // Add elementtype attribute for indirect constraints.
2293 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2294 if (Pair.value()) {
2295 auto Attr = llvm::Attribute::get(
2296 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2297 Result.addParamAttr(Pair.index(), Attr);
2298 }
2299 }
2300
2301 // Slap the source location of the inline asm into a !srcloc metadata on the
2302 // call.
2303 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2304 Result.setMetadata("srcloc",
2305 getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2306 else {
2307 // At least put the line number on MS inline asm blobs.
2308 llvm::Constant *Loc =
2309 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2310 Result.setMetadata("srcloc",
2311 llvm::MDNode::get(CGF.getLLVMContext(),
2312 llvm::ConstantAsMetadata::get(Loc)));
2313 }
2314
2314
2315 if (CGF.getLangOpts().assumeFunctionsAreConvergent())
2316 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2317 // convergent (meaning, they may call an intrinsically convergent op, such
2318 // as bar.sync, and so can't have certain optimizations applied around
2319 // them).
2320 Result.addFnAttr(llvm::Attribute::Convergent);
2321 // Extract all of the register value results from the asm.
2322 if (ResultRegTypes.size() == 1) {
2323 RegResults.push_back(&Result);
2324 } else {
2325 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2326 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2327 RegResults.push_back(Tmp);
2328 }
2329 }
2330}
2331
2332static void
2333EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
2334 const llvm::ArrayRef<llvm::Value *> RegResults,
2335 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2336 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2337 const llvm::ArrayRef<LValue> ResultRegDests,
2338 const llvm::ArrayRef<QualType> ResultRegQualTys,
2339 const llvm::BitVector &ResultTypeRequiresCast,
2340 const llvm::BitVector &ResultRegIsFlagReg) {
2341 CGBuilderTy &Builder = CGF.Builder;
2342 CodeGenModule &CGM = CGF.CGM;
2343 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2344
2345 assert(RegResults.size() == ResultRegTypes.size());
2346 assert(RegResults.size() == ResultTruncRegTypes.size());
2347 assert(RegResults.size() == ResultRegDests.size());
2348 // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
2349 // in which case its size may grow.
2350 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2351 assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2352
2353 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2354 llvm::Value *Tmp = RegResults[i];
2355 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2356
2357 if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2358 // Target must guarantee the Value `Tmp` here is lowered to a boolean
2359 // value.
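 // For illustration (editorial note, not in the original source): on x86 an
 // output constraint such as "=@ccne" binds the operand to a condition flag;
 // the llvm.assume(Tmp < 2) emitted below tells the optimizer the lowered
 // value is only ever 0 or 1.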
2360 llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
2361 llvm::Value *IsBooleanValue =
2362 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
2363 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2364 Builder.CreateCall(FnAssume, IsBooleanValue);
2365 }
2366
2367 // If the result type of the LLVM IR asm doesn't match the result type of
2368 // the expression, do the conversion.
2369 if (ResultRegTypes[i] != TruncTy) {
2370
2371 // Truncate the integer result to the right size; note that TruncTy can be
2372 // a pointer.
2373 if (TruncTy->isFloatingPointTy())
2374 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2375 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2376 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2377 Tmp = Builder.CreateTrunc(
2378 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2379 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2380 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2381 uint64_t TmpSize =
2382 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2383 Tmp = Builder.CreatePtrToInt(
2384 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2385 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2386 } else if (TruncTy->isIntegerTy()) {
2387 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2388 } else if (TruncTy->isVectorTy()) {
2389 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2390 }
2391 }
2392
2393 LValue Dest = ResultRegDests[i];
2394 // ResultTypeRequiresCast elements correspond to the first
2395 // ResultTypeRequiresCast.size() elements of RegResults.
2396 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2397 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2398 Address A =
2399 Builder.CreateElementBitCast(Dest.getAddress(CGF), ResultRegTypes[i]);
2400 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2401 Builder.CreateStore(Tmp, A);
2402 continue;
2403 }
2404
2405 QualType Ty =
2406 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2407 if (Ty.isNull()) {
2408 const Expr *OutExpr = S.getOutputExpr(i);
2409 CGM.getDiags().Report(OutExpr->getExprLoc(),
2410 diag::err_store_value_to_reg);
2411 return;
2412 }
2413 Dest = CGF.MakeAddrLValue(A, Ty);
2414 }
2415 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2416 }
2417}
2418
2419void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2420 // Pop all cleanup blocks at the end of the asm statement.
2421 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2422
2423 // Assemble the final asm string.
2424 std::string AsmString = S.generateAsmString(getContext());
2425
2426 // Get all the output and input constraints together.
2427 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2428 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2429
2430 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2431 StringRef Name;
2432 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2433 Name = GAS->getOutputName(i);
2434 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2435 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2436 assert(IsValid && "Failed to parse output constraint");
2437 OutputConstraintInfos.push_back(Info);
2438 }
2439
2440 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2441 StringRef Name;
2442 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2443 Name = GAS->getInputName(i);
2444 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2445 bool IsValid =
2446 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2447 assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
2448 InputConstraintInfos.push_back(Info);
2449 }
2450
2451 std::string Constraints;
2452
2453 std::vector<LValue> ResultRegDests;
2454 std::vector<QualType> ResultRegQualTys;
2455 std::vector<llvm::Type *> ResultRegTypes;
2456 std::vector<llvm::Type *> ResultTruncRegTypes;
2457 std::vector<llvm::Type *> ArgTypes;
2458 std::vector<llvm::Type *> ArgElemTypes;
2459 std::vector<llvm::Value*> Args;
2460 llvm::BitVector ResultTypeRequiresCast;
2461 llvm::BitVector ResultRegIsFlagReg;
2462
2463 // Keep track of inout constraints.
2464 std::string InOutConstraints;
2465 std::vector<llvm::Value*> InOutArgs;
2466 std::vector<llvm::Type*> InOutArgTypes;
2467 std::vector<llvm::Type*> InOutArgElemTypes;
2468
2469 // Keep track of out constraints for tied input operand.
2470 std::vector<std::string> OutputConstraints;
2471
2472 // Keep track of defined physregs.
2473 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2474
2475 // An inline asm can be marked readonly if it meets the following conditions:
2476 // - it doesn't have any side effects
2477 // - it doesn't clobber memory
2478 // - it doesn't return a value by reference
2479 // It can be marked readnone if it doesn't have any input memory constraints
2480 // in addition to meeting the conditions listed above.
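 // For illustration (editorial note, not in the original source): a
 // register-only statement such as
 //   asm("add %1, %0" : "=r"(x) : "r"(y))
 // qualifies as readnone; switching the input to "m"(y) downgrades it to
 // readonly, and a "memory" clobber or by-reference output drops both.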
2481 bool ReadOnly = true, ReadNone = true;
2482
2483 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2484 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2485
2486 // Simplify the output constraint.
2487 std::string OutputConstraint(S.getOutputConstraint(i));
2488 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2489 getTarget(), &OutputConstraintInfos);
2490
2491 const Expr *OutExpr = S.getOutputExpr(i);
2492 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2493
2494 std::string GCCReg;
2495 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2496 getTarget(), CGM, S,
2497 Info.earlyClobber(),
2498 &GCCReg);
2499 // Give an error on multiple outputs to same physreg.
2500 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2501 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2502
2503 OutputConstraints.push_back(OutputConstraint);
2504 LValue Dest = EmitLValue(OutExpr);
2505 if (!Constraints.empty())
2506 Constraints += ',';
2507
2508 // If this is a register output, then make the inline asm return it
2509 // by-value. If this is a memory result, return the value by-reference.
2510 QualType QTy = OutExpr->getType();
2511 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2512 hasAggregateEvaluationKind(QTy);
2513 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2514
2515 Constraints += "=" + OutputConstraint;
2516 ResultRegQualTys.push_back(QTy);
2517 ResultRegDests.push_back(Dest);
2518
2519 bool IsFlagReg = llvm::StringRef(OutputConstraint).startswith("{@cc");
2520 ResultRegIsFlagReg.push_back(IsFlagReg);
2521
2522 llvm::Type *Ty = ConvertTypeForMem(QTy);
2523 const bool RequiresCast = Info.allowsRegister() &&
2524 (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2525 Ty->isAggregateType());
2526
2527 ResultTruncRegTypes.push_back(Ty);
2528 ResultTypeRequiresCast.push_back(RequiresCast);
2529
2530 if (RequiresCast) {
2531 unsigned Size = getContext().getTypeSize(QTy);
2532 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2533 }
2534 ResultRegTypes.push_back(Ty);
2535 // If this output is tied to an input, and if the input is larger, then
2536 // we need to set the actual result type of the inline asm node to be the
2537 // same as the input type.
2538 if (Info.hasMatchingInput()) {
2539 unsigned InputNo;
2540 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2541 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2542 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2543 break;
2544 }
2545 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2546
2547 QualType InputTy = S.getInputExpr(InputNo)->getType();
2548 QualType OutputType = OutExpr->getType();
2549
2550 uint64_t InputSize = getContext().getTypeSize(InputTy);
2551 if (getContext().getTypeSize(OutputType) < InputSize) {
2552 // Form the asm to return the value as a larger integer or fp type.
2553 ResultRegTypes.back() = ConvertType(InputTy);
2554 }
2555 }
2556 if (llvm::Type* AdjTy =
2557 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2558 ResultRegTypes.back()))
2559 ResultRegTypes.back() = AdjTy;
2560 else {
2561 CGM.getDiags().Report(S.getAsmLoc(),
2562 diag::err_asm_invalid_type_in_input)
2563 << OutExpr->getType() << OutputConstraint;
2564 }
2565
2566 // Update largest vector width for any vector types.
2567 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2568 LargestVectorWidth =
2569 std::max((uint64_t)LargestVectorWidth,
2570 VT->getPrimitiveSizeInBits().getKnownMinValue());
2571 } else {
2572 Address DestAddr = Dest.getAddress(*this);
2573 // Matrix types in memory are represented by arrays, but accessed through
2574 // vector pointers, with the alignment specified on the access operation.
2575 // For inline assembly, update pointer arguments to use vector pointers.
2576 // Otherwise there will be a mismatch if the matrix is also an
2577 // input argument, which is represented as a vector.
2578 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2579 DestAddr = Builder.CreateElementBitCast(
2580 DestAddr, ConvertType(OutExpr->getType()));
2581
2582 ArgTypes.push_back(DestAddr.getType());
2583 ArgElemTypes.push_back(DestAddr.getElementType());
2584 Args.push_back(DestAddr.getPointer());
2585 Constraints += "=*";
2586 Constraints += OutputConstraint;
2587 ReadOnly = ReadNone = false;
2588 }
2589
2590 if (Info.isReadWrite()) {
2591 InOutConstraints += ',';
2592
2593 const Expr *InputExpr = S.getOutputExpr(i);
2594 llvm::Value *Arg;
2595 llvm::Type *ArgElemType;
2596 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2597 Info, Dest, InputExpr->getType(), InOutConstraints,
2598 InputExpr->getExprLoc());
2599
2600 if (llvm::Type* AdjTy =
2601 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2602 Arg->getType()))
2603 Arg = Builder.CreateBitCast(Arg, AdjTy);
2604
2605 // Update largest vector width for any vector types.
2606 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2607 LargestVectorWidth =
2608 std::max((uint64_t)LargestVectorWidth,
2609 VT->getPrimitiveSizeInBits().getKnownMinValue());
2610 // Only tie earlyclobber physregs.
2611 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2612 InOutConstraints += llvm::utostr(i);
2613 else
2614 InOutConstraints += OutputConstraint;
2615
2616 InOutArgTypes.push_back(Arg->getType());
2617 InOutArgElemTypes.push_back(ArgElemType);
2618 InOutArgs.push_back(Arg);
2619 }
2620 }
2621
2622 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2623 // to the return value slot. Only do this when returning in registers.
2624 if (isa<MSAsmStmt>(&S)) {
2625 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2626 if (RetAI.isDirect() || RetAI.isExtend()) {
2627 // Make a fake lvalue for the return value slot.
2628 LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
2629 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2630 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2631 ResultRegDests, AsmString, S.getNumOutputs());
2632 SawAsmBlock = true;
2633 }
2634 }
2635
2636 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2637 const Expr *InputExpr = S.getInputExpr(i);
2638
2639 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2640
2641 if (Info.allowsMemory())
2642 ReadNone = false;
2643
2644 if (!Constraints.empty())
2645 Constraints += ',';
2646
2647 // Simplify the input constraint.
2648 std::string InputConstraint(S.getInputConstraint(i));
2649 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2650 &OutputConstraintInfos);
2651
2652 InputConstraint = AddVariableConstraints(
2653 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2654 getTarget(), CGM, S, false /* No EarlyClobber */);
2655
2656 std::string ReplaceConstraint (InputConstraint);
2657 llvm::Value *Arg;
2658 llvm::Type *ArgElemType;
2659 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
2660
2661 // If this input argument is tied to a larger output result, extend the
2662 // input to be the same size as the output. The LLVM backend wants to see
2663 // the input and output of a matching constraint be the same size. Note
2664 // that GCC does not define what the top bits are here. We use zext because
2665 // that is usually cheaper, but LLVM IR should really get an anyext someday.
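 // For illustration (editorial note, not in the original source): given
 //   long out; int in;
 //   asm("..." : "=r"(out) : "0"(in));
 // the 32-bit input is zero-extended to the 64-bit output type below so both
 // sides of the matching constraint have the same size.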
2666 if (Info.hasTiedOperand()) {
2667 unsigned Output = Info.getTiedOperand();
2668 QualType OutputType = S.getOutputExpr(Output)->getType();
2669 QualType InputTy = InputExpr->getType();
2670
2671 if (getContext().getTypeSize(OutputType) >
2672 getContext().getTypeSize(InputTy)) {
2673 // Use ptrtoint as appropriate so that we can do our extension.
2674 if (isa<llvm::PointerType>(Arg->getType()))
2675 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2676 llvm::Type *OutputTy = ConvertType(OutputType);
2677 if (isa<llvm::IntegerType>(OutputTy))
2678 Arg = Builder.CreateZExt(Arg, OutputTy);
2679 else if (isa<llvm::PointerType>(OutputTy))
2680 Arg = Builder.CreateZExt(Arg, IntPtrTy);
2681 else if (OutputTy->isFloatingPointTy())
2682 Arg = Builder.CreateFPExt(Arg, OutputTy);
2683 }
2684 // Deal with the tied operands' constraint code in adjustInlineAsmType.
2685 ReplaceConstraint = OutputConstraints[Output];
2686 }
2687 if (llvm::Type* AdjTy =
2688 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2689 Arg->getType()))
2690 Arg = Builder.CreateBitCast(Arg, AdjTy);
2691 else
2692 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2693 << InputExpr->getType() << InputConstraint;
2694
2695 // Update largest vector width for any vector types.
2696 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2697 LargestVectorWidth =
2698 std::max((uint64_t)LargestVectorWidth,
2699 VT->getPrimitiveSizeInBits().getKnownMinValue());
2700
2701 ArgTypes.push_back(Arg->getType());
2702 ArgElemTypes.push_back(ArgElemType);
2703 Args.push_back(Arg);
2704 Constraints += InputConstraint;
2705 }
2706
2707 // Append the "input" part of inout constraints.
2708 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2709 ArgTypes.push_back(InOutArgTypes[i]);
2710 ArgElemTypes.push_back(InOutArgElemTypes[i]);
2711 Args.push_back(InOutArgs[i]);
2712 }
2713 Constraints += InOutConstraints;
2714
2715 // Labels
2716 SmallVector<llvm::BasicBlock *, 16> Transfer;
2717 llvm::BasicBlock *Fallthrough = nullptr;
2718 bool IsGCCAsmGoto = false;
2719 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2720 IsGCCAsmGoto = GS->isAsmGoto();
2721 if (IsGCCAsmGoto) {
2722 for (const auto *E : GS->labels()) {
2723 JumpDest Dest = getJumpDestForLabel(E->getLabel());
2724 Transfer.push_back(Dest.getBlock());
2725 if (!Constraints.empty())
2726 Constraints += ',';
2727 Constraints += "!i";
2728 }
2729 Fallthrough = createBasicBlock("asm.fallthrough");
2730 }
2731 }
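 // For illustration (editorial note, not in the original source): for
 //   asm goto("jc %l0" :::: carry);
 // each label adds a "!i" constraint and a transfer block, and the statement
 // is later emitted as a callbr whose indirect destinations include "carry"
 // and whose normal destination is "asm.fallthrough".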
2732
2733 bool HasUnwindClobber = false;
2734
2735 // Clobbers
2736 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2737 StringRef Clobber = S.getClobber(i);
2738
2739 if (Clobber == "memory")
2740 ReadOnly = ReadNone = false;
2741 else if (Clobber == "unwind") {
2742 HasUnwindClobber = true;
2743 continue;
2744 } else if (Clobber != "cc") {
2745 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2746 if (CGM.getCodeGenOpts().StackClashProtector &&
2747 getTarget().isSPRegName(Clobber)) {
2748 CGM.getDiags().Report(S.getAsmLoc(),
2749 diag::warn_stack_clash_protection_inline_asm);
2750 }
2751 }
2752
2753 if (isa<MSAsmStmt>(&S)) {
2754 if (Clobber == "eax" || Clobber == "edx") {
2755 if (Constraints.find("=&A") != std::string::npos)
2756 continue;
2757 std::string::size_type position1 =
2758 Constraints.find("={" + Clobber.str() + "}");
2759 if (position1 != std::string::npos) {
2760 Constraints.insert(position1 + 1, "&");
2761 continue;
2762 }
2763 std::string::size_type position2 = Constraints.find("=A");
2764 if (position2 != std::string::npos) {
2765 Constraints.insert(position2 + 1, "&");
2766 continue;
2767 }
2768 }
2769 }
2770 if (!Constraints.empty())
2771 Constraints += ',';
2772
2773 Constraints += "~{";
2774 Constraints += Clobber;
2775 Constraints += '}';
2776 }
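 // For illustration (editorial note, not in the original source): clobbers
 // such as
 //   asm volatile("" : : : "cc", "memory")
 // append "~{cc},~{memory}" to the constraint string built above.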
2777
2778 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
2779 "unwind clobber can't be used with asm goto");
2780
2781 // Add machine specific clobbers
2782 std::string MachineClobbers = getTarget().getClobbers();
2783 if (!MachineClobbers.empty()) {
2784 if (!Constraints.empty())
2785 Constraints += ',';
2786 Constraints += MachineClobbers;
2787 }
2788
2789 llvm::Type *ResultType;
2790 if (ResultRegTypes.empty())
2791 ResultType = VoidTy;
2792 else if (ResultRegTypes.size() == 1)
2793 ResultType = ResultRegTypes[0];
2794 else
2795 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
2796
2797 llvm::FunctionType *FTy =
2798 llvm::FunctionType::get(ResultType, ArgTypes, false);
2799
2800 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
2801
2802 llvm::InlineAsm::AsmDialect GnuAsmDialect =
2803 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
2804 ? llvm::InlineAsm::AD_ATT
2805 : llvm::InlineAsm::AD_Intel;
2806 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
2807 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
2808
2809 llvm::InlineAsm *IA = llvm::InlineAsm::get(
2810 FTy, AsmString, Constraints, HasSideEffect,
2811 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
2812 std::vector<llvm::Value*> RegResults;
2813 llvm::CallBrInst *CBR;
2814 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
2815 CBRRegResults;
2816 if (IsGCCAsmGoto) {
2817 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
2818 EmitBlock(Fallthrough);
2819 UpdateAsmCallInst(*CBR, HasSideEffect, false, ReadOnly, ReadNone,
2820 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2821 *this, RegResults);
2822 // Because we are emitting code top to bottom, we don't have enough
2823 // information at this point to know precisely whether we have a critical
2824 // edge. If we have outputs, split all indirect destinations.
2825 if (!RegResults.empty()) {
2826 unsigned i = 0;
2827 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
2828 llvm::Twine SynthName = Dest->getName() + ".split";
2829 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
2830 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
2831 Builder.SetInsertPoint(SynthBB);
2832
2833 if (ResultRegTypes.size() == 1) {
2834 CBRRegResults[SynthBB].push_back(CBR);
2835 } else {
2836 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
2837 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
2838 CBRRegResults[SynthBB].push_back(Tmp);
2839 }
2840 }
2841
2842 EmitBranch(Dest);
2843 EmitBlock(SynthBB);
2844 CBR->setIndirectDest(i++, SynthBB);
2845 }
2846 }
2847 } else if (HasUnwindClobber) {
2848 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
2849 UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
2850 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2851 *this, RegResults);
2852 } else {
2853 llvm::CallInst *Result =
2854 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
2855 UpdateAsmCallInst(*Result, HasSideEffect, false, ReadOnly, ReadNone,
2856 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2857 *this, RegResults);
2858 }
2859
2860 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
2861 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
2862 ResultRegIsFlagReg);
2863
2864 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
2865 // different insertion point; one for each indirect destination and with
2866 // CBRRegResults rather than RegResults.
2867 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
2868 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
2869 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
2870 Builder.SetInsertPoint(Succ, --(Succ->end()));
2871 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
2872 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
2873 ResultTypeRequiresCast, ResultRegIsFlagReg);
2874 }
2875 }
2876}
2877
2878LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2879 const RecordDecl *RD = S.getCapturedRecordDecl();
2880 QualType RecordTy = getContext().getRecordType(RD);
2881
2882 // Initialize the captured struct.
2883 LValue SlotLV =
2884 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2885
2886 RecordDecl::field_iterator CurField = RD->field_begin();
2887 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
2888 E = S.capture_init_end();
2889 I != E; ++I, ++CurField) {
2890 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2891 if (CurField->hasCapturedVLAType()) {
2892 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
2893 } else {
2894 EmitInitializerForField(*CurField, LV, *I);
2895 }
2896 }
2897
2898 return SlotLV;
2899}
2900
2901/// Generate an outlined function for the body of a CapturedStmt, store any
2902/// captured variables into the captured struct, and call the outlined function.
2903llvm::Function *
2904CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2905 LValue CapStruct = InitCapturedStruct(S);
2906
2907 // Emit the CapturedDecl
2908 CodeGenFunction CGF(CGM, true);
2909 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
2910 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2911 delete CGF.CapturedStmtInfo;
2912
2913 // Emit call to the helper function.
2914 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
2915
2916 return F;
2917}
2918
2919Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2920 LValue CapStruct = InitCapturedStruct(S);
2921 return CapStruct.getAddress(*this);
2922}
2923
2924/// Creates the outlined function for a CapturedStmt.
2925llvm::Function *
2926CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2927 assert(CapturedStmtInfo &&
2928 "CapturedStmtInfo should be set when generating the captured function");
2929 const CapturedDecl *CD = S.getCapturedDecl();
2930 const RecordDecl *RD = S.getCapturedRecordDecl();
2931 SourceLocation Loc = S.getBeginLoc();
2932 assert(CD->hasBody() && "missing CapturedDecl body");
2933
2934 // Build the argument list.
2935 ASTContext &Ctx = CGM.getContext();
2936 FunctionArgList Args;
2937 Args.append(CD->param_begin(), CD->param_end());
2938
2939 // Create the function declaration.
2940 const CGFunctionInfo &FuncInfo =
2941 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
2942 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2943
2944 llvm::Function *F =
2945 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2946 CapturedStmtInfo->getHelperName(), &CGM.getModule());
2947 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2948 if (CD->isNothrow())
2949 F->addFnAttr(llvm::Attribute::NoUnwind);
2950
2951 // Generate the function.
2952 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
2953 CD->getBody()->getBeginLoc());
2954 // Set the context parameter in CapturedStmtInfo.
2955 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
2956 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2957
2958 // Initialize variable-length arrays.
2959 LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2960 Ctx.getTagDeclType(RD));
2961 for (auto *FD : RD->fields()) {
2962 if (FD->hasCapturedVLAType()) {
2963 auto *ExprArg =
2964 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
2965 .getScalarVal();
2966 auto VAT = FD->getCapturedVLAType();
2967 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2968 }
2969 }
2970
2971 // If 'this' is captured, load it into CXXThisValue.
2972 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
2973 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
2974 LValue ThisLValue = EmitLValueForField(Base, FD);
2975 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
2976 }
2977
2978 PGO.assignRegionCounters(GlobalDecl(CD), F);
2979 CapturedStmtInfo->EmitBody(*this, CD->getBody());
2980 FinishFunction(CD->getBodyRBrace());
2981
2982 return F;
2983}
#define V(N, I)
Definition: ASTContext.h:3217
#define SM(sm)
Definition: Cuda.cpp:78
Defines enum values for all the target-independent builtin functions.
static std::string AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, const TargetInfo &Target, CodeGenModule &CGM, const AsmStmt &Stmt, const bool EarlyClobber, std::string *GCCReg=nullptr)
AddVariableConstraints - Look at AsmExpr and if it is a variable declared as using a particular regis...
Definition: CGStmt.cpp:2150
static bool FindCaseStatementsForValue(const SwitchStmt &S, const llvm::APSInt &ConstantCondValue, SmallVectorImpl< const Stmt * > &ResultStmts, ASTContext &C, const SwitchCase *&ResultCase)
FindCaseStatementsForValue - Find the case statement being jumped to and then invoke CollectStatement...
Definition: CGStmt.cpp:1821
static std::optional< SmallVector< uint64_t, 16 > > getLikelihoodWeights(ArrayRef< Stmt::Likelihood > Likelihoods)
Definition: CGStmt.cpp:1875
static llvm::MDNode * getAsmSrcLocInfo(const StringLiteral *Str, CodeGenFunction &CGF)
getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline asm call instruction.
Definition: CGStmt.cpp:2245
static std::string SimplifyConstraint(const char *Constraint, const TargetInfo &Target, SmallVectorImpl< TargetInfo::ConstraintInfo > *OutCons=nullptr)
Definition: CGStmt.cpp:2097
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect, bool HasUnwindClobber, bool ReadOnly, bool ReadNone, bool NoMerge, const AsmStmt &S, const std::vector< llvm::Type * > &ResultRegTypes, const std::vector< llvm::Type * > &ArgElemTypes, CodeGenFunction &CGF, std::vector< llvm::Value * > &RegResults)
Definition: CGStmt.cpp:2272
static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder, const CGFunctionInfo *CurFnInfo)
If we have 'return f(...);', where both caller and callee are SwiftAsync, codegen it as 'tail call ....
Definition: CGStmt.cpp:1255
static CSFC_Result CollectStatementsForCase(const Stmt *S, const SwitchCase *Case, bool &FoundCase, SmallVectorImpl< const Stmt * > &ResultStmts)
Definition: CGStmt.cpp:1666
static void EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S, const llvm::ArrayRef< llvm::Value * > RegResults, const llvm::ArrayRef< llvm::Type * > ResultRegTypes, const llvm::ArrayRef< llvm::Type * > ResultTruncRegTypes, const llvm::ArrayRef< LValue > ResultRegDests, const llvm::ArrayRef< QualType > ResultRegQualTys, const llvm::BitVector &ResultTypeRequiresCast, const llvm::BitVector &ResultRegIsFlagReg)
Definition: CGStmt.cpp:2333
CSFC_Result
CollectStatementsForCase - Given the body of a 'switch' statement and a constant value that is being ...
Definition: CGStmt.cpp:1665
@ CSFC_Failure
Definition: CGStmt.cpp:1665
@ CSFC_Success
Definition: CGStmt.cpp:1665
@ CSFC_FallThrough
Definition: CGStmt.cpp:1665
Defines the PrettyStackTraceEntry class, which is used to make crashes give more contextual informati...
Defines the SourceManager interface.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
bool toIntegralConstant(APSInt &Result, QualType SrcTy, const ASTContext &Ctx) const
Try to convert this value to an integral constant.
Definition: APValue.cpp:944
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:182
SourceManager & getSourceManager()
Definition: ASTContext.h:692
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
QualType getRecordType(const RecordDecl *Decl) const
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth,...
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2279
CanQualType VoidTy
Definition: ASTContext.h:1078
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition: Stmt.h:2887
Attr - This represents one attribute.
Definition: Attr.h:40
Represents an attribute applied to a statement.
Definition: Stmt.h:1892
BreakStmt - This represents a break.
Definition: Stmt.h:2767
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition: StmtCXX.h:134
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2812
Expr * getCallee()
Definition: Expr.h:2962
Represents the body of a CapturedStmt, and serves as its DeclContext.
Definition: Decl.h:4526
ImplicitParamDecl * getContextParam() const
Retrieve the parameter containing captured variables.
Definition: Decl.h:4588
bool isNothrow() const
Definition: Decl.cpp:5139
param_iterator param_end() const
Retrieve an iterator one past the last parameter decl.
Definition: Decl.h:4605
param_iterator param_begin() const
Retrieve an iterator pointing to the first parameter decl.
Definition: Decl.h:4603
Stmt * getBody() const override
getBody - If this Decl represents a declaration for a body of code, such as a function or method defi...
Definition: Decl.cpp:5136
This captures a statement into a function.
Definition: Stmt.h:3544
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition: Stmt.h:3705
CapturedRegionKind getCapturedRegionKind() const
Retrieve the captured region kind.
Definition: Stmt.cpp:1415
CaseStmt - Represent a case statement.
Definition: Stmt.h:1613
Stmt * getSubStmt()
Definition: Stmt.h:1730
Expr * getLHS()
Definition: Stmt.h:1700
Expr * getRHS()
Definition: Stmt.h:1712
bool hasProfileClangInstr() const
Check if Clang profile instrumenation is on.
bool hasReducedDebugInfo() const
Check if type and variable info should be emitted.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
An aligned address.
Definition: Address.h:29
static Address invalid()
Definition: Address.h:49
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:65
llvm::Value * getPointer() const
Definition: Address.h:54
bool isValid() const
Definition: Address.h:50
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:60
An aggregate value slot.
Definition: CGValue.h:514
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:597
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
Definition: CGDebugInfo.h:847
llvm::StoreInst * CreateFlagStore(bool Value, llvm::Value *Addr)
Emit a store to an i1 flag variable.
Definition: CGBuilder.h:129
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:99
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
Definition: CGBuilder.h:169
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:71
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:55
CGFunctionInfo - Class to encapsulate the information about a function definition.
CallingConv getASTCallingConvention() const
getASTCallingConvention() - Return the AST-specified calling convention.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD)
Gets the OpenMP-specific address of the local variable.
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S)
Emit the captured statement body.
virtual StringRef getHelperName() const
Get the name of the capture helper.
void rescopeLabels()
Change the cleanup scope of the labels in this lexical scope to match the scope of the enclosing cont...
Definition: CGStmt.cpp:668
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitForStmt(const ForStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EmitGotoStmt(const GotoStmt &S)
void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
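A statement that contains a label cannot simply be skipped even when the current point is unreachable, since a goto elsewhere may jump to that label. A small C++ illustration of the distinction (hypothetical function):

int after_goto(int x) {
  goto done;
  x = 1;    // unreachable and label-free: safe to drop
done:       // the label makes this region reachable again
  return x;
}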
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
void EmitIfStmt(const IfStmt &S)
void EmitOMPOrderedDirective(const OMPOrderedDirective &S)
void EmitOMPTargetDirective(const OMPTargetDirective &S)
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
void EmitOMPAtomicDirective(const OMPAtomicDirective &S)
void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S)
void EmitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &S)
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
static bool hasScalarEvaluationKind(QualType T)
CGCapturedStmtInfo * CapturedStmtInfo
void EmitIndirectGotoStmt(const IndirectGotoStmt &S)
void EmitDecl(const Decl &D)
EmitDecl - Emit a declaration.
void EmitCXXTryStmt(const CXXTryStmt &S)
bool EmitSimpleStmt(const Stmt *S, ArrayRef< const Attr * > Attrs)
EmitSimpleStmt - Try to emit a "simple" statement which does not necessarily require an insertion poi...
void EmitLabel(const LabelDecl *D)
EmitLabel - Emit the block for the given label.
void EmitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &S)
void EmitOMPTaskDirective(const OMPTaskDirective &S)
void EmitOMPScanDirective(const OMPScanDirective &S)
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S)
void EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitCaseStmt(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitDefaultStmt(const DefaultStmt &S, ArrayRef< const Attr * > Attrs)
const LangOptions & getLangOpts() const
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S)
void EmitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &S)
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S)
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
void SimplifyForwardingBlocks(llvm::BasicBlock *BB)
SimplifyForwardingBlocks - If the given basic block is only a branch to another basic block,...
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
void EmitOMPParallelDirective(const OMPParallelDirective &S)
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
void EmitBlockAfterUses(llvm::BasicBlock *BB)
EmitBlockAfterUses - Emit the given block somewhere hopefully near its uses, and leave the insertion ...
void EmitContinueStmt(const ContinueStmt &S)
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
llvm::Type * ConvertTypeForMem(QualType T)
void EmitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &S)
LValue MakeAddrLValueWithoutTBAA(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitOMPForSimdDirective(const OMPForSimdDirective &S)
JumpDest ReturnBlock
ReturnBlock - Unified return block.
bool checkIfLoopMustProgress(bool HasConstantCond)
Returns true if a loop must make progress, which means the mustprogress attribute can be added.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
void EmitOMPFlushDirective(const OMPFlushDirective &S)
void EmitSEHLeaveStmt(const SEHLeaveStmt &S)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if statement).
void EmitAttributedStmt(const AttributedStmt &S)
void EmitOMPCancelDirective(const OMPCancelDirective &S)
void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S)
void EmitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &S)
void EmitCaseStmtRange(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
const TargetInfo & getTarget() const
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S)
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
llvm::Value * getTypeSize(QualType Ty)
Returns the calculated size of the specified type.
void EmitLabelStmt(const LabelStmt &S)
void EmitOMPDepobjDirective(const OMPDepobjDirective &S)
const Expr * RetExpr
If a return statement is being visited, this holds the return statement's result expression.
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init)
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
void EmitOMPSingleDirective(const OMPSingleDirective &S)
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D)
Emit simple code for OpenMP directives in Simd-only mode.
void EmitOMPDistributeDirective(const OMPDistributeDirective &S)
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
void EmitOMPParallelForDirective(const OMPParallelForDirective &S)
void EmitOMPTeamsDirective(const OMPTeamsDirective &S)
uint64_t getCurrentProfileCount()
Get the profiler's current count.
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitOMPUnrollDirective(const OMPUnrollDirective &S)
void EmitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &S)
void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void EmitSwitchStmt(const SwitchStmt &S)
void EmitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &S)
void EmitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &S)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
llvm::Function * GenerateCapturedStmtFunction(const CapturedStmt &S)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
void EmitSEHTryStmt(const SEHTryStmt &S)
void EmitOMPInteropDirective(const OMPInteropDirective &S)
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
void EmitDeclStmt(const DeclStmt &S)
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S)
void EmitCoroutineBody(const CoroutineBodyStmt &S)
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitDoStmt(const DoStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S)
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
void EmitCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
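When the condition of a branch folds to a simple integer, only the live arm needs to be emitted. A sketch of the kind of condition this handles (hypothetical function):

int pick() {
  if (sizeof(void *) == 8)  // folds to a constant 0 or 1 for the target
    return 64;
  return 32;                // dead when the condition folds to 1 (no labels inside)
}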
void EmitOMPTileDirective(const OMPTileDirective &S)
JumpDest getJumpDestForLabel(const LabelDecl *S)
getJumpDestForLabel - Return the jump destination (LLVM basic block) that the specified label maps to.
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S)
llvm::Function * EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K)
llvm::Type * ConvertType(QualType T)
void EmitOMPSectionsDirective(const OMPSectionsDirective &S)
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S)
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S)
void EmitOMPSimdDirective(const OMPSimdDirective &S)
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
void EmitOMPCriticalDirective(const OMPCriticalDirective &S)
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S)
void EmitOMPForDirective(const OMPForDirective &S)
void EmitOMPMetaDirective(const OMPMetaDirective &S)
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S)
LValue InitCapturedStruct(const CapturedStmt &S)
void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S)
void EmitReturnStmt(const ReturnStmt &S)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
void EmitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &S)
void EmitOMPMasterDirective(const OMPMasterDirective &S)
void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S)
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
void EmitOMPMaskedDirective(const OMPMaskedDirective &S)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
static bool hasAggregateEvaluationKind(QualType T)
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S)
void EmitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &S)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
void EmitBreakStmt(const BreakStmt &S)
Address GenerateCapturedStmtArgument(const CapturedStmt &S)
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
const CGFunctionInfo * CurFnInfo
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const
isObviouslyBranchWithoutCleanups - Return true if a branch to the specified destination obviously has...
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitCoreturnStmt(const CoreturnStmt &S)
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S)
void EmitOMPErrorDirective(const OMPErrorDirective &S)
void EmitOMPSectionDirective(const OMPSectionDirective &S)
void EmitOMPBarrierDirective(const OMPBarrierDirective &S)
void EmitStopPoint(const Stmt *S)
EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S)
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs=std::nullopt)
EmitStmt - Emit the code for the statement.
void EmitWhileStmt(const WhileStmt &S, ArrayRef< const Attr * > Attrs=std::nullopt)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S)
llvm::LLVMContext & getLLVMContext()
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S)
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void ResolveBranchFixups(llvm::BasicBlock *Target)
void EmitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &S)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S)
void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S)
Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S)
llvm::BasicBlock * GetIndirectGotoBlock()
void EmitAsmStmt(const AsmStmt &S)
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S)
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
This class organizes the cross-function state that is used while generating LLVM code.
void SetInternalFunctionAttributes(GlobalDecl GD, llvm::Function *F, const CGFunctionInfo &FI)
Set the attributes on the LLVM function for the given decl and function info.
llvm::Module & getModule() const
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
const llvm::DataLayout & getDataLayout() const
void Error(SourceLocation loc, StringRef error)
Emit a general error that something can't be done.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
SanitizerMetadata * getSanitizerMetadata()
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys=std::nullopt)
void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn)
Assign counters to regions and configure them for PGO of a given function.
Definition: CodeGenPGO.cpp:795
void setCurrentStmt(const Stmt *S)
If the execution count for the current statement is known, record that as the current count.
Definition: CodeGenPGO.h:74
bool haveRegionCounts() const
Whether or not we have PGO region data for the current function.
Definition: CodeGenPGO.h:51
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given CGFunctionInfo.
Definition: CGCall.cpp:1618
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:671
A saved depth on the scope stack.
Definition: EHScopeStack.h:101
bool encloses(stable_iterator I) const
Returns true if this scope encloses I.
Definition: EHScopeStack.h:118
stable_iterator getInnermostNormalCleanup() const
Returns the innermost normal cleanup on the stack, or stable_end() if there are no normal cleanups.
Definition: EHScopeStack.h:361
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:384
bool empty() const
Determines whether the exception-scopes stack is empty.
Definition: EHScopeStack.h:350
bool hasNormalCleanups() const
Determines whether there are any normal cleanups on the stack.
Definition: EHScopeStack.h:355
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
Definition: EHScopeStack.h:389
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:353
LValue - This represents an lvalue reference.
Definition: CGValue.h:171
Address getAddress(CodeGenFunction &CGF) const
Definition: CGValue.h:352
llvm::Value * getPointer(CodeGenFunction &CGF) const
Definition: CGValue.h:348
void pop()
End the current loop.
Definition: CGLoopInfo.cpp:805
void push(llvm::BasicBlock *Header, const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc)
Begin a new structured loop.
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:39
bool isScalar() const
Definition: CGValue.h:54
static RValue get(llvm::Value *V)
Definition: CGValue.h:89
bool isAggregate() const
Definition: CGValue.h:56
Address getAggregateAddress() const
getAggregateAddress() - Return the Address of the aggregate.
Definition: CGValue.h:73
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:61
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:68
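Taken together, these accessors make RValue a three-way tagged value: a scalar llvm::Value, a real/imag pair for _Complex results, or an Address for aggregates. A hypothetical consumer, sketched against these internal accessors (no such function exists in clang):

llvm::Value *firstComponent(clang::CodeGen::RValue RV) {
  if (RV.isScalar())
    return RV.getScalarVal();                     // the single scalar value
  if (RV.isAggregate())
    return RV.getAggregateAddress().getPointer(); // address of the aggregate
  return RV.getComplexVal().first;                // real part of the complex pair
}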
void disableSanitizerForGlobal(llvm::GlobalVariable *GV)
virtual void addReturnRegisterOutputs(CodeGen::CodeGenFunction &CGF, CodeGen::LValue ReturnValue, std::string &Constraints, std::vector< llvm::Type * > &ResultRegTypes, std::vector< llvm::Type * > &ResultTruncRegTypes, std::vector< CodeGen::LValue > &ResultRegDests, std::string &AsmString, unsigned NumOutputs) const
Adds constraints and types for result registers.
Definition: TargetInfo.h:171
virtual bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, llvm::Type *Ty) const
Target hook to decide whether an inline asm operand can be passed by value.
Definition: TargetInfo.h:165
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1420
Stmt *const * const_body_iterator
Definition: Stmt.h:1485
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1045
ContinueStmt - This represents a continue.
Definition: Stmt.h:2737
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Definition: DeclBase.h:2208
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1238
ValueDecl * getDecl()
Definition: Expr.h:1306
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition: Stmt.h:1311
SourceLocation getBodyRBrace() const
getBodyRBrace - Gets the right brace of the body, if a body exists.
Definition: DeclBase.cpp:969
virtual bool hasBody() const
Returns true if this Decl represents a declaration for a body of code, such as a function or method d...
Definition: DeclBase.h:1058
SourceLocation getLocation() const
Definition: DeclBase.h:432
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1542
DoStmt - This represents a 'do/while' stmt.
Definition: Stmt.h:2522
This represents one expression.
Definition: Expr.h:110
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sam...
Definition: Expr.cpp:3073
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx, SmallVectorImpl< PartialDiagnosticAt > *Diag=nullptr) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3042
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:330
QualType getType() const
Definition: Expr.h:142
Represents a member of a struct/union/class.
Definition: Decl.h:2941
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition: Stmt.h:2578
const Expr * getSubExpr() const
Definition: Expr.h:1028
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition: Type.h:3694
CallingConv getCallConv() const
Definition: Type.h:3970
This represents a GCC inline-assembly statement extension.
Definition: Stmt.h:3046
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
GotoStmt - This represents a direct goto.
Definition: Stmt.h:2649
IfStmt - This represents an if/then/else.
Definition: Stmt.h:1950
IndirectGotoStmt - This represents an indirect goto.
Definition: Stmt.h:2688
Represents the declaration of a label.
Definition: Decl.h:494
LabelStmt * getStmt() const
Definition: Decl.h:518
LabelStmt - Represents a label, which has a substatement.
Definition: Stmt.h:1843
Keeps track of the various options that can be enabled, which control the dialect of C or C++ that i...
Definition: LangOptions.h:82
bool assumeFunctionsAreConvergent() const
Definition: LangOptions.h:559
Represents a point when we exit a loop.
Definition: ProgramPoint.h:711
StringRef getName() const
Get the name of the identifier for this declaration as a StringRef.
Definition: Decl.h:274
If a crash happens while one of these objects is live, the message is printed out along with the spe...
A (possibly-)qualified type.
Definition: Type.h:736
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:803
QualType getCanonicalType() const
Definition: Type.h:6701
The collection of all-type qualifiers we support.
Definition: Type.h:146
Represents a struct/union/class.
Definition: Decl.h:3998
field_range fields() const
Definition: Decl.h:4225
field_iterator field_begin() const
Definition: Decl.cpp:4783
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition: Stmt.h:2806
Expr * getRetValue()
Definition: Stmt.h:2837
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
This class handles loading and caching of source files into memory.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition: Stmt.h:72
@ NoStmtClass
Definition: Stmt.h:75
StmtClass getStmtClass() const
Definition: Stmt.h:1177
Likelihood
The likelihood of a branch being taken.
Definition: Stmt.h:1120
@ LH_Unlikely
Branch has the [[unlikely]] attribute.
Definition: Stmt.h:1121
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition: Stmt.h:1122
@ LH_Likely
Branch has the [[likely]] attribute.
Definition: Stmt.h:1124
static const Attr * getLikelihoodAttr(const Stmt *S)
Definition: Stmt.cpp:162
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.cpp:337
static Likelihood getLikelihood(ArrayRef< const Attr * > Attrs)
Definition: Stmt.cpp:154
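LH_Likely and LH_Unlikely correspond to the C++20 [[likely]] and [[unlikely]] statement attributes, which getLikelihoodAttr/getLikelihood read off a branch. For example (C++20; hypothetical function):

int clamp_nonneg(int x) {
  if (x < 0) [[unlikely]]  // getLikelihood(...) yields LH_Unlikely here
    return 0;
  return x;                // no attribute on either branch: LH_None
}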
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1781
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.h:1953
SourceLocation getLocationOfByte(unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, const TargetInfo &Target, unsigned *StartToken=nullptr, unsigned *StartTokenByteOffset=nullptr) const
getLocationOfByte - Return a source location that points to the specified byte of this string literal...
Definition: Expr.cpp:1314
StringRef getString() const
Definition: Expr.h:1861
const SwitchCase * getNextSwitchCase() const
Definition: Stmt.h:1586
SwitchStmt - This represents a 'switch' stmt.
Definition: Stmt.h:2195
Exposes information about the current target.
Definition: TargetInfo.h:206
bool validateInputConstraint(MutableArrayRef< ConstraintInfo > OutputConstraints, ConstraintInfo &info) const
Definition: TargetInfo.cpp:810
bool resolveSymbolicName(const char *&Name, ArrayRef< ConstraintInfo > OutputConstraints, unsigned &Index) const
Definition: TargetInfo.cpp:787
StringRef getNormalizedGCCRegisterName(StringRef Name, bool ReturnCanonical=false) const
Returns the "normalized" GCC register name.
Definition: TargetInfo.cpp:672
virtual const char * getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
bool validateOutputConstraint(ConstraintInfo &Info) const
Definition: TargetInfo.cpp:713
virtual std::string convertConstraint(const char *&Constraint) const
Definition: TargetInfo.h:1167
virtual bool isValidGCCRegisterName(StringRef Name) const
Returns whether the passed in string is a valid register name according to GCC.
Definition: TargetInfo.cpp:627
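These hooks validate the pieces of a GCC-style asm statement: output constraints such as "=r", symbolic operand names, register names, and matching constraints that tie an input to an output. A sketch, assuming an x86-64 target for the mnemonic:

unsigned byteswap(unsigned v) {
  unsigned out;
  // "=r" goes through validateOutputConstraint; the "0" input is a
  // matching constraint, so hasTiedOperand() is true for it.
  asm("bswap %0" : "=r"(out) : "0"(v));
  return out;
}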
bool isVoidType() const
Definition: Type.h:7218
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:7491
bool isReferenceType() const
Definition: Type.h:6922
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:629
Represent the declaration of a variable (in which case it is an lvalue), a function (in which case it ...
Definition: Decl.h:701
Represents a variable declaration or definition.
Definition: Decl.h:913
WhileStmt - This represents a 'while' stmt.
Definition: Stmt.h:2386
Defines the clang::TargetInfo interface.
bool Rem(InterpState &S, CodePtr OpPC)
1) Pops the RHS from the stack; 2) pops the LHS from the stack; 3) pushes 'LHS % RHS' on the stack.
Definition: Interp.h:344
bool Call(InterpState &S, CodePtr &PC, const Function *Func)
Definition: Interp.h:1493
CapturedRegionKind
The different kinds of captured statement.
Definition: CapturedStmt.h:16
@ SC_Register
Definition: Specifiers.h:245
@ C
Languages that the frontend can parse and compile.
@ Result
The result type of a method or function.
@ CC_SwiftAsync
Definition: Specifiers.h:282
unsigned long uint64_t
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void setScopeDepth(EHScopeStack::stable_iterator depth)
EHScopeStack::stable_iterator getScopeDepth() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:623
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:625
bool hasMatchingInput() const
Return true if this output operand has a matching (tied) input operand.
Definition: TargetInfo.h:1059
bool hasTiedOperand() const
Return true if this input operand is a matching constraint that ties it to an output operand.
Definition: TargetInfo.h:1066