clang 20.0.0git
CGStmt.cpp
Go to the documentation of this file.
1//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Stmt nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGDebugInfo.h"
14#include "CGOpenMPRuntime.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "TargetInfo.h"
18#include "clang/AST/Attr.h"
19#include "clang/AST/Expr.h"
20#include "clang/AST/Stmt.h"
27#include "llvm/ADT/ArrayRef.h"
28#include "llvm/ADT/DenseMap.h"
29#include "llvm/ADT/SmallSet.h"
30#include "llvm/ADT/StringExtras.h"
31#include "llvm/IR/Assumptions.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/InlineAsm.h"
34#include "llvm/IR/Intrinsics.h"
35#include "llvm/IR/MDBuilder.h"
36#include "llvm/Support/SaveAndRestore.h"
37#include <optional>
38
39using namespace clang;
40using namespace CodeGen;
41
42//===----------------------------------------------------------------------===//
43// Statement Emission
44//===----------------------------------------------------------------------===//
45
46namespace llvm {
47extern cl::opt<bool> EnableSingleByteCoverage;
48} // namespace llvm
49
50void CodeGenFunction::EmitStopPoint(const Stmt *S) {
51 if (CGDebugInfo *DI = getDebugInfo()) {
53 Loc = S->getBeginLoc();
54 DI->EmitLocation(Builder, Loc);
55
56 LastStopPoint = Loc;
57 }
58}
59
61 assert(S && "Null statement?");
62 PGO.setCurrentStmt(S);
63
64 // These statements have their own debug info handling.
65 if (EmitSimpleStmt(S, Attrs))
66 return;
67
68 // Check if we are generating unreachable code.
69 if (!HaveInsertPoint()) {
70 // If so, and the statement doesn't contain a label, then we do not need to
71 // generate actual code. This is safe because (1) the current point is
72 // unreachable, so we don't need to execute the code, and (2) we've already
73 // handled the statements which update internal data structures (like the
74 // local variable map) which could be used by subsequent statements.
75 if (!ContainsLabel(S)) {
76 // Verify that any decl statements were handled as simple, they may be in
77 // scope of subsequent reachable statements.
78 assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
79 PGO.markStmtMaybeUsed(S);
80 return;
81 }
82
83 // Otherwise, make a new block to hold the code.
85 }
86
87 // Generate a stoppoint if we are emitting debug info.
89
90 // Ignore all OpenMP directives except for simd if OpenMP with Simd is
91 // enabled.
92 if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
93 if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
95 return;
96 }
97 }
98
99 switch (S->getStmtClass()) {
101 case Stmt::CXXCatchStmtClass:
102 case Stmt::SEHExceptStmtClass:
103 case Stmt::SEHFinallyStmtClass:
104 case Stmt::MSDependentExistsStmtClass:
105 llvm_unreachable("invalid statement class to emit generically");
106 case Stmt::NullStmtClass:
107 case Stmt::CompoundStmtClass:
108 case Stmt::DeclStmtClass:
109 case Stmt::LabelStmtClass:
110 case Stmt::AttributedStmtClass:
111 case Stmt::GotoStmtClass:
112 case Stmt::BreakStmtClass:
113 case Stmt::ContinueStmtClass:
114 case Stmt::DefaultStmtClass:
115 case Stmt::CaseStmtClass:
116 case Stmt::SEHLeaveStmtClass:
117 llvm_unreachable("should have emitted these statements as simple");
118
119#define STMT(Type, Base)
120#define ABSTRACT_STMT(Op)
121#define EXPR(Type, Base) \
122 case Stmt::Type##Class:
123#include "clang/AST/StmtNodes.inc"
124 {
125 // Remember the block we came in on.
126 llvm::BasicBlock *incoming = Builder.GetInsertBlock();
127 assert(incoming && "expression emission must have an insertion point");
128
129 EmitIgnoredExpr(cast<Expr>(S));
130
131 llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
132 assert(outgoing && "expression emission cleared block!");
133
134 // The expression emitters assume (reasonably!) that the insertion
135 // point is always set. To maintain that, the call-emission code
136 // for noreturn functions has to enter a new block with no
137 // predecessors. We want to kill that block and mark the current
138 // insertion point unreachable in the common case of a call like
139 // "exit();". Since expression emission doesn't otherwise create
140 // blocks with no predecessors, we can just test for that.
141 // However, we must be careful not to do this to our incoming
142 // block, because *statement* emission does sometimes create
143 // reachable blocks which will have no predecessors until later in
144 // the function. This occurs with, e.g., labels that are not
145 // reachable by fallthrough.
146 if (incoming != outgoing && outgoing->use_empty()) {
147 outgoing->eraseFromParent();
148 Builder.ClearInsertionPoint();
149 }
150 break;
151 }
152
153 case Stmt::IndirectGotoStmtClass:
154 EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
155
156 case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
157 case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
158 case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
159 case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;
160
161 case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
162
163 case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
164 case Stmt::GCCAsmStmtClass: // Intentional fall-through.
165 case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
166 case Stmt::CoroutineBodyStmtClass:
167 EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
168 break;
169 case Stmt::CoreturnStmtClass:
170 EmitCoreturnStmt(cast<CoreturnStmt>(*S));
171 break;
172 case Stmt::CapturedStmtClass: {
173 const CapturedStmt *CS = cast<CapturedStmt>(S);
175 }
176 break;
177 case Stmt::ObjCAtTryStmtClass:
178 EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
179 break;
180 case Stmt::ObjCAtCatchStmtClass:
181 llvm_unreachable(
182 "@catch statements should be handled by EmitObjCAtTryStmt");
183 case Stmt::ObjCAtFinallyStmtClass:
184 llvm_unreachable(
185 "@finally statements should be handled by EmitObjCAtTryStmt");
186 case Stmt::ObjCAtThrowStmtClass:
187 EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
188 break;
189 case Stmt::ObjCAtSynchronizedStmtClass:
190 EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
191 break;
192 case Stmt::ObjCForCollectionStmtClass:
193 EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
194 break;
195 case Stmt::ObjCAutoreleasePoolStmtClass:
196 EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
197 break;
198
199 case Stmt::CXXTryStmtClass:
200 EmitCXXTryStmt(cast<CXXTryStmt>(*S));
201 break;
202 case Stmt::CXXForRangeStmtClass:
203 EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
204 break;
205 case Stmt::SEHTryStmtClass:
206 EmitSEHTryStmt(cast<SEHTryStmt>(*S));
207 break;
208 case Stmt::OMPMetaDirectiveClass:
209 EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
210 break;
211 case Stmt::OMPCanonicalLoopClass:
212 EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
213 break;
214 case Stmt::OMPParallelDirectiveClass:
215 EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
216 break;
217 case Stmt::OMPSimdDirectiveClass:
218 EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
219 break;
220 case Stmt::OMPTileDirectiveClass:
221 EmitOMPTileDirective(cast<OMPTileDirective>(*S));
222 break;
223 case Stmt::OMPUnrollDirectiveClass:
224 EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
225 break;
226 case Stmt::OMPReverseDirectiveClass:
227 EmitOMPReverseDirective(cast<OMPReverseDirective>(*S));
228 break;
229 case Stmt::OMPInterchangeDirectiveClass:
230 EmitOMPInterchangeDirective(cast<OMPInterchangeDirective>(*S));
231 break;
232 case Stmt::OMPForDirectiveClass:
233 EmitOMPForDirective(cast<OMPForDirective>(*S));
234 break;
235 case Stmt::OMPForSimdDirectiveClass:
236 EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
237 break;
238 case Stmt::OMPSectionsDirectiveClass:
239 EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
240 break;
241 case Stmt::OMPSectionDirectiveClass:
242 EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
243 break;
244 case Stmt::OMPSingleDirectiveClass:
245 EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
246 break;
247 case Stmt::OMPMasterDirectiveClass:
248 EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
249 break;
250 case Stmt::OMPCriticalDirectiveClass:
251 EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
252 break;
253 case Stmt::OMPParallelForDirectiveClass:
254 EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
255 break;
256 case Stmt::OMPParallelForSimdDirectiveClass:
257 EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
258 break;
259 case Stmt::OMPParallelMasterDirectiveClass:
260 EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
261 break;
262 case Stmt::OMPParallelSectionsDirectiveClass:
263 EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
264 break;
265 case Stmt::OMPTaskDirectiveClass:
266 EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
267 break;
268 case Stmt::OMPTaskyieldDirectiveClass:
269 EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
270 break;
271 case Stmt::OMPErrorDirectiveClass:
272 EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
273 break;
274 case Stmt::OMPBarrierDirectiveClass:
275 EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
276 break;
277 case Stmt::OMPTaskwaitDirectiveClass:
278 EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
279 break;
280 case Stmt::OMPTaskgroupDirectiveClass:
281 EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
282 break;
283 case Stmt::OMPFlushDirectiveClass:
284 EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
285 break;
286 case Stmt::OMPDepobjDirectiveClass:
287 EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
288 break;
289 case Stmt::OMPScanDirectiveClass:
290 EmitOMPScanDirective(cast<OMPScanDirective>(*S));
291 break;
292 case Stmt::OMPOrderedDirectiveClass:
293 EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
294 break;
295 case Stmt::OMPAtomicDirectiveClass:
296 EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
297 break;
298 case Stmt::OMPTargetDirectiveClass:
299 EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
300 break;
301 case Stmt::OMPTeamsDirectiveClass:
302 EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
303 break;
304 case Stmt::OMPCancellationPointDirectiveClass:
305 EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
306 break;
307 case Stmt::OMPCancelDirectiveClass:
308 EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
309 break;
310 case Stmt::OMPTargetDataDirectiveClass:
311 EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
312 break;
313 case Stmt::OMPTargetEnterDataDirectiveClass:
314 EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
315 break;
316 case Stmt::OMPTargetExitDataDirectiveClass:
317 EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
318 break;
319 case Stmt::OMPTargetParallelDirectiveClass:
320 EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
321 break;
322 case Stmt::OMPTargetParallelForDirectiveClass:
323 EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
324 break;
325 case Stmt::OMPTaskLoopDirectiveClass:
326 EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
327 break;
328 case Stmt::OMPTaskLoopSimdDirectiveClass:
329 EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
330 break;
331 case Stmt::OMPMasterTaskLoopDirectiveClass:
332 EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
333 break;
334 case Stmt::OMPMaskedTaskLoopDirectiveClass:
335 EmitOMPMaskedTaskLoopDirective(cast<OMPMaskedTaskLoopDirective>(*S));
336 break;
337 case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
339 cast<OMPMasterTaskLoopSimdDirective>(*S));
340 break;
341 case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
343 cast<OMPMaskedTaskLoopSimdDirective>(*S));
344 break;
345 case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
347 cast<OMPParallelMasterTaskLoopDirective>(*S));
348 break;
349 case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
351 cast<OMPParallelMaskedTaskLoopDirective>(*S));
352 break;
353 case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
355 cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
356 break;
357 case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
359 cast<OMPParallelMaskedTaskLoopSimdDirective>(*S));
360 break;
361 case Stmt::OMPDistributeDirectiveClass:
362 EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
363 break;
364 case Stmt::OMPTargetUpdateDirectiveClass:
365 EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
366 break;
367 case Stmt::OMPDistributeParallelForDirectiveClass:
369 cast<OMPDistributeParallelForDirective>(*S));
370 break;
371 case Stmt::OMPDistributeParallelForSimdDirectiveClass:
373 cast<OMPDistributeParallelForSimdDirective>(*S));
374 break;
375 case Stmt::OMPDistributeSimdDirectiveClass:
376 EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
377 break;
378 case Stmt::OMPTargetParallelForSimdDirectiveClass:
380 cast<OMPTargetParallelForSimdDirective>(*S));
381 break;
382 case Stmt::OMPTargetSimdDirectiveClass:
383 EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
384 break;
385 case Stmt::OMPTeamsDistributeDirectiveClass:
386 EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
387 break;
388 case Stmt::OMPTeamsDistributeSimdDirectiveClass:
390 cast<OMPTeamsDistributeSimdDirective>(*S));
391 break;
392 case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
394 cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
395 break;
396 case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
398 cast<OMPTeamsDistributeParallelForDirective>(*S));
399 break;
400 case Stmt::OMPTargetTeamsDirectiveClass:
401 EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
402 break;
403 case Stmt::OMPTargetTeamsDistributeDirectiveClass:
405 cast<OMPTargetTeamsDistributeDirective>(*S));
406 break;
407 case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
409 cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
410 break;
411 case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
413 cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
414 break;
415 case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
417 cast<OMPTargetTeamsDistributeSimdDirective>(*S));
418 break;
419 case Stmt::OMPInteropDirectiveClass:
420 EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
421 break;
422 case Stmt::OMPDispatchDirectiveClass:
423 CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
424 break;
425 case Stmt::OMPScopeDirectiveClass:
426 EmitOMPScopeDirective(cast<OMPScopeDirective>(*S));
427 break;
428 case Stmt::OMPMaskedDirectiveClass:
429 EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
430 break;
431 case Stmt::OMPGenericLoopDirectiveClass:
432 EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
433 break;
434 case Stmt::OMPTeamsGenericLoopDirectiveClass:
435 EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
436 break;
437 case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
439 cast<OMPTargetTeamsGenericLoopDirective>(*S));
440 break;
441 case Stmt::OMPParallelGenericLoopDirectiveClass:
443 cast<OMPParallelGenericLoopDirective>(*S));
444 break;
445 case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
447 cast<OMPTargetParallelGenericLoopDirective>(*S));
448 break;
449 case Stmt::OMPParallelMaskedDirectiveClass:
450 EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
451 break;
452 case Stmt::OMPAssumeDirectiveClass:
453 EmitOMPAssumeDirective(cast<OMPAssumeDirective>(*S));
454 break;
455 case Stmt::OpenACCComputeConstructClass:
456 EmitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*S));
457 break;
458 case Stmt::OpenACCLoopConstructClass:
459 EmitOpenACCLoopConstruct(cast<OpenACCLoopConstruct>(*S));
460 break;
461 case Stmt::OpenACCCombinedConstructClass:
462 EmitOpenACCCombinedConstruct(cast<OpenACCCombinedConstruct>(*S));
463 break;
464 case Stmt::OpenACCDataConstructClass:
465 EmitOpenACCDataConstruct(cast<OpenACCDataConstruct>(*S));
466 break;
467 case Stmt::OpenACCEnterDataConstructClass:
468 EmitOpenACCEnterDataConstruct(cast<OpenACCEnterDataConstruct>(*S));
469 break;
470 case Stmt::OpenACCExitDataConstructClass:
471 EmitOpenACCExitDataConstruct(cast<OpenACCExitDataConstruct>(*S));
472 break;
473 case Stmt::OpenACCHostDataConstructClass:
474 EmitOpenACCHostDataConstruct(cast<OpenACCHostDataConstruct>(*S));
475 break;
476 case Stmt::OpenACCWaitConstructClass:
477 EmitOpenACCWaitConstruct(cast<OpenACCWaitConstruct>(*S));
478 break;
479 case Stmt::OpenACCInitConstructClass:
480 EmitOpenACCInitConstruct(cast<OpenACCInitConstruct>(*S));
481 break;
482 case Stmt::OpenACCShutdownConstructClass:
483 EmitOpenACCShutdownConstruct(cast<OpenACCShutdownConstruct>(*S));
484 break;
485 case Stmt::OpenACCSetConstructClass:
486 EmitOpenACCSetConstruct(cast<OpenACCSetConstruct>(*S));
487 break;
488 case Stmt::OpenACCUpdateConstructClass:
489 EmitOpenACCUpdateConstruct(cast<OpenACCUpdateConstruct>(*S));
490 break;
491 }
492}
493
496 switch (S->getStmtClass()) {
497 default:
498 return false;
499 case Stmt::NullStmtClass:
500 break;
501 case Stmt::CompoundStmtClass:
502 EmitCompoundStmt(cast<CompoundStmt>(*S));
503 break;
504 case Stmt::DeclStmtClass:
505 EmitDeclStmt(cast<DeclStmt>(*S));
506 break;
507 case Stmt::LabelStmtClass:
508 EmitLabelStmt(cast<LabelStmt>(*S));
509 break;
510 case Stmt::AttributedStmtClass:
511 EmitAttributedStmt(cast<AttributedStmt>(*S));
512 break;
513 case Stmt::GotoStmtClass:
514 EmitGotoStmt(cast<GotoStmt>(*S));
515 break;
516 case Stmt::BreakStmtClass:
517 EmitBreakStmt(cast<BreakStmt>(*S));
518 break;
519 case Stmt::ContinueStmtClass:
520 EmitContinueStmt(cast<ContinueStmt>(*S));
521 break;
522 case Stmt::DefaultStmtClass:
523 EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
524 break;
525 case Stmt::CaseStmtClass:
526 EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
527 break;
528 case Stmt::SEHLeaveStmtClass:
529 EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
530 break;
531 }
532 return true;
533}
534
535/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
536/// this captures the expression result of the last sub-statement and returns it
537/// (for use by the statement expression extension).
539 AggValueSlot AggSlot) {
540 PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
541 "LLVM IR generation of compound statement ('{}')");
542
543 // Keep track of the current cleanup stack depth, including debug scopes.
544 LexicalScope Scope(*this, S.getSourceRange());
545
546 return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
547}
548
551 bool GetLast,
552 AggValueSlot AggSlot) {
553
554 const Stmt *ExprResult = S.getStmtExprResult();
555 assert((!GetLast || (GetLast && ExprResult)) &&
556 "If GetLast is true then the CompoundStmt must have a StmtExprResult");
557
558 Address RetAlloca = Address::invalid();
559
560 for (auto *CurStmt : S.body()) {
561 if (GetLast && ExprResult == CurStmt) {
562 // We have to special case labels here. They are statements, but when put
563 // at the end of a statement expression, they yield the value of their
564 // subexpression. Handle this by walking through all labels we encounter,
565 // emitting them before we evaluate the subexpr.
566 // Similar issues arise for attributed statements.
567 while (!isa<Expr>(ExprResult)) {
568 if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
569 EmitLabel(LS->getDecl());
570 ExprResult = LS->getSubStmt();
571 } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
572 // FIXME: Update this if we ever have attributes that affect the
573 // semantics of an expression.
574 ExprResult = AS->getSubStmt();
575 } else {
576 llvm_unreachable("unknown value statement");
577 }
578 }
579
581
582 const Expr *E = cast<Expr>(ExprResult);
583 QualType ExprTy = E->getType();
584 if (hasAggregateEvaluationKind(ExprTy)) {
585 EmitAggExpr(E, AggSlot);
586 } else {
587 // We can't return an RValue here because there might be cleanups at
588 // the end of the StmtExpr. Because of that, we have to emit the result
589 // here into a temporary alloca.
590 RetAlloca = CreateMemTemp(ExprTy);
591 EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
592 /*IsInit*/ false);
593 }
594 } else {
595 EmitStmt(CurStmt);
596 }
597 }
598
599 return RetAlloca;
600}
601
602void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
603 llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
604
605 // If there is a cleanup stack, then we it isn't worth trying to
606 // simplify this block (we would need to remove it from the scope map
607 // and cleanup entry).
608 if (!EHStack.empty())
609 return;
610
611 // Can only simplify direct branches.
612 if (!BI || !BI->isUnconditional())
613 return;
614
615 // Can only simplify empty blocks.
616 if (BI->getIterator() != BB->begin())
617 return;
618
619 BB->replaceAllUsesWith(BI->getSuccessor(0));
620 BI->eraseFromParent();
621 BB->eraseFromParent();
622}
623
624void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
625 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
626
627 // Fall out of the current block (if necessary).
628 EmitBranch(BB);
629
630 if (IsFinished && BB->use_empty()) {
631 delete BB;
632 return;
633 }
634
635 // Place the block after the current block, if possible, or else at
636 // the end of the function.
637 if (CurBB && CurBB->getParent())
638 CurFn->insert(std::next(CurBB->getIterator()), BB);
639 else
640 CurFn->insert(CurFn->end(), BB);
641 Builder.SetInsertPoint(BB);
642}
643
644void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
645 // Emit a branch from the current block to the target one if this
646 // was a real block. If this was just a fall-through block after a
647 // terminator, don't emit it.
648 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
649
650 if (!CurBB || CurBB->getTerminator()) {
651 // If there is no insert point or the previous block is already
652 // terminated, don't touch it.
653 } else {
654 // Otherwise, create a fall-through branch.
655 Builder.CreateBr(Target);
656 }
657
658 Builder.ClearInsertionPoint();
659}
660
661void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
662 bool inserted = false;
663 for (llvm::User *u : block->users()) {
664 if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
665 CurFn->insert(std::next(insn->getParent()->getIterator()), block);
666 inserted = true;
667 break;
668 }
669 }
670
671 if (!inserted)
672 CurFn->insert(CurFn->end(), block);
673
674 Builder.SetInsertPoint(block);
675}
676
677CodeGenFunction::JumpDest
679 JumpDest &Dest = LabelMap[D];
680 if (Dest.isValid()) return Dest;
681
682 // Create, but don't insert, the new block.
683 Dest = JumpDest(createBasicBlock(D->getName()),
686 return Dest;
687}
688
690 // Add this label to the current lexical scope if we're within any
691 // normal cleanups. Jumps "in" to this label --- when permitted by
692 // the language --- may need to be routed around such cleanups.
693 if (EHStack.hasNormalCleanups() && CurLexicalScope)
694 CurLexicalScope->addLabel(D);
695
696 JumpDest &Dest = LabelMap[D];
697
698 // If we didn't need a forward reference to this label, just go
699 // ahead and create a destination at the current scope.
700 if (!Dest.isValid()) {
701 Dest = getJumpDestInCurrentScope(D->getName());
702
703 // Otherwise, we need to give this label a target depth and remove
704 // it from the branch-fixups list.
705 } else {
706 assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
707 Dest.setScopeDepth(EHStack.stable_begin());
708 ResolveBranchFixups(Dest.getBlock());
709 }
710
711 EmitBlock(Dest.getBlock());
712
713 // Emit debug info for labels.
714 if (CGDebugInfo *DI = getDebugInfo()) {
716 DI->setLocation(D->getLocation());
717 DI->EmitLabel(D, Builder);
718 }
719 }
720
721 incrementProfileCounter(D->getStmt());
722}
723
724/// Change the cleanup scope of the labels in this lexical scope to
725/// match the scope of the enclosing context.
727 assert(!Labels.empty());
728 EHScopeStack::stable_iterator innermostScope
730
731 // Change the scope depth of all the labels.
733 i = Labels.begin(), e = Labels.end(); i != e; ++i) {
734 assert(CGF.LabelMap.count(*i));
735 JumpDest &dest = CGF.LabelMap.find(*i)->second;
736 assert(dest.getScopeDepth().isValid());
737 assert(innermostScope.encloses(dest.getScopeDepth()));
738 dest.setScopeDepth(innermostScope);
739 }
740
741 // Reparent the labels if the new scope also has cleanups.
742 if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
743 ParentScope->Labels.append(Labels.begin(), Labels.end());
744 }
745}
746
747
749 EmitLabel(S.getDecl());
750
751 // IsEHa - emit eha.scope.begin if it's a side entry of a scope
752 if (getLangOpts().EHAsynch && S.isSideEntry())
754
755 EmitStmt(S.getSubStmt());
756}
757
759 bool nomerge = false;
760 bool noinline = false;
761 bool alwaysinline = false;
762 bool noconvergent = false;
763 HLSLControlFlowHintAttr::Spelling flattenOrBranch =
764 HLSLControlFlowHintAttr::SpellingNotCalculated;
765 const CallExpr *musttail = nullptr;
766
767 for (const auto *A : S.getAttrs()) {
768 switch (A->getKind()) {
769 default:
770 break;
771 case attr::NoMerge:
772 nomerge = true;
773 break;
774 case attr::NoInline:
775 noinline = true;
776 break;
777 case attr::AlwaysInline:
778 alwaysinline = true;
779 break;
780 case attr::NoConvergent:
781 noconvergent = true;
782 break;
783 case attr::MustTail: {
784 const Stmt *Sub = S.getSubStmt();
785 const ReturnStmt *R = cast<ReturnStmt>(Sub);
786 musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
787 } break;
788 case attr::CXXAssume: {
789 const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
790 if (getLangOpts().CXXAssumptions && Builder.GetInsertBlock() &&
791 !Assumption->HasSideEffects(getContext())) {
792 llvm::Value *AssumptionVal = EmitCheckedArgForAssume(Assumption);
793 Builder.CreateAssumption(AssumptionVal);
794 }
795 } break;
796 case attr::HLSLControlFlowHint: {
797 flattenOrBranch = cast<HLSLControlFlowHintAttr>(A)->getSemanticSpelling();
798 } break;
799 }
800 }
801 SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
802 SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
803 SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
804 SaveAndRestore save_noconvergent(InNoConvergentAttributedStmt, noconvergent);
805 SaveAndRestore save_musttail(MustTailCall, musttail);
806 SaveAndRestore save_flattenOrBranch(HLSLControlFlowAttr, flattenOrBranch);
807 EmitStmt(S.getSubStmt(), S.getAttrs());
808}
809
811 // If this code is reachable then emit a stop point (if generating
812 // debug info). We have to do this ourselves because we are on the
813 // "simple" statement path.
814 if (HaveInsertPoint())
815 EmitStopPoint(&S);
816
818}
819
820
822 if (const LabelDecl *Target = S.getConstantTarget()) {
824 return;
825 }
826
827 // Ensure that we have an i8* for our PHI node.
828 llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
829 Int8PtrTy, "addr");
830 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
831
832 // Get the basic block for the indirect goto.
833 llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
834
835 // The first instruction in the block has to be the PHI for the switch dest,
836 // add an entry for this branch.
837 cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
838
839 EmitBranch(IndGotoBB);
840}
841
842void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
843 const Stmt *Else = S.getElse();
844
845 // The else branch of a consteval if statement is always the only branch that
846 // can be runtime evaluated.
847 if (S.isConsteval()) {
848 const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : Else;
849 if (Executed) {
850 RunCleanupsScope ExecutedScope(*this);
851 EmitStmt(Executed);
852 }
853 return;
854 }
855
856 // C99 6.8.4.1: The first substatement is executed if the expression compares
857 // unequal to 0. The condition must be a scalar type.
858 LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
859 ApplyDebugLocation DL(*this, S.getCond());
860
861 if (S.getInit())
862 EmitStmt(S.getInit());
863
864 if (S.getConditionVariable())
865 EmitDecl(*S.getConditionVariable());
866
867 // If the condition constant folds and can be elided, try to avoid emitting
868 // the condition and the dead arm of the if/else.
869 bool CondConstant;
870 if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
871 S.isConstexpr())) {
872 // Figure out which block (then or else) is executed.
873 const Stmt *Executed = S.getThen();
874 const Stmt *Skipped = Else;
875 if (!CondConstant) // Condition false?
876 std::swap(Executed, Skipped);
877
878 // If the skipped block has no labels in it, just emit the executed block.
879 // This avoids emitting dead code and simplifies the CFG substantially.
880 if (S.isConstexpr() || !ContainsLabel(Skipped)) {
881 if (CondConstant)
883 if (Executed) {
884 RunCleanupsScope ExecutedScope(*this);
885 EmitStmt(Executed);
886 }
887 PGO.markStmtMaybeUsed(Skipped);
888 return;
889 }
890 }
891
892 // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
893 // the conditional branch.
894 llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
895 llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
896 llvm::BasicBlock *ElseBlock = ContBlock;
897 if (Else)
898 ElseBlock = createBasicBlock("if.else");
899
900 // Prefer the PGO based weights over the likelihood attribute.
901 // When the build isn't optimized the metadata isn't used, so don't generate
902 // it.
903 // Also, differentiate between disabled PGO and a never executed branch with
904 // PGO. Assuming PGO is in use:
905 // - we want to ignore the [[likely]] attribute if the branch is never
906 // executed,
907 // - assuming the profile is poor, preserving the attribute may still be
908 // beneficial.
909 // As an approximation, preserve the attribute only if both the branch and the
910 // parent context were not executed.
912 uint64_t ThenCount = getProfileCount(S.getThen());
913 if (!ThenCount && !getCurrentProfileCount() &&
914 CGM.getCodeGenOpts().OptimizationLevel)
915 LH = Stmt::getLikelihood(S.getThen(), Else);
916
917 // When measuring MC/DC, always fully evaluate the condition up front using
918 // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
919 // executing the body of the if.then or if.else. This is useful for when
920 // there is a 'return' within the body, but this is particularly beneficial
921 // when one if-stmt is nested within another if-stmt so that all of the MC/DC
922 // updates are kept linear and consistent.
923 if (!CGM.getCodeGenOpts().MCDCCoverage)
924 EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH);
925 else {
926 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
927 Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
928 }
929
930 // Emit the 'then' code.
931 EmitBlock(ThenBlock);
933 incrementProfileCounter(S.getThen());
934 else
936 {
937 RunCleanupsScope ThenScope(*this);
938 EmitStmt(S.getThen());
939 }
940 EmitBranch(ContBlock);
941
942 // Emit the 'else' code if present.
943 if (Else) {
944 {
945 // There is no need to emit line number for an unconditional branch.
946 auto NL = ApplyDebugLocation::CreateEmpty(*this);
947 EmitBlock(ElseBlock);
948 }
949 // When single byte coverage mode is enabled, add a counter to else block.
952 {
953 RunCleanupsScope ElseScope(*this);
954 EmitStmt(Else);
955 }
956 {
957 // There is no need to emit line number for an unconditional branch.
958 auto NL = ApplyDebugLocation::CreateEmpty(*this);
959 EmitBranch(ContBlock);
960 }
961 }
962
963 // Emit the continuation block for code after the if.
964 EmitBlock(ContBlock, true);
965
966 // When single byte coverage mode is enabled, add a counter to continuation
967 // block.
970}
971
bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
                                              bool HasEmptyBody) {
  // Decide whether the loop being emitted is required to make forward
  // progress (used by callers to attach loop "mustprogress" metadata).
  // \param ControllingExpression the loop condition; null means "no
  //        condition", which is treated like a constant-true condition.
  // \param HasEmptyBody whether the loop body is trivially empty.
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
    return false;

  // Now apply rules for plain C (see 6.8.5.6 in C11).
  // Loops with constant conditions do not have to make progress in any C
  // version.
  // As an extension, we consider loops whose constant expression
  // can be constant-folded.
  bool CondIsConstInt =
      !ControllingExpression ||
      (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
       Result.Val.isInt());

  // A missing condition counts as a constant-true condition here.
  bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
                                       Result.Val.getInt().getBoolValue());

  // Loops with non-constant conditions must make progress in C11 and later.
  if (getLangOpts().C11 && !CondIsConstInt)
    return true;

  // [C++26][intro.progress] (DR)
  // The implementation may assume that any thread will eventually do one of the
  // following:
  // [...]
  // - continue execution of a trivial infinite loop ([stmt.iter.general]).
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
      getLangOpts().CPlusPlus11) {
    if (HasEmptyBody && CondIsTrue) {
      // A trivial infinite loop is allowed to hang: drop the
      // function-granularity attribute so it does not contradict that.
      CurFn->removeFnAttr(llvm::Attribute::MustProgress);
      return false;
    }
    return true;
  }
  return false;
}
1012
1013// [C++26][stmt.iter.general] (DR)
1014// A trivially empty iteration statement is an iteration statement matching one
1015// of the following forms:
1016// - while ( expression ) ;
1017// - while ( expression ) { }
1018// - do ; while ( expression ) ;
1019// - do { } while ( expression ) ;
1020// - for ( init-statement expression(opt); ) ;
1021// - for ( init-statement expression(opt); ) { }
1022template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
1023 if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
1024 if (S.getInc())
1025 return false;
1026 }
1027 const Stmt *Body = S.getBody();
1028 if (!Body || isa<NullStmt>(Body))
1029 return true;
1030 if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
1031 return Compound->body_empty();
1032 return false;
1033}
1034
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit IR for a while-statement: a header block that evaluates the
  // condition, a body block, and an exit block that is also the break target.
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(LoopHeader.getBlock()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // When single byte coverage mode is enabled, add a counter to loop condition.
    incrementProfileCounter(S.getCond());

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    // If the condition scope has cleanups, stage the exit through a separate
    // block so the cleanups run before leaving the loop.
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    // Without PGO weights, fall back on the [[likely]]/[[unlikely]] hint.
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    // The condition folded to true, so a likelihood attribute on the body
    // cannot influence anything; warn instead of silently dropping it.
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    // When single byte coverage mode is enabled, add a counter to the body.
      incrementProfileCounter(S.getBody());
    else
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.

    ConvergenceTokenStack.pop_back();
}
1151
                                 ArrayRef<const Attr *> DoAttrs) {
  // Emit IR for a do-statement: the body runs first, then the condition is
  // evaluated in its own block (the continue target).
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

    EmitBlockWithFallThrough(LoopBody, S.getBody());
  else
    EmitBlockWithFallThrough(LoopBody, &S);

    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(LoopBody));

  // The body may be a singleton DeclStmt, so give it its own cleanup scope.
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());
  // When single byte coverage mode is enabled, add a counter to loop condition.
    incrementProfileCounter(S.getCond());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    // The backedge count excludes the first (fall-through) execution of the
    // body, hence the subtraction of the parent count.
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.

    ConvergenceTokenStack.pop_back();
}
1230
                                  ArrayRef<const Attr *> ForAttrs) {
  // Emit IR for a for-statement: init, condition block (also the continue
  // target when there is no increment), body, optional increment, exit.
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block.  Otherwise, if there is no condition variable, we can
  // form the continue block now.  If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    // When single byte coverage mode is enabled, add a counter to loop
    // condition.
      incrementProfileCounter(S.getCond());

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    // Without PGO weights, fall back on the [[likely]]/[[unlikely]] hint.
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }

  // When single byte coverage mode is enabled, add a counter to the body.
    incrementProfileCounter(S.getBody());
  else
  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
      incrementProfileCounter(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.

    ConvergenceTokenStack.pop_back();
}
1364
void
                                     ArrayRef<const Attr *> ForAttrs) {
  // Emit IR for a C++ range-based for: the range/begin/end statements are
  // evaluated once up front, then the loop tests begin != end, emits the loop
  // variable plus body, and increments the iterator.
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  // Without PGO weights, fall back on the [[likely]]/[[unlikely]] hint.
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
  }

  EmitBlock(ForBody);
    incrementProfileCounter(S.getBody());
  else

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.

    ConvergenceTokenStack.pop_back();
}
1460
void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  // Store an already-computed RValue into the function's return slot,
  // dispatching on whether the value is scalar, aggregate, or complex.
  if (RV.isScalar()) {
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
  } else {
                             /*init*/ true);
  }
}
1474
1475namespace {
1476// RAII struct used to save and restore a return statment's result expression.
1477struct SaveRetExprRAII {
1478 SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
1479 : OldRetExpr(CGF.RetExpr), CGF(CGF) {
1480 CGF.RetExpr = RetExpr;
1481 }
1482 ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
1483 const Expr *OldRetExpr;
1484 CodeGenFunction &CGF;
1485};
1486} // namespace
1487
1488/// Determine if the given call uses the swiftasync calling convention.
1489static bool isSwiftAsyncCallee(const CallExpr *CE) {
1490 auto calleeQualType = CE->getCallee()->getType();
1491 const FunctionType *calleeType = nullptr;
1492 if (calleeQualType->isFunctionPointerType() ||
1493 calleeQualType->isFunctionReferenceType() ||
1494 calleeQualType->isBlockPointerType() ||
1495 calleeQualType->isMemberFunctionPointerType()) {
1496 calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
1497 } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
1498 calleeType = ty;
1499 } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
1500 if (auto methodDecl = CMCE->getMethodDecl()) {
1501 // getMethodDecl() doesn't handle member pointers at the moment.
1502 calleeType = methodDecl->getType()->castAs<FunctionType>();
1503 } else {
1504 return false;
1505 }
1506 } else {
1507 return false;
1508 }
1509 return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
1510}
1511
/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void.  Fun stuff :).
  if (requiresReturnValueCheck()) {
    // Record the source location of this return so the missing-return
    // diagnostic/trap machinery can report where control left the function.
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(SLocPtr, ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();

  // If we're in a swiftasynccall function, and the return expression is a
  // call to a swiftasynccall function, mark the call as the musttail call.
  std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
  if (RV && CurFnInfo &&
    if (auto CE = dyn_cast<CallExpr>(RV)) {
      if (isSwiftAsyncCallee(CE)) {
        SaveMustTail.emplace(MustTailCall, CE);
      }
    }
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
           .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
           .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    // Store the result into the return slot according to its evaluation kind.
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar: {
      llvm::Value *Ret = EmitScalarExpr(RV);
                          /*isInit*/ true);
      else
      break;
    }
    case TEK_Complex:
                               /*isInit*/ true);
      break;
    case TEK_Aggregate:
      break;
    }
  }

  // Bookkeeping for the "all returns are simple" heuristic.
  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
}
1625
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  // Emit every declaration contained in this declaration statement.
  for (const auto *I : S.decls())
    EmitDecl(*I);
}
1635
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  // Jump to the innermost enclosing break target, running any intervening
  // cleanups along the way.
  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}
1647
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  // Jump to the innermost enclosing continue target, running any intervening
  // cleanups along the way.
  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}
1659
/// EmitCaseStmtRange - If case statement range is not too big then
/// add multiple cases to switch instruction, one for each value within
/// the range. If range is too big then emit "if" condition check.
                                        ArrayRef<const Attr *> Attrs) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      else if (SwitchLikelihood)
        SwitchLikelihood->push_back(LH);

      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->insert(CurFn->end(), CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.  (cond - LHS) <=u Range is an unsigned trick that
  // covers both bounds with a single compare.
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range, we
    // need to update the weight for the default, ie, the first case, to include
    // this case.
    (*SwitchWeights)[0] += ThisCount;
  } else if (SwitchLikelihood)
    Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);

  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}
1747
                                   ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided. This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement. For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S, Attrs);
    return;
  }

  llvm::ConstantInt *CaseVal =
      Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));

  // Emit debuginfo for the case value if it is an enum value.
  const ConstantExpr *CE;
  if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
    CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
  else
    CE = dyn_cast<ConstantExpr>(S.getLHS());
  if (CE) {
    if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
      if (CGDebugInfo *Dbg = getDebugInfo())
        Dbg->EmitGlobalVariable(DE->getDecl(),
                                APValue(llvm::APSInt(CaseVal->getValue())));
  }

  if (SwitchLikelihood)
    SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));

  // If the body of the case is just a 'break', try to not emit an empty block.
  // If we're profiling or we're not optimizing, leave the block in for better
  // debug and coverage analysis.
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
      if (SwitchWeights)
        SwitchWeights->push_back(getProfileCount(&S));
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it to
      // the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  if (SwitchWeights)
    SwitchWeights->push_back(getProfileCount(&S));
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful for
  // code where we have many case statements nested together, i.e.:
  //  case 1:
  //    case 2:
  //      case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case which is IR intensive. It also causes
  // deep recursion which can run into stack depth limitations. Handle
  // sequential non-range case statements specially.
  //
  // TODO When the next case has a likelihood attribute the code returns to the
  // recursive algorithm. Maybe improve this case if it becomes common practice
  // to use a lot of attributes.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());

  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == nullptr) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
        Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));

    if (SwitchWeights)
      SwitchWeights->push_back(getProfileCount(NextCase));
      CaseDest = createBasicBlock("sw.bb");
      EmitBlockWithFallThrough(CaseDest, CurCase);
    }
    // Since this loop is only executed when the CaseStmt has no attributes
    // use a hard-coded value.
    if (SwitchLikelihood)
      SwitchLikelihood->push_back(Stmt::LH_None);

    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Generate a stop point for debug info if the case statement is
  // followed by a default statement. A fallthrough case before a
  // default case gets its own branch target.
  if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
    EmitStopPoint(CurCase);

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}
1862
                                      ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // default statement can be elided. This situation only happens when we've
  // constant-folded the switch.
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // The switch instruction was created with a placeholder default
  // destination; it must still be empty when we fill it in here.
  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
  assert(DefaultBlock->empty() &&
         "EmitDefaultStmt: Default block already defined?");

  // Slot 0 of the likelihood vector is reserved for the default destination.
  if (SwitchLikelihood)
    SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);

  EmitBlockWithFallThrough(DefaultBlock, &S);

  EmitStmt(S.getSubStmt());
}
1884
1885/// CollectStatementsForCase - Given the body of a 'switch' statement and a
1886/// constant value that is being switched on, see if we can dead code eliminate
1887/// the body of the switch to a simple series of statements to emit. Basically,
1888/// on a switch (5) we want to find these statements:
1889/// case 5:
1890/// printf(...); <--
1891/// ++i; <--
1892/// break;
1893///
1894/// and add them to the ResultStmts vector. If it is unsafe to do this
1895/// transformation (for example, one of the elided statements contains a label
1896/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1897/// should include statements after it (e.g. the printf() line is a substmt of
1898/// the case) then return CSFC_FallThrough. If we handled it and found a break
1899/// statement, then return CSFC_Success.
1900///
1901/// If Case is non-null, then we are looking for the specified case, checking
1902/// that nothing we jump over contains labels. If Case is null, then we found
1903/// the case and are looking for the break.
1904///
1905/// If the recursive walk actually finds our Case, then we set FoundCase to
1906/// true.
1907///
1910 const SwitchCase *Case,
1911 bool &FoundCase,
1912 SmallVectorImpl<const Stmt*> &ResultStmts) {
1913 // If this is a null statement, just succeed.
1914 if (!S)
1915 return Case ? CSFC_Success : CSFC_FallThrough;
1916
1917 // If this is the switchcase (case 4: or default) that we're looking for, then
1918 // we're in business. Just add the substatement.
1919 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1920 if (S == Case) {
1921 FoundCase = true;
1922 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1923 ResultStmts);
1924 }
1925
1926 // Otherwise, this is some other case or default statement, just ignore it.
1927 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1928 ResultStmts);
1929 }
1930
1931 // If we are in the live part of the code and we found our break statement,
1932 // return a success!
1933 if (!Case && isa<BreakStmt>(S))
1934 return CSFC_Success;
1935
1936 // If this is a switch statement, then it might contain the SwitchCase, the
1937 // break, or neither.
1938 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1939 // Handle this as two cases: we might be looking for the SwitchCase (if so
1940 // the skipped statements must be skippable) or we might already have it.
1941 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1942 bool StartedInLiveCode = FoundCase;
1943 unsigned StartSize = ResultStmts.size();
1944
1945 // If we've not found the case yet, scan through looking for it.
1946 if (Case) {
1947 // Keep track of whether we see a skipped declaration. The code could be
1948 // using the declaration even if it is skipped, so we can't optimize out
1949 // the decl if the kept statements might refer to it.
1950 bool HadSkippedDecl = false;
1951
1952 // If we're looking for the case, just see if we can skip each of the
1953 // substatements.
1954 for (; Case && I != E; ++I) {
1955 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1956
1957 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1958 case CSFC_Failure: return CSFC_Failure;
1959 case CSFC_Success:
1960 // A successful result means that either 1) that the statement doesn't
1961 // have the case and is skippable, or 2) does contain the case value
1962 // and also contains the break to exit the switch. In the later case,
1963 // we just verify the rest of the statements are elidable.
1964 if (FoundCase) {
1965 // If we found the case and skipped declarations, we can't do the
1966 // optimization.
1967 if (HadSkippedDecl)
1968 return CSFC_Failure;
1969
1970 for (++I; I != E; ++I)
1971 if (CodeGenFunction::ContainsLabel(*I, true))
1972 return CSFC_Failure;
1973 return CSFC_Success;
1974 }
1975 break;
1976 case CSFC_FallThrough:
1977 // If we have a fallthrough condition, then we must have found the
1978 // case started to include statements. Consider the rest of the
1979 // statements in the compound statement as candidates for inclusion.
1980 assert(FoundCase && "Didn't find case but returned fallthrough?");
1981 // We recursively found Case, so we're not looking for it anymore.
1982 Case = nullptr;
1983
1984 // If we found the case and skipped declarations, we can't do the
1985 // optimization.
1986 if (HadSkippedDecl)
1987 return CSFC_Failure;
1988 break;
1989 }
1990 }
1991
1992 if (!FoundCase)
1993 return CSFC_Success;
1994
1995 assert(!HadSkippedDecl && "fallthrough after skipping decl");
1996 }
1997
1998 // If we have statements in our range, then we know that the statements are
1999 // live and need to be added to the set of statements we're tracking.
2000 bool AnyDecls = false;
2001 for (; I != E; ++I) {
2003
2004 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
2005 case CSFC_Failure: return CSFC_Failure;
2006 case CSFC_FallThrough:
2007 // A fallthrough result means that the statement was simple and just
2008 // included in ResultStmt, keep adding them afterwards.
2009 break;
2010 case CSFC_Success:
2011 // A successful result means that we found the break statement and
2012 // stopped statement inclusion. We just ensure that any leftover stmts
2013 // are skippable and return success ourselves.
2014 for (++I; I != E; ++I)
2015 if (CodeGenFunction::ContainsLabel(*I, true))
2016 return CSFC_Failure;
2017 return CSFC_Success;
2018 }
2019 }
2020
2021 // If we're about to fall out of a scope without hitting a 'break;', we
2022 // can't perform the optimization if there were any decls in that scope
2023 // (we'd lose their end-of-lifetime).
2024 if (AnyDecls) {
2025 // If the entire compound statement was live, there's one more thing we
2026 // can try before giving up: emit the whole thing as a single statement.
2027 // We can do that unless the statement contains a 'break;'.
2028 // FIXME: Such a break must be at the end of a construct within this one.
2029 // We could emit this by just ignoring the BreakStmts entirely.
2030 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
2031 ResultStmts.resize(StartSize);
2032 ResultStmts.push_back(S);
2033 } else {
2034 return CSFC_Failure;
2035 }
2036 }
2037
2038 return CSFC_FallThrough;
2039 }
2040
2041 // Okay, this is some other statement that we don't handle explicitly, like a
2042 // for statement or increment etc. If we are skipping over this statement,
2043 // just verify it doesn't have labels, which would make it invalid to elide.
2044 if (Case) {
2045 if (CodeGenFunction::ContainsLabel(S, true))
2046 return CSFC_Failure;
2047 return CSFC_Success;
2048 }
2049
2050 // Otherwise, we want to include this statement. Everything is cool with that
2051 // so long as it doesn't contain a break out of the switch we're in.
2053
2054 // Otherwise, everything is great. Include the statement and tell the caller
2055 // that we fall through and include the next statement as well.
2056 ResultStmts.push_back(S);
2057 return CSFC_FallThrough;
2058}
2059
2060/// FindCaseStatementsForValue - Find the case statement being jumped to and
2061/// then invoke CollectStatementsForCase to find the list of statements to emit
2062/// for a switch on constant. See the comment above CollectStatementsForCase
2063/// for more details.
2065 const llvm::APSInt &ConstantCondValue,
2066 SmallVectorImpl<const Stmt*> &ResultStmts,
2067 ASTContext &C,
2068 const SwitchCase *&ResultCase) {
2069 // First step, find the switch case that is being branched to. We can do this
2070 // efficiently by scanning the SwitchCase list.
2071 const SwitchCase *Case = S.getSwitchCaseList();
2072 const DefaultStmt *DefaultCase = nullptr;
2073
2074 for (; Case; Case = Case->getNextSwitchCase()) {
2075 // It's either a default or case. Just remember the default statement in
2076 // case we're not jumping to any numbered cases.
2077 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
2078 DefaultCase = DS;
2079 continue;
2080 }
2081
2082 // Check to see if this case is the one we're looking for.
2083 const CaseStmt *CS = cast<CaseStmt>(Case);
2084 // Don't handle case ranges yet.
2085 if (CS->getRHS()) return false;
2086
2087 // If we found our case, remember it as 'case'.
2088 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
2089 break;
2090 }
2091
2092 // If we didn't find a matching case, we use a default if it exists, or we
2093 // elide the whole switch body!
2094 if (!Case) {
2095 // It is safe to elide the body of the switch if it doesn't contain labels
2096 // etc. If it is safe, return successfully with an empty ResultStmts list.
2097 if (!DefaultCase)
2099 Case = DefaultCase;
2100 }
2101
2102 // Ok, we know which case is being jumped to, try to collect all the
2103 // statements that follow it. This can fail for a variety of reasons. Also,
2104 // check to see that the recursive walk actually found our case statement.
2105 // Insane cases like this can fail to find it in the recursive walk since we
2106 // don't handle every stmt kind:
2107 // switch (4) {
2108 // while (1) {
2109 // case 4: ...
2110 bool FoundCase = false;
2111 ResultCase = Case;
2112 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
2113 ResultStmts) != CSFC_Failure &&
2114 FoundCase;
2115}
2116
2117static std::optional<SmallVector<uint64_t, 16>>
2119 // Are there enough branches to weight them?
2120 if (Likelihoods.size() <= 1)
2121 return std::nullopt;
2122
2123 uint64_t NumUnlikely = 0;
2124 uint64_t NumNone = 0;
2125 uint64_t NumLikely = 0;
2126 for (const auto LH : Likelihoods) {
2127 switch (LH) {
2128 case Stmt::LH_Unlikely:
2129 ++NumUnlikely;
2130 break;
2131 case Stmt::LH_None:
2132 ++NumNone;
2133 break;
2134 case Stmt::LH_Likely:
2135 ++NumLikely;
2136 break;
2137 }
2138 }
2139
2140 // Is there a likelihood attribute used?
2141 if (NumUnlikely == 0 && NumLikely == 0)
2142 return std::nullopt;
2143
2144 // When multiple cases share the same code they can be combined during
2145 // optimization. In that case the weights of the branch will be the sum of
2146 // the individual weights. Make sure the combined sum of all neutral cases
2147 // doesn't exceed the value of a single likely attribute.
2148 // The additions both avoid divisions by 0 and make sure the weights of None
2149 // don't exceed the weight of Likely.
2150 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2151 const uint64_t None = Likely / (NumNone + 1);
2152 const uint64_t Unlikely = 0;
2153
2155 Result.reserve(Likelihoods.size());
2156 for (const auto LH : Likelihoods) {
2157 switch (LH) {
2158 case Stmt::LH_Unlikely:
2159 Result.push_back(Unlikely);
2160 break;
2161 case Stmt::LH_None:
2162 Result.push_back(None);
2163 break;
2164 case Stmt::LH_Likely:
2165 Result.push_back(Likely);
2166 break;
2167 }
2168 }
2169
2170 return Result;
2171}
2172
2174 // Handle nested switch statements.
2175 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2176 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2177 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2178 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2179
2180 // See if we can constant fold the condition of the switch and therefore only
2181 // emit the live case statement (if any) of the switch.
2182 llvm::APSInt ConstantCondValue;
2183 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
2185 const SwitchCase *Case = nullptr;
2186 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2187 getContext(), Case)) {
2188 if (Case)
2190 RunCleanupsScope ExecutedScope(*this);
2191
2192 if (S.getInit())
2193 EmitStmt(S.getInit());
2194
2195 // Emit the condition variable if needed inside the entire cleanup scope
2196 // used by this special case for constant folded switches.
2197 if (S.getConditionVariable())
2198 EmitDecl(*S.getConditionVariable());
2199
2200 // At this point, we are no longer "within" a switch instance, so
2201 // we can temporarily enforce this to ensure that any embedded case
2202 // statements are not emitted.
2203 SwitchInsn = nullptr;
2204
2205 // Okay, we can dead code eliminate everything except this case. Emit the
2206 // specified series of statements and we're good.
2207 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
2208 EmitStmt(CaseStmts[i]);
2210 PGO.markStmtMaybeUsed(S.getBody());
2211
2212 // Now we want to restore the saved switch instance so that nested
2213 // switches continue to function properly
2214 SwitchInsn = SavedSwitchInsn;
2215
2216 return;
2217 }
2218 }
2219
2220 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2221
2222 RunCleanupsScope ConditionScope(*this);
2223
2224 if (S.getInit())
2225 EmitStmt(S.getInit());
2226
2227 if (S.getConditionVariable())
2228 EmitDecl(*S.getConditionVariable());
2229 llvm::Value *CondV = EmitScalarExpr(S.getCond());
2230
2231 // Create basic block to hold stuff that comes after switch
2232 // statement. We also need to create a default block now so that
2233 // explicit case ranges tests can have a place to jump to on
2234 // failure.
2235 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2236 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
2237 if (PGO.haveRegionCounts()) {
2238 // Walk the SwitchCase list to find how many there are.
2239 uint64_t DefaultCount = 0;
2240 unsigned NumCases = 0;
2241 for (const SwitchCase *Case = S.getSwitchCaseList();
2242 Case;
2243 Case = Case->getNextSwitchCase()) {
2244 if (isa<DefaultStmt>(Case))
2245 DefaultCount = getProfileCount(Case);
2246 NumCases += 1;
2247 }
2248 SwitchWeights = new SmallVector<uint64_t, 16>();
2249 SwitchWeights->reserve(NumCases);
2250 // The default needs to be first. We store the edge count, so we already
2251 // know the right weight.
2252 SwitchWeights->push_back(DefaultCount);
2253 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2254 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2255 // Initialize the default case.
2256 SwitchLikelihood->push_back(Stmt::LH_None);
2257 }
2258
2259 CaseRangeBlock = DefaultBlock;
2260
2261 // Clear the insertion point to indicate we are in unreachable code.
2262 Builder.ClearInsertionPoint();
2263
2264 // All break statements jump to NextBlock. If BreakContinueStack is non-empty
2265 // then reuse last ContinueBlock.
2266 JumpDest OuterContinue;
2267 if (!BreakContinueStack.empty())
2268 OuterContinue = BreakContinueStack.back().ContinueBlock;
2269
2270 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
2271
2272 // Emit switch body.
2273 EmitStmt(S.getBody());
2274
2275 BreakContinueStack.pop_back();
2276
2277 // Update the default block in case explicit case range tests have
2278 // been chained on top.
2279 SwitchInsn->setDefaultDest(CaseRangeBlock);
2280
2281 // If a default was never emitted:
2282 if (!DefaultBlock->getParent()) {
2283 // If we have cleanups, emit the default block so that there's a
2284 // place to jump through the cleanups from.
2285 if (ConditionScope.requiresCleanups()) {
2286 EmitBlock(DefaultBlock);
2287
2288 // Otherwise, just forward the default block to the switch end.
2289 } else {
2290 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2291 delete DefaultBlock;
2292 }
2293 }
2294
2295 ConditionScope.ForceCleanup();
2296
2297 // Emit continuation.
2298 EmitBlock(SwitchExit.getBlock(), true);
2300
2301 // If the switch has a condition wrapped by __builtin_unpredictable,
2302 // create metadata that specifies that the switch is unpredictable.
2303 // Don't bother if not optimizing because that metadata would not be used.
2304 auto *Call = dyn_cast<CallExpr>(S.getCond());
2305 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2306 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2307 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2308 llvm::MDBuilder MDHelper(getLLVMContext());
2309 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2310 MDHelper.createUnpredictable());
2311 }
2312 }
2313
2314 if (SwitchWeights) {
2315 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2316 "switch weights do not match switch cases");
2317 // If there's only one jump destination there's no sense weighting it.
2318 if (SwitchWeights->size() > 1)
2319 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2320 createProfileWeights(*SwitchWeights));
2321 delete SwitchWeights;
2322 } else if (SwitchLikelihood) {
2323 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2324 "switch likelihoods do not match switch cases");
2325 std::optional<SmallVector<uint64_t, 16>> LHW =
2326 getLikelihoodWeights(*SwitchLikelihood);
2327 if (LHW) {
2328 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2329 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2330 createProfileWeights(*LHW));
2331 }
2332 delete SwitchLikelihood;
2333 }
2334 SwitchInsn = SavedSwitchInsn;
2335 SwitchWeights = SavedSwitchWeights;
2336 SwitchLikelihood = SavedSwitchLikelihood;
2337 CaseRangeBlock = SavedCRBlock;
2338}
2339
2340static std::string
2341SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2343 std::string Result;
2344
2345 while (*Constraint) {
2346 switch (*Constraint) {
2347 default:
2348 Result += Target.convertConstraint(Constraint);
2349 break;
2350 // Ignore these
2351 case '*':
2352 case '?':
2353 case '!':
2354 case '=': // Will see this and the following in mult-alt constraints.
2355 case '+':
2356 break;
2357 case '#': // Ignore the rest of the constraint alternative.
2358 while (Constraint[1] && Constraint[1] != ',')
2359 Constraint++;
2360 break;
2361 case '&':
2362 case '%':
2363 Result += *Constraint;
2364 while (Constraint[1] && Constraint[1] == *Constraint)
2365 Constraint++;
2366 break;
2367 case ',':
2368 Result += "|";
2369 break;
2370 case 'g':
2371 Result += "imr";
2372 break;
2373 case '[': {
2374 assert(OutCons &&
2375 "Must pass output names to constraints with a symbolic name");
2376 unsigned Index;
2377 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2378 assert(result && "Could not resolve symbolic name"); (void)result;
2379 Result += llvm::utostr(Index);
2380 break;
2381 }
2382 }
2383
2384 Constraint++;
2385 }
2386
2387 return Result;
2388}
2389
2390/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
2391/// as using a particular register add that as a constraint that will be used
2392/// in this asm stmt.
2393static std::string
2394AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2396 const AsmStmt &Stmt, const bool EarlyClobber,
2397 std::string *GCCReg = nullptr) {
2398 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2399 if (!AsmDeclRef)
2400 return Constraint;
2401 const ValueDecl &Value = *AsmDeclRef->getDecl();
2402 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2403 if (!Variable)
2404 return Constraint;
2406 return Constraint;
2407 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2408 if (!Attr)
2409 return Constraint;
2410 StringRef Register = Attr->getLabel();
2411 assert(Target.isValidGCCRegisterName(Register));
2412 // We're using validateOutputConstraint here because we only care if
2413 // this is a register constraint.
2414 TargetInfo::ConstraintInfo Info(Constraint, "");
2415 if (Target.validateOutputConstraint(Info) &&
2416 !Info.allowsRegister()) {
2417 CGM.ErrorUnsupported(&Stmt, "__asm__");
2418 return Constraint;
2419 }
2420 // Canonicalize the register here before returning it.
2421 Register = Target.getNormalizedGCCRegisterName(Register);
2422 if (GCCReg != nullptr)
2423 *GCCReg = Register.str();
2424 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2425}
2426
2427std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2428 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2429 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2430 if (Info.allowsRegister() || !Info.allowsMemory()) {
2432 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2433
2434 llvm::Type *Ty = ConvertType(InputType);
2435 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2436 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2437 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2438 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2439
2440 return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
2441 nullptr};
2442 }
2443 }
2444
2445 Address Addr = InputValue.getAddress();
2446 ConstraintStr += '*';
2447 return {InputValue.getPointer(*this), Addr.getElementType()};
2448}
2449
2450std::pair<llvm::Value *, llvm::Type *>
2451CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2452 const Expr *InputExpr,
2453 std::string &ConstraintStr) {
2454 // If this can't be a register or memory, i.e., has to be a constant
2455 // (immediate or symbolic), try to emit it as such.
2456 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2457 if (Info.requiresImmediateConstant()) {
2458 Expr::EvalResult EVResult;
2459 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2460
2461 llvm::APSInt IntResult;
2462 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2463 getContext()))
2464 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2465 }
2466
2468 if (InputExpr->EvaluateAsInt(Result, getContext()))
2469 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2470 nullptr};
2471 }
2472
2473 if (Info.allowsRegister() || !Info.allowsMemory())
2475 return {EmitScalarExpr(InputExpr), nullptr};
2476 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2477 return {EmitScalarExpr(InputExpr), nullptr};
2478 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2479 LValue Dest = EmitLValue(InputExpr);
2480 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2481 InputExpr->getExprLoc());
2482}
2483
2484/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2485/// asm call instruction. The !srcloc MDNode contains a list of constant
2486/// integers which are the source locations of the start of each line in the
2487/// asm.
2488static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2489 CodeGenFunction &CGF) {
2491 // Add the location of the first line to the MDNode.
2492 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2493 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2494 StringRef StrVal = Str->getString();
2495 if (!StrVal.empty()) {
2497 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2498 unsigned StartToken = 0;
2499 unsigned ByteOffset = 0;
2500
2501 // Add the location of the start of each subsequent line of the asm to the
2502 // MDNode.
2503 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2504 if (StrVal[i] != '\n') continue;
2505 SourceLocation LineLoc = Str->getLocationOfByte(
2506 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2507 Locs.push_back(llvm::ConstantAsMetadata::get(
2508 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2509 }
2510 }
2511
2512 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2513}
2514
2515static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2516 bool HasUnwindClobber, bool ReadOnly,
2517 bool ReadNone, bool NoMerge, bool NoConvergent,
2518 const AsmStmt &S,
2519 const std::vector<llvm::Type *> &ResultRegTypes,
2520 const std::vector<llvm::Type *> &ArgElemTypes,
2521 CodeGenFunction &CGF,
2522 std::vector<llvm::Value *> &RegResults) {
2523 if (!HasUnwindClobber)
2524 Result.addFnAttr(llvm::Attribute::NoUnwind);
2525
2526 if (NoMerge)
2527 Result.addFnAttr(llvm::Attribute::NoMerge);
2528 // Attach readnone and readonly attributes.
2529 if (!HasSideEffect) {
2530 if (ReadNone)
2531 Result.setDoesNotAccessMemory();
2532 else if (ReadOnly)
2533 Result.setOnlyReadsMemory();
2534 }
2535
2536 // Add elementtype attribute for indirect constraints.
2537 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2538 if (Pair.value()) {
2539 auto Attr = llvm::Attribute::get(
2540 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2541 Result.addParamAttr(Pair.index(), Attr);
2542 }
2543 }
2544
2545 // Slap the source location of the inline asm into a !srcloc metadata on the
2546 // call.
2547 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2548 Result.setMetadata("srcloc",
2549 getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2550 else {
2551 // At least put the line number on MS inline asm blobs.
2552 llvm::Constant *Loc =
2553 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2554 Result.setMetadata("srcloc",
2555 llvm::MDNode::get(CGF.getLLVMContext(),
2556 llvm::ConstantAsMetadata::get(Loc)));
2557 }
2558
2559 if (!NoConvergent && CGF.getLangOpts().assumeFunctionsAreConvergent())
2560 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2561 // convergent (meaning, they may call an intrinsically convergent op, such
2562 // as bar.sync, and so can't have certain optimizations applied around
2563 // them) unless it's explicitly marked 'noconvergent'.
2564 Result.addFnAttr(llvm::Attribute::Convergent);
2565 // Extract all of the register value results from the asm.
2566 if (ResultRegTypes.size() == 1) {
2567 RegResults.push_back(&Result);
2568 } else {
2569 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2570 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2571 RegResults.push_back(Tmp);
2572 }
2573 }
2574}
2575
2576static void
2578 const llvm::ArrayRef<llvm::Value *> RegResults,
2579 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2580 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2581 const llvm::ArrayRef<LValue> ResultRegDests,
2582 const llvm::ArrayRef<QualType> ResultRegQualTys,
2583 const llvm::BitVector &ResultTypeRequiresCast,
2584 const llvm::BitVector &ResultRegIsFlagReg) {
2586 CodeGenModule &CGM = CGF.CGM;
2587 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2588
2589 assert(RegResults.size() == ResultRegTypes.size());
2590 assert(RegResults.size() == ResultTruncRegTypes.size());
2591 assert(RegResults.size() == ResultRegDests.size());
2592 // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2593 // in which case its size may grow.
2594 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2595 assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2596
2597 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2598 llvm::Value *Tmp = RegResults[i];
2599 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2600
2601 if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2602 // Target must guarantee the Value `Tmp` here is lowered to a boolean
2603 // value.
2604 llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
2605 llvm::Value *IsBooleanValue =
2606 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
2607 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2608 Builder.CreateCall(FnAssume, IsBooleanValue);
2609 }
2610
2611 // If the result type of the LLVM IR asm doesn't match the result type of
2612 // the expression, do the conversion.
2613 if (ResultRegTypes[i] != TruncTy) {
2614
2615 // Truncate the integer result to the right size, note that TruncTy can be
2616 // a pointer.
2617 if (TruncTy->isFloatingPointTy())
2618 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2619 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2620 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2621 Tmp = Builder.CreateTrunc(
2622 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2623 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2624 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2625 uint64_t TmpSize =
2626 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2627 Tmp = Builder.CreatePtrToInt(
2628 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2629 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2630 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2631 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2632 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2633 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2634 }
2635 }
2636
2637 LValue Dest = ResultRegDests[i];
2638 // ResultTypeRequiresCast elements correspond to the first
2639 // ResultTypeRequiresCast.size() elements of RegResults.
2640 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2641 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2642 Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
2643 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2644 Builder.CreateStore(Tmp, A);
2645 continue;
2646 }
2647
2648 QualType Ty =
2649 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2650 if (Ty.isNull()) {
2651 const Expr *OutExpr = S.getOutputExpr(i);
2652 CGM.getDiags().Report(OutExpr->getExprLoc(),
2653 diag::err_store_value_to_reg);
2654 return;
2655 }
2656 Dest = CGF.MakeAddrLValue(A, Ty);
2657 }
2658 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2659 }
2660}
2661
2663 const AsmStmt &S) {
2664 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2665
2666 StringRef Asm;
2667 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2668 Asm = GCCAsm->getAsmString()->getString();
2669
2670 auto &Ctx = CGF->CGM.getLLVMContext();
2671
2672 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2673 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2674 {StrTy->getType()}, false);
2675 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2676
2677 CGF->Builder.CreateCall(UBF, {StrTy});
2678}
2679
2681 // Pop all cleanup blocks at the end of the asm statement.
2682 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2683
2684 // Assemble the final asm string.
2685 std::string AsmString = S.generateAsmString(getContext());
2686
2687 // Get all the output and input constraints together.
2688 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2689 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2690
2691 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2692 bool IsValidTargetAsm = true;
2693 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2694 StringRef Name;
2695 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2696 Name = GAS->getOutputName(i);
2697 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2698 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2699 if (IsHipStdPar && !IsValid)
2700 IsValidTargetAsm = false;
2701 else
2702 assert(IsValid && "Failed to parse output constraint");
2703 OutputConstraintInfos.push_back(Info);
2704 }
2705
2706 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2707 StringRef Name;
2708 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2709 Name = GAS->getInputName(i);
2710 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2711 bool IsValid =
2712 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2713 if (IsHipStdPar && !IsValid)
2714 IsValidTargetAsm = false;
2715 else
2716 assert(IsValid && "Failed to parse input constraint");
2717 InputConstraintInfos.push_back(Info);
2718 }
2719
2720 if (!IsValidTargetAsm)
2721 return EmitHipStdParUnsupportedAsm(this, S);
2722
2723 std::string Constraints;
2724
2725 std::vector<LValue> ResultRegDests;
2726 std::vector<QualType> ResultRegQualTys;
2727 std::vector<llvm::Type *> ResultRegTypes;
2728 std::vector<llvm::Type *> ResultTruncRegTypes;
2729 std::vector<llvm::Type *> ArgTypes;
2730 std::vector<llvm::Type *> ArgElemTypes;
2731 std::vector<llvm::Value*> Args;
2732 llvm::BitVector ResultTypeRequiresCast;
2733 llvm::BitVector ResultRegIsFlagReg;
2734
2735 // Keep track of inout constraints.
2736 std::string InOutConstraints;
2737 std::vector<llvm::Value*> InOutArgs;
2738 std::vector<llvm::Type*> InOutArgTypes;
2739 std::vector<llvm::Type*> InOutArgElemTypes;
2740
2741 // Keep track of out constraints for tied input operand.
2742 std::vector<std::string> OutputConstraints;
2743
2744 // Keep track of defined physregs.
2745 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2746
2747 // An inline asm can be marked readonly if it meets the following conditions:
2748 // - it doesn't have any sideeffects
2749 // - it doesn't clobber memory
2750 // - it doesn't return a value by-reference
2751 // It can be marked readnone if it doesn't have any input memory constraints
2752 // in addition to meeting the conditions listed above.
2753 bool ReadOnly = true, ReadNone = true;
2754
2755 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2756 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2757
2758 // Simplify the output constraint.
2759 std::string OutputConstraint(S.getOutputConstraint(i));
2760 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2761 getTarget(), &OutputConstraintInfos);
2762
2763 const Expr *OutExpr = S.getOutputExpr(i);
2764 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2765
2766 std::string GCCReg;
2767 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2768 getTarget(), CGM, S,
2769 Info.earlyClobber(),
2770 &GCCReg);
2771 // Give an error on multiple outputs to same physreg.
2772 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2773 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2774
2775 OutputConstraints.push_back(OutputConstraint);
2776 LValue Dest = EmitLValue(OutExpr);
2777 if (!Constraints.empty())
2778 Constraints += ',';
2779
2780 // If this is a register output, then make the inline asm return it
2781 // by-value. If this is a memory result, return the value by-reference.
2782 QualType QTy = OutExpr->getType();
2783 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2785 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2786
2787 Constraints += "=" + OutputConstraint;
2788 ResultRegQualTys.push_back(QTy);
2789 ResultRegDests.push_back(Dest);
2790
2791 bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc");
2792 ResultRegIsFlagReg.push_back(IsFlagReg);
2793
2794 llvm::Type *Ty = ConvertTypeForMem(QTy);
2795 const bool RequiresCast = Info.allowsRegister() &&
2797 Ty->isAggregateType());
2798
2799 ResultTruncRegTypes.push_back(Ty);
2800 ResultTypeRequiresCast.push_back(RequiresCast);
2801
2802 if (RequiresCast) {
2803 unsigned Size = getContext().getTypeSize(QTy);
2804 if (Size)
2805 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2806 else
2807 CGM.Error(OutExpr->getExprLoc(), "output size should not be zero");
2808 }
2809 ResultRegTypes.push_back(Ty);
2810 // If this output is tied to an input, and if the input is larger, then
2811 // we need to set the actual result type of the inline asm node to be the
2812 // same as the input type.
2813 if (Info.hasMatchingInput()) {
2814 unsigned InputNo;
2815 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2816 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2817 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2818 break;
2819 }
2820 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2821
2822 QualType InputTy = S.getInputExpr(InputNo)->getType();
2823 QualType OutputType = OutExpr->getType();
2824
2825 uint64_t InputSize = getContext().getTypeSize(InputTy);
2826 if (getContext().getTypeSize(OutputType) < InputSize) {
2827 // Form the asm to return the value as a larger integer or fp type.
2828 ResultRegTypes.back() = ConvertType(InputTy);
2829 }
2830 }
2831 if (llvm::Type* AdjTy =
2832 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2833 ResultRegTypes.back()))
2834 ResultRegTypes.back() = AdjTy;
2835 else {
2836 CGM.getDiags().Report(S.getAsmLoc(),
2837 diag::err_asm_invalid_type_in_input)
2838 << OutExpr->getType() << OutputConstraint;
2839 }
2840
2841 // Update largest vector width for any vector types.
2842 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2843 LargestVectorWidth =
2844 std::max((uint64_t)LargestVectorWidth,
2845 VT->getPrimitiveSizeInBits().getKnownMinValue());
2846 } else {
2847 Address DestAddr = Dest.getAddress();
2848 // Matrix types in memory are represented by arrays, but accessed through
2849 // vector pointers, with the alignment specified on the access operation.
2850 // For inline assembly, update pointer arguments to use vector pointers.
2851 // Otherwise there will be a mis-match if the matrix is also an
2852 // input-argument which is represented as vector.
2853 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2854 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2855
2856 ArgTypes.push_back(DestAddr.getType());
2857 ArgElemTypes.push_back(DestAddr.getElementType());
2858 Args.push_back(DestAddr.emitRawPointer(*this));
2859 Constraints += "=*";
2860 Constraints += OutputConstraint;
2861 ReadOnly = ReadNone = false;
2862 }
2863
2864 if (Info.isReadWrite()) {
2865 InOutConstraints += ',';
2866
2867 const Expr *InputExpr = S.getOutputExpr(i);
2868 llvm::Value *Arg;
2869 llvm::Type *ArgElemType;
2870 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2871 Info, Dest, InputExpr->getType(), InOutConstraints,
2872 InputExpr->getExprLoc());
2873
2874 if (llvm::Type* AdjTy =
2875 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2876 Arg->getType()))
2877 Arg = Builder.CreateBitCast(Arg, AdjTy);
2878
2879 // Update largest vector width for any vector types.
2880 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2881 LargestVectorWidth =
2882 std::max((uint64_t)LargestVectorWidth,
2883 VT->getPrimitiveSizeInBits().getKnownMinValue());
2884 // Only tie earlyclobber physregs.
2885 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2886 InOutConstraints += llvm::utostr(i);
2887 else
2888 InOutConstraints += OutputConstraint;
2889
2890 InOutArgTypes.push_back(Arg->getType());
2891 InOutArgElemTypes.push_back(ArgElemType);
2892 InOutArgs.push_back(Arg);
2893 }
2894 }
2895
2896 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2897 // to the return value slot. Only do this when returning in registers.
2898 if (isa<MSAsmStmt>(&S)) {
2899 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2900 if (RetAI.isDirect() || RetAI.isExtend()) {
2901 // Make a fake lvalue for the return value slot.
2904 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2905 ResultRegDests, AsmString, S.getNumOutputs());
2906 SawAsmBlock = true;
2907 }
2908 }
2909
2910 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2911 const Expr *InputExpr = S.getInputExpr(i);
2912
2913 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2914
2915 if (Info.allowsMemory())
2916 ReadNone = false;
2917
2918 if (!Constraints.empty())
2919 Constraints += ',';
2920
2921 // Simplify the input constraint.
2922 std::string InputConstraint(S.getInputConstraint(i));
2923 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2924 &OutputConstraintInfos);
2925
2926 InputConstraint = AddVariableConstraints(
2927 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2928 getTarget(), CGM, S, false /* No EarlyClobber */);
2929
2930 std::string ReplaceConstraint (InputConstraint);
2931 llvm::Value *Arg;
2932 llvm::Type *ArgElemType;
2933 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
2934
2935 // If this input argument is tied to a larger output result, extend the
2936 // input to be the same size as the output. The LLVM backend wants to see
2937 // the input and output of a matching constraint be the same size. Note
2938 // that GCC does not define what the top bits are here. We use zext because
2939 // that is usually cheaper, but LLVM IR should really get an anyext someday.
2940 if (Info.hasTiedOperand()) {
2941 unsigned Output = Info.getTiedOperand();
2942 QualType OutputType = S.getOutputExpr(Output)->getType();
2943 QualType InputTy = InputExpr->getType();
2944
2945 if (getContext().getTypeSize(OutputType) >
2946 getContext().getTypeSize(InputTy)) {
2947 // Use ptrtoint as appropriate so that we can do our extension.
2948 if (isa<llvm::PointerType>(Arg->getType()))
2949 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2950 llvm::Type *OutputTy = ConvertType(OutputType);
2951 if (isa<llvm::IntegerType>(OutputTy))
2952 Arg = Builder.CreateZExt(Arg, OutputTy);
2953 else if (isa<llvm::PointerType>(OutputTy))
2954 Arg = Builder.CreateZExt(Arg, IntPtrTy);
2955 else if (OutputTy->isFloatingPointTy())
2956 Arg = Builder.CreateFPExt(Arg, OutputTy);
2957 }
2958 // Deal with the tied operands' constraint code in adjustInlineAsmType.
2959 ReplaceConstraint = OutputConstraints[Output];
2960 }
2961 if (llvm::Type* AdjTy =
2962 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2963 Arg->getType()))
2964 Arg = Builder.CreateBitCast(Arg, AdjTy);
2965 else
2966 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2967 << InputExpr->getType() << InputConstraint;
2968
2969 // Update largest vector width for any vector types.
2970 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2971 LargestVectorWidth =
2972 std::max((uint64_t)LargestVectorWidth,
2973 VT->getPrimitiveSizeInBits().getKnownMinValue());
2974
2975 ArgTypes.push_back(Arg->getType());
2976 ArgElemTypes.push_back(ArgElemType);
2977 Args.push_back(Arg);
2978 Constraints += InputConstraint;
2979 }
2980
2981 // Append the "input" part of inout constraints.
2982 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2983 ArgTypes.push_back(InOutArgTypes[i]);
2984 ArgElemTypes.push_back(InOutArgElemTypes[i]);
2985 Args.push_back(InOutArgs[i]);
2986 }
2987 Constraints += InOutConstraints;
2988
2989 // Labels
2991 llvm::BasicBlock *Fallthrough = nullptr;
2992 bool IsGCCAsmGoto = false;
2993 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2994 IsGCCAsmGoto = GS->isAsmGoto();
2995 if (IsGCCAsmGoto) {
2996 for (const auto *E : GS->labels()) {
2997 JumpDest Dest = getJumpDestForLabel(E->getLabel());
2998 Transfer.push_back(Dest.getBlock());
2999 if (!Constraints.empty())
3000 Constraints += ',';
3001 Constraints += "!i";
3002 }
3003 Fallthrough = createBasicBlock("asm.fallthrough");
3004 }
3005 }
3006
3007 bool HasUnwindClobber = false;
3008
3009 // Clobbers
3010 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
3011 StringRef Clobber = S.getClobber(i);
3012
3013 if (Clobber == "memory")
3014 ReadOnly = ReadNone = false;
3015 else if (Clobber == "unwind") {
3016 HasUnwindClobber = true;
3017 continue;
3018 } else if (Clobber != "cc") {
3019 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
3020 if (CGM.getCodeGenOpts().StackClashProtector &&
3021 getTarget().isSPRegName(Clobber)) {
3022 CGM.getDiags().Report(S.getAsmLoc(),
3023 diag::warn_stack_clash_protection_inline_asm);
3024 }
3025 }
3026
3027 if (isa<MSAsmStmt>(&S)) {
3028 if (Clobber == "eax" || Clobber == "edx") {
3029 if (Constraints.find("=&A") != std::string::npos)
3030 continue;
3031 std::string::size_type position1 =
3032 Constraints.find("={" + Clobber.str() + "}");
3033 if (position1 != std::string::npos) {
3034 Constraints.insert(position1 + 1, "&");
3035 continue;
3036 }
3037 std::string::size_type position2 = Constraints.find("=A");
3038 if (position2 != std::string::npos) {
3039 Constraints.insert(position2 + 1, "&");
3040 continue;
3041 }
3042 }
3043 }
3044 if (!Constraints.empty())
3045 Constraints += ',';
3046
3047 Constraints += "~{";
3048 Constraints += Clobber;
3049 Constraints += '}';
3050 }
3051
3052 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
3053 "unwind clobber can't be used with asm goto");
3054
3055 // Add machine specific clobbers
3056 std::string_view MachineClobbers = getTarget().getClobbers();
3057 if (!MachineClobbers.empty()) {
3058 if (!Constraints.empty())
3059 Constraints += ',';
3060 Constraints += MachineClobbers;
3061 }
3062
3063 llvm::Type *ResultType;
3064 if (ResultRegTypes.empty())
3065 ResultType = VoidTy;
3066 else if (ResultRegTypes.size() == 1)
3067 ResultType = ResultRegTypes[0];
3068 else
3069 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
3070
3071 llvm::FunctionType *FTy =
3072 llvm::FunctionType::get(ResultType, ArgTypes, false);
3073
3074 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3075
3076 llvm::InlineAsm::AsmDialect GnuAsmDialect =
3077 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
3078 ? llvm::InlineAsm::AD_ATT
3079 : llvm::InlineAsm::AD_Intel;
3080 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
3081 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3082
3083 llvm::InlineAsm *IA = llvm::InlineAsm::get(
3084 FTy, AsmString, Constraints, HasSideEffect,
3085 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
3086 std::vector<llvm::Value*> RegResults;
3087 llvm::CallBrInst *CBR;
3088 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3089 CBRRegResults;
3090 if (IsGCCAsmGoto) {
3091 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
3092 EmitBlock(Fallthrough);
3093 UpdateAsmCallInst(*CBR, HasSideEffect, /*HasUnwindClobber=*/false, ReadOnly,
3094 ReadNone, InNoMergeAttributedStmt,
3095 InNoConvergentAttributedStmt, S, ResultRegTypes,
3096 ArgElemTypes, *this, RegResults);
3097 // Because we are emitting code top to bottom, we don't have enough
3098 // information at this point to know precisely whether we have a critical
3099 // edge. If we have outputs, split all indirect destinations.
3100 if (!RegResults.empty()) {
3101 unsigned i = 0;
3102 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3103 llvm::Twine SynthName = Dest->getName() + ".split";
3104 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
3105 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3106 Builder.SetInsertPoint(SynthBB);
3107
3108 if (ResultRegTypes.size() == 1) {
3109 CBRRegResults[SynthBB].push_back(CBR);
3110 } else {
3111 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3112 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
3113 CBRRegResults[SynthBB].push_back(Tmp);
3114 }
3115 }
3116
3117 EmitBranch(Dest);
3118 EmitBlock(SynthBB);
3119 CBR->setIndirectDest(i++, SynthBB);
3120 }
3121 }
3122 } else if (HasUnwindClobber) {
3123 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
3124 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/true,
3125 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3126 InNoConvergentAttributedStmt, S, ResultRegTypes,
3127 ArgElemTypes, *this, RegResults);
3128 } else {
3129 llvm::CallInst *Result =
3130 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
3131 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/false,
3132 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3133 InNoConvergentAttributedStmt, S, ResultRegTypes,
3134 ArgElemTypes, *this, RegResults);
3135 }
3136
3137 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3138 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3139 ResultRegIsFlagReg);
3140
3141 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
3142 // different insertion point; one for each indirect destination and with
3143 // CBRRegResults rather than RegResults.
3144 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3145 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3146 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3147 Builder.SetInsertPoint(Succ, --(Succ->end()));
3148 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
3149 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3150 ResultTypeRequiresCast, ResultRegIsFlagReg);
3151 }
3152 }
3153}
3154
3156 const RecordDecl *RD = S.getCapturedRecordDecl();
3157 QualType RecordTy = getContext().getRecordType(RD);
3158
3159 // Initialize the captured struct.
3160 LValue SlotLV =
3161 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
3162
3163 RecordDecl::field_iterator CurField = RD->field_begin();
3164 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
3165 E = S.capture_init_end();
3166 I != E; ++I, ++CurField) {
3167 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
3168 if (CurField->hasCapturedVLAType()) {
3169 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
3170 } else {
3171 EmitInitializerForField(*CurField, LV, *I);
3172 }
3173 }
3174
3175 return SlotLV;
3176}
3177
3178/// Generate an outlined function for the body of a CapturedStmt, store any
3179/// captured variables into the captured struct, and call the outlined function.
3180llvm::Function *
3182 LValue CapStruct = InitCapturedStruct(S);
3183
3184 // Emit the CapturedDecl
3185 CodeGenFunction CGF(CGM, true);
3186 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3187 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3188 delete CGF.CapturedStmtInfo;
3189
3190 // Emit call to the helper function.
3191 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
3192
3193 return F;
3194}
3195
3197 LValue CapStruct = InitCapturedStruct(S);
3198 return CapStruct.getAddress();
3199}
3200
3201/// Creates the outlined function for a CapturedStmt.
3202llvm::Function *
3204 assert(CapturedStmtInfo &&
3205 "CapturedStmtInfo should be set when generating the captured function");
3206 const CapturedDecl *CD = S.getCapturedDecl();
3207 const RecordDecl *RD = S.getCapturedRecordDecl();
3208 SourceLocation Loc = S.getBeginLoc();
3209 assert(CD->hasBody() && "missing CapturedDecl body");
3210
3211 // Build the argument list.
3212 ASTContext &Ctx = CGM.getContext();
3213 FunctionArgList Args;
3214 Args.append(CD->param_begin(), CD->param_end());
3215
3216 // Create the function declaration.
3217 const CGFunctionInfo &FuncInfo =
3219 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
3220
3221 llvm::Function *F =
3222 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3224 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
3225 if (CD->isNothrow())
3226 F->addFnAttr(llvm::Attribute::NoUnwind);
3227
3228 // Generate the function.
3229 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
3230 CD->getBody()->getBeginLoc());
3231 // Set the context parameter in CapturedStmtInfo.
3232 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
3234
3235 // Initialize variable-length arrays.
3238 for (auto *FD : RD->fields()) {
3239 if (FD->hasCapturedVLAType()) {
3240 auto *ExprArg =
3241 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
3242 .getScalarVal();
3243 auto VAT = FD->getCapturedVLAType();
3244 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3245 }
3246 }
3247
3248 // If 'this' is captured, load it into CXXThisValue.
3251 LValue ThisLValue = EmitLValueForField(Base, FD);
3252 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3253 }
3254
3255 PGO.assignRegionCounters(GlobalDecl(CD), F);
3256 CapturedStmtInfo->EmitBody(*this, CD->getBody());
3258
3259 return F;
3260}
3261
3262// Returns the first convergence entry/loop/anchor instruction found in |BB|.
3263// std::nullptr otherwise.
3264static llvm::ConvergenceControlInst *getConvergenceToken(llvm::BasicBlock *BB) {
3265 for (auto &I : *BB) {
3266 if (auto *CI = dyn_cast<llvm::ConvergenceControlInst>(&I))
3267 return CI;
3268 }
3269 return nullptr;
3270}
3271
3272llvm::CallBase *
3273CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input) {
3274 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3275 assert(ParentToken);
3276
3277 llvm::Value *bundleArgs[] = {ParentToken};
3278 llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
3279 auto *Output = llvm::CallBase::addOperandBundle(
3280 Input, llvm::LLVMContext::OB_convergencectrl, OB, Input->getIterator());
3281 Input->replaceAllUsesWith(Output);
3282 Input->eraseFromParent();
3283 return Output;
3284}
3285
3286llvm::ConvergenceControlInst *
3287CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB) {
3288 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3289 if (BB->empty())
3290 Builder.SetInsertPoint(BB);
3291 else
3292 Builder.SetInsertPoint(BB->getFirstInsertionPt());
3293
3294 llvm::CallBase *CB = Builder.CreateIntrinsic(
3295 llvm::Intrinsic::experimental_convergence_loop, {}, {});
3296 Builder.restoreIP(IP);
3297
3298 CB = addConvergenceControlToken(CB);
3299 return cast<llvm::ConvergenceControlInst>(CB);
3300}
3301
3302llvm::ConvergenceControlInst *
3303CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3304 llvm::BasicBlock *BB = &F->getEntryBlock();
3305 llvm::ConvergenceControlInst *Token = getConvergenceToken(BB);
3306 if (Token)
3307 return Token;
3308
3309 // Adding a convergence token requires the function to be marked as
3310 // convergent.
3311 F->setConvergent();
3312
3313 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3314 Builder.SetInsertPoint(&BB->front());
3315 llvm::CallBase *I = Builder.CreateIntrinsic(
3316 llvm::Intrinsic::experimental_convergence_entry, {}, {});
3317 assert(isa<llvm::IntrinsicInst>(I));
3318 Builder.restoreIP(IP);
3319
3320 return cast<llvm::ConvergenceControlInst>(I);
3321}
#define V(N, I)
Definition: ASTContext.h:3453
#define SM(sm)
Definition: Cuda.cpp:84
Defines enum values for all the target-independent builtin functions.
static std::string AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, const TargetInfo &Target, CodeGenModule &CGM, const AsmStmt &Stmt, const bool EarlyClobber, std::string *GCCReg=nullptr)
AddVariableConstraints - Look at AsmExpr and if it is a variable declared as using a particular regis...
Definition: CGStmt.cpp:2394
static bool FindCaseStatementsForValue(const SwitchStmt &S, const llvm::APSInt &ConstantCondValue, SmallVectorImpl< const Stmt * > &ResultStmts, ASTContext &C, const SwitchCase *&ResultCase)
FindCaseStatementsForValue - Find the case statement being jumped to and then invoke CollectStatement...
Definition: CGStmt.cpp:2064
static llvm::ConvergenceControlInst * getConvergenceToken(llvm::BasicBlock *BB)
Definition: CGStmt.cpp:3264
static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF, const AsmStmt &S)
Definition: CGStmt.cpp:2662
static std::optional< SmallVector< uint64_t, 16 > > getLikelihoodWeights(ArrayRef< Stmt::Likelihood > Likelihoods)
Definition: CGStmt.cpp:2118
static llvm::MDNode * getAsmSrcLocInfo(const StringLiteral *Str, CodeGenFunction &CGF)
getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline asm call instruction.
Definition: CGStmt.cpp:2488
static std::string SimplifyConstraint(const char *Constraint, const TargetInfo &Target, SmallVectorImpl< TargetInfo::ConstraintInfo > *OutCons=nullptr)
Definition: CGStmt.cpp:2341
static bool isSwiftAsyncCallee(const CallExpr *CE)
Determine if the given call uses the swiftasync calling convention.
Definition: CGStmt.cpp:1489
static CSFC_Result CollectStatementsForCase(const Stmt *S, const SwitchCase *Case, bool &FoundCase, SmallVectorImpl< const Stmt * > &ResultStmts)
Definition: CGStmt.cpp:1909
static void EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S, const llvm::ArrayRef< llvm::Value * > RegResults, const llvm::ArrayRef< llvm::Type * > ResultRegTypes, const llvm::ArrayRef< llvm::Type * > ResultTruncRegTypes, const llvm::ArrayRef< LValue > ResultRegDests, const llvm::ArrayRef< QualType > ResultRegQualTys, const llvm::BitVector &ResultTypeRequiresCast, const llvm::BitVector &ResultRegIsFlagReg)
Definition: CGStmt.cpp:2577
static bool hasEmptyLoopBody(const LoopStmt &S)
Definition: CGStmt.cpp:1022
CSFC_Result
CollectStatementsForCase - Given the body of a 'switch' statement and a constant value that is being ...
Definition: CGStmt.cpp:1908
@ CSFC_Failure
Definition: CGStmt.cpp:1908
@ CSFC_Success
Definition: CGStmt.cpp:1908
@ CSFC_FallThrough
Definition: CGStmt.cpp:1908
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect, bool HasUnwindClobber, bool ReadOnly, bool ReadNone, bool NoMerge, bool NoConvergent, const AsmStmt &S, const std::vector< llvm::Type * > &ResultRegTypes, const std::vector< llvm::Type * > &ArgElemTypes, CodeGenFunction &CGF, std::vector< llvm::Value * > &RegResults)
Definition: CGStmt.cpp:2515
const Decl * D
Expr * E
llvm::MachO::Target Target
Definition: MachO.h:51
Defines the PrettyStackTraceEntry class, which is used to make crashes give more contextual informati...
SourceRange Range
Definition: SemaObjC.cpp:758
VarDecl * Variable
Definition: SemaObjC.cpp:757
SourceLocation Loc
Definition: SemaObjC.cpp:759
Defines the SourceManager interface.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
bool toIntegralConstant(APSInt &Result, QualType SrcTy, const ASTContext &Ctx) const
Try to convert this value to an integral constant.
Definition: APValue.cpp:953
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:188
SourceManager & getSourceManager()
Definition: ASTContext.h:741
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
QualType getRecordType(const RecordDecl *Decl) const
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth,...
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2482
CanQualType VoidTy
Definition: ASTContext.h:1160
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition: Stmt.h:3127
Attr - This represents one attribute.
Definition: Attr.h:43
Represents an attribute applied to a statement.
Definition: Stmt.h:2107
BreakStmt - This represents a break.
Definition: Stmt.h:3007
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition: StmtCXX.h:135
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2874
Expr * getCallee()
Definition: Expr.h:3024
Represents the body of a CapturedStmt, and serves as its DeclContext.
Definition: Decl.h:4687
ImplicitParamDecl * getContextParam() const
Retrieve the parameter containing captured variables.
Definition: Decl.h:4749
bool isNothrow() const
Definition: Decl.cpp:5468
param_iterator param_end() const
Retrieve an iterator one past the last parameter decl.
Definition: Decl.h:4766
param_iterator param_begin() const
Retrieve an iterator pointing to the first parameter decl.
Definition: Decl.h:4764
Stmt * getBody() const override
getBody - If this Decl represents a declaration for a body of code, such as a function or method defi...
Definition: Decl.cpp:5465
This captures a statement into a function.
Definition: Stmt.h:3784
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition: Stmt.h:3948
CapturedRegionKind getCapturedRegionKind() const
Retrieve the captured region kind.
Definition: Stmt.cpp:1429
CaseStmt - Represent a case statement.
Definition: Stmt.h:1828
Stmt * getSubStmt()
Definition: Stmt.h:1945
Expr * getLHS()
Definition: Stmt.h:1915
Expr * getRHS()
Definition: Stmt.h:1927
bool hasProfileClangInstr() const
Check if Clang profile instrumenation is on.
bool hasReducedDebugInfo() const
Check if type and variable info should be emitted.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:128
static Address invalid()
Definition: Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:251
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:207
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:274
bool isValid() const
Definition: Address.h:177
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:199
An aggregate value slot.
Definition: CGValue.h:504
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:587
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:858
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
Definition: CGDebugInfo.h:915
llvm::StoreInst * CreateFlagStore(bool Value, llvm::Value *Addr)
Emit a store to an i1 flag variable.
Definition: CGBuilder.h:164
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:136
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:108
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:58
CGFunctionInfo - Class to encapsulate the information about a function definition.
CallingConv getASTCallingConvention() const
getASTCallingConvention() - Return the AST-specified calling convention.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD)
Gets the OpenMP-specific address of the local variable.
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S)
Emit the captured statement body.
virtual StringRef getHelperName() const
Get the name of the capture helper.
void rescopeLabels()
Change the cleanup scope of the labels in this lexical scope to match the scope of the enclosing cont...
Definition: CGStmt.cpp:726
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitGotoStmt(const GotoStmt &S)
void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S)
void EmitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &S)
bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody)
Returns true if a loop must make progress, which means the mustprogress attribute can be added.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
void EmitIfStmt(const IfStmt &S)
void EmitWhileStmt(const WhileStmt &S, ArrayRef< const Attr * > Attrs={})
void EmitOMPOrderedDirective(const OMPOrderedDirective &S)
void EmitOMPTargetDirective(const OMPTargetDirective &S)
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
void EmitOMPAtomicDirective(const OMPAtomicDirective &S)
void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S)
void EmitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &S)
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
void EmitOMPReverseDirective(const OMPReverseDirective &S)
static bool hasScalarEvaluationKind(QualType T)
void EmitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &S)
CGCapturedStmtInfo * CapturedStmtInfo
void EmitIndirectGotoStmt(const IndirectGotoStmt &S)
void EmitDecl(const Decl &D)
EmitDecl - Emit a declaration.
void EmitCXXTryStmt(const CXXTryStmt &S)
bool EmitSimpleStmt(const Stmt *S, ArrayRef< const Attr * > Attrs)
EmitSimpleStmt - Try to emit a "simple" statement which does not necessarily require an insertion poi...
void EmitLabel(const LabelDecl *D)
EmitLabel - Emit the block for the given label.
void EmitOpenACCInitConstruct(const OpenACCInitConstruct &S)
void EmitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &S)
void EmitOMPTaskDirective(const OMPTaskDirective &S)
void EmitOMPScanDirective(const OMPScanDirective &S)
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S)
void EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S)
SmallVector< llvm::ConvergenceControlInst *, 4 > ConvergenceTokenStack
Stack to track the controlled convergence tokens.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitCaseStmt(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitDefaultStmt(const DefaultStmt &S, ArrayRef< const Attr * > Attrs)
const LangOptions & getLangOpts() const
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S)
void EmitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &S)
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
void EmitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &S)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S)
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S)
void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S)
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
void EmitOpenACCWaitConstruct(const OpenACCWaitConstruct &S)
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
void EmitOMPParallelMaskedTaskLoopDirective(const OMPParallelMaskedTaskLoopDirective &S)
void SimplifyForwardingBlocks(llvm::BasicBlock *BB)
SimplifyForwardingBlocks - If the given basic block is only a branch to another basic block,...
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
void EmitOMPParallelDirective(const OMPParallelDirective &S)
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
void EmitBlockAfterUses(llvm::BasicBlock *BB)
EmitBlockAfterUses - Emit the given block somewhere hopefully near its uses, and leave the insertion ...
void EmitContinueStmt(const ContinueStmt &S)
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
llvm::Type * ConvertTypeForMem(QualType T)
void EmitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &S)
LValue MakeAddrLValueWithoutTBAA(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitOMPForSimdDirective(const OMPForSimdDirective &S)
JumpDest ReturnBlock
ReturnBlock - Unified return block.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
void EmitOMPFlushDirective(const OMPFlushDirective &S)
void EmitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &S)
void EmitSEHLeaveStmt(const SEHLeaveStmt &S)
void EmitAttributedStmt(const AttributedStmt &S)
void EmitOMPCancelDirective(const OMPCancelDirective &S)
void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S)
void EmitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &S)
void EmitCaseStmtRange(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
const TargetInfo & getTarget() const
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S)
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
void EmitCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef< const Attr * > Attrs={})
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitLabelStmt(const LabelStmt &S)
void EmitOMPDepobjDirective(const OMPDepobjDirective &S)
const Expr * RetExpr
If a return statement is being visited, this holds the return statement's result expression.
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init)
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
void EmitOMPSingleDirective(const OMPSingleDirective &S)
void EmitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &S)
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D)
Emit simple code for OpenMP directives in Simd-only mode.
void EmitForStmt(const ForStmt &S, ArrayRef< const Attr * > Attrs={})
void EmitOMPDistributeDirective(const OMPDistributeDirective &S)
void EmitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &S)
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
void EmitOMPParallelForDirective(const OMPParallelForDirective &S)
void EmitOMPTeamsDirective(const OMPTeamsDirective &S)
uint64_t getCurrentProfileCount()
Get the profiler's current count.
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitOMPUnrollDirective(const OMPUnrollDirective &S)
void EmitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &S)
void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void EmitSwitchStmt(const SwitchStmt &S)
void EmitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &S)
void EmitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &S)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
void EmitOpenACCDataConstruct(const OpenACCDataConstruct &S)
llvm::Function * GenerateCapturedStmtFunction(const CapturedStmt &S)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
void EmitSEHTryStmt(const SEHTryStmt &S)
void EmitOMPInteropDirective(const OMPInteropDirective &S)
const TargetCodeGenInfo & getTargetHooks() const
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
void EmitDeclStmt(const DeclStmt &S)
void EmitOMPScopeDirective(const OMPScopeDirective &S)
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S)
void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S)
void EmitCoroutineBody(const CoroutineBodyStmt &S)
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S)
void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S)
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs={})
EmitStmt - Emit the code for the statement.
void EmitOMPTileDirective(const OMPTileDirective &S)
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S)
llvm::Function * EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K)
void EmitOMPParallelMaskedTaskLoopSimdDirective(const OMPParallelMaskedTaskLoopSimdDirective &S)
llvm::Type * ConvertType(QualType T)
void EmitOMPSectionsDirective(const OMPSectionsDirective &S)
void EmitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &S)
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
HLSLControlFlowHintAttr::Spelling HLSLControlFlowAttr
HLSL Branch attribute.
void EmitDoStmt(const DoStmt &S, ArrayRef< const Attr * > Attrs={})
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S)
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S)
void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S)
void EmitOMPSimdDirective(const OMPSimdDirective &S)
void EmitOMPCriticalDirective(const OMPCriticalDirective &S)
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S)
void EmitOMPForDirective(const OMPForDirective &S)
void EmitOMPMetaDirective(const OMPMetaDirective &S)
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S)
LValue InitCapturedStruct(const CapturedStmt &S)
void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S)
void EmitReturnStmt(const ReturnStmt &S)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
void EmitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &S)
void EmitOMPMasterDirective(const OMPMasterDirective &S)
void EmitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &S)
void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S)
void EmitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &S)
void EmitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &S)
void EmitOMPAssumeDirective(const OMPAssumeDirective &S)
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
void EmitOMPMaskedDirective(const OMPMaskedDirective &S)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
static bool hasAggregateEvaluationKind(QualType T)
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S)
void EmitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &S)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
void EmitBreakStmt(const BreakStmt &S)
Address GenerateCapturedStmtArgument(const CapturedStmt &S)
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
const CGFunctionInfo * CurFnInfo
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const
isObviouslyBranchWithoutCleanups - Return true if a branch to the specified destination obviously has...
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitCoreturnStmt(const CoreturnStmt &S)
void EmitOpenACCSetConstruct(const OpenACCSetConstruct &S)
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S)
void EmitOMPErrorDirective(const OMPErrorDirective &S)
void EmitOMPSectionDirective(const OMPSectionDirective &S)
void EmitOMPBarrierDirective(const OMPBarrierDirective &S)
void EmitStopPoint(const Stmt *S)
EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S)
llvm::LLVMContext & getLLVMContext()
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S)
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void ResolveBranchFixups(llvm::BasicBlock *Target)
void EmitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &S)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S)
void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S)
Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S)
llvm::BasicBlock * GetIndirectGotoBlock()
void EmitAsmStmt(const AsmStmt &S)
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S)
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
This class organizes the cross-function state that is used while generating LLVM code.
void SetInternalFunctionAttributes(GlobalDecl GD, llvm::Function *F, const CGFunctionInfo &FI)
Set the attributes on the LLVM function for the given decl and function info.
llvm::Module & getModule() const
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
const llvm::DataLayout & getDataLayout() const
void Error(SourceLocation loc, StringRef error)
Emit a general error that something can't be done.
bool shouldEmitConvergenceTokens() const
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
SanitizerMetadata * getSanitizerMetadata()
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn)
Assign counters to regions and configure them for PGO of a given function.
void markStmtMaybeUsed(const Stmt *S)
Definition: CodeGenPGO.h:130
void setCurrentStmt(const Stmt *S)
If the execution count for the current statement is known, record that as the current count.
Definition: CodeGenPGO.h:76
bool haveRegionCounts() const
Whether or not we have PGO region data for the current function.
Definition: CodeGenPGO.h:53
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
Definition: CGCall.cpp:1630
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:679
A saved depth on the scope stack.
Definition: EHScopeStack.h:101
bool encloses(stable_iterator I) const
Returns true if this scope encloses I.
Definition: EHScopeStack.h:118
stable_iterator getInnermostNormalCleanup() const
Returns the innermost normal cleanup on the stack, or stable_end() if there are no normal cleanups.
Definition: EHScopeStack.h:370
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:393
bool empty() const
Determines whether the exception-scopes stack is empty.
Definition: EHScopeStack.h:359
bool hasNormalCleanups() const
Determines whether there are any normal cleanups on the stack.
Definition: EHScopeStack.h:364
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
Definition: EHScopeStack.h:398
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:382
LValue - This represents an lvalue reference.
Definition: CGValue.h:182
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition: CGValue.h:361
void pop()
End the current loop.
Definition: CGLoopInfo.cpp:834
void push(llvm::BasicBlock *Header, const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc)
Begin a new structured loop.
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:42
bool isScalar() const
Definition: CGValue.h:64
static RValue get(llvm::Value *V)
Definition: CGValue.h:98
bool isAggregate() const
Definition: CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:83
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:71
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:78
void disableSanitizerForGlobal(llvm::GlobalVariable *GV)
virtual void addReturnRegisterOutputs(CodeGen::CodeGenFunction &CGF, CodeGen::LValue ReturnValue, std::string &Constraints, std::vector< llvm::Type * > &ResultRegTypes, std::vector< llvm::Type * > &ResultTruncRegTypes, std::vector< CodeGen::LValue > &ResultRegDests, std::string &AsmString, unsigned NumOutputs) const
Adds constraints and types for result registers.
Definition: TargetInfo.h:204
virtual bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, llvm::Type *Ty) const
Target hook to decide whether an inline asm operand can be passed by value.
Definition: TargetInfo.h:198
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1628
Stmt *const * const_body_iterator
Definition: Stmt.h:1700
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1077
ContinueStmt - This represents a continue.
Definition: Stmt.h:2977
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Definition: DeclBase.h:2369
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1265
ValueDecl * getDecl()
Definition: Expr.h:1333
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition: Stmt.h:1519
T * getAttr() const
Definition: DeclBase.h:576
SourceLocation getBodyRBrace() const
getBodyRBrace - Gets the right brace of the body, if a body exists.
Definition: DeclBase.cpp:1064
virtual bool hasBody() const
Returns true if this Decl represents a declaration for a body of code, such as a function or method d...
Definition: DeclBase.h:1082
SourceLocation getLocation() const
Definition: DeclBase.h:442
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1493
DoStmt - This represents a 'do/while' stmt.
Definition: Stmt.h:2752
This represents one expression.
Definition: Expr.h:110
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition: Expr.cpp:3124
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx, SmallVectorImpl< PartialDiagnosticAt > *Diag=nullptr) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3093
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition: Expr.cpp:3594
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:276
QualType getType() const
Definition: Expr.h:142
Represents a member of a struct/union/class.
Definition: Decl.h:3033
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition: Stmt.h:2808
const Expr * getSubExpr() const
Definition: Expr.h:1057
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition: Type.h:4321
CallingConv getCallConv() const
Definition: Type.h:4659
This represents a GCC inline-assembly statement extension.
Definition: Stmt.h:3286
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
GotoStmt - This represents a direct goto.
Definition: Stmt.h:2889
IfStmt - This represents an if/then/else.
Definition: Stmt.h:2165
IndirectGotoStmt - This represents an indirect goto.
Definition: Stmt.h:2928
Represents the declaration of a label.
Definition: Decl.h:503
LabelStmt - Represents a label, which has a substatement.
Definition: Stmt.h:2058
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:499
bool assumeFunctionsAreConvergent() const
Definition: LangOptions.h:697
Represents a point when we exit a loop.
Definition: ProgramPoint.h:711
If a crash happens while one of these objects are live, the message is printed out along with the spe...
A (possibly-)qualified type.
Definition: Type.h:929
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:996
QualType getCanonicalType() const
Definition: Type.h:7988
The collection of all-type qualifiers we support.
Definition: Type.h:324
Represents a struct/union/class.
Definition: Decl.h:4162
field_range fields() const
Definition: Decl.h:4368
field_iterator field_begin() const
Definition: Decl.cpp:5094
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition: Stmt.h:3046
Expr * getRetValue()
Definition: Stmt.h:3077
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
This class handles loading and caching of source files into memory.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition: Stmt.h:84
@ NoStmtClass
Definition: Stmt.h:87
StmtClass getStmtClass() const
Definition: Stmt.h:1380
Likelihood
The likelihood of a branch being taken.
Definition: Stmt.h:1323
@ LH_Unlikely
Branch has the [[unlikely]] attribute.
Definition: Stmt.h:1324
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition: Stmt.h:1325
@ LH_Likely
Branch has the [[likely]] attribute.
Definition: Stmt.h:1327
static const Attr * getLikelihoodAttr(const Stmt *S)
Definition: Stmt.cpp:170
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.cpp:345
static Likelihood getLikelihood(ArrayRef< const Attr * > Attrs)
Definition: Stmt.cpp:162
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1778
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.h:1959
SourceLocation getLocationOfByte(unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, const TargetInfo &Target, unsigned *StartToken=nullptr, unsigned *StartTokenByteOffset=nullptr) const
getLocationOfByte - Return a source location that points to the specified byte of this string literal...
Definition: Expr.cpp:1332
StringRef getString() const
Definition: Expr.h:1855
const SwitchCase * getNextSwitchCase() const
Definition: Stmt.h:1801
SwitchStmt - This represents a 'switch' stmt.
Definition: Stmt.h:2415
Exposes information about the current target.
Definition: TargetInfo.h:220
bool validateInputConstraint(MutableArrayRef< ConstraintInfo > OutputConstraints, ConstraintInfo &info) const
Definition: TargetInfo.cpp:839
StringRef getNormalizedGCCRegisterName(StringRef Name, bool ReturnCanonical=false) const
Returns the "normalized" GCC register name.
Definition: TargetInfo.cpp:701
bool validateOutputConstraint(ConstraintInfo &Info) const
Definition: TargetInfo.cpp:742
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
Token - This structure provides full information about a lexed token.
Definition: Token.h:36
bool isVoidType() const
Definition: Type.h:8515
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:8805
bool isReferenceType() const
Definition: Type.h:8209
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:738
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition: Decl.h:671
Represents a variable declaration or definition.
Definition: Decl.h:882
StorageClass getStorageClass() const
Returns the storage class as written in the source.
Definition: Decl.h:1119
WhileStmt - This represents a 'while' stmt.
Definition: Stmt.h:2611
Defines the clang::TargetInfo interface.
bool Rem(InterpState &S, CodePtr OpPC)
1) Pops the RHS from the stack.
Definition: Interp.h:654
bool Ret(InterpState &S, CodePtr &PC)
Definition: Interp.h:318
The JSON file list parser is used to communicate input to InstallAPI.
CapturedRegionKind
The different kinds of captured statement.
Definition: CapturedStmt.h:16
@ SC_Register
Definition: Specifiers.h:257
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
@ CC_SwiftAsync
Definition: Specifiers.h:294
unsigned long uint64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition: Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void setScopeDepth(EHScopeStack::stable_iterator depth)
EHScopeStack::stable_iterator getScopeDepth() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:642
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:644
bool hasMatchingInput() const
Return true if this output operand has a matching (tied) input operand.
Definition: TargetInfo.h:1131
bool hasTiedOperand() const
Return true if this input operand is a matching constraint that ties it to an output operand.
Definition: TargetInfo.h:1138