//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set. To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors. We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();". Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function. This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }
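
  // Illustrative example (editorial sketch, not part of the upstream source):
  // for a statement like
  //
  //   exit(1);   // a call to a noreturn function
  //
  // call emission opens a fresh block with no predecessors so the insertion
  // point stays valid; the check above then erases that dead block and clears
  // the insertion point, so anything after the call is treated as
  // unreachable code.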

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
  case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
  case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
  }
  break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPReverseDirectiveClass:
    EmitOMPReverseDirective(cast<OMPReverseDirective>(*S));
    break;
  case Stmt::OMPInterchangeDirectiveClass:
    EmitOMPInterchangeDirective(cast<OMPInterchangeDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPErrorDirectiveClass:
    EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
    llvm_unreachable("masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable("masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
    llvm_unreachable("parallel masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable(
        "parallel masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
    break;
  case Stmt::OMPDispatchDirectiveClass:
    CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
    break;
  case Stmt::OMPScopeDirectiveClass:
    EmitOMPScopeDirective(cast<OMPScopeDirective>(*S));
    break;
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
    EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
    EmitOMPTargetTeamsGenericLoopDirective(
        cast<OMPTargetTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelGenericLoopDirectiveClass:
    EmitOMPParallelGenericLoopDirective(
        cast<OMPParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
    EmitOMPTargetParallelGenericLoopDirective(
        cast<OMPTargetParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedDirectiveClass:
    EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
    break;
  case Stmt::OMPAssumeDirectiveClass:
    EmitOMPAssumeDirective(cast<OMPAssumeDirective>(*S));
    break;
  case Stmt::OpenACCComputeConstructClass:
    EmitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*S));
    break;
  case Stmt::OpenACCLoopConstructClass:
    EmitOpenACCLoopConstruct(cast<OpenACCLoopConstruct>(*S));
    break;
  case Stmt::OpenACCCombinedConstructClass:
    EmitOpenACCCombinedConstruct(cast<OpenACCCombinedConstruct>(*S));
    break;
  case Stmt::OpenACCDataConstructClass:
    EmitOpenACCDataConstruct(cast<OpenACCDataConstruct>(*S));
    break;
  case Stmt::OpenACCEnterDataConstructClass:
    EmitOpenACCEnterDataConstruct(cast<OpenACCEnterDataConstruct>(*S));
    break;
  case Stmt::OpenACCExitDataConstructClass:
    EmitOpenACCExitDataConstruct(cast<OpenACCExitDataConstruct>(*S));
    break;
  case Stmt::OpenACCHostDataConstructClass:
    EmitOpenACCHostDataConstruct(cast<OpenACCHostDataConstruct>(*S));
    break;
  case Stmt::OpenACCWaitConstructClass:
    EmitOpenACCWaitConstruct(cast<OpenACCWaitConstruct>(*S));
    break;
  case Stmt::OpenACCInitConstructClass:
    EmitOpenACCInitConstruct(cast<OpenACCInitConstruct>(*S));
    break;
  case Stmt::OpenACCShutdownConstructClass:
    EmitOpenACCShutdownConstruct(cast<OpenACCShutdownConstruct>(*S));
    break;
  case Stmt::OpenACCSetConstructClass:
    EmitOpenACCSetConstruct(cast<OpenACCSetConstruct>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here. They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression. Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }
      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr. Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}
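
// Illustrative example (editorial, not part of the upstream source): in the
// GNU statement-expression extension,
//
//   int x = ({ goto skip; skip: 42; });
//
// the value-producing statement is the labeled statement "skip: 42;". The
// loop above emits each label (and skips over attributes) until it reaches
// the expression itself, which is then evaluated into a temporary that
// survives the scope's cleanups.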

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->insert(std::next(CurBB->getIterator()), BB);
  else
    CurFn->insert(CurFn->end(), BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block. If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->insert(std::next(insn->getParent()->getIterator()), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->insert(CurFn->end(), block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups. Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.innermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}
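
// Illustrative sketch of the intent (editorial, not part of the upstream
// source): given
//
//   int g __attribute__((cleanup(h)));
//   { lbl:; }       // label inside a lexical scope; the enclosing scope
//   goto lbl;       // still holds a normal cleanup for 'g'
//
// 'lbl' is first recorded with the depth of the scope it was emitted in;
// when that LexicalScope ends, rescopeLabels() re-parents the label into the
// enclosing scope so later branch fixups resolve against the correct
// cleanup depth.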


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  bool noinline = false;
  bool alwaysinline = false;
  bool noconvergent = false;
  HLSLControlFlowHintAttr::Spelling flattenOrBranch =
      HLSLControlFlowHintAttr::SpellingNotCalculated;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    switch (A->getKind()) {
    default:
      break;
    case attr::NoMerge:
      nomerge = true;
      break;
    case attr::NoInline:
      noinline = true;
      break;
    case attr::AlwaysInline:
      alwaysinline = true;
      break;
    case attr::NoConvergent:
      noconvergent = true;
      break;
    case attr::MustTail: {
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
    } break;
    case attr::CXXAssume: {
      const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
      if (getLangOpts().CXXAssumptions && Builder.GetInsertBlock() &&
          !Assumption->HasSideEffects(getContext())) {
        llvm::Value *AssumptionVal = EmitCheckedArgForAssume(Assumption);
        Builder.CreateAssumption(AssumptionVal);
      }
    } break;
    case attr::HLSLControlFlowHint: {
      flattenOrBranch = cast<HLSLControlFlowHintAttr>(A)->getSemanticSpelling();
    } break;
    }
  }
  SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
  SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
  SaveAndRestore save_noconvergent(InNoConvergentAttributedStmt, noconvergent);
  SaveAndRestore save_musttail(MustTailCall, musttail);
  SaveAndRestore save_flattenOrBranch(HLSLControlFlowAttr, flattenOrBranch);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}
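
// Illustrative examples (editorial, not part of the upstream source): the
// attribute walk above recognizes statements such as
//
//   [[clang::musttail]] return fib(n - 1);  // records the callee in
//                                           // MustTailCall for call emission
//   [[clang::noinline]] helper();           // sets InNoInlineAttributedStmt
//   [[assume(n > 0)]];                      // lowered via Builder.CreateAssumption
//
// The SaveAndRestore guards scope each flag to just the attributed statement.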

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}
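
// Illustrative example (editorial, not part of the upstream source): with
// the GNU labels-as-values extension,
//
//   static void *tbl[] = { &&op_inc, &&op_dec };
//   goto *tbl[opcode];
//
// every "goto *expr" branches to the shared indirect-goto block, first
// adding its computed address to the PHI node that heads that block; the
// block itself dispatches over the function's address-taken labels.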

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  const Stmt *Else = S.getElse();

  // The else branch of a consteval if statement is always the only branch that
  // can be runtime evaluated.
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : Else;
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0. The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
  ApplyDebugLocation DL(*this, S.getCond());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = Else;
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (Else)
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  // Also, differentiate between disabled PGO and a never executed branch with
  // PGO. Assuming PGO is in use:
  // - we want to ignore the [[likely]] attribute if the branch is never
  // executed,
  // - assuming the profile is poor, preserving the attribute may still be
  // beneficial.
  // As an approximation, preserve the attribute only if both the branch and the
  // parent context were not executed.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t ThenCount = getProfileCount(S.getThen());
  if (!ThenCount && !getCurrentProfileCount() &&
      CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), Else);

  // When measuring MC/DC, always fully evaluate the condition up front using
  // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
  // executing the body of the if.then or if.else. This is useful for when
  // there is a 'return' within the body, but this is particularly beneficial
  // when one if-stmt is nested within another if-stmt so that all of the MC/DC
  // updates are kept linear and consistent.
  if (!CGM.getCodeGenOpts().MCDCCoverage)
    EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH);
  else {
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
  }

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getThen());
  else
    incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (Else) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    // When single byte coverage mode is enabled, add a counter to else block.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(Else);
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);
}
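
// Illustrative example (editorial, not part of the upstream source): in
//
//   if (sizeof(void *) == 8) f(); else g();
//
// the condition constant-folds, so only the taken arm is emitted and no
// conditional branch is generated; the skipped arm would still be emitted if
// it contained a label reachable from elsewhere. For "if consteval", only
// the else arm can ever run at runtime, handled at the top of EmitIfStmt.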

bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
                                              bool HasEmptyBody) {
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
      CodeGenOptions::FiniteLoopsKind::Never)
    return false;

  // Now apply rules for plain C (see 6.8.5.6 in C11).
  // Loops with constant conditions do not have to make progress in any C
  // version.
  // As an extension, we consider loops whose constant expression
  // can be constant-folded.
  Expr::EvalResult Result;
  bool CondIsConstInt =
      !ControllingExpression ||
      (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
       Result.Val.isInt());

  bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
                                       Result.Val.getInt().getBoolValue());

  // Loops with non-constant conditions must make progress in C11 and later.
  if (getLangOpts().C11 && !CondIsConstInt)
    return true;

  // [C++26][intro.progress] (DR)
  // The implementation may assume that any thread will eventually do one of the
  // following:
  // [...]
  // - continue execution of a trivial infinite loop ([stmt.iter.general]).
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
          CodeGenOptions::FiniteLoopsKind::Always ||
      getLangOpts().CPlusPlus11) {
    if (HasEmptyBody && CondIsTrue) {
      CurFn->removeFnAttr(llvm::Attribute::MustProgress);
      return false;
    }
    return true;
  }
  return false;
}
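
// Illustrative examples (editorial, not part of the upstream source):
//
//   while (x) { }     // C11: non-constant condition => must make progress
//   while (1) { }     // C: constant condition => may loop forever
//   while (true) { }  // C++ (per the C++26 DR): a trivial infinite loop is
//                     // permitted, so MustProgress is dropped from the
//                     // function instead of making the loop UB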

// [C++26][stmt.iter.general] (DR)
// A trivially empty iteration statement is an iteration statement matching one
// of the following forms:
// - while ( expression ) ;
// - while ( expression ) { }
// - do ; while ( expression ) ;
// - do { } while ( expression ) ;
// - for ( init-statement expression(opt); ) ;
// - for ( init-statement expression(opt); ) { }
template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
  if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
    if (S.getInc())
      return false;
  }
  const Stmt *Body = S.getBody();
  if (!Body || isa<NullStmt>(Body))
    return true;
  if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
    return Compound->body_empty();
  return false;
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  if (shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(LoopHeader.getBlock()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header. C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // When single byte coverage mode is enabled, add a counter to loop condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body. We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    // When single byte coverage mode is enabled, add a counter to the body.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getBody());
    else
      incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}
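
// Illustrative note (editorial, not part of the upstream source): for
// "while (1) body" the condition folds to the constant true, so
// EmitBoolCondBranch is false: no exit branch is emitted, the "while.cond"
// header degenerates to an unconditional branch that
// SimplifyForwardingBlocks() erases, and "while.end" is reachable only via
// 'break'.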

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  if (llvm::EnableSingleByteCoverage)
    EmitBlockWithFallThrough(LoopBody, S.getBody());
  else
    EmitBlockWithFallThrough(LoopBody, &S);

  if (shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(LoopBody));

  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());
  // When single byte coverage mode is enabled, add a counter to loop condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}
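
// Illustrative example (editorial, not part of the upstream source): the
// common macro idiom
//
//   #define SWAP(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)
//
// produces a constant-false controlling expression, so no backedge is
// emitted and the leftover "do.cond" forwarding block is cleaned up by
// SimplifyForwardingBlocks() above.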

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  if (shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block. Otherwise, if there is no condition variable, we can
  // form the continue block now. If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    // When single byte coverage mode is enabled, add a counter to loop
    // condition.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getCond());

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0. The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant. Don't even create a new block for the
    // body, just fall into it.
  }

  // When single byte coverage mode is enabled, add a counter to the body.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);
  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  if (shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}
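
// Illustrative example (editorial): "for (auto &x : v) use(x);" reaches this
// function already desugared, roughly as
//
//   auto &&__range = v;              // S.getRangeStmt()
//   auto __begin = begin-expr;       // S.getBeginStmt()
//   auto __end = end-expr;           // S.getEndStmt()
//   for (; __begin != __end; ++__begin) {
//     auto &x = *__begin;            // S.getLoopVarStmt()
//     use(x);
//   }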

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// Determine if the given call uses the swiftasync calling convention.
static bool isSwiftAsyncCallee(const CallExpr *CE) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return false;
    }
  } else {
    return false;
  }
  return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
}
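
// Illustrative example (editorial, not part of the upstream source): in
//
//   __attribute__((swiftasynccall)) void next(void *ctx);
//   __attribute__((swiftasynccall)) void step(void *ctx) {
//     return next(ctx);   // tail position, swiftasync callee
//   }
//
// EmitReturnStmt below detects this shape via isSwiftAsyncCallee() and marks
// the call as 'musttail' so the async continuation does not grow the stack.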

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(SLocPtr, ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();

  // If we're in a swiftasynccall function, and the return expression is a
  // call to a swiftasynccall function, mark the call as the musttail call.
  std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
  if (RV && CurFnInfo &&
      CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
    if (auto CE = dyn_cast<CallExpr>(RV)) {
      if (isSwiftAsyncCallee(CE)) {
        SaveMustTail.emplace(MustTailCall, CE);
      }
    }
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar: {
      llvm::Value *Ret = EmitScalarExpr(RV);
      if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect)
        EmitStoreOfScalar(Ret, MakeAddrLValue(ReturnValue, RV->getType()),
                          /*isInit*/ true);
      else
        Builder.CreateStore(Ret, ReturnValue);
      break;
    }
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}
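
// Illustrative example (editorial, not part of the upstream source): in
//
//   std::string make() {
//     std::string s = build();
//     return s;            // NRVO: 's' was constructed directly in the
//   }                      // return slot, so this return emits no copy;
//                          // at most it sets the NRVO flag so cleanups
//                          // skip destroying 's'.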

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}
1652
1653/// EmitCaseStmtRange - If case statement range is not too big then
1654/// add multiple cases to switch instruction, one for each value within
1655/// the range. If range is too big then emit "if" condition check.
1657 ArrayRef<const Attr *> Attrs) {
1658 assert(S.getRHS() && "Expected RHS value in CaseStmt");
1659
1660 llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1661 llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1662
1663 // Emit the code for this case. We do this first to make sure it is
1664 // properly chained from our predecessor before generating the
1665 // switch machinery to enter this block.
1666 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1667 EmitBlockWithFallThrough(CaseDest, &S);
1668 EmitStmt(S.getSubStmt());
1669
1670 // If range is empty, do nothing.
1671 if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1672 return;
1673
1675 llvm::APInt Range = RHS - LHS;
1676 // FIXME: parameters such as this should not be hardcoded.
1677 if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1678 // Range is small enough to add multiple switch instruction cases.
1679 uint64_t Total = getProfileCount(&S);
1680 unsigned NCases = Range.getZExtValue() + 1;
1681 // We only have one region counter for the entire set of cases here, so we
1682 // need to divide the weights evenly between the generated cases, ensuring
1683 // that the total weight is preserved. E.g., a weight of 5 over three cases
1684 // will be distributed as weights of 2, 2, and 1.
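// (Worked example of the division below: Total = 5 over NCases = 3 gives
// Weight = 1 and Rem = 2, so the first two cases get Weight + 1 = 2, the
// third gets 1, and the weights still sum to 5.)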
1685 uint64_t Weight = Total / NCases, Rem = Total % NCases;
1686 for (unsigned I = 0; I != NCases; ++I) {
1687 if (SwitchWeights)
1688 SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1689 else if (SwitchLikelihood)
1690 SwitchLikelihood->push_back(LH);
1691
1692 if (Rem)
1693 Rem--;
1694 SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1695 ++LHS;
1696 }
1697 return;
1698 }
1699
1700 // The range is too big. Emit "if" condition into a new block,
1701 // making sure to save and restore the current insertion point.
1702 llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1703
1704 // Push this test onto the chain of range checks (which terminates
1705 // in the default basic block). The switch's default will be changed
1706 // to the top of this chain after switch emission is complete.
1707 llvm::BasicBlock *FalseDest = CaseRangeBlock;
1708 CaseRangeBlock = createBasicBlock("sw.caserange");
1709
1710 CurFn->insert(CurFn->end(), CaseRangeBlock);
1711 Builder.SetInsertPoint(CaseRangeBlock);
1712
1713 // Emit range check.
1714 llvm::Value *Diff =
1715 Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1716 llvm::Value *Cond =
1717 Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
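// (Note: a single unsigned comparison covers both bounds. Diff wraps to a
// huge unsigned value whenever the condition is below LHS, so
// "Diff ule Range" is equivalent to LHS <= Cond && Cond <= RHS.)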
1718
1719 llvm::MDNode *Weights = nullptr;
1720 if (SwitchWeights) {
1721 uint64_t ThisCount = getProfileCount(&S);
1722 uint64_t DefaultCount = (*SwitchWeights)[0];
1723 Weights = createProfileWeights(ThisCount, DefaultCount);
1724
1725 // Since we're chaining the switch default through each large case range, we
1726 // need to update the weight for the default, i.e., the first case, to include
1727 // this case.
1728 (*SwitchWeights)[0] += ThisCount;
1729 } else if (SwitchLikelihood)
1730 Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1731
1732 Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1733
1734 // Restore the appropriate insertion point.
1735 if (RestoreBB)
1736 Builder.SetInsertPoint(RestoreBB);
1737 else
1738 Builder.ClearInsertionPoint();
1739}
1740
1741void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1742 ArrayRef<const Attr *> Attrs) {
1743 // If there is no enclosing switch instance that we're aware of, then this
1744 // case statement and its block can be elided. This situation only happens
1745 // when we've constant-folded the switch, are emitting the constant case,
1746 // and part of the constant case includes another case statement. For
1747 // instance: switch (4) { case 4: do { case 5: } while (1); }
1748 if (!SwitchInsn) {
1749 EmitStmt(S.getSubStmt());
1750 return;
1751 }
1752
1753 // Handle case ranges.
1754 if (S.getRHS()) {
1755 EmitCaseStmtRange(S, Attrs);
1756 return;
1757 }
1758
1759 llvm::ConstantInt *CaseVal =
1760 Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1761
1762 // Emit debug info for the case value if it is an enum value.
1763 const ConstantExpr *CE;
1764 if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1765 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1766 else
1767 CE = dyn_cast<ConstantExpr>(S.getLHS());
1768 if (CE) {
1769 if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1770 if (CGDebugInfo *Dbg = getDebugInfo())
1771 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1772 Dbg->EmitGlobalVariable(DE->getDecl(),
1773 APValue(llvm::APSInt(CaseVal->getValue())));
1774 }
1775
1776 if (SwitchLikelihood)
1777 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1778
1779 // If the body of the case is just a 'break', try to not emit an empty block.
1780 // If we're profiling or we're not optimizing, leave the block in for better
1781 // debug and coverage analysis.
1782 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1783 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1784 isa<BreakStmt>(S.getSubStmt())) {
1785 JumpDest Block = BreakContinueStack.back().BreakBlock;
1786
1787 // Only do this optimization if there are no cleanups that need emitting.
1788 if (isObviouslyBranchWithoutCleanups(Block)) {
1789 if (SwitchWeights)
1790 SwitchWeights->push_back(getProfileCount(&S));
1791 SwitchInsn->addCase(CaseVal, Block.getBlock());
1792
1793 // If there was a fallthrough into this case, make sure to redirect it to
1794 // the end of the switch as well.
1795 if (Builder.GetInsertBlock()) {
1796 Builder.CreateBr(Block.getBlock());
1797 Builder.ClearInsertionPoint();
1798 }
1799 return;
1800 }
1801 }
1802
1803 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1804 EmitBlockWithFallThrough(CaseDest, &S);
1805 if (SwitchWeights)
1806 SwitchWeights->push_back(getProfileCount(&S));
1807 SwitchInsn->addCase(CaseVal, CaseDest);
1808
1809 // Recursively emitting the statement is acceptable, but is not wonderful for
1810 // code where we have many case statements nested together, i.e.:
1811 // case 1:
1812 // case 2:
1813 // case 3: etc.
1814 // Handling this recursively will create a new block for each case statement
1815 // that falls through to the next case which is IR intensive. It also causes
1816 // deep recursion which can run into stack depth limitations. Handle
1817 // sequential non-range case statements specially.
1818 //
1819 // TODO: When the next case has a likelihood attribute, the code returns
1820 // to the recursive algorithm. Maybe improve this case if it becomes
1821 // common practice to use a lot of attributes.
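// (Illustrative sketch, assuming no likelihood attributes: for
//   case 1: case 2: case 3: body;
// the value 1 is added above, and the loop below adds 2 and 3 as extra case
// values on the same CaseDest block instead of recursing once per case.)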
1822 const CaseStmt *CurCase = &S;
1823 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1824
1825 // Otherwise, iteratively add consecutive cases to this switch stmt.
1826 while (NextCase && NextCase->getRHS() == nullptr) {
1827 CurCase = NextCase;
1828 llvm::ConstantInt *CaseVal =
1829 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1830
1831 if (SwitchWeights)
1832 SwitchWeights->push_back(getProfileCount(NextCase));
1833 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1834 CaseDest = createBasicBlock("sw.bb");
1835 EmitBlockWithFallThrough(CaseDest, CurCase);
1836 }
1837 // Since this loop is only executed when the CaseStmt has no attributes,
1838 // use a hard-coded value.
1839 if (SwitchLikelihood)
1840 SwitchLikelihood->push_back(Stmt::LH_None);
1841
1842 SwitchInsn->addCase(CaseVal, CaseDest);
1843 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1844 }
1845
1846 // Generate a stop point for debug info if the case statement is
1847 // followed by a default statement. A fallthrough case before a
1848 // default case gets its own branch target.
1849 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1850 EmitStopPoint(CurCase);
1851
1852 // Normal default recursion for non-cases.
1853 EmitStmt(CurCase->getSubStmt());
1854}
1855
1856void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1857 ArrayRef<const Attr *> Attrs) {
1858 // If there is no enclosing switch instance that we're aware of, then this
1859 // default statement can be elided. This situation only happens when we've
1860 // constant-folded the switch.
1861 if (!SwitchInsn) {
1862 EmitStmt(S.getSubStmt());
1863 return;
1864 }
1865
1866 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1867 assert(DefaultBlock->empty() &&
1868 "EmitDefaultStmt: Default block already defined?");
1869
1870 if (SwitchLikelihood)
1871 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1872
1873 EmitBlockWithFallThrough(DefaultBlock, &S);
1874
1875 EmitStmt(S.getSubStmt());
1876}
1877
1878/// CollectStatementsForCase - Given the body of a 'switch' statement and a
1879/// constant value that is being switched on, see if we can dead code eliminate
1880/// the body of the switch to a simple series of statements to emit. Basically,
1881/// on a switch (5) we want to find these statements:
1882/// case 5:
1883/// printf(...); <--
1884/// ++i; <--
1885/// break;
1886///
1887/// and add them to the ResultStmts vector. If it is unsafe to do this
1888/// transformation (for example, one of the elided statements contains a label
1889/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1890/// should include statements after it (e.g. the printf() line is a substmt of
1891/// the case) then return CSFC_FallThrough. If we handled it and found a break
1892/// statement, then return CSFC_Success.
1893///
1894/// If Case is non-null, then we are looking for the specified case, checking
1895/// that nothing we jump over contains labels. If Case is null, then we found
1896/// the case and are looking for the break.
1897///
1898/// If the recursive walk actually finds our Case, then we set FoundCase to
1899/// true.
1900///
1901enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1902static CSFC_Result CollectStatementsForCase(const Stmt *S,
1903 const SwitchCase *Case,
1904 bool &FoundCase,
1905 SmallVectorImpl<const Stmt*> &ResultStmts) {
1906 // If this is a null statement, just succeed.
1907 if (!S)
1908 return Case ? CSFC_Success : CSFC_FallThrough;
1909
1910 // If this is the switchcase (case 4: or default) that we're looking for, then
1911 // we're in business. Just add the substatement.
1912 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1913 if (S == Case) {
1914 FoundCase = true;
1915 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1916 ResultStmts);
1917 }
1918
1919 // Otherwise, this is some other case or default statement, just ignore it.
1920 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1921 ResultStmts);
1922 }
1923
1924 // If we are in the live part of the code and we found our break statement,
1925 // return a success!
1926 if (!Case && isa<BreakStmt>(S))
1927 return CSFC_Success;
1928
1929 // If this is a compound statement, then it might contain the SwitchCase,
1930 // the break, or neither.
1931 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1932 // Handle this as two cases: we might be looking for the SwitchCase (if so
1933 // the skipped statements must be skippable) or we might already have it.
1934 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1935 bool StartedInLiveCode = FoundCase;
1936 unsigned StartSize = ResultStmts.size();
1937
1938 // If we've not found the case yet, scan through looking for it.
1939 if (Case) {
1940 // Keep track of whether we see a skipped declaration. The code could be
1941 // using the declaration even if it is skipped, so we can't optimize out
1942 // the decl if the kept statements might refer to it.
1943 bool HadSkippedDecl = false;
1944
1945 // If we're looking for the case, just see if we can skip each of the
1946 // substatements.
1947 for (; Case && I != E; ++I) {
1948 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1949
1950 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1951 case CSFC_Failure: return CSFC_Failure;
1952 case CSFC_Success:
1953 // A successful result means either 1) that the statement doesn't
1954 // contain the case and is skippable, or 2) that it does contain the case
1955 // value and also contains the break to exit the switch. In the latter
1956 // case, we just verify the rest of the statements are elidable.
1957 if (FoundCase) {
1958 // If we found the case and skipped declarations, we can't do the
1959 // optimization.
1960 if (HadSkippedDecl)
1961 return CSFC_Failure;
1962
1963 for (++I; I != E; ++I)
1964 if (CodeGenFunction::ContainsLabel(*I, true))
1965 return CSFC_Failure;
1966 return CSFC_Success;
1967 }
1968 break;
1969 case CSFC_FallThrough:
1970 // If we have a fallthrough condition, then we must have found the
1971 // case and started to include statements. Consider the rest of the
1972 // statements in the compound statement as candidates for inclusion.
1973 assert(FoundCase && "Didn't find case but returned fallthrough?");
1974 // We recursively found Case, so we're not looking for it anymore.
1975 Case = nullptr;
1976
1977 // If we found the case and skipped declarations, we can't do the
1978 // optimization.
1979 if (HadSkippedDecl)
1980 return CSFC_Failure;
1981 break;
1982 }
1983 }
1984
1985 if (!FoundCase)
1986 return CSFC_Success;
1987
1988 assert(!HadSkippedDecl && "fallthrough after skipping decl");
1989 }
1990
1991 // If we have statements in our range, then we know that the statements are
1992 // live and need to be added to the set of statements we're tracking.
1993 bool AnyDecls = false;
1994 for (; I != E; ++I) {
1995 AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1996
1997 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1998 case CSFC_Failure: return CSFC_Failure;
1999 case CSFC_FallThrough:
2000 // A fallthrough result means that the statement was simple and just
2001 // included in ResultStmt, keep adding them afterwards.
2002 break;
2003 case CSFC_Success:
2004 // A successful result means that we found the break statement and
2005 // stopped statement inclusion. We just ensure that any leftover stmts
2006 // are skippable and return success ourselves.
2007 for (++I; I != E; ++I)
2008 if (CodeGenFunction::ContainsLabel(*I, true))
2009 return CSFC_Failure;
2010 return CSFC_Success;
2011 }
2012 }
2013
2014 // If we're about to fall out of a scope without hitting a 'break;', we
2015 // can't perform the optimization if there were any decls in that scope
2016 // (we'd lose their end-of-lifetime).
2017 if (AnyDecls) {
2018 // If the entire compound statement was live, there's one more thing we
2019 // can try before giving up: emit the whole thing as a single statement.
2020 // We can do that unless the statement contains a 'break;'.
2021 // FIXME: Such a break must be at the end of a construct within this one.
2022 // We could emit this by just ignoring the BreakStmts entirely.
2023 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
2024 ResultStmts.resize(StartSize);
2025 ResultStmts.push_back(S);
2026 } else {
2027 return CSFC_Failure;
2028 }
2029 }
2030
2031 return CSFC_FallThrough;
2032 }
2033
2034 // Okay, this is some other statement that we don't handle explicitly, like a
2035 // for statement or increment etc. If we are skipping over this statement,
2036 // just verify it doesn't have labels, which would make it invalid to elide.
2037 if (Case) {
2038 if (CodeGenFunction::ContainsLabel(S, true))
2039 return CSFC_Failure;
2040 return CSFC_Success;
2041 }
2042
2043 // Otherwise, we want to include this statement. Everything is cool with that
2044 // so long as it doesn't contain a break out of the switch we're in.
2045 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
2046
2047 // Otherwise, everything is great. Include the statement and tell the caller
2048 // that we fall through and include the next statement as well.
2049 ResultStmts.push_back(S);
2050 return CSFC_FallThrough;
2051}
2052
2053/// FindCaseStatementsForValue - Find the case statement being jumped to and
2054/// then invoke CollectStatementsForCase to find the list of statements to emit
2055/// for a switch on constant. See the comment above CollectStatementsForCase
2056/// for more details.
2057static bool FindCaseStatementsForValue(const SwitchStmt &S,
2058 const llvm::APSInt &ConstantCondValue,
2059 SmallVectorImpl<const Stmt*> &ResultStmts,
2060 ASTContext &C,
2061 const SwitchCase *&ResultCase) {
2062 // First step, find the switch case that is being branched to. We can do this
2063 // efficiently by scanning the SwitchCase list.
2064 const SwitchCase *Case = S.getSwitchCaseList();
2065 const DefaultStmt *DefaultCase = nullptr;
2066
2067 for (; Case; Case = Case->getNextSwitchCase()) {
2068 // It's either a default or case. Just remember the default statement in
2069 // case we're not jumping to any numbered cases.
2070 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
2071 DefaultCase = DS;
2072 continue;
2073 }
2074
2075 // Check to see if this case is the one we're looking for.
2076 const CaseStmt *CS = cast<CaseStmt>(Case);
2077 // Don't handle case ranges yet.
2078 if (CS->getRHS()) return false;
2079
2080 // If we found our case, remember it as 'case'.
2081 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
2082 break;
2083 }
2084
2085 // If we didn't find a matching case, we use a default if it exists, or we
2086 // elide the whole switch body!
2087 if (!Case) {
2088 // It is safe to elide the body of the switch if it doesn't contain labels
2089 // etc. If it is safe, return successfully with an empty ResultStmts list.
2090 if (!DefaultCase)
2091 return !CodeGenFunction::ContainsLabel(&S);
2092 Case = DefaultCase;
2093 }
2094
2095 // Ok, we know which case is being jumped to, try to collect all the
2096 // statements that follow it. This can fail for a variety of reasons. Also,
2097 // check to see that the recursive walk actually found our case statement.
2098 // Insane cases like this can fail to find it in the recursive walk since we
2099 // don't handle every stmt kind:
2100 // switch (4) {
2101 // while (1) {
2102 // case 4: ...
2103 bool FoundCase = false;
2104 ResultCase = Case;
2105 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
2106 ResultStmts) != CSFC_Failure &&
2107 FoundCase;
2108}
2109
2110static std::optional<SmallVector<uint64_t, 16>>
2111getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
2112 // Are there enough branches to weight them?
2113 if (Likelihoods.size() <= 1)
2114 return std::nullopt;
2115
2116 uint64_t NumUnlikely = 0;
2117 uint64_t NumNone = 0;
2118 uint64_t NumLikely = 0;
2119 for (const auto LH : Likelihoods) {
2120 switch (LH) {
2121 case Stmt::LH_Unlikely:
2122 ++NumUnlikely;
2123 break;
2124 case Stmt::LH_None:
2125 ++NumNone;
2126 break;
2127 case Stmt::LH_Likely:
2128 ++NumLikely;
2129 break;
2130 }
2131 }
2132
2133 // Is there a likelihood attribute used?
2134 if (NumUnlikely == 0 && NumLikely == 0)
2135 return std::nullopt;
2136
2137 // When multiple cases share the same code they can be combined during
2138 // optimization. In that case the weights of the branch will be the sum of
2139 // the individual weights. Make sure the combined sum of all neutral cases
2140 // doesn't exceed the value of a single likely attribute.
2141 // The additions both avoid divisions by 0 and make sure the weights of None
2142 // don't exceed the weight of Likely.
2143 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2144 const uint64_t None = Likely / (NumNone + 1);
2145 const uint64_t Unlikely = 0;
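// (Worked example: with NumLikely = 1 and NumNone = 2, Likely is
// INT32_MAX / 3 = 715827882 and None is Likely / 3 = 238609294, so even the
// combined weight of both neutral cases stays below a single likely case.)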
2146
2147 SmallVector<uint64_t, 16> Result;
2148 Result.reserve(Likelihoods.size());
2149 for (const auto LH : Likelihoods) {
2150 switch (LH) {
2151 case Stmt::LH_Unlikely:
2152 Result.push_back(Unlikely);
2153 break;
2154 case Stmt::LH_None:
2155 Result.push_back(None);
2156 break;
2157 case Stmt::LH_Likely:
2158 Result.push_back(Likely);
2159 break;
2160 }
2161 }
2162
2163 return Result;
2164}
2165
2166void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
2167 // Handle nested switch statements.
2168 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2169 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2170 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2171 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2172
2173 // See if we can constant fold the condition of the switch and therefore only
2174 // emit the live case statement (if any) of the switch.
2175 llvm::APSInt ConstantCondValue;
2176 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
2177 SmallVector<const Stmt*, 4> CaseStmts;
2178 const SwitchCase *Case = nullptr;
2179 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2180 getContext(), Case)) {
2181 if (Case)
2182 incrementProfileCounter(Case);
2183 RunCleanupsScope ExecutedScope(*this);
2184
2185 if (S.getInit())
2186 EmitStmt(S.getInit());
2187
2188 // Emit the condition variable if needed inside the entire cleanup scope
2189 // used by this special case for constant folded switches.
2190 if (S.getConditionVariable())
2191 EmitDecl(*S.getConditionVariable());
2192
2193 // At this point, we are no longer "within" a switch instance, so
2194 // we can temporarily enforce this to ensure that any embedded case
2195 // statements are not emitted.
2196 SwitchInsn = nullptr;
2197
2198 // Okay, we can dead code eliminate everything except this case. Emit the
2199 // specified series of statements and we're good.
2200 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
2201 EmitStmt(CaseStmts[i]);
2202 incrementProfileCounter(&S);
2203
2204 // Now we want to restore the saved switch instance so that nested
2205 // switches continue to function properly.
2206 SwitchInsn = SavedSwitchInsn;
2207
2208 return;
2209 }
2210 }
2211
2212 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2213
2214 RunCleanupsScope ConditionScope(*this);
2215
2216 if (S.getInit())
2217 EmitStmt(S.getInit());
2218
2219 if (S.getConditionVariable())
2220 EmitDecl(*S.getConditionVariable());
2221 llvm::Value *CondV = EmitScalarExpr(S.getCond());
2222
2223 // Create basic block to hold stuff that comes after switch
2224 // statement. We also need to create a default block now so that
2225 // explicit case range tests can have a place to jump to on
2226 // failure.
2227 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2228 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
2229 if (PGO.haveRegionCounts()) {
2230 // Walk the SwitchCase list to find how many there are.
2231 uint64_t DefaultCount = 0;
2232 unsigned NumCases = 0;
2233 for (const SwitchCase *Case = S.getSwitchCaseList();
2234 Case;
2235 Case = Case->getNextSwitchCase()) {
2236 if (isa<DefaultStmt>(Case))
2237 DefaultCount = getProfileCount(Case);
2238 NumCases += 1;
2239 }
2240 SwitchWeights = new SmallVector<uint64_t, 16>();
2241 SwitchWeights->reserve(NumCases);
2242 // The default needs to be first. We store the edge count, so we already
2243 // know the right weight.
2244 SwitchWeights->push_back(DefaultCount);
2245 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2246 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2247 // Initialize the default case.
2248 SwitchLikelihood->push_back(Stmt::LH_None);
2249 }
2250
2251 CaseRangeBlock = DefaultBlock;
2252
2253 // Clear the insertion point to indicate we are in unreachable code.
2254 Builder.ClearInsertionPoint();
2255
2256 // All break statements jump to SwitchExit. If BreakContinueStack is
2257 // non-empty, reuse the last ContinueBlock.
2258 JumpDest OuterContinue;
2259 if (!BreakContinueStack.empty())
2260 OuterContinue = BreakContinueStack.back().ContinueBlock;
2261
2262 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
2263
2264 // Emit switch body.
2265 EmitStmt(S.getBody());
2266
2267 BreakContinueStack.pop_back();
2268
2269 // Update the default block in case explicit case range tests have
2270 // been chained on top.
2271 SwitchInsn->setDefaultDest(CaseRangeBlock);
2272
2273 // If a default was never emitted:
2274 if (!DefaultBlock->getParent()) {
2275 // If we have cleanups, emit the default block so that there's a
2276 // place to jump through the cleanups from.
2277 if (ConditionScope.requiresCleanups()) {
2278 EmitBlock(DefaultBlock);
2279
2280 // Otherwise, just forward the default block to the switch end.
2281 } else {
2282 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2283 delete DefaultBlock;
2284 }
2285 }
2286
2287 ConditionScope.ForceCleanup();
2288
2289 // Emit continuation.
2290 EmitBlock(SwitchExit.getBlock(), true);
2291 incrementProfileCounter(&S);
2292
2293 // If the switch has a condition wrapped by __builtin_unpredictable,
2294 // create metadata that specifies that the switch is unpredictable.
2295 // Don't bother if not optimizing because that metadata would not be used.
2296 auto *Call = dyn_cast<CallExpr>(S.getCond());
2297 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2298 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2299 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2300 llvm::MDBuilder MDHelper(getLLVMContext());
2301 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2302 MDHelper.createUnpredictable());
2303 }
2304 }
2305
2306 if (SwitchWeights) {
2307 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2308 "switch weights do not match switch cases");
2309 // If there's only one jump destination there's no sense weighting it.
2310 if (SwitchWeights->size() > 1)
2311 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2312 createProfileWeights(*SwitchWeights));
2313 delete SwitchWeights;
2314 } else if (SwitchLikelihood) {
2315 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2316 "switch likelihoods do not match switch cases");
2317 std::optional<SmallVector<uint64_t, 16>> LHW =
2318 getLikelihoodWeights(*SwitchLikelihood);
2319 if (LHW) {
2320 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2321 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2322 createProfileWeights(*LHW));
2323 }
2324 delete SwitchLikelihood;
2325 }
2326 SwitchInsn = SavedSwitchInsn;
2327 SwitchWeights = SavedSwitchWeights;
2328 SwitchLikelihood = SavedSwitchLikelihood;
2329 CaseRangeBlock = SavedCRBlock;
2330}
2331
2332static std::string
2333SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2334 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
2335 std::string Result;
2336
2337 while (*Constraint) {
2338 switch (*Constraint) {
2339 default:
2340 Result += Target.convertConstraint(Constraint);
2341 break;
2342 // Ignore these
2343 case '*':
2344 case '?':
2345 case '!':
2346 case '=': // Will see this and the following in multi-alternative constraints.
2347 case '+':
2348 break;
2349 case '#': // Ignore the rest of the constraint alternative.
2350 while (Constraint[1] && Constraint[1] != ',')
2351 Constraint++;
2352 break;
2353 case '&':
2354 case '%':
2355 Result += *Constraint;
2356 while (Constraint[1] && Constraint[1] == *Constraint)
2357 Constraint++;
2358 break;
2359 case ',':
2360 Result += "|";
2361 break;
2362 case 'g':
2363 Result += "imr";
2364 break;
2365 case '[': {
2366 assert(OutCons &&
2367 "Must pass output names to constraints with a symbolic name");
2368 unsigned Index;
2369 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2370 assert(result && "Could not resolve symbolic name"); (void)result;
2371 Result += llvm::utostr(Index);
2372 break;
2373 }
2374 }
2375
2376 Constraint++;
2377 }
2378
2379 return Result;
2380}
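// (Illustrative, under the assumption that the target passes 'r' through
// convertConstraint unchanged: SimplifyConstraint("g,r", ...) yields
// "imr|r" -- 'g' expands to "imr" and the ',' separating constraint
// alternatives becomes '|'.)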
2381
2382/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
2383/// as using a particular register, add that as a constraint that will be used
2384/// in this asm stmt.
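/// For example (illustrative, names are hypothetical):
///   register int Pivot asm("eax");
///   asm("..." : "=r"(Pivot));
/// rewrites the simplified "r" constraint to "{eax}" ("&{eax}" for
/// early-clobber outputs) and reports the canonical name through GCCReg.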
2385static std::string
2386AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2387 const TargetInfo &Target, CodeGenModule &CGM,
2388 const AsmStmt &Stmt, const bool EarlyClobber,
2389 std::string *GCCReg = nullptr) {
2390 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2391 if (!AsmDeclRef)
2392 return Constraint;
2393 const ValueDecl &Value = *AsmDeclRef->getDecl();
2394 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2395 if (!Variable)
2396 return Constraint;
2397 if (Variable->getStorageClass() != SC_Register)
2398 return Constraint;
2399 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2400 if (!Attr)
2401 return Constraint;
2402 StringRef Register = Attr->getLabel();
2403 assert(Target.isValidGCCRegisterName(Register));
2404 // We're using validateOutputConstraint here because we only care if
2405 // this is a register constraint.
2406 TargetInfo::ConstraintInfo Info(Constraint, "");
2407 if (Target.validateOutputConstraint(Info) &&
2408 !Info.allowsRegister()) {
2409 CGM.ErrorUnsupported(&Stmt, "__asm__");
2410 return Constraint;
2411 }
2412 // Canonicalize the register here before returning it.
2413 Register = Target.getNormalizedGCCRegisterName(Register);
2414 if (GCCReg != nullptr)
2415 *GCCReg = Register.str();
2416 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2417}
2418
2419std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2420 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2421 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2422 if (Info.allowsRegister() || !Info.allowsMemory()) {
2423 if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2424 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2425
2426 llvm::Type *Ty = ConvertType(InputType);
2427 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2428 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2429 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2430 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2431
2432 return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
2433 nullptr};
2434 }
2435 }
2436
2437 Address Addr = InputValue.getAddress();
2438 ConstraintStr += '*';
2439 return {InputValue.getPointer(*this), Addr.getElementType()};
2440}
2441
2442std::pair<llvm::Value *, llvm::Type *>
2443CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2444 const Expr *InputExpr,
2445 std::string &ConstraintStr) {
2446 // If this can't be a register or memory, i.e., has to be a constant
2447 // (immediate or symbolic), try to emit it as such.
2448 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2449 if (Info.requiresImmediateConstant()) {
2450 Expr::EvalResult EVResult;
2451 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2452
2453 llvm::APSInt IntResult;
2454 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2455 getContext()))
2456 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2457 }
2458
2459 Expr::EvalResult Result;
2460 if (InputExpr->EvaluateAsInt(Result, getContext()))
2461 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2462 nullptr};
2463 }
2464
2465 if (Info.allowsRegister() || !Info.allowsMemory())
2466 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2467 return {EmitScalarExpr(InputExpr), nullptr};
2468 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2469 return {EmitScalarExpr(InputExpr), nullptr};
2470 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2471 LValue Dest = EmitLValue(InputExpr);
2472 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2473 InputExpr->getExprLoc());
2474}
2475
2476/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2477/// asm call instruction. The !srcloc MDNode contains a list of constant
2478/// integers which are the source locations of the start of each line in the
2479/// asm.
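/// (Illustrative: a two-line asm string such as "insn1\n\tinsn2" yields a
/// !srcloc node with two encoded locations, one per line, so integrated
/// assembler diagnostics can point at the offending source line.)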
2480static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2481 CodeGenFunction &CGF) {
2482 SmallVector<llvm::Metadata *, 8> Locs;
2483 // Add the location of the first line to the MDNode.
2484 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2485 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2486 StringRef StrVal = Str->getString();
2487 if (!StrVal.empty()) {
2488 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2489 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2490 unsigned StartToken = 0;
2491 unsigned ByteOffset = 0;
2492
2493 // Add the location of the start of each subsequent line of the asm to the
2494 // MDNode.
2495 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2496 if (StrVal[i] != '\n') continue;
2497 SourceLocation LineLoc = Str->getLocationOfByte(
2498 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2499 Locs.push_back(llvm::ConstantAsMetadata::get(
2500 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2501 }
2502 }
2503
2504 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2505}
2506
2507static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2508 bool HasUnwindClobber, bool ReadOnly,
2509 bool ReadNone, bool NoMerge, bool NoConvergent,
2510 const AsmStmt &S,
2511 const std::vector<llvm::Type *> &ResultRegTypes,
2512 const std::vector<llvm::Type *> &ArgElemTypes,
2513 CodeGenFunction &CGF,
2514 std::vector<llvm::Value *> &RegResults) {
2515 if (!HasUnwindClobber)
2516 Result.addFnAttr(llvm::Attribute::NoUnwind);
2517
2518 if (NoMerge)
2519 Result.addFnAttr(llvm::Attribute::NoMerge);
2520 // Attach readnone and readonly attributes.
2521 if (!HasSideEffect) {
2522 if (ReadNone)
2523 Result.setDoesNotAccessMemory();
2524 else if (ReadOnly)
2525 Result.setOnlyReadsMemory();
2526 }
2527
2528 // Add elementtype attribute for indirect constraints.
2529 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2530 if (Pair.value()) {
2531 auto Attr = llvm::Attribute::get(
2532 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2533 Result.addParamAttr(Pair.index(), Attr);
2534 }
2535 }
2536
2537 // Slap the source location of the inline asm into a !srcloc metadata on the
2538 // call.
2539 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2540 Result.setMetadata("srcloc",
2541 getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2542 else {
2543 // At least put the line number on MS inline asm blobs.
2544 llvm::Constant *Loc =
2545 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2546 Result.setMetadata("srcloc",
2547 llvm::MDNode::get(CGF.getLLVMContext(),
2548 llvm::ConstantAsMetadata::get(Loc)));
2549 }
2550
2551 if (!NoConvergent && CGF.getLangOpts().assumeFunctionsAreConvergent())
2552 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2553 // convergent (meaning, they may call an intrinsically convergent op, such
2554 // as bar.sync, and so can't have certain optimizations applied around
2555 // them) unless it's explicitly marked 'noconvergent'.
2556 Result.addFnAttr(llvm::Attribute::Convergent);
2557 // Extract all of the register value results from the asm.
2558 if (ResultRegTypes.size() == 1) {
2559 RegResults.push_back(&Result);
2560 } else {
2561 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2562 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2563 RegResults.push_back(Tmp);
2564 }
2565 }
2566}
2567
2568static void
2569EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
2570 const llvm::ArrayRef<llvm::Value *> RegResults,
2571 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2572 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2573 const llvm::ArrayRef<LValue> ResultRegDests,
2574 const llvm::ArrayRef<QualType> ResultRegQualTys,
2575 const llvm::BitVector &ResultTypeRequiresCast,
2576 const llvm::BitVector &ResultRegIsFlagReg) {
2577 CGBuilderTy &Builder = CGF.Builder;
2578 CodeGenModule &CGM = CGF.CGM;
2579 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2580
2581 assert(RegResults.size() == ResultRegTypes.size());
2582 assert(RegResults.size() == ResultTruncRegTypes.size());
2583 assert(RegResults.size() == ResultRegDests.size());
2584 // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
2585 // in which case its size may grow.
2586 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2587 assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2588
2589 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2590 llvm::Value *Tmp = RegResults[i];
2591 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2592
2593 if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2594 // Target must guarantee the Value `Tmp` here is lowered to a boolean
2595 // value.
2596 llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
2597 llvm::Value *IsBooleanValue =
2598 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
2599 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2600 Builder.CreateCall(FnAssume, IsBooleanValue);
2601 }
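// (Note: this applies to condition-code outputs such as "=@ccz", which the
// backend lowers to 0 or 1; the llvm.assume of (Tmp ult 2) lets later
// passes treat the value as a boolean.)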
2602
2603 // If the result type of the LLVM IR asm doesn't match the result type of
2604 // the expression, do the conversion.
2605 if (ResultRegTypes[i] != TruncTy) {
2606
2607 // Truncate the integer result to the right size; note that TruncTy can be
2608 // a pointer.
2609 if (TruncTy->isFloatingPointTy())
2610 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2611 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2612 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2613 Tmp = Builder.CreateTrunc(
2614 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2615 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2616 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2617 uint64_t TmpSize =
2618 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2619 Tmp = Builder.CreatePtrToInt(
2620 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2621 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2622 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2623 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2624 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2625 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2626 }
2627 }
2628
2629 LValue Dest = ResultRegDests[i];
2630 // ResultTypeRequiresCast elements correspond to the first
2631 // ResultTypeRequiresCast.size() elements of RegResults.
2632 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2633 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2634 Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
2635 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2636 Builder.CreateStore(Tmp, A);
2637 continue;
2638 }
2639
2640 QualType Ty =
2641 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2642 if (Ty.isNull()) {
2643 const Expr *OutExpr = S.getOutputExpr(i);
2644 CGM.getDiags().Report(OutExpr->getExprLoc(),
2645 diag::err_store_value_to_reg);
2646 return;
2647 }
2648 Dest = CGF.MakeAddrLValue(A, Ty);
2649 }
2650 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2651 }
2652}
2653
2654static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
2655 const AsmStmt &S) {
2656 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2657
2658 StringRef Asm;
2659 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2660 Asm = GCCAsm->getAsmString()->getString();
2661
2662 auto &Ctx = CGF->CGM.getLLVMContext();
2663
2664 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2665 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2666 {StrTy->getType()}, false);
2667 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2668
2669 CGF->Builder.CreateCall(UBF, {StrTy});
2670}
2671
2672void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2673 // Pop all cleanup blocks at the end of the asm statement.
2674 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2675
2676 // Assemble the final asm string.
2677 std::string AsmString = S.generateAsmString(getContext());
2678
2679 // Get all the output and input constraints together.
2680 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2681 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2682
2683 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2684 bool IsValidTargetAsm = true;
2685 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2686 StringRef Name;
2687 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2688 Name = GAS->getOutputName(i);
2689 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2690 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2691 if (IsHipStdPar && !IsValid)
2692 IsValidTargetAsm = false;
2693 else
2694 assert(IsValid && "Failed to parse output constraint");
2695 OutputConstraintInfos.push_back(Info);
2696 }
2697
2698 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2699 StringRef Name;
2700 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2701 Name = GAS->getInputName(i);
2702 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2703 bool IsValid =
2704 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2705 if (IsHipStdPar && !IsValid)
2706 IsValidTargetAsm = false;
2707 else
2708 assert(IsValid && "Failed to parse input constraint");
2709 InputConstraintInfos.push_back(Info);
2710 }
2711
2712 if (!IsValidTargetAsm)
2713 return EmitHipStdParUnsupportedAsm(this, S);
2714
2715 std::string Constraints;
2716
2717 std::vector<LValue> ResultRegDests;
2718 std::vector<QualType> ResultRegQualTys;
2719 std::vector<llvm::Type *> ResultRegTypes;
2720 std::vector<llvm::Type *> ResultTruncRegTypes;
2721 std::vector<llvm::Type *> ArgTypes;
2722 std::vector<llvm::Type *> ArgElemTypes;
2723 std::vector<llvm::Value*> Args;
2724 llvm::BitVector ResultTypeRequiresCast;
2725 llvm::BitVector ResultRegIsFlagReg;
2726
2727 // Keep track of inout constraints.
2728 std::string InOutConstraints;
2729 std::vector<llvm::Value*> InOutArgs;
2730 std::vector<llvm::Type*> InOutArgTypes;
2731 std::vector<llvm::Type*> InOutArgElemTypes;
2732
2733 // Keep track of out constraints for tied input operand.
2734 std::vector<std::string> OutputConstraints;
2735
2736 // Keep track of defined physregs.
2737 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2738
2739 // An inline asm can be marked readonly if it meets the following conditions:
2740 // - it doesn't have any side effects
2741 // - it doesn't clobber memory
2742 // - it doesn't return a value by-reference
2743 // It can be marked readnone if it doesn't have any input memory constraints
2744 // in addition to meeting the conditions listed above.
2745 bool ReadOnly = true, ReadNone = true;
2746
2747 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2748 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2749
2750 // Simplify the output constraint.
2751 std::string OutputConstraint(S.getOutputConstraint(i));
2752 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2753 getTarget(), &OutputConstraintInfos);
2754
2755 const Expr *OutExpr = S.getOutputExpr(i);
2756 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2757
2758 std::string GCCReg;
2759 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2760 getTarget(), CGM, S,
2761 Info.earlyClobber(),
2762 &GCCReg);
2763 // Give an error on multiple outputs to same physreg.
2764 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2765 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2766
2767 OutputConstraints.push_back(OutputConstraint);
2768 LValue Dest = EmitLValue(OutExpr);
2769 if (!Constraints.empty())
2770 Constraints += ',';
2771
2772 // If this is a register output, then make the inline asm return it
2773 // by-value. If this is a memory result, return the value by-reference.
2774 QualType QTy = OutExpr->getType();
2775 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2776 hasAggregateEvaluationKind(QTy);
2777 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2778
2779 Constraints += "=" + OutputConstraint;
2780 ResultRegQualTys.push_back(QTy);
2781 ResultRegDests.push_back(Dest);
2782
2783 bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc");
2784 ResultRegIsFlagReg.push_back(IsFlagReg);
2785
2786 llvm::Type *Ty = ConvertTypeForMem(QTy);
2787 const bool RequiresCast = Info.allowsRegister() &&
2788 (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2789 Ty->isAggregateType());
2790
2791 ResultTruncRegTypes.push_back(Ty);
2792 ResultTypeRequiresCast.push_back(RequiresCast);
2793
2794 if (RequiresCast) {
2795 unsigned Size = getContext().getTypeSize(QTy);
2796 if (Size)
2797 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2798 else
2799 CGM.Error(OutExpr->getExprLoc(), "output size should not be zero");
2800 }
2801 ResultRegTypes.push_back(Ty);
2802 // If this output is tied to an input, and if the input is larger, then
2803 // we need to set the actual result type of the inline asm node to be the
2804 // same as the input type.
2805 if (Info.hasMatchingInput()) {
2806 unsigned InputNo;
2807 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2808 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2809 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2810 break;
2811 }
2812 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2813
2814 QualType InputTy = S.getInputExpr(InputNo)->getType();
2815 QualType OutputType = OutExpr->getType();
2816
2817 uint64_t InputSize = getContext().getTypeSize(InputTy);
2818 if (getContext().getTypeSize(OutputType) < InputSize) {
2819 // Form the asm to return the value as a larger integer or fp type.
2820 ResultRegTypes.back() = ConvertType(InputTy);
2821 }
2822 }
2823 if (llvm::Type* AdjTy =
2824 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2825 ResultRegTypes.back()))
2826 ResultRegTypes.back() = AdjTy;
2827 else {
2828 CGM.getDiags().Report(S.getAsmLoc(),
2829 diag::err_asm_invalid_type_in_input)
2830 << OutExpr->getType() << OutputConstraint;
2831 }
2832
2833 // Update largest vector width for any vector types.
2834 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2835 LargestVectorWidth =
2836 std::max((uint64_t)LargestVectorWidth,
2837 VT->getPrimitiveSizeInBits().getKnownMinValue());
2838 } else {
2839 Address DestAddr = Dest.getAddress();
2840 // Matrix types in memory are represented by arrays, but accessed through
2841 // vector pointers, with the alignment specified on the access operation.
2842 // For inline assembly, update pointer arguments to use vector pointers.
2843 // Otherwise there will be a mismatch if the matrix is also an
2844 // input argument which is represented as a vector.
2845 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2846 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2847
2848 ArgTypes.push_back(DestAddr.getType());
2849 ArgElemTypes.push_back(DestAddr.getElementType());
2850 Args.push_back(DestAddr.emitRawPointer(*this));
2851 Constraints += "=*";
2852 Constraints += OutputConstraint;
2853 ReadOnly = ReadNone = false;
2854 }
2855
2856 if (Info.isReadWrite()) {
2857 InOutConstraints += ',';
2858
2859 const Expr *InputExpr = S.getOutputExpr(i);
2860 llvm::Value *Arg;
2861 llvm::Type *ArgElemType;
2862 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2863 Info, Dest, InputExpr->getType(), InOutConstraints,
2864 InputExpr->getExprLoc());
2865
2866 if (llvm::Type* AdjTy =
2867 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2868 Arg->getType()))
2869 Arg = Builder.CreateBitCast(Arg, AdjTy);
2870
2871 // Update largest vector width for any vector types.
2872 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2873 LargestVectorWidth =
2874 std::max((uint64_t)LargestVectorWidth,
2875 VT->getPrimitiveSizeInBits().getKnownMinValue());
2876 // Only tie earlyclobber physregs.
2877 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2878 InOutConstraints += llvm::utostr(i);
2879 else
2880 InOutConstraints += OutputConstraint;
2881
2882 InOutArgTypes.push_back(Arg->getType());
2883 InOutArgElemTypes.push_back(ArgElemType);
2884 InOutArgs.push_back(Arg);
2885 }
2886 }
2887
2888 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2889 // to the return value slot. Only do this when returning in registers.
2890 if (isa<MSAsmStmt>(&S)) {
2891 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2892 if (RetAI.isDirect() || RetAI.isExtend()) {
2893 // Make a fake lvalue for the return value slot.
2894 LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy);
2895 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2896 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2897 ResultRegDests, AsmString, S.getNumOutputs());
2898 SawAsmBlock = true;
2899 }
2900 }
2901
2902 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2903 const Expr *InputExpr = S.getInputExpr(i);
2904
2905 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2906
2907 if (Info.allowsMemory())
2908 ReadNone = false;
2909
2910 if (!Constraints.empty())
2911 Constraints += ',';
2912
2913 // Simplify the input constraint.
2914 std::string InputConstraint(S.getInputConstraint(i));
2915 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2916 &OutputConstraintInfos);
2917
2918 InputConstraint = AddVariableConstraints(
2919 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2920 getTarget(), CGM, S, false /* No EarlyClobber */);
2921
2922 std::string ReplaceConstraint(InputConstraint);
2923 llvm::Value *Arg;
2924 llvm::Type *ArgElemType;
2925 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
2926
2927 // If this input argument is tied to a larger output result, extend the
2928 // input to be the same size as the output. The LLVM backend wants to see
2929 // the input and output of a matching constraint be the same size. Note
2930 // that GCC does not define what the top bits are here. We use zext because
2931 // that is usually cheaper, but LLVM IR should really get an anyext someday.
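// (Illustrative, names hypothetical: tying a 16-bit input to a 32-bit
// output, as in
//   asm("..." : "=r"(IntOut) : "0"(ShortIn));
// zero-extends the i16 argument to i32 below so both sides of the tied
// constraint have the same width.)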
2932 if (Info.hasTiedOperand()) {
2933 unsigned Output = Info.getTiedOperand();
2934 QualType OutputType = S.getOutputExpr(Output)->getType();
2935 QualType InputTy = InputExpr->getType();
2936
2937 if (getContext().getTypeSize(OutputType) >
2938 getContext().getTypeSize(InputTy)) {
2939 // Use ptrtoint as appropriate so that we can do our extension.
2940 if (isa<llvm::PointerType>(Arg->getType()))
2941 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2942 llvm::Type *OutputTy = ConvertType(OutputType);
2943 if (isa<llvm::IntegerType>(OutputTy))
2944 Arg = Builder.CreateZExt(Arg, OutputTy);
2945 else if (isa<llvm::PointerType>(OutputTy))
2946 Arg = Builder.CreateZExt(Arg, IntPtrTy);
2947 else if (OutputTy->isFloatingPointTy())
2948 Arg = Builder.CreateFPExt(Arg, OutputTy);
2949 }
2950 // Deal with the tied operands' constraint code in adjustInlineAsmType.
2951 ReplaceConstraint = OutputConstraints[Output];
2952 }
2953 if (llvm::Type* AdjTy =
2954 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2955 Arg->getType()))
2956 Arg = Builder.CreateBitCast(Arg, AdjTy);
2957 else
2958 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2959 << InputExpr->getType() << InputConstraint;
2960
2961 // Update largest vector width for any vector types.
2962 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2963 LargestVectorWidth =
2964 std::max((uint64_t)LargestVectorWidth,
2965 VT->getPrimitiveSizeInBits().getKnownMinValue());
2966
2967 ArgTypes.push_back(Arg->getType());
2968 ArgElemTypes.push_back(ArgElemType);
2969 Args.push_back(Arg);
2970 Constraints += InputConstraint;
2971 }
2972
2973 // Append the "input" part of inout constraints.
2974 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2975 ArgTypes.push_back(InOutArgTypes[i]);
2976 ArgElemTypes.push_back(InOutArgElemTypes[i]);
2977 Args.push_back(InOutArgs[i]);
2978 }
2979 Constraints += InOutConstraints;
2980
2981 // Labels
2982 SmallVector<llvm::BasicBlock *, 16> Transfer;
2983 llvm::BasicBlock *Fallthrough = nullptr;
2984 bool IsGCCAsmGoto = false;
2985 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2986 IsGCCAsmGoto = GS->isAsmGoto();
2987 if (IsGCCAsmGoto) {
2988 for (const auto *E : GS->labels()) {
2989 JumpDest Dest = getJumpDestForLabel(E->getLabel());
2990 Transfer.push_back(Dest.getBlock());
2991 if (!Constraints.empty())
2992 Constraints += ',';
2993 Constraints += "!i";
2994 }
2995 Fallthrough = createBasicBlock("asm.fallthrough");
2996 }
2997 }
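// (Illustrative: for
//   asm goto("jc %l0" :::: carry);
// one "!i" constraint is appended per label, and the statement is lowered
// below to a callbr whose indirect destinations are the label blocks, with
// "asm.fallthrough" as the normal destination.)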
2998
2999 bool HasUnwindClobber = false;
3000
3001 // Clobbers
3002 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
3003 StringRef Clobber = S.getClobber(i);
3004
3005 if (Clobber == "memory")
3006 ReadOnly = ReadNone = false;
3007 else if (Clobber == "unwind") {
3008 HasUnwindClobber = true;
3009 continue;
3010 } else if (Clobber != "cc") {
3011 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
3012 if (CGM.getCodeGenOpts().StackClashProtector &&
3013 getTarget().isSPRegName(Clobber)) {
3014 CGM.getDiags().Report(S.getAsmLoc(),
3015 diag::warn_stack_clash_protection_inline_asm);
3016 }
3017 }
3018
3019 if (isa<MSAsmStmt>(&S)) {
3020 if (Clobber == "eax" || Clobber == "edx") {
3021 if (Constraints.find("=&A") != std::string::npos)
3022 continue;
3023 std::string::size_type position1 =
3024 Constraints.find("={" + Clobber.str() + "}");
3025 if (position1 != std::string::npos) {
3026 Constraints.insert(position1 + 1, "&");
3027 continue;
3028 }
3029 std::string::size_type position2 = Constraints.find("=A");
3030 if (position2 != std::string::npos) {
3031 Constraints.insert(position2 + 1, "&");
3032 continue;
3033 }
3034 }
3035 }
3036 if (!Constraints.empty())
3037 Constraints += ',';
3038
3039 Constraints += "~{";
3040 Constraints += Clobber;
3041 Constraints += '}';
3042 }
3043
3044 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
3045 "unwind clobber can't be used with asm goto");
3046
3047 // Add machine specific clobbers
3048 std::string_view MachineClobbers = getTarget().getClobbers();
3049 if (!MachineClobbers.empty()) {
3050 if (!Constraints.empty())
3051 Constraints += ',';
3052 Constraints += MachineClobbers;
3053 }
3054
3055 llvm::Type *ResultType;
3056 if (ResultRegTypes.empty())
3057 ResultType = VoidTy;
3058 else if (ResultRegTypes.size() == 1)
3059 ResultType = ResultRegTypes[0];
3060 else
3061 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
3062
3063 llvm::FunctionType *FTy =
3064 llvm::FunctionType::get(ResultType, ArgTypes, false);
3065
3066 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3067
3068 llvm::InlineAsm::AsmDialect GnuAsmDialect =
3069 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
3070 ? llvm::InlineAsm::AD_ATT
3071 : llvm::InlineAsm::AD_Intel;
3072 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
3073 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3074
3075 llvm::InlineAsm *IA = llvm::InlineAsm::get(
3076 FTy, AsmString, Constraints, HasSideEffect,
3077 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
3078 std::vector<llvm::Value*> RegResults;
3079 llvm::CallBrInst *CBR;
3080 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3081 CBRRegResults;
3082 if (IsGCCAsmGoto) {
3083 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
3084 EmitBlock(Fallthrough);
3085 UpdateAsmCallInst(*CBR, HasSideEffect, /*HasUnwindClobber=*/false, ReadOnly,
3086 ReadNone, InNoMergeAttributedStmt,
3087 InNoConvergentAttributedStmt, S, ResultRegTypes,
3088 ArgElemTypes, *this, RegResults);
3089 // Because we are emitting code top to bottom, we don't have enough
3090 // information at this point to know precisely whether we have a critical
3091 // edge. If we have outputs, split all indirect destinations.
3092 if (!RegResults.empty()) {
3093 unsigned i = 0;
3094 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3095 llvm::Twine SynthName = Dest->getName() + ".split";
3096 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
3097 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3098 Builder.SetInsertPoint(SynthBB);
3099
3100 if (ResultRegTypes.size() == 1) {
3101 CBRRegResults[SynthBB].push_back(CBR);
3102 } else {
3103 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3104 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
3105 CBRRegResults[SynthBB].push_back(Tmp);
3106 }
3107 }
3108
3109 EmitBranch(Dest);
3110 EmitBlock(SynthBB);
3111 CBR->setIndirectDest(i++, SynthBB);
3112 }
3113 }
3114 } else if (HasUnwindClobber) {
3115 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
3116 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/true,
3117 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3118 InNoConvergentAttributedStmt, S, ResultRegTypes,
3119 ArgElemTypes, *this, RegResults);
3120 } else {
3121 llvm::CallInst *Result =
3122 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
3123 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/false,
3124 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3125 InNoConvergentAttributedStmt, S, ResultRegTypes,
3126 ArgElemTypes, *this, RegResults);
3127 }
3128
3129 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3130 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3131 ResultRegIsFlagReg);
3132
3133 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
3134 // different insertion point; one for each indirect destination and with
3135 // CBRRegResults rather than RegResults.
3136 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3137 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3138 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3139 Builder.SetInsertPoint(Succ, --(Succ->end()));
3140 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
3141 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3142 ResultTypeRequiresCast, ResultRegIsFlagReg);
3143 }
3144 }
3145}
3146
3147LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
3148 const RecordDecl *RD = S.getCapturedRecordDecl();
3149 QualType RecordTy = getContext().getRecordType(RD);
3150
3151 // Initialize the captured struct.
3152 LValue SlotLV =
3153 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
3154
3155 RecordDecl::field_iterator CurField = RD->field_begin();
3156 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
3157 E = S.capture_init_end();
3158 I != E; ++I, ++CurField) {
3159 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
3160 if (CurField->hasCapturedVLAType()) {
3161 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
3162 } else {
3163 EmitInitializerForField(*CurField, LV, *I);
3164 }
3165 }
3166
3167 return SlotLV;
3168}
3169
3170/// Generate an outlined function for the body of a CapturedStmt, store any
3171/// captured variables into the captured struct, and call the outlined function.
3172llvm::Function *
3173CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
3174 LValue CapStruct = InitCapturedStruct(S);
3175
3176 // Emit the CapturedDecl
3177 CodeGenFunction CGF(CGM, true);
3178 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3179 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3180 delete CGF.CapturedStmtInfo;
3181
3182 // Emit call to the helper function.
3183 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
3184
3185 return F;
3186}
3187
3188Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
3189 LValue CapStruct = InitCapturedStruct(S);
3190 return CapStruct.getAddress();
3191}
3192
3193/// Creates the outlined function for a CapturedStmt.
3194llvm::Function *
3195CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
3196 assert(CapturedStmtInfo &&
3197 "CapturedStmtInfo should be set when generating the captured function");
3198 const CapturedDecl *CD = S.getCapturedDecl();
3199 const RecordDecl *RD = S.getCapturedRecordDecl();
3200 SourceLocation Loc = S.getBeginLoc();
3201 assert(CD->hasBody() && "missing CapturedDecl body");
3202
3203 // Build the argument list.
3204 ASTContext &Ctx = CGM.getContext();
3205 FunctionArgList Args;
3206 Args.append(CD->param_begin(), CD->param_end());
3207
3208 // Create the function declaration.
3209 const CGFunctionInfo &FuncInfo =
3210 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
3211 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
3212
3213 llvm::Function *F =
3214 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3215 CapturedStmtInfo->getHelperName(), &CGM.getModule());
3216 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
3217 if (CD->isNothrow())
3218 F->addFnAttr(llvm::Attribute::NoUnwind);
3219
3220 // Generate the function.
3221 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
3222 CD->getBody()->getBeginLoc());
3223 // Set the context parameter in CapturedStmtInfo.
3224 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
3225 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
3226
3227 // Initialize variable-length arrays.
3228 LValue Base = MakeNaturalAlignRawAddrLValue(CapturedStmtInfo->getContextValue(),
3229 Ctx.getTagDeclType(RD));
3230 for (auto *FD : RD->fields()) {
3231 if (FD->hasCapturedVLAType()) {
3232 auto *ExprArg =
3233 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
3234 .getScalarVal();
3235 auto VAT = FD->getCapturedVLAType();
3236 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3237 }
3238 }
3239
3240 // If 'this' is captured, load it into CXXThisValue.
3241 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3242 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3243 LValue ThisLValue = EmitLValueForField(Base, FD);
3244 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3245 }
3246
3247 PGO.assignRegionCounters(GlobalDecl(CD), F);
3248 CapturedStmtInfo->EmitBody(*this, CD->getBody());
3249 FinishFunction(CD->getBodyRBrace());
3250
3251 return F;
3252}
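The VLA plumbing above is what lets the outlined body keep using variably modified types; a hedged example of source that exercises it:

  void scale(int n) {
    double buf[n];         // 'n' becomes a captured VLA bound
    #pragma omp parallel
    {
      // sizeof on a VLA is a runtime value; inside the outlined function
      // it is recomputed from the size expression seeded into VLASizeMap.
      buf[0] = sizeof buf;
    }
  }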
3253
3254// Returns the first convergence entry/loop/anchor instruction found in |BB|,
3255// or nullptr if there is none.
3256static llvm::ConvergenceControlInst *getConvergenceToken(llvm::BasicBlock *BB) {
3257 for (auto &I : *BB) {
3258 if (auto *CI = dyn_cast<llvm::ConvergenceControlInst>(&I))
3259 return CI;
3260 }
3261 return nullptr;
3262}
3263
3264llvm::CallBase *
3265CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input) {
3266 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3267 assert(ParentToken);
3268
3269 llvm::Value *bundleArgs[] = {ParentToken};
3270 llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
3271 auto *Output = llvm::CallBase::addOperandBundle(
3272 Input, llvm::LLVMContext::OB_convergencectrl, OB, Input->getIterator());
3273 Input->replaceAllUsesWith(Output);
3274 Input->eraseFromParent();
3275 return Output;
3276}
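Note the rewrite-and-replace pattern above: operand bundles cannot be added to an existing instruction in place, so a new call is created and swapped in via replaceAllUsesWith. When a call site is built fresh, the bundle can be supplied up front instead; a minimal sketch, assuming Callee, Args, and ParentToken are in scope:

  llvm::Value *BundleArgs[] = {ParentToken};
  llvm::OperandBundleDef OB("convergencectrl", BundleArgs);
  // CreateCall accepts operand bundles directly, which is not an option
  // for the CreateIntrinsic call used by emitConvergenceLoopToken below.
  llvm::CallInst *Call = Builder.CreateCall(Callee, Args, {OB});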
3277
3278llvm::ConvergenceControlInst *
3279CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB) {
3280 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3281 if (BB->empty())
3282 Builder.SetInsertPoint(BB);
3283 else
3284 Builder.SetInsertPoint(BB->getFirstInsertionPt());
3285
3286 llvm::CallBase *CB = Builder.CreateIntrinsic(
3287 llvm::Intrinsic::experimental_convergence_loop, {}, {});
3288 Builder.restoreIP(IP);
3289
3290 CB = addConvergenceControlToken(CB);
3291 return cast<llvm::ConvergenceControlInst>(CB);
3292}
3293
3294llvm::ConvergenceControlInst *
3295CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3296 llvm::BasicBlock *BB = &F->getEntryBlock();
3297 llvm::ConvergenceControlInst *Token = getConvergenceToken(BB);
3298 if (Token)
3299 return Token;
3300
3301 // Adding a convergence token requires the function to be marked as
3302 // convergent.
3303 F->setConvergent();
3304
3305 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3306 Builder.SetInsertPoint(&BB->front());
3307 llvm::CallBase *I = Builder.CreateIntrinsic(
3308 llvm::Intrinsic::experimental_convergence_entry, {}, {});
3309 assert(isa<llvm::IntrinsicInst>(I));
3310 Builder.restoreIP(IP);
3311
3312 return cast<llvm::ConvergenceControlInst>(I);
3313}
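Taken together, the entry and loop tokens give convergent calls explicit anchors. A hedged source-level sketch (GPU-flavored; ballot is a stand-in for any function carrying the convergent attribute):

  __attribute__((convergent)) int ballot(int pred);

  int count(int pred, int n) {
    int c = ballot(pred);        // bundled with the function's entry token
    for (int i = 0; i < n; ++i)
      c += ballot(pred);         // bundled with the loop's own token
    return c;
  }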