clang 10.0.0svn
CGStmt.cpp
1 //===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code to emit Stmt nodes as LLVM code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CodeGenFunction.h"
14 #include "CGDebugInfo.h"
15 #include "CodeGenModule.h"
16 #include "TargetInfo.h"
17 #include "clang/AST/StmtVisitor.h"
18 #include "clang/Basic/Builtins.h"
19 #include "clang/Basic/PrettyStackTrace.h"
20 #include "clang/Basic/TargetInfo.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/InlineAsm.h"
24 #include "llvm/IR/Intrinsics.h"
25 #include "llvm/IR/MDBuilder.h"
26 
27 using namespace clang;
28 using namespace CodeGen;
29 
30 //===----------------------------------------------------------------------===//
31 // Statement Emission
32 //===----------------------------------------------------------------------===//
33 
34 void CodeGenFunction::EmitStopPoint(const Stmt *S) {
35  if (CGDebugInfo *DI = getDebugInfo()) {
36  SourceLocation Loc;
37  Loc = S->getBeginLoc();
38  DI->EmitLocation(Builder, Loc);
39 
40  LastStopPoint = Loc;
41  }
42 }
43 
44 void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
45  assert(S && "Null statement?");
46  PGO.setCurrentStmt(S);
47 
48  // These statements have their own debug info handling.
49  if (EmitSimpleStmt(S))
50  return;
51 
52  // Check if we are generating unreachable code.
53  if (!HaveInsertPoint()) {
54  // If so, and the statement doesn't contain a label, then we do not need to
55  // generate actual code. This is safe because (1) the current point is
56  // unreachable, so we don't need to execute the code, and (2) we've already
57  // handled the statements which update internal data structures (like the
58  // local variable map) which could be used by subsequent statements.
59  if (!ContainsLabel(S)) {
60  // Verify that any decl statements were handled as simple, they may be in
61  // scope of subsequent reachable statements.
62  assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
63  return;
64  }
65 
66  // Otherwise, make a new block to hold the code.
67  EnsureInsertPoint();
68  }
69 
70  // Generate a stoppoint if we are emitting debug info.
71  EmitStopPoint(S);
72 
73  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
74  // enabled.
75  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
76  if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
77  EmitSimpleOMPExecutableDirective(*D);
78  return;
79  }
80  }
81 
82  switch (S->getStmtClass()) {
83  case Stmt::NoStmtClass:
84  case Stmt::CXXCatchStmtClass:
85  case Stmt::SEHExceptStmtClass:
86  case Stmt::SEHFinallyStmtClass:
87  case Stmt::MSDependentExistsStmtClass:
88  llvm_unreachable("invalid statement class to emit generically");
89  case Stmt::NullStmtClass:
90  case Stmt::CompoundStmtClass:
91  case Stmt::DeclStmtClass:
92  case Stmt::LabelStmtClass:
93  case Stmt::AttributedStmtClass:
94  case Stmt::GotoStmtClass:
95  case Stmt::BreakStmtClass:
96  case Stmt::ContinueStmtClass:
97  case Stmt::DefaultStmtClass:
98  case Stmt::CaseStmtClass:
99  case Stmt::SEHLeaveStmtClass:
100  llvm_unreachable("should have emitted these statements as simple");
101 
102 #define STMT(Type, Base)
103 #define ABSTRACT_STMT(Op)
104 #define EXPR(Type, Base) \
105  case Stmt::Type##Class:
106 #include "clang/AST/StmtNodes.inc"
107  {
108  // Remember the block we came in on.
109  llvm::BasicBlock *incoming = Builder.GetInsertBlock();
110  assert(incoming && "expression emission must have an insertion point");
111 
112  EmitIgnoredExpr(cast<Expr>(S));
113 
114  llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
115  assert(outgoing && "expression emission cleared block!");
116 
117  // The expression emitters assume (reasonably!) that the insertion
118  // point is always set. To maintain that, the call-emission code
119  // for noreturn functions has to enter a new block with no
120  // predecessors. We want to kill that block and mark the current
121  // insertion point unreachable in the common case of a call like
122  // "exit();". Since expression emission doesn't otherwise create
123  // blocks with no predecessors, we can just test for that.
124  // However, we must be careful not to do this to our incoming
125  // block, because *statement* emission does sometimes create
126  // reachable blocks which will have no predecessors until later in
127  // the function. This occurs with, e.g., labels that are not
128  // reachable by fallthrough.
129  if (incoming != outgoing && outgoing->use_empty()) {
130  outgoing->eraseFromParent();
131  Builder.ClearInsertionPoint();
132  }
133  break;
134  }
135 
136  case Stmt::IndirectGotoStmtClass:
137  EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
138 
139  case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
140  case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
141  case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
142  case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;
143 
144  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
145 
146  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
147  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
148  case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
149  case Stmt::CoroutineBodyStmtClass:
150  EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
151  break;
152  case Stmt::CoreturnStmtClass:
153  EmitCoreturnStmt(cast<CoreturnStmt>(*S));
154  break;
155  case Stmt::CapturedStmtClass: {
156  const CapturedStmt *CS = cast<CapturedStmt>(S);
157  EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
158  }
159  break;
160  case Stmt::ObjCAtTryStmtClass:
161  EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
162  break;
163  case Stmt::ObjCAtCatchStmtClass:
164  llvm_unreachable(
165  "@catch statements should be handled by EmitObjCAtTryStmt");
166  case Stmt::ObjCAtFinallyStmtClass:
167  llvm_unreachable(
168  "@finally statements should be handled by EmitObjCAtTryStmt");
169  case Stmt::ObjCAtThrowStmtClass:
170  EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
171  break;
172  case Stmt::ObjCAtSynchronizedStmtClass:
173  EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
174  break;
175  case Stmt::ObjCForCollectionStmtClass:
176  EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
177  break;
178  case Stmt::ObjCAutoreleasePoolStmtClass:
179  EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
180  break;
181 
182  case Stmt::CXXTryStmtClass:
183  EmitCXXTryStmt(cast<CXXTryStmt>(*S));
184  break;
185  case Stmt::CXXForRangeStmtClass:
186  EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
187  break;
188  case Stmt::SEHTryStmtClass:
189  EmitSEHTryStmt(cast<SEHTryStmt>(*S));
190  break;
191  case Stmt::OMPParallelDirectiveClass:
192  EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
193  break;
194  case Stmt::OMPSimdDirectiveClass:
195  EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
196  break;
197  case Stmt::OMPForDirectiveClass:
198  EmitOMPForDirective(cast<OMPForDirective>(*S));
199  break;
200  case Stmt::OMPForSimdDirectiveClass:
201  EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
202  break;
203  case Stmt::OMPSectionsDirectiveClass:
204  EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
205  break;
206  case Stmt::OMPSectionDirectiveClass:
207  EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
208  break;
209  case Stmt::OMPSingleDirectiveClass:
210  EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
211  break;
212  case Stmt::OMPMasterDirectiveClass:
213  EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
214  break;
215  case Stmt::OMPCriticalDirectiveClass:
216  EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
217  break;
218  case Stmt::OMPParallelForDirectiveClass:
219  EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
220  break;
221  case Stmt::OMPParallelForSimdDirectiveClass:
222  EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
223  break;
224  case Stmt::OMPParallelSectionsDirectiveClass:
225  EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
226  break;
227  case Stmt::OMPTaskDirectiveClass:
228  EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
229  break;
230  case Stmt::OMPTaskyieldDirectiveClass:
231  EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
232  break;
233  case Stmt::OMPBarrierDirectiveClass:
234  EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
235  break;
236  case Stmt::OMPTaskwaitDirectiveClass:
237  EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
238  break;
239  case Stmt::OMPTaskgroupDirectiveClass:
240  EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
241  break;
242  case Stmt::OMPFlushDirectiveClass:
243  EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
244  break;
245  case Stmt::OMPOrderedDirectiveClass:
246  EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
247  break;
248  case Stmt::OMPAtomicDirectiveClass:
249  EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
250  break;
251  case Stmt::OMPTargetDirectiveClass:
252  EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
253  break;
254  case Stmt::OMPTeamsDirectiveClass:
255  EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
256  break;
257  case Stmt::OMPCancellationPointDirectiveClass:
258  EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
259  break;
260  case Stmt::OMPCancelDirectiveClass:
261  EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
262  break;
263  case Stmt::OMPTargetDataDirectiveClass:
264  EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
265  break;
266  case Stmt::OMPTargetEnterDataDirectiveClass:
267  EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
268  break;
269  case Stmt::OMPTargetExitDataDirectiveClass:
270  EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
271  break;
272  case Stmt::OMPTargetParallelDirectiveClass:
273  EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
274  break;
275  case Stmt::OMPTargetParallelForDirectiveClass:
276  EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
277  break;
278  case Stmt::OMPTaskLoopDirectiveClass:
279  EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
280  break;
281  case Stmt::OMPTaskLoopSimdDirectiveClass:
282  EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
283  break;
284  case Stmt::OMPDistributeDirectiveClass:
285  EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
286  break;
287  case Stmt::OMPTargetUpdateDirectiveClass:
288  EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
289  break;
290  case Stmt::OMPDistributeParallelForDirectiveClass:
291  EmitOMPDistributeParallelForDirective(
292  cast<OMPDistributeParallelForDirective>(*S));
293  break;
294  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
295  EmitOMPDistributeParallelForSimdDirective(
296  cast<OMPDistributeParallelForSimdDirective>(*S));
297  break;
298  case Stmt::OMPDistributeSimdDirectiveClass:
299  EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
300  break;
301  case Stmt::OMPTargetParallelForSimdDirectiveClass:
302  EmitOMPTargetParallelForSimdDirective(
303  cast<OMPTargetParallelForSimdDirective>(*S));
304  break;
305  case Stmt::OMPTargetSimdDirectiveClass:
306  EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
307  break;
308  case Stmt::OMPTeamsDistributeDirectiveClass:
309  EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
310  break;
311  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
312  EmitOMPTeamsDistributeSimdDirective(
313  cast<OMPTeamsDistributeSimdDirective>(*S));
314  break;
315  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
316  EmitOMPTeamsDistributeParallelForSimdDirective(
317  cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
318  break;
319  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
320  EmitOMPTeamsDistributeParallelForDirective(
321  cast<OMPTeamsDistributeParallelForDirective>(*S));
322  break;
323  case Stmt::OMPTargetTeamsDirectiveClass:
324  EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
325  break;
326  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
327  EmitOMPTargetTeamsDistributeDirective(
328  cast<OMPTargetTeamsDistributeDirective>(*S));
329  break;
330  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
331  EmitOMPTargetTeamsDistributeParallelForDirective(
332  cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
333  break;
334  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
335  EmitOMPTargetTeamsDistributeParallelForSimdDirective(
336  cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
337  break;
338  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
339  EmitOMPTargetTeamsDistributeSimdDirective(
340  cast<OMPTargetTeamsDistributeSimdDirective>(*S));
341  break;
342  }
343 }
344 
345 bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
346  switch (S->getStmtClass()) {
347  default: return false;
348  case Stmt::NullStmtClass: break;
349  case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
350  case Stmt::DeclStmtClass: EmitDeclStmt(cast<DeclStmt>(*S)); break;
351  case Stmt::LabelStmtClass: EmitLabelStmt(cast<LabelStmt>(*S)); break;
352  case Stmt::AttributedStmtClass:
353  EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
354  case Stmt::GotoStmtClass: EmitGotoStmt(cast<GotoStmt>(*S)); break;
355  case Stmt::BreakStmtClass: EmitBreakStmt(cast<BreakStmt>(*S)); break;
356  case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
357  case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break;
358  case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break;
359  case Stmt::SEHLeaveStmtClass: EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S)); break;
360  }
361 
362  return true;
363 }
364 
365 /// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
366 /// this captures the expression result of the last sub-statement and returns it
367 /// (for use by the statement expression extension).
368 Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
369  AggValueSlot AggSlot) {
370  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
371  "LLVM IR generation of compound statement ('{}')");
372 
373  // Keep track of the current cleanup stack depth, including debug scopes.
374  LexicalScope Scope(*this, S.getSourceRange());
375 
376  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
377 }
378 
379 Address
380 CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
381  bool GetLast,
382  AggValueSlot AggSlot) {
383 
384  const Stmt *ExprResult = S.getStmtExprResult();
385  assert((!GetLast || (GetLast && ExprResult)) &&
386  "If GetLast is true then the CompoundStmt must have a StmtExprResult");
387 
388  Address RetAlloca = Address::invalid();
389 
390  for (auto *CurStmt : S.body()) {
391  if (GetLast && ExprResult == CurStmt) {
392  // We have to special case labels here. They are statements, but when put
393  // at the end of a statement expression, they yield the value of their
394  // subexpression. Handle this by walking through all labels we encounter,
395  // emitting them before we evaluate the subexpr.
396  // Similar issues arise for attributed statements.
397  while (!isa<Expr>(ExprResult)) {
398  if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
399  EmitLabel(LS->getDecl());
400  ExprResult = LS->getSubStmt();
401  } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
402  // FIXME: Update this if we ever have attributes that affect the
403  // semantics of an expression.
404  ExprResult = AS->getSubStmt();
405  } else {
406  llvm_unreachable("unknown value statement");
407  }
408  }
409 
410  EnsureInsertPoint();
411 
412  const Expr *E = cast<Expr>(ExprResult);
413  QualType ExprTy = E->getType();
414  if (hasAggregateEvaluationKind(ExprTy)) {
415  EmitAggExpr(E, AggSlot);
416  } else {
417  // We can't return an RValue here because there might be cleanups at
418  // the end of the StmtExpr. Because of that, we have to emit the result
419  // here into a temporary alloca.
420  RetAlloca = CreateMemTemp(ExprTy);
421  EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
422  /*IsInit*/ false);
423  }
424  } else {
425  EmitStmt(CurStmt);
426  }
427  }
428 
429  return RetAlloca;
430 }
431 
432 void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
433  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
434 
435  // If there is a cleanup stack, then it isn't worth trying to
436  // simplify this block (we would need to remove it from the scope map
437  // and cleanup entry).
438  if (!EHStack.empty())
439  return;
440 
441  // Can only simplify direct branches.
442  if (!BI || !BI->isUnconditional())
443  return;
444 
445  // Can only simplify empty blocks.
446  if (BI->getIterator() != BB->begin())
447  return;
448 
449  BB->replaceAllUsesWith(BI->getSuccessor(0));
450  BI->eraseFromParent();
451  BB->eraseFromParent();
452 }
453 
454 void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
455  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
456 
457  // Fall out of the current block (if necessary).
458  EmitBranch(BB);
459 
460  if (IsFinished && BB->use_empty()) {
461  delete BB;
462  return;
463  }
464 
465  // Place the block after the current block, if possible, or else at
466  // the end of the function.
467  if (CurBB && CurBB->getParent())
468  CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
469  else
470  CurFn->getBasicBlockList().push_back(BB);
471  Builder.SetInsertPoint(BB);
472 }
473 
474 void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
475  // Emit a branch from the current block to the target one if this
476  // was a real block. If this was just a fall-through block after a
477  // terminator, don't emit it.
478  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
479 
480  if (!CurBB || CurBB->getTerminator()) {
481  // If there is no insert point or the previous block is already
482  // terminated, don't touch it.
483  } else {
484  // Otherwise, create a fall-through branch.
485  Builder.CreateBr(Target);
486  }
487 
488  Builder.ClearInsertionPoint();
489 }
490 
491 void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
492  bool inserted = false;
493  for (llvm::User *u : block->users()) {
494  if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
495  CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
496  block);
497  inserted = true;
498  break;
499  }
500  }
501 
502  if (!inserted)
503  CurFn->getBasicBlockList().push_back(block);
504 
505  Builder.SetInsertPoint(block);
506 }
507 
508 CodeGenFunction::JumpDest
509 CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
510  JumpDest &Dest = LabelMap[D];
511  if (Dest.isValid()) return Dest;
512 
513  // Create, but don't insert, the new block.
514  Dest = JumpDest(createBasicBlock(D->getName()),
515  EHScopeStack::stable_iterator::invalid(),
516  NextCleanupDestIndex++);
517  return Dest;
518 }
519 
520 void CodeGenFunction::EmitLabel(const LabelDecl *D) {
521  // Add this label to the current lexical scope if we're within any
522  // normal cleanups. Jumps "in" to this label --- when permitted by
523  // the language --- may need to be routed around such cleanups.
524  if (EHStack.hasNormalCleanups() && CurLexicalScope)
525  CurLexicalScope->addLabel(D);
526 
527  JumpDest &Dest = LabelMap[D];
528 
529  // If we didn't need a forward reference to this label, just go
530  // ahead and create a destination at the current scope.
531  if (!Dest.isValid()) {
532  Dest = getJumpDestInCurrentScope(D->getName());
533 
534  // Otherwise, we need to give this label a target depth and remove
535  // it from the branch-fixups list.
536  } else {
537  assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
538  Dest.setScopeDepth(EHStack.stable_begin());
539  ResolveBranchFixups(Dest.getBlock());
540  }
541 
542  EmitBlock(Dest.getBlock());
543 
544  // Emit debug info for labels.
545  if (CGDebugInfo *DI = getDebugInfo()) {
546  if (CGM.getCodeGenOpts().getDebugInfo() >=
547  codegenoptions::LimitedDebugInfo) {
548  DI->setLocation(D->getLocation());
549  DI->EmitLabel(D, Builder);
550  }
551  }
552 
553  incrementProfileCounter(D->getStmt());
554 }
555 
556 /// Change the cleanup scope of the labels in this lexical scope to
557 /// match the scope of the enclosing context.
558 void CodeGenFunction::LexicalScope::rescopeLabels() {
559  assert(!Labels.empty());
560  EHScopeStack::stable_iterator innermostScope
561  = CGF.EHStack.getInnermostNormalCleanup();
562 
563  // Change the scope depth of all the labels.
564  for (SmallVectorImpl<const LabelDecl*>::const_iterator
565  i = Labels.begin(), e = Labels.end(); i != e; ++i) {
566  assert(CGF.LabelMap.count(*i));
567  JumpDest &dest = CGF.LabelMap.find(*i)->second;
568  assert(dest.getScopeDepth().isValid());
569  assert(innermostScope.encloses(dest.getScopeDepth()));
570  dest.setScopeDepth(innermostScope);
571  }
572 
573  // Reparent the labels if the new scope also has cleanups.
574  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
575  ParentScope->Labels.append(Labels.begin(), Labels.end());
576  }
577 }
578 
579 
580 void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
581  EmitLabel(S.getDecl());
582  EmitStmt(S.getSubStmt());
583 }
584 
585 void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
586  EmitStmt(S.getSubStmt(), S.getAttrs());
587 }
588 
589 void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
590  // If this code is reachable then emit a stop point (if generating
591  // debug info). We have to do this ourselves because we are on the
592  // "simple" statement path.
593  if (HaveInsertPoint())
594  EmitStopPoint(&S);
595 
596  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
597 }
598 
599 
600 void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
601  if (const LabelDecl *Target = S.getConstantTarget()) {
602  EmitBranchThroughCleanup(getJumpDestForLabel(Target));
603  return;
604  }
605 
606  // Ensure that we have an i8* for our PHI node.
607  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
608  Int8PtrTy, "addr");
609  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
610 
611  // Get the basic block for the indirect goto.
612  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
613 
614  // The first instruction in the block has to be the PHI for the switch dest,
615  // add an entry for this branch.
616  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
617 
618  EmitBranch(IndGotoBB);
619 }
620 
621 void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
622  // C99 6.8.4.1: The first substatement is executed if the expression compares
623  // unequal to 0. The condition must be a scalar type.
624  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
625 
626  if (S.getInit())
627  EmitStmt(S.getInit());
628 
629  if (S.getConditionVariable())
630  EmitDecl(*S.getConditionVariable());
631 
632  // If the condition constant folds and can be elided, try to avoid emitting
633  // the condition and the dead arm of the if/else.
634  bool CondConstant;
635  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
636  S.isConstexpr())) {
637  // Figure out which block (then or else) is executed.
638  const Stmt *Executed = S.getThen();
639  const Stmt *Skipped = S.getElse();
640  if (!CondConstant) // Condition false?
641  std::swap(Executed, Skipped);
642 
643  // If the skipped block has no labels in it, just emit the executed block.
644  // This avoids emitting dead code and simplifies the CFG substantially.
645  if (S.isConstexpr() || !ContainsLabel(Skipped)) {
646  if (CondConstant)
647  incrementProfileCounter(&S);
648  if (Executed) {
649  RunCleanupsScope ExecutedScope(*this);
650  EmitStmt(Executed);
651  }
652  return;
653  }
654  }
655 
656  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
657  // the conditional branch.
658  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
659  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
660  llvm::BasicBlock *ElseBlock = ContBlock;
661  if (S.getElse())
662  ElseBlock = createBasicBlock("if.else");
663 
664  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock,
665  getProfileCount(S.getThen()));
666 
667  // Emit the 'then' code.
668  EmitBlock(ThenBlock);
669  incrementProfileCounter(&S);
670  {
671  RunCleanupsScope ThenScope(*this);
672  EmitStmt(S.getThen());
673  }
674  EmitBranch(ContBlock);
675 
676  // Emit the 'else' code if present.
677  if (const Stmt *Else = S.getElse()) {
678  {
679  // There is no need to emit line number for an unconditional branch.
680  auto NL = ApplyDebugLocation::CreateEmpty(*this);
681  EmitBlock(ElseBlock);
682  }
683  {
684  RunCleanupsScope ElseScope(*this);
685  EmitStmt(Else);
686  }
687  {
688  // There is no need to emit line number for an unconditional branch.
689  auto NL = ApplyDebugLocation::CreateEmpty(*this);
690  EmitBranch(ContBlock);
691  }
692  }
693 
694  // Emit the continuation block for code after the if.
695  EmitBlock(ContBlock, true);
696 }
697 
698 void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
699  ArrayRef<const Attr *> WhileAttrs) {
700  // Emit the header for the loop, which will also become
701  // the continue target.
702  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
703  EmitBlock(LoopHeader.getBlock());
704 
705  const SourceRange &R = S.getSourceRange();
706  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), WhileAttrs,
707  SourceLocToDebugLoc(R.getBegin()),
708  SourceLocToDebugLoc(R.getEnd()));
709 
710  // Create an exit block for when the condition fails, which will
711  // also become the break target.
712  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
713 
714  // Store the blocks to use for break and continue.
715  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
716 
717  // C++ [stmt.while]p2:
718  // When the condition of a while statement is a declaration, the
719  // scope of the variable that is declared extends from its point
720  // of declaration (3.3.2) to the end of the while statement.
721  // [...]
722  // The object created in a condition is destroyed and created
723  // with each iteration of the loop.
724  RunCleanupsScope ConditionScope(*this);
725 
726  if (S.getConditionVariable())
727  EmitDecl(*S.getConditionVariable());
728 
729  // Evaluate the conditional in the while header. C99 6.8.5.1: The
730  // evaluation of the controlling expression takes place before each
731  // execution of the loop body.
732  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
733 
734  // while(1) is common, avoid extra exit blocks. Be sure
735  // to correctly handle break/continue though.
736  bool EmitBoolCondBranch = true;
737  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
738  if (C->isOne())
739  EmitBoolCondBranch = false;
740 
741  // As long as the condition is true, go to the loop body.
742  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
743  if (EmitBoolCondBranch) {
744  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
745  if (ConditionScope.requiresCleanups())
746  ExitBlock = createBasicBlock("while.exit");
747  Builder.CreateCondBr(
748  BoolCondVal, LoopBody, ExitBlock,
749  createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
750 
751  if (ExitBlock != LoopExit.getBlock()) {
752  EmitBlock(ExitBlock);
753  EmitBranchThroughCleanup(LoopExit);
754  }
755  }
756 
757  // Emit the loop body. We have to emit this in a cleanup scope
758  // because it might be a singleton DeclStmt.
759  {
760  RunCleanupsScope BodyScope(*this);
761  EmitBlock(LoopBody);
762  incrementProfileCounter(&S);
763  EmitStmt(S.getBody());
764  }
765 
766  BreakContinueStack.pop_back();
767 
768  // Immediately force cleanup.
769  ConditionScope.ForceCleanup();
770 
771  EmitStopPoint(&S);
772  // Branch to the loop header again.
773  EmitBranch(LoopHeader.getBlock());
774 
775  LoopStack.pop();
776 
777  // Emit the exit block.
778  EmitBlock(LoopExit.getBlock(), true);
779 
780  // The LoopHeader typically is just a branch if we skipped emitting
781  // a branch, try to erase it.
782  if (!EmitBoolCondBranch)
783  SimplifyForwardingBlocks(LoopHeader.getBlock());
784 }
785 
786 void CodeGenFunction::EmitDoStmt(const DoStmt &S,
787  ArrayRef<const Attr *> DoAttrs) {
788  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
789  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
790 
791  uint64_t ParentCount = getCurrentProfileCount();
792 
793  // Store the blocks to use for break and continue.
794  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
795 
796  // Emit the body of the loop.
797  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
798 
799  EmitBlockWithFallThrough(LoopBody, &S);
800  {
801  RunCleanupsScope BodyScope(*this);
802  EmitStmt(S.getBody());
803  }
804 
805  EmitBlock(LoopCond.getBlock());
806 
807  const SourceRange &R = S.getSourceRange();
808  LoopStack.push(LoopBody, CGM.getContext(), DoAttrs,
809  SourceLocToDebugLoc(R.getBegin()),
810  SourceLocToDebugLoc(R.getEnd()));
811 
812  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
813  // after each execution of the loop body."
814 
815  // Evaluate the conditional in the while header.
816  // C99 6.8.5p2/p4: The first substatement is executed if the expression
817  // compares unequal to 0. The condition must be a scalar type.
818  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
819 
820  BreakContinueStack.pop_back();
821 
822  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
823  // to correctly handle break/continue though.
824  bool EmitBoolCondBranch = true;
825  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
826  if (C->isZero())
827  EmitBoolCondBranch = false;
828 
829  // As long as the condition is true, iterate the loop.
830  if (EmitBoolCondBranch) {
831  uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
832  Builder.CreateCondBr(
833  BoolCondVal, LoopBody, LoopExit.getBlock(),
834  createProfileWeightsForLoop(S.getCond(), BackedgeCount));
835  }
836 
837  LoopStack.pop();
838 
839  // Emit the exit block.
840  EmitBlock(LoopExit.getBlock());
841 
842  // The DoCond block typically is just a branch if we skipped
843  // emitting a branch, try to erase it.
844  if (!EmitBoolCondBranch)
845  SimplifyForwardingBlocks(LoopCond.getBlock());
846 }
847 
848 void CodeGenFunction::EmitForStmt(const ForStmt &S,
849  ArrayRef<const Attr *> ForAttrs) {
850  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
851 
852  LexicalScope ForScope(*this, S.getSourceRange());
853 
854  // Evaluate the first part before the loop.
855  if (S.getInit())
856  EmitStmt(S.getInit());
857 
858  // Start the loop with a block that tests the condition.
859  // If there's an increment, the continue scope will be overwritten
860  // later.
861  JumpDest Continue = getJumpDestInCurrentScope("for.cond");
862  llvm::BasicBlock *CondBlock = Continue.getBlock();
863  EmitBlock(CondBlock);
864 
865  const SourceRange &R = S.getSourceRange();
866  LoopStack.push(CondBlock, CGM.getContext(), ForAttrs,
867  SourceLocToDebugLoc(R.getBegin()),
868  SourceLocToDebugLoc(R.getEnd()));
869 
870  // If the for loop doesn't have an increment we can just use the
871  // condition as the continue block. Otherwise we'll need to create
872  // a block for it (in the current scope, i.e. in the scope of the
873  // condition), and that will become our continue block.
874  if (S.getInc())
875  Continue = getJumpDestInCurrentScope("for.inc");
876 
877  // Store the blocks to use for break and continue.
878  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
879 
880  // Create a cleanup scope for the condition variable cleanups.
881  LexicalScope ConditionScope(*this, S.getSourceRange());
882 
883  if (S.getCond()) {
884  // If the for statement has a condition scope, emit the local variable
885  // declaration.
886  if (S.getConditionVariable()) {
887  EmitDecl(*S.getConditionVariable());
888  }
889 
890  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
891  // If there are any cleanups between here and the loop-exit scope,
892  // create a block to stage a loop exit along.
893  if (ForScope.requiresCleanups())
894  ExitBlock = createBasicBlock("for.cond.cleanup");
895 
896  // As long as the condition is true, iterate the loop.
897  llvm::BasicBlock *ForBody = createBasicBlock("for.body");
898 
899  // C99 6.8.5p2/p4: The first substatement is executed if the expression
900  // compares unequal to 0. The condition must be a scalar type.
901  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
902  Builder.CreateCondBr(
903  BoolCondVal, ForBody, ExitBlock,
904  createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
905 
906  if (ExitBlock != LoopExit.getBlock()) {
907  EmitBlock(ExitBlock);
908  EmitBranchThroughCleanup(LoopExit);
909  }
910 
911  EmitBlock(ForBody);
912  } else {
913  // Treat it as a non-zero constant. Don't even create a new block for the
914  // body, just fall into it.
915  }
916  incrementProfileCounter(&S);
917 
918  {
919  // Create a separate cleanup scope for the body, in case it is not
920  // a compound statement.
921  RunCleanupsScope BodyScope(*this);
922  EmitStmt(S.getBody());
923  }
924 
925  // If there is an increment, emit it next.
926  if (S.getInc()) {
927  EmitBlock(Continue.getBlock());
928  EmitStmt(S.getInc());
929  }
930 
931  BreakContinueStack.pop_back();
932 
933  ConditionScope.ForceCleanup();
934 
935  EmitStopPoint(&S);
936  EmitBranch(CondBlock);
937 
938  ForScope.ForceCleanup();
939 
940  LoopStack.pop();
941 
942  // Emit the fall-through block.
943  EmitBlock(LoopExit.getBlock(), true);
944 }
945 
946 void
947 CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
948  ArrayRef<const Attr *> ForAttrs) {
949  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
950 
951  LexicalScope ForScope(*this, S.getSourceRange());
952 
953  // Evaluate the first pieces before the loop.
954  if (S.getInit())
955  EmitStmt(S.getInit());
956  EmitStmt(S.getRangeStmt());
957  EmitStmt(S.getBeginStmt());
958  EmitStmt(S.getEndStmt());
959 
960  // Start the loop with a block that tests the condition.
961  // If there's an increment, the continue scope will be overwritten
962  // later.
963  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
964  EmitBlock(CondBlock);
965 
966  const SourceRange &R = S.getSourceRange();
967  LoopStack.push(CondBlock, CGM.getContext(), ForAttrs,
968  SourceLocToDebugLoc(R.getBegin()),
969  SourceLocToDebugLoc(R.getEnd()));
970 
971  // If there are any cleanups between here and the loop-exit scope,
972  // create a block to stage a loop exit along.
973  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
974  if (ForScope.requiresCleanups())
975  ExitBlock = createBasicBlock("for.cond.cleanup");
976 
977  // The loop body, consisting of the specified body and the loop variable.
978  llvm::BasicBlock *ForBody = createBasicBlock("for.body");
979 
980  // The body is executed if the expression, contextually converted
981  // to bool, is true.
982  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
983  Builder.CreateCondBr(
984  BoolCondVal, ForBody, ExitBlock,
985  createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
986 
987  if (ExitBlock != LoopExit.getBlock()) {
988  EmitBlock(ExitBlock);
989  EmitBranchThroughCleanup(LoopExit);
990  }
991 
992  EmitBlock(ForBody);
993  incrementProfileCounter(&S);
994 
995  // Create a block for the increment. In case of a 'continue', we jump there.
996  JumpDest Continue = getJumpDestInCurrentScope("for.inc");
997 
998  // Store the blocks to use for break and continue.
999  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1000 
1001  {
1002  // Create a separate cleanup scope for the loop variable and body.
1003  LexicalScope BodyScope(*this, S.getSourceRange());
1004  EmitStmt(S.getLoopVarStmt());
1005  EmitStmt(S.getBody());
1006  }
1007 
1008  EmitStopPoint(&S);
1009  // If there is an increment, emit it next.
1010  EmitBlock(Continue.getBlock());
1011  EmitStmt(S.getInc());
1012 
1013  BreakContinueStack.pop_back();
1014 
1015  EmitBranch(CondBlock);
1016 
1017  ForScope.ForceCleanup();
1018 
1019  LoopStack.pop();
1020 
1021  // Emit the fall-through block.
1022  EmitBlock(LoopExit.getBlock(), true);
1023 }
1024 
1025 void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
1026  if (RV.isScalar()) {
1027  Builder.CreateStore(RV.getScalarVal(), ReturnValue);
1028  } else if (RV.isAggregate()) {
1029  LValue Dest = MakeAddrLValue(ReturnValue, Ty);
1030  LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
1031  EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
1032  } else {
1033  EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
1034  /*init*/ true);
1035  }
1036  EmitBranchThroughCleanup(ReturnBlock);
1037 }
1038 
1039 /// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
1040 /// if the function returns void, or may be missing one if the function returns
1041 /// non-void. Fun stuff :).
1042 void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
1043  if (requiresReturnValueCheck()) {
1044  llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
1045  auto *SLocPtr =
1046  new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
1047  llvm::GlobalVariable::PrivateLinkage, SLoc);
1048  SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1049  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
1050  assert(ReturnLocation.isValid() && "No valid return location");
1051  Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
1052  ReturnLocation);
1053  }
1054 
1055  // Returning from an outlined SEH helper is UB, and we already warn on it.
1056  if (IsOutlinedSEHHelper) {
1057  Builder.CreateUnreachable();
1058  Builder.ClearInsertionPoint();
1059  }
1060 
1061  // Emit the result value, even if unused, to evaluate the side effects.
1062  const Expr *RV = S.getRetValue();
1063 
1064  // Treat block literals in a return expression as if they appeared
1065  // in their own scope. This permits a small, easily-implemented
1066  // exception to our over-conservative rules about not jumping to
1067  // statements following block literals with non-trivial cleanups.
1068  RunCleanupsScope cleanupScope(*this);
1069  if (const FullExpr *fe = dyn_cast_or_null<FullExpr>(RV)) {
1070  enterFullExpression(fe);
1071  RV = fe->getSubExpr();
1072  }
1073 
1074  // FIXME: Clean this up by using an LValue for ReturnTemp,
1075  // EmitStoreThroughLValue, and EmitAnyExpr.
1076  if (getLangOpts().ElideConstructors &&
1077  S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
1078  // Apply the named return value optimization for this return statement,
1079  // which means doing nothing: the appropriate result has already been
1080  // constructed into the NRVO variable.
1081 
1082  // If there is an NRVO flag for this variable, set it to 1 to indicate
1083  // that the cleanup code should not destroy the variable.
1084  if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
1085  Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
1086  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
1087  // Make sure not to return anything, but evaluate the expression
1088  // for side effects.
1089  if (RV)
1090  EmitAnyExpr(RV);
1091  } else if (!RV) {
1092  // Do nothing (return value is left uninitialized)
1093  } else if (FnRetTy->isReferenceType()) {
1094  // If this function returns a reference, take the address of the expression
1095  // rather than the value.
1096  RValue Result = EmitReferenceBindingToExpr(RV);
1097  Builder.CreateStore(Result.getScalarVal(), ReturnValue);
1098  } else {
1099  switch (getEvaluationKind(RV->getType())) {
1100  case TEK_Scalar:
1101  Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
1102  break;
1103  case TEK_Complex:
1104  EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
1105  /*isInit*/ true);
1106  break;
1107  case TEK_Aggregate:
1108  EmitAggExpr(RV, AggValueSlot::forAddr(
1109  ReturnValue, Qualifiers(),
1110  AggValueSlot::IsDestructed,
1111  AggValueSlot::DoesNotNeedGCBarriers,
1112  AggValueSlot::IsNotAliased,
1113  getOverlapForReturnValue()));
1114  break;
1115  }
1116  }
1117 
1118  ++NumReturnExprs;
1119  if (!RV || RV->isEvaluatable(getContext()))
1120  ++NumSimpleReturnExprs;
1121 
1122  cleanupScope.ForceCleanup();
1123  EmitBranchThroughCleanup(ReturnBlock);
1124 }
1125 
1126 void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
1127  // As long as debug info is modeled with instructions, we have to ensure we
1128  // have a place to insert here and write the stop point here.
1129  if (HaveInsertPoint())
1130  EmitStopPoint(&S);
1131 
1132  for (const auto *I : S.decls())
1133  EmitDecl(*I);
1134 }
1135 
1136 void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
1137  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
1138 
1139  // If this code is reachable then emit a stop point (if generating
1140  // debug info). We have to do this ourselves because we are on the
1141  // "simple" statement path.
1142  if (HaveInsertPoint())
1143  EmitStopPoint(&S);
1144 
1145  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
1146 }
1147 
1148 void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
1149  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1150 
1151  // If this code is reachable then emit a stop point (if generating
1152  // debug info). We have to do this ourselves because we are on the
1153  // "simple" statement path.
1154  if (HaveInsertPoint())
1155  EmitStopPoint(&S);
1156 
1157  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
1158 }
1159 
1160 /// EmitCaseStmtRange - If case statement range is not too big then
1161 /// add multiple cases to switch instruction, one for each value within
1162 /// the range. If range is too big then emit "if" condition check.
1163 void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
1164  assert(S.getRHS() && "Expected RHS value in CaseStmt");
1165 
1166  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1167  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1168 
1169  // Emit the code for this case. We do this first to make sure it is
1170  // properly chained from our predecessor before generating the
1171  // switch machinery to enter this block.
1172  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1173  EmitBlockWithFallThrough(CaseDest, &S);
1174  EmitStmt(S.getSubStmt());
1175 
1176  // If range is empty, do nothing.
1177  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1178  return;
1179 
1180  llvm::APInt Range = RHS - LHS;
1181  // FIXME: parameters such as this should not be hardcoded.
1182  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1183  // Range is small enough to add multiple switch instruction cases.
1184  uint64_t Total = getProfileCount(&S);
1185  unsigned NCases = Range.getZExtValue() + 1;
1186  // We only have one region counter for the entire set of cases here, so we
1187  // need to divide the weights evenly between the generated cases, ensuring
1188  // that the total weight is preserved. E.g., a weight of 5 over three cases
1189  // will be distributed as weights of 2, 2, and 1.
1190  uint64_t Weight = Total / NCases, Rem = Total % NCases;
1191  for (unsigned I = 0; I != NCases; ++I) {
1192  if (SwitchWeights)
1193  SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1194  if (Rem)
1195  Rem--;
1196  SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1197  ++LHS;
1198  }
1199  return;
1200  }
1201 
1202  // The range is too big. Emit "if" condition into a new block,
1203  // making sure to save and restore the current insertion point.
1204  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1205 
1206  // Push this test onto the chain of range checks (which terminates
1207  // in the default basic block). The switch's default will be changed
1208  // to the top of this chain after switch emission is complete.
1209  llvm::BasicBlock *FalseDest = CaseRangeBlock;
1210  CaseRangeBlock = createBasicBlock("sw.caserange");
1211 
1212  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
1213  Builder.SetInsertPoint(CaseRangeBlock);
1214 
1215  // Emit range check.
1216  llvm::Value *Diff =
1217  Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1218  llvm::Value *Cond =
1219  Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1220 
1221  llvm::MDNode *Weights = nullptr;
1222  if (SwitchWeights) {
1223  uint64_t ThisCount = getProfileCount(&S);
1224  uint64_t DefaultCount = (*SwitchWeights)[0];
1225  Weights = createProfileWeights(ThisCount, DefaultCount);
1226 
1227  // Since we're chaining the switch default through each large case range, we
1228  // need to update the weight for the default, ie, the first case, to include
1229  // this case.
1230  (*SwitchWeights)[0] += ThisCount;
1231  }
1232  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1233 
1234  // Restore the appropriate insertion point.
1235  if (RestoreBB)
1236  Builder.SetInsertPoint(RestoreBB);
1237  else
1238  Builder.ClearInsertionPoint();
1239 }
1240 
1241 void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
1242  // If there is no enclosing switch instance that we're aware of, then this
1243  // case statement and its block can be elided. This situation only happens
1244  // when we've constant-folded the switch, are emitting the constant case,
1245  // and part of the constant case includes another case statement. For
1246  // instance: switch (4) { case 4: do { case 5: } while (1); }
1247  if (!SwitchInsn) {
1248  EmitStmt(S.getSubStmt());
1249  return;
1250  }
1251 
1252  // Handle case ranges.
1253  if (S.getRHS()) {
1254  EmitCaseStmtRange(S);
1255  return;
1256  }
1257 
1258  llvm::ConstantInt *CaseVal =
1259  Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1260 
1261  // If the body of the case is just a 'break', try to not emit an empty block.
1262  // If we're profiling or we're not optimizing, leave the block in for better
1263  // debug and coverage analysis.
1264  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1265  CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1266  isa<BreakStmt>(S.getSubStmt())) {
1267  JumpDest Block = BreakContinueStack.back().BreakBlock;
1268 
1269  // Only do this optimization if there are no cleanups that need emitting.
1270  if (isObviouslyBranchWithoutCleanups(Block)) {
1271  if (SwitchWeights)
1272  SwitchWeights->push_back(getProfileCount(&S));
1273  SwitchInsn->addCase(CaseVal, Block.getBlock());
1274 
1275  // If there was a fallthrough into this case, make sure to redirect it to
1276  // the end of the switch as well.
1277  if (Builder.GetInsertBlock()) {
1278  Builder.CreateBr(Block.getBlock());
1279  Builder.ClearInsertionPoint();
1280  }
1281  return;
1282  }
1283  }
1284 
1285  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1286  EmitBlockWithFallThrough(CaseDest, &S);
1287  if (SwitchWeights)
1288  SwitchWeights->push_back(getProfileCount(&S));
1289  SwitchInsn->addCase(CaseVal, CaseDest);
1290 
1291  // Recursively emitting the statement is acceptable, but is not wonderful for
1292  // code where we have many case statements nested together, i.e.:
1293  // case 1:
1294  // case 2:
1295  // case 3: etc.
1296  // Handling this recursively will create a new block for each case statement
1297  // that falls through to the next case which is IR intensive. It also causes
1298  // deep recursion which can run into stack depth limitations. Handle
1299  // sequential non-range case statements specially.
1300  const CaseStmt *CurCase = &S;
1301  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1302 
1303  // Otherwise, iteratively add consecutive cases to this switch stmt.
1304  while (NextCase && NextCase->getRHS() == nullptr) {
1305  CurCase = NextCase;
1306  llvm::ConstantInt *CaseVal =
1307  Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1308 
1309  if (SwitchWeights)
1310  SwitchWeights->push_back(getProfileCount(NextCase));
1311  if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1312  CaseDest = createBasicBlock("sw.bb");
1313  EmitBlockWithFallThrough(CaseDest, &S);
1314  }
1315 
1316  SwitchInsn->addCase(CaseVal, CaseDest);
1317  NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1318  }
1319 
1320  // Normal default recursion for non-cases.
1321  EmitStmt(CurCase->getSubStmt());
1322 }
1323 
1324 void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
1325  // If there is no enclosing switch instance that we're aware of, then this
1326  // default statement can be elided. This situation only happens when we've
1327  // constant-folded the switch.
1328  if (!SwitchInsn) {
1329  EmitStmt(S.getSubStmt());
1330  return;
1331  }
1332 
1333  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1334  assert(DefaultBlock->empty() &&
1335  "EmitDefaultStmt: Default block already defined?");
1336 
1337  EmitBlockWithFallThrough(DefaultBlock, &S);
1338 
1339  EmitStmt(S.getSubStmt());
1340 }
1341 
1342 /// CollectStatementsForCase - Given the body of a 'switch' statement and a
1343 /// constant value that is being switched on, see if we can dead code eliminate
1344 /// the body of the switch to a simple series of statements to emit. Basically,
1345 /// on a switch (5) we want to find these statements:
1346 /// case 5:
1347 /// printf(...); <--
1348 /// ++i; <--
1349 /// break;
1350 ///
1351 /// and add them to the ResultStmts vector. If it is unsafe to do this
1352 /// transformation (for example, one of the elided statements contains a label
1353 /// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1354 /// should include statements after it (e.g. the printf() line is a substmt of
1355 /// the case) then return CSFC_FallThrough. If we handled it and found a break
1356 /// statement, then return CSFC_Success.
1357 ///
1358 /// If Case is non-null, then we are looking for the specified case, checking
1359 /// that nothing we jump over contains labels. If Case is null, then we found
1360 /// the case and are looking for the break.
1361 ///
1362 /// If the recursive walk actually finds our Case, then we set FoundCase to
1363 /// true.
1364 ///
1364 ///
1365 enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1366 static CSFC_Result CollectStatementsForCase(const Stmt *S,
1367  const SwitchCase *Case,
1368  bool &FoundCase,
1369  SmallVectorImpl<const Stmt*> &ResultStmts) {
1370  // If this is a null statement, just succeed.
1371  if (!S)
1372  return Case ? CSFC_Success : CSFC_FallThrough;
1373 
1374  // If this is the switchcase (case 4: or default) that we're looking for, then
1375  // we're in business. Just add the substatement.
1376  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1377  if (S == Case) {
1378  FoundCase = true;
1379  return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1380  ResultStmts);
1381  }
1382 
1383  // Otherwise, this is some other case or default statement, just ignore it.
1384  return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1385  ResultStmts);
1386  }
1387 
1388  // If we are in the live part of the code and we found our break statement,
1389  // return a success!
1390  if (!Case && isa<BreakStmt>(S))
1391  return CSFC_Success;
1392 
1393  // If this is a switch statement, then it might contain the SwitchCase, the
1394  // break, or neither.
1395  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1396  // Handle this as two cases: we might be looking for the SwitchCase (if so
1397  // the skipped statements must be skippable) or we might already have it.
1398  CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1399  bool StartedInLiveCode = FoundCase;
1400  unsigned StartSize = ResultStmts.size();
1401 
1402  // If we've not found the case yet, scan through looking for it.
1403  if (Case) {
1404  // Keep track of whether we see a skipped declaration. The code could be
1405  // using the declaration even if it is skipped, so we can't optimize out
1406  // the decl if the kept statements might refer to it.
1407  bool HadSkippedDecl = false;
1408 
1409  // If we're looking for the case, just see if we can skip each of the
1410  // substatements.
1411  for (; Case && I != E; ++I) {
1412  HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1413 
1414  switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1415  case CSFC_Failure: return CSFC_Failure;
1416  case CSFC_Success:
1417  // A successful result means that either 1) that the statement doesn't
1418  // have the case and is skippable, or 2) does contain the case value
1419  // and also contains the break to exit the switch. In the later case,
1420  // we just verify the rest of the statements are elidable.
1421  if (FoundCase) {
1422  // If we found the case and skipped declarations, we can't do the
1423  // optimization.
1424  if (HadSkippedDecl)
1425  return CSFC_Failure;
1426 
1427  for (++I; I != E; ++I)
1428  if (CodeGenFunction::ContainsLabel(*I, true))
1429  return CSFC_Failure;
1430  return CSFC_Success;
1431  }
1432  break;
1433  case CSFC_FallThrough:
1434  // If we have a fallthrough condition, then we must have found the
1435  // case started to include statements. Consider the rest of the
1436  // statements in the compound statement as candidates for inclusion.
1437  assert(FoundCase && "Didn't find case but returned fallthrough?");
1438  // We recursively found Case, so we're not looking for it anymore.
1439  Case = nullptr;
1440 
1441  // If we found the case and skipped declarations, we can't do the
1442  // optimization.
1443  if (HadSkippedDecl)
1444  return CSFC_Failure;
1445  break;
1446  }
1447  }
1448 
1449  if (!FoundCase)
1450  return CSFC_Success;
1451 
1452  assert(!HadSkippedDecl && "fallthrough after skipping decl");
1453  }
1454 
1455  // If we have statements in our range, then we know that the statements are
1456  // live and need to be added to the set of statements we're tracking.
1457  bool AnyDecls = false;
1458  for (; I != E; ++I) {
1459  AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1460 
1461  switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1462  case CSFC_Failure: return CSFC_Failure;
1463  case CSFC_FallThrough:
1464  // A fallthrough result means that the statement was simple and just
1465  // included in ResultStmt, keep adding them afterwards.
1466  break;
1467  case CSFC_Success:
1468  // A successful result means that we found the break statement and
1469  // stopped statement inclusion. We just ensure that any leftover stmts
1470  // are skippable and return success ourselves.
1471  for (++I; I != E; ++I)
1472  if (CodeGenFunction::ContainsLabel(*I, true))
1473  return CSFC_Failure;
1474  return CSFC_Success;
1475  }
1476  }
1477 
1478  // If we're about to fall out of a scope without hitting a 'break;', we
1479  // can't perform the optimization if there were any decls in that scope
1480  // (we'd lose their end-of-lifetime).
1481  if (AnyDecls) {
1482  // If the entire compound statement was live, there's one more thing we
1483  // can try before giving up: emit the whole thing as a single statement.
1484  // We can do that unless the statement contains a 'break;'.
1485  // FIXME: Such a break must be at the end of a construct within this one.
1486  // We could emit this by just ignoring the BreakStmts entirely.
1487  if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1488  ResultStmts.resize(StartSize);
1489  ResultStmts.push_back(S);
1490  } else {
1491  return CSFC_Failure;
1492  }
1493  }
1494 
1495  return CSFC_FallThrough;
1496  }
1497 
1498  // Okay, this is some other statement that we don't handle explicitly, like a
1499  // for statement or increment etc. If we are skipping over this statement,
1500  // just verify it doesn't have labels, which would make it invalid to elide.
1501  if (Case) {
1502  if (CodeGenFunction::ContainsLabel(S, true))
1503  return CSFC_Failure;
1504  return CSFC_Success;
1505  }
1506 
1507  // Otherwise, we want to include this statement. Everything is cool with that
1508  // so long as it doesn't contain a break out of the switch we're in.
1509  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1510 
1511  // Otherwise, everything is great. Include the statement and tell the caller
1512  // that we fall through and include the next statement as well.
1513  ResultStmts.push_back(S);
1514  return CSFC_FallThrough;
1515 }
1516 
1517 /// FindCaseStatementsForValue - Find the case statement being jumped to and
1518 /// then invoke CollectStatementsForCase to find the list of statements to emit
1519 /// for a switch on constant. See the comment above CollectStatementsForCase
1520 /// for more details.
1521 static bool FindCaseStatementsForValue(const SwitchStmt &S,
1522  const llvm::APSInt &ConstantCondValue,
1523  SmallVectorImpl<const Stmt*> &ResultStmts,
1524  ASTContext &C,
1525  const SwitchCase *&ResultCase) {
1526  // First step, find the switch case that is being branched to. We can do this
1527  // efficiently by scanning the SwitchCase list.
1528  const SwitchCase *Case = S.getSwitchCaseList();
1529  const DefaultStmt *DefaultCase = nullptr;
1530 
1531  for (; Case; Case = Case->getNextSwitchCase()) {
1532  // It's either a default or case. Just remember the default statement in
1533  // case we're not jumping to any numbered cases.
1534  if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1535  DefaultCase = DS;
1536  continue;
1537  }
1538 
1539  // Check to see if this case is the one we're looking for.
1540  const CaseStmt *CS = cast<CaseStmt>(Case);
1541  // Don't handle case ranges yet.
1542  if (CS->getRHS()) return false;
1543 
1544  // If we found our case, remember it as 'case'.
1545  if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1546  break;
1547  }
1548 
1549  // If we didn't find a matching case, we use a default if it exists, or we
1550  // elide the whole switch body!
1551  if (!Case) {
1552  // It is safe to elide the body of the switch if it doesn't contain labels
1553  // etc. If it is safe, return successfully with an empty ResultStmts list.
1554  if (!DefaultCase)
1555  return !CodeGenFunction::ContainsLabel(&S);
1556  Case = DefaultCase;
1557  }
1558 
1559  // Ok, we know which case is being jumped to, try to collect all the
1560  // statements that follow it. This can fail for a variety of reasons. Also,
1561  // check to see that the recursive walk actually found our case statement.
1562  // Insane cases like this can fail to find it in the recursive walk since we
1563  // don't handle every stmt kind:
1564  // switch (4) {
1565  // while (1) {
1566  // case 4: ...
1567  bool FoundCase = false;
1568  ResultCase = Case;
1569  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1570  ResultStmts) != CSFC_Failure &&
1571  FoundCase;
1572 }
1573 
1574 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1575  // Handle nested switch statements.
1576  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1577  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
1578  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1579 
1580  // See if we can constant fold the condition of the switch and therefore only
1581  // emit the live case statement (if any) of the switch.
1582  llvm::APSInt ConstantCondValue;
1583  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1584  SmallVector<const Stmt*, 4> CaseStmts;
1585  const SwitchCase *Case = nullptr;
1586  if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1587  getContext(), Case)) {
1588  if (Case)
1589  incrementProfileCounter(Case);
1590  RunCleanupsScope ExecutedScope(*this);
1591 
1592  if (S.getInit())
1593  EmitStmt(S.getInit());
1594 
1595  // Emit the condition variable if needed inside the entire cleanup scope
1596  // used by this special case for constant folded switches.
1597  if (S.getConditionVariable())
1598  EmitDecl(*S.getConditionVariable());
1599 
1600  // At this point, we are no longer "within" a switch instance, so
1601  // we can temporarily enforce this to ensure that any embedded case
1602  // statements are not emitted.
1603  SwitchInsn = nullptr;
1604 
1605  // Okay, we can dead code eliminate everything except this case. Emit the
1606  // specified series of statements and we're good.
1607  for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1608         EmitStmt(CaseStmts[i]);
1609       incrementProfileCounter(&S);
1610 
1611  // Now we want to restore the saved switch instance so that nested
1612  // switches continue to function properly
1613  SwitchInsn = SavedSwitchInsn;
1614 
1615  return;
1616  }
1617  }
1618 
1619  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1620 
1621  RunCleanupsScope ConditionScope(*this);
1622 
1623  if (S.getInit())
1624  EmitStmt(S.getInit());
1625 
1626   if (S.getConditionVariable())
1627     EmitDecl(*S.getConditionVariable());
1628  llvm::Value *CondV = EmitScalarExpr(S.getCond());
1629 
1630  // Create basic block to hold stuff that comes after switch
1631  // statement. We also need to create a default block now so that
1632  // explicit case ranges tests can have a place to jump to on
1633  // failure.
1634  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1635  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1636  if (PGO.haveRegionCounts()) {
1637  // Walk the SwitchCase list to find how many there are.
1638  uint64_t DefaultCount = 0;
1639  unsigned NumCases = 0;
1640  for (const SwitchCase *Case = S.getSwitchCaseList();
1641  Case;
1642  Case = Case->getNextSwitchCase()) {
1643  if (isa<DefaultStmt>(Case))
1644  DefaultCount = getProfileCount(Case);
1645  NumCases += 1;
1646  }
1647  SwitchWeights = new SmallVector<uint64_t, 16>();
1648  SwitchWeights->reserve(NumCases);
1649  // The default needs to be first. We store the edge count, so we already
1650  // know the right weight.
1651  SwitchWeights->push_back(DefaultCount);
1652  }
1653  CaseRangeBlock = DefaultBlock;
1654 
1655  // Clear the insertion point to indicate we are in unreachable code.
1656  Builder.ClearInsertionPoint();
1657 
1658  // All break statements jump to NextBlock. If BreakContinueStack is non-empty
1659  // then reuse last ContinueBlock.
1660  JumpDest OuterContinue;
1661  if (!BreakContinueStack.empty())
1662  OuterContinue = BreakContinueStack.back().ContinueBlock;
1663 
1664  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
1665 
1666  // Emit switch body.
1667  EmitStmt(S.getBody());
1668 
1669  BreakContinueStack.pop_back();
1670 
1671  // Update the default block in case explicit case range tests have
1672  // been chained on top.
1673  SwitchInsn->setDefaultDest(CaseRangeBlock);
1674 
1675  // If a default was never emitted:
1676  if (!DefaultBlock->getParent()) {
1677  // If we have cleanups, emit the default block so that there's a
1678  // place to jump through the cleanups from.
1679  if (ConditionScope.requiresCleanups()) {
1680  EmitBlock(DefaultBlock);
1681 
1682  // Otherwise, just forward the default block to the switch end.
1683  } else {
1684  DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
1685  delete DefaultBlock;
1686  }
1687  }
1688 
1689  ConditionScope.ForceCleanup();
1690 
1691  // Emit continuation.
1692   EmitBlock(SwitchExit.getBlock(), true);
1693   incrementProfileCounter(&S);
1694 
1695  // If the switch has a condition wrapped by __builtin_unpredictable,
1696  // create metadata that specifies that the switch is unpredictable.
1697  // Don't bother if not optimizing because that metadata would not be used.
1698  auto *Call = dyn_cast<CallExpr>(S.getCond());
1699  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1700  auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1701  if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1702  llvm::MDBuilder MDHelper(getLLVMContext());
1703  SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
1704  MDHelper.createUnpredictable());
1705  }
1706  }
1707 
1708  if (SwitchWeights) {
1709  assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
1710  "switch weights do not match switch cases");
1711  // If there's only one jump destination there's no sense weighting it.
1712  if (SwitchWeights->size() > 1)
1713  SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
1714  createProfileWeights(*SwitchWeights));
1715  delete SwitchWeights;
1716  }
1717  SwitchInsn = SavedSwitchInsn;
1718  SwitchWeights = SavedSwitchWeights;
1719  CaseRangeBlock = SavedCRBlock;
1720 }
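
A hedged example of the __builtin_unpredictable handling near the end of EmitSwitchStmt (function names are hypothetical): when optimizing, the switch instruction generated for the code below is tagged with !unpredictable metadata, and when PGO counts are available the per-case weights are attached as !prof.

    extern long pick(void);            /* hypothetical helper */
    int dispatch(void) {
      switch (__builtin_unpredictable(pick())) {
      case 0:  return 1;
      case 1:  return 2;
      default: return 0;
      }
    }
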
1721 
1722 static std::string
1723 SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
1724                    SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
1725  std::string Result;
1726 
1727  while (*Constraint) {
1728  switch (*Constraint) {
1729  default:
1730  Result += Target.convertConstraint(Constraint);
1731  break;
1732  // Ignore these
1733  case '*':
1734  case '?':
1735  case '!':
1736  case '=': // Will see this and the following in mult-alt constraints.
1737  case '+':
1738  break;
1739  case '#': // Ignore the rest of the constraint alternative.
1740  while (Constraint[1] && Constraint[1] != ',')
1741  Constraint++;
1742  break;
1743  case '&':
1744  case '%':
1745  Result += *Constraint;
1746  while (Constraint[1] && Constraint[1] == *Constraint)
1747  Constraint++;
1748  break;
1749  case ',':
1750  Result += "|";
1751  break;
1752  case 'g':
1753  Result += "imr";
1754  break;
1755  case '[': {
1756  assert(OutCons &&
1757  "Must pass output names to constraints with a symbolic name");
1758  unsigned Index;
1759  bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
1760  assert(result && "Could not resolve symbolic name"); (void)result;
1761  Result += llvm::utostr(Index);
1762  break;
1763  }
1764  }
1765 
1766  Constraint++;
1767  }
1768 
1769  return Result;
1770 }
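
A few illustrative rewrites performed by SimplifyConstraint, assuming the generic (non-target-specific) convertConstraint behavior; the symbolic-name example assumes the named operand is the first output:

    /*  GCC constraint   ->  LLVM constraint                                        */
    /*  "g"              ->  "imr"   general operand: immediate, memory, or register */
    /*  "r,m"            ->  "r|m"   ',' separating alternatives becomes '|'         */
    /*  "=&r"            ->  "&r"    '=' is dropped; the early-clobber '&' is kept   */
    /*  "[result]"       ->  "0"     symbolic name resolved to its operand index     */
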
1771 
1772 /// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
1773 /// as using a particular register add that as a constraint that will be used
1774 /// in this asm stmt.
1775 static std::string
1776 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
1777                        const TargetInfo &Target, CodeGenModule &CGM,
1778  const AsmStmt &Stmt, const bool EarlyClobber) {
1779  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
1780  if (!AsmDeclRef)
1781  return Constraint;
1782  const ValueDecl &Value = *AsmDeclRef->getDecl();
1783  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
1784  if (!Variable)
1785  return Constraint;
1786  if (Variable->getStorageClass() != SC_Register)
1787  return Constraint;
1788  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
1789  if (!Attr)
1790  return Constraint;
1791  StringRef Register = Attr->getLabel();
1792  assert(Target.isValidGCCRegisterName(Register));
1793  // We're using validateOutputConstraint here because we only care if
1794  // this is a register constraint.
1795  TargetInfo::ConstraintInfo Info(Constraint, "");
1796  if (Target.validateOutputConstraint(Info) &&
1797  !Info.allowsRegister()) {
1798  CGM.ErrorUnsupported(&Stmt, "__asm__");
1799  return Constraint;
1800  }
1801  // Canonicalize the register here before returning it.
1802  Register = Target.getNormalizedGCCRegisterName(Register);
1803  return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
1804 }
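
A hedged sketch of the input this handles (x86 assumed; names hypothetical): because 'r' below is declared 'register ... __asm__("eax")', its plain "r" output constraint is rewritten to the explicit register form, so the final constraint becomes "={eax}" (or "=&{eax}" for an early-clobber operand).

    static inline unsigned forty_two_in_eax(void) {
      register unsigned r __asm__("eax");          /* register variable pinned to %eax */
      __asm__ volatile("movl $42, %0" : "=r"(r));
      return r;
    }
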
1805 
1806 llvm::Value*
1807 CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
1808  LValue InputValue, QualType InputType,
1809  std::string &ConstraintStr,
1810  SourceLocation Loc) {
1811  llvm::Value *Arg;
1812   if (Info.allowsRegister() || !Info.allowsMemory()) {
1813     if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
1814  Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
1815  } else {
1816  llvm::Type *Ty = ConvertType(InputType);
1817  uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
1818  if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
1819  Ty = llvm::IntegerType::get(getLLVMContext(), Size);
1820  Ty = llvm::PointerType::getUnqual(Ty);
1821 
1822  Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
1823  Ty));
1824  } else {
1825  Arg = InputValue.getPointer();
1826  ConstraintStr += '*';
1827  }
1828  }
1829  } else {
1830  Arg = InputValue.getPointer();
1831  ConstraintStr += '*';
1832  }
1833 
1834  return Arg;
1835 }
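
An illustrative operand that takes the by-address path above (x86 assumed, names hypothetical): "m" allows memory only, so the operand's address is passed and a '*' is appended to its constraint, giving "*m" in the final constraint string; register-eligible operands are instead loaded, with small power-of-two-sized aggregates read through an integer-typed pointer.

    static inline int load_once(const int *p) {
      int r;
      __asm__("movl %1, %0" : "=r"(r) : "m"(*p));  /* *p is passed by address */
      return r;
    }
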
1836 
1837 llvm::Value* CodeGenFunction::EmitAsmInput(
1838  const TargetInfo::ConstraintInfo &Info,
1839  const Expr *InputExpr,
1840  std::string &ConstraintStr) {
1841  // If this can't be a register or memory, i.e., has to be a constant
1842  // (immediate or symbolic), try to emit it as such.
1843  if (!Info.allowsRegister() && !Info.allowsMemory()) {
1844  if (Info.requiresImmediateConstant()) {
1845  Expr::EvalResult EVResult;
1846  InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
1847 
1848  llvm::APSInt IntResult;
1849  if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
1850  getContext()))
1851  return llvm::ConstantInt::get(getLLVMContext(), IntResult);
1852  }
1853 
1854  Expr::EvalResult Result;
1855  if (InputExpr->EvaluateAsInt(Result, getContext()))
1856  return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
1857  }
1858 
1859   if (Info.allowsRegister() || !Info.allowsMemory())
1860     if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
1861  return EmitScalarExpr(InputExpr);
1862  if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
1863  return EmitScalarExpr(InputExpr);
1864  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
1865  LValue Dest = EmitLValue(InputExpr);
1866  return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
1867  InputExpr->getExprLoc());
1868 }
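
A hedged example of the constant-only path (x86-64 assumed, names hypothetical): an "i" operand allows neither a register nor memory, so the expression has to evaluate to an integer constant, which EmitAsmInput emits directly as an immediate.

    static inline unsigned long long set_bit3(unsigned long long word) {
      __asm__("btsq %1, %0" : "+r"(word) : "i"(3));  /* 3 must fold to a constant */
      return word;
    }
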
1869 
1870 /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
1871 /// asm call instruction. The !srcloc MDNode contains a list of constant
1872 /// integers which are the source locations of the start of each line in the
1873 /// asm.
1874 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
1875                                       CodeGenFunction &CGF) {
1876   SmallVector<llvm::Metadata *, 8> Locs;
1877  // Add the location of the first line to the MDNode.
1878  Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
1879  CGF.Int32Ty, Str->getBeginLoc().getRawEncoding())));
1880  StringRef StrVal = Str->getString();
1881  if (!StrVal.empty()) {
1882  const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
1883  const LangOptions &LangOpts = CGF.CGM.getLangOpts();
1884  unsigned StartToken = 0;
1885  unsigned ByteOffset = 0;
1886 
1887  // Add the location of the start of each subsequent line of the asm to the
1888  // MDNode.
1889  for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
1890  if (StrVal[i] != '\n') continue;
1891  SourceLocation LineLoc = Str->getLocationOfByte(
1892  i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
1893  Locs.push_back(llvm::ConstantAsMetadata::get(
1894  llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding())));
1895  }
1896  }
1897 
1898  return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
1899 }
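
For illustration (function name hypothetical): the asm string below contains two embedded newlines, so the !srcloc node built above carries three location entries, one for the first line and one per '\n', which later lets assembler diagnostics point at the right source line.

    static inline void three_nops(void) {
      __asm__ volatile("nop\n\t"
                       "nop\n\t"
                       "nop");
    }
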
1900 
1901 static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
1902  bool ReadOnly, bool ReadNone, const AsmStmt &S,
1903  const std::vector<llvm::Type *> &ResultRegTypes,
1904  CodeGenFunction &CGF,
1905  std::vector<llvm::Value *> &RegResults) {
1906  Result.addAttribute(llvm::AttributeList::FunctionIndex,
1907  llvm::Attribute::NoUnwind);
1908  // Attach readnone and readonly attributes.
1909  if (!HasSideEffect) {
1910  if (ReadNone)
1911  Result.addAttribute(llvm::AttributeList::FunctionIndex,
1912  llvm::Attribute::ReadNone);
1913  else if (ReadOnly)
1914  Result.addAttribute(llvm::AttributeList::FunctionIndex,
1915  llvm::Attribute::ReadOnly);
1916  }
1917 
1918  // Slap the source location of the inline asm into a !srcloc metadata on the
1919  // call.
1920  if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
1921  Result.setMetadata("srcloc",
1922  getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
1923  else {
1924  // At least put the line number on MS inline asm blobs.
1925  llvm::Constant *Loc = llvm::ConstantInt::get(CGF.Int32Ty,
1926  S.getAsmLoc().getRawEncoding());
1927  Result.setMetadata("srcloc",
1928  llvm::MDNode::get(CGF.getLLVMContext(),
1929  llvm::ConstantAsMetadata::get(Loc)));
1930  }
1931 
1932   if (CGF.getLangOpts().assumeFunctionsAreConvergent())
1933  // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
1934  // convergent (meaning, they may call an intrinsically convergent op, such
1935  // as bar.sync, and so can't have certain optimizations applied around
1936  // them).
1937  Result.addAttribute(llvm::AttributeList::FunctionIndex,
1938  llvm::Attribute::Convergent);
1939  // Extract all of the register value results from the asm.
1940  if (ResultRegTypes.size() == 1) {
1941  RegResults.push_back(&Result);
1942  } else {
1943  for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
1944  llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
1945  RegResults.push_back(Tmp);
1946  }
1947  }
1948 }
1949 
1950 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
1951  // Assemble the final asm string.
1952  std::string AsmString = S.generateAsmString(getContext());
1953 
1954  // Get all the output and input constraints together.
1955  SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
1956  SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
1957 
1958  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
1959  StringRef Name;
1960  if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
1961       Name = GAS->getOutputName(i);
1962     TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
1963  bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
1964  assert(IsValid && "Failed to parse output constraint");
1965  OutputConstraintInfos.push_back(Info);
1966  }
1967 
1968  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
1969  StringRef Name;
1970  if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
1971       Name = GAS->getInputName(i);
1972     TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
1973  bool IsValid =
1974  getTarget().validateInputConstraint(OutputConstraintInfos, Info);
1975  assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
1976  InputConstraintInfos.push_back(Info);
1977  }
1978 
1979  std::string Constraints;
1980 
1981  std::vector<LValue> ResultRegDests;
1982  std::vector<QualType> ResultRegQualTys;
1983  std::vector<llvm::Type *> ResultRegTypes;
1984  std::vector<llvm::Type *> ResultTruncRegTypes;
1985  std::vector<llvm::Type *> ArgTypes;
1986  std::vector<llvm::Value*> Args;
1987  llvm::BitVector ResultTypeRequiresCast;
1988 
1989  // Keep track of inout constraints.
1990  std::string InOutConstraints;
1991  std::vector<llvm::Value*> InOutArgs;
1992  std::vector<llvm::Type*> InOutArgTypes;
1993 
1994  // Keep track of out constraints for tied input operand.
1995  std::vector<std::string> OutputConstraints;
1996 
1997  // An inline asm can be marked readonly if it meets the following conditions:
1998  // - it doesn't have any sideeffects
1999  // - it doesn't clobber memory
2000  // - it doesn't return a value by-reference
2001  // It can be marked readnone if it doesn't have any input memory constraints
2002  // in addition to meeting the conditions listed above.
2003  bool ReadOnly = true, ReadNone = true;
2004 
2005  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2006  TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2007 
2008  // Simplify the output constraint.
2009  std::string OutputConstraint(S.getOutputConstraint(i));
2010  OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2011  getTarget(), &OutputConstraintInfos);
2012 
2013  const Expr *OutExpr = S.getOutputExpr(i);
2014  OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2015 
2016  OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2017  getTarget(), CGM, S,
2018  Info.earlyClobber());
2019  OutputConstraints.push_back(OutputConstraint);
2020  LValue Dest = EmitLValue(OutExpr);
2021  if (!Constraints.empty())
2022  Constraints += ',';
2023 
2024  // If this is a register output, then make the inline asm return it
2025  // by-value. If this is a memory result, return the value by-reference.
2026  bool isScalarizableAggregate =
2027  hasAggregateEvaluationKind(OutExpr->getType());
2028  if (!Info.allowsMemory() && (hasScalarEvaluationKind(OutExpr->getType()) ||
2029  isScalarizableAggregate)) {
2030  Constraints += "=" + OutputConstraint;
2031  ResultRegQualTys.push_back(OutExpr->getType());
2032  ResultRegDests.push_back(Dest);
2033  ResultTruncRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
2034  if (Info.allowsRegister() && isScalarizableAggregate) {
2035  ResultTypeRequiresCast.push_back(true);
2036  unsigned Size = getContext().getTypeSize(OutExpr->getType());
2037  llvm::Type *ConvTy = llvm::IntegerType::get(getLLVMContext(), Size);
2038  ResultRegTypes.push_back(ConvTy);
2039  } else {
2040  ResultTypeRequiresCast.push_back(false);
2041  ResultRegTypes.push_back(ResultTruncRegTypes.back());
2042  }
2043  // If this output is tied to an input, and if the input is larger, then
2044  // we need to set the actual result type of the inline asm node to be the
2045  // same as the input type.
2046  if (Info.hasMatchingInput()) {
2047  unsigned InputNo;
2048  for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2049  TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2050  if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2051  break;
2052  }
2053  assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2054 
2055  QualType InputTy = S.getInputExpr(InputNo)->getType();
2056  QualType OutputType = OutExpr->getType();
2057 
2058  uint64_t InputSize = getContext().getTypeSize(InputTy);
2059  if (getContext().getTypeSize(OutputType) < InputSize) {
2060  // Form the asm to return the value as a larger integer or fp type.
2061  ResultRegTypes.back() = ConvertType(InputTy);
2062  }
2063  }
2064  if (llvm::Type* AdjTy =
2065  getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2066  ResultRegTypes.back()))
2067  ResultRegTypes.back() = AdjTy;
2068  else {
2069  CGM.getDiags().Report(S.getAsmLoc(),
2070  diag::err_asm_invalid_type_in_input)
2071  << OutExpr->getType() << OutputConstraint;
2072  }
2073 
2074  // Update largest vector width for any vector types.
2075  if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2076  LargestVectorWidth = std::max(LargestVectorWidth,
2077  VT->getPrimitiveSizeInBits());
2078  } else {
2079  ArgTypes.push_back(Dest.getAddress().getType());
2080  Args.push_back(Dest.getPointer());
2081  Constraints += "=*";
2082  Constraints += OutputConstraint;
2083  ReadOnly = ReadNone = false;
2084  }
2085 
2086  if (Info.isReadWrite()) {
2087  InOutConstraints += ',';
2088 
2089  const Expr *InputExpr = S.getOutputExpr(i);
2090  llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
2091  InOutConstraints,
2092  InputExpr->getExprLoc());
2093 
2094  if (llvm::Type* AdjTy =
2095  getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2096  Arg->getType()))
2097  Arg = Builder.CreateBitCast(Arg, AdjTy);
2098 
2099  // Update largest vector width for any vector types.
2100  if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2101  LargestVectorWidth = std::max(LargestVectorWidth,
2102  VT->getPrimitiveSizeInBits());
2103  if (Info.allowsRegister())
2104  InOutConstraints += llvm::utostr(i);
2105  else
2106  InOutConstraints += OutputConstraint;
2107 
2108  InOutArgTypes.push_back(Arg->getType());
2109  InOutArgs.push_back(Arg);
2110  }
2111  }
2112 
2113  // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2114  // to the return value slot. Only do this when returning in registers.
2115  if (isa<MSAsmStmt>(&S)) {
2116  const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2117  if (RetAI.isDirect() || RetAI.isExtend()) {
2118  // Make a fake lvalue for the return value slot.
2119       LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
2120       CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2121  *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2122  ResultRegDests, AsmString, S.getNumOutputs());
2123  SawAsmBlock = true;
2124  }
2125  }
2126 
2127  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2128  const Expr *InputExpr = S.getInputExpr(i);
2129 
2130  TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2131 
2132  if (Info.allowsMemory())
2133  ReadNone = false;
2134 
2135  if (!Constraints.empty())
2136  Constraints += ',';
2137 
2138  // Simplify the input constraint.
2139  std::string InputConstraint(S.getInputConstraint(i));
2140  InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2141  &OutputConstraintInfos);
2142 
2143  InputConstraint = AddVariableConstraints(
2144  InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2145  getTarget(), CGM, S, false /* No EarlyClobber */);
2146 
2147  std::string ReplaceConstraint (InputConstraint);
2148  llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
2149 
2150  // If this input argument is tied to a larger output result, extend the
2151  // input to be the same size as the output. The LLVM backend wants to see
2152  // the input and output of a matching constraint be the same size. Note
2153  // that GCC does not define what the top bits are here. We use zext because
2154  // that is usually cheaper, but LLVM IR should really get an anyext someday.
2155  if (Info.hasTiedOperand()) {
2156  unsigned Output = Info.getTiedOperand();
2157  QualType OutputType = S.getOutputExpr(Output)->getType();
2158  QualType InputTy = InputExpr->getType();
2159 
2160  if (getContext().getTypeSize(OutputType) >
2161  getContext().getTypeSize(InputTy)) {
2162  // Use ptrtoint as appropriate so that we can do our extension.
2163  if (isa<llvm::PointerType>(Arg->getType()))
2164  Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2165  llvm::Type *OutputTy = ConvertType(OutputType);
2166  if (isa<llvm::IntegerType>(OutputTy))
2167  Arg = Builder.CreateZExt(Arg, OutputTy);
2168  else if (isa<llvm::PointerType>(OutputTy))
2169  Arg = Builder.CreateZExt(Arg, IntPtrTy);
2170  else {
2171  assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
2172  Arg = Builder.CreateFPExt(Arg, OutputTy);
2173  }
2174  }
2175  // Deal with the tied operands' constraint code in adjustInlineAsmType.
2176  ReplaceConstraint = OutputConstraints[Output];
2177  }
2178  if (llvm::Type* AdjTy =
2179  getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2180  Arg->getType()))
2181  Arg = Builder.CreateBitCast(Arg, AdjTy);
2182  else
2183  CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2184  << InputExpr->getType() << InputConstraint;
2185 
2186  // Update largest vector width for any vector types.
2187  if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2188  LargestVectorWidth = std::max(LargestVectorWidth,
2189  VT->getPrimitiveSizeInBits());
2190 
2191  ArgTypes.push_back(Arg->getType());
2192  Args.push_back(Arg);
2193  Constraints += InputConstraint;
2194  }
2195 
2196  // Append the "input" part of inout constraints last.
2197  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2198  ArgTypes.push_back(InOutArgTypes[i]);
2199  Args.push_back(InOutArgs[i]);
2200  }
2201  Constraints += InOutConstraints;
2202 
2203   // Labels
2204   SmallVector<llvm::BasicBlock *, 16> Transfer;
2205  llvm::BasicBlock *Fallthrough = nullptr;
2206  bool IsGCCAsmGoto = false;
2207  if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2208  IsGCCAsmGoto = GS->isAsmGoto();
2209  if (IsGCCAsmGoto) {
2210  for (auto *E : GS->labels()) {
2211  JumpDest Dest = getJumpDestForLabel(E->getLabel());
2212  Transfer.push_back(Dest.getBlock());
2213  llvm::BlockAddress *BA =
2214  llvm::BlockAddress::get(CurFn, Dest.getBlock());
2215  Args.push_back(BA);
2216  ArgTypes.push_back(BA->getType());
2217  if (!Constraints.empty())
2218  Constraints += ',';
2219  Constraints += 'X';
2220  }
2221  StringRef Name = "asm.fallthrough";
2222  Fallthrough = createBasicBlock(Name);
2223  }
2224  }
2225 
2226  // Clobbers
2227  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2228  StringRef Clobber = S.getClobber(i);
2229 
2230  if (Clobber == "memory")
2231  ReadOnly = ReadNone = false;
2232  else if (Clobber != "cc")
2233  Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2234 
2235  if (!Constraints.empty())
2236  Constraints += ',';
2237 
2238  Constraints += "~{";
2239  Constraints += Clobber;
2240  Constraints += '}';
2241  }
2242 
2243  // Add machine specific clobbers
2244  std::string MachineClobbers = getTarget().getClobbers();
2245  if (!MachineClobbers.empty()) {
2246  if (!Constraints.empty())
2247  Constraints += ',';
2248  Constraints += MachineClobbers;
2249  }
2250 
2251  llvm::Type *ResultType;
2252  if (ResultRegTypes.empty())
2253  ResultType = VoidTy;
2254  else if (ResultRegTypes.size() == 1)
2255  ResultType = ResultRegTypes[0];
2256  else
2257  ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
2258 
2259  llvm::FunctionType *FTy =
2260  llvm::FunctionType::get(ResultType, ArgTypes, false);
2261 
2262  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
2263  llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
2264  llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
2265  llvm::InlineAsm *IA =
2266  llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
2267  /* IsAlignStack */ false, AsmDialect);
2268  std::vector<llvm::Value*> RegResults;
2269  if (IsGCCAsmGoto) {
2270  llvm::CallBrInst *Result =
2271  Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
2272  UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
2273  ReadNone, S, ResultRegTypes, *this, RegResults);
2274  EmitBlock(Fallthrough);
2275  } else {
2276  llvm::CallInst *Result =
2277  Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
2278  UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
2279  ReadNone, S, ResultRegTypes, *this, RegResults);
2280  }
2281 
2282  assert(RegResults.size() == ResultRegTypes.size());
2283  assert(RegResults.size() == ResultTruncRegTypes.size());
2284  assert(RegResults.size() == ResultRegDests.size());
2285  // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2286  // in which case its size may grow.
2287  assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2288  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2289  llvm::Value *Tmp = RegResults[i];
2290 
2291  // If the result type of the LLVM IR asm doesn't match the result type of
2292  // the expression, do the conversion.
2293  if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
2294  llvm::Type *TruncTy = ResultTruncRegTypes[i];
2295 
2296  // Truncate the integer result to the right size, note that TruncTy can be
2297  // a pointer.
2298  if (TruncTy->isFloatingPointTy())
2299  Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2300  else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2301  uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2302  Tmp = Builder.CreateTrunc(Tmp,
2303  llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
2304  Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2305  } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2306  uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2307  Tmp = Builder.CreatePtrToInt(Tmp,
2308  llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
2309  Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2310  } else if (TruncTy->isIntegerTy()) {
2311  Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2312  } else if (TruncTy->isVectorTy()) {
2313  Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2314  }
2315  }
2316 
2317  LValue Dest = ResultRegDests[i];
2318  // ResultTypeRequiresCast elements correspond to the first
2319  // ResultTypeRequiresCast.size() elements of RegResults.
2320  if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2321       unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
2322       Address A = Builder.CreateBitCast(Dest.getAddress(),
2323  ResultRegTypes[i]->getPointerTo());
2324  QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
2325  if (Ty.isNull()) {
2326  const Expr *OutExpr = S.getOutputExpr(i);
2327  CGM.Error(
2328  OutExpr->getExprLoc(),
2329  "impossible constraint in asm: can't store value into a register");
2330  return;
2331  }
2332  Dest = MakeAddrLValue(A, Ty);
2333  }
2334  EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2335  }
2336 }
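
Putting the pieces of EmitAsmStmt together on a small example (x86-64 assumed, names hypothetical): the statement below ends up roughly as

    %1 = call i32 asm "leal 5($1), $0", "=r,r,~{dirflag},~{fpsr},~{flags}"(i32 %0)

i.e. a single InlineAsm value whose constraint string concatenates the "=r" output, the "r" input, and the target's machine clobbers, with the call result stored back through the output l-value.

    static inline int add_five(int x) {
      int r;
      __asm__("leal 5(%1), %0" : "=r"(r) : "r"(x));
      return r;
    }
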
2337 
2338 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2339  const RecordDecl *RD = S.getCapturedRecordDecl();
2340  QualType RecordTy = getContext().getRecordType(RD);
2341 
2342  // Initialize the captured struct.
2343  LValue SlotLV =
2344  MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2345 
2346   RecordDecl::field_iterator CurField = RD->field_begin();
2347   for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
2348  E = S.capture_init_end();
2349  I != E; ++I, ++CurField) {
2350  LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2351  if (CurField->hasCapturedVLAType()) {
2352  auto VAT = CurField->getCapturedVLAType();
2353  EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
2354  } else {
2355  EmitInitializerForField(*CurField, LV, *I);
2356  }
2357  }
2358 
2359  return SlotLV;
2360 }
2361 
2362 /// Generate an outlined function for the body of a CapturedStmt, store any
2363 /// captured variables into the captured struct, and call the outlined function.
2364 llvm::Function *
2365 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2366  LValue CapStruct = InitCapturedStruct(S);
2367 
2368  // Emit the CapturedDecl
2369  CodeGenFunction CGF(CGM, true);
2370  CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
2371  llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2372  delete CGF.CapturedStmtInfo;
2373 
2374  // Emit call to the helper function.
2375  EmitCallOrInvoke(F, CapStruct.getPointer());
2376 
2377  return F;
2378 }
2379 
2380 Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2381  LValue CapStruct = InitCapturedStruct(S);
2382  return CapStruct.getAddress();
2383 }
2384 
2385 /// Creates the outlined function for a CapturedStmt.
2386 llvm::Function *
2387 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2388  assert(CapturedStmtInfo &&
2389  "CapturedStmtInfo should be set when generating the captured function");
2390  const CapturedDecl *CD = S.getCapturedDecl();
2391  const RecordDecl *RD = S.getCapturedRecordDecl();
2392  SourceLocation Loc = S.getBeginLoc();
2393  assert(CD->hasBody() && "missing CapturedDecl body");
2394 
2395  // Build the argument list.
2396  ASTContext &Ctx = CGM.getContext();
2397  FunctionArgList Args;
2398  Args.append(CD->param_begin(), CD->param_end());
2399 
2400  // Create the function declaration.
2401  const CGFunctionInfo &FuncInfo =
2402  CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
2403  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2404 
2405   llvm::Function *F =
2406       llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2407                              CapturedStmtInfo->getHelperName(), &CGM.getModule());
2408  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2409  if (CD->isNothrow())
2410  F->addFnAttr(llvm::Attribute::NoUnwind);
2411 
2412  // Generate the function.
2413  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
2414  CD->getBody()->getBeginLoc());
2415  // Set the context parameter in CapturedStmtInfo.
2416   Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
2417   CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2418 
2419   // Initialize variable-length arrays.
2420   LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2421  Ctx.getTagDeclType(RD));
2422  for (auto *FD : RD->fields()) {
2423  if (FD->hasCapturedVLAType()) {
2424       auto *ExprArg =
2425           EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
2426  .getScalarVal();
2427  auto VAT = FD->getCapturedVLAType();
2428  VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2429  }
2430  }
2431 
2432   // If 'this' is captured, load it into CXXThisValue.
2433   if (CapturedStmtInfo->isCXXThisExprCaptured()) {
2434     FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
2435  LValue ThisLValue = EmitLValueForField(Base, FD);
2436  CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
2437  }
2438 
2439  PGO.assignRegionCounters(GlobalDecl(CD), F);
2440   CapturedStmtInfo->EmitBody(*this, CD->getBody());
2441   FinishFunction(CD->getBodyRBrace());
2442 
2443  return F;
2444 }
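
For context, a hedged sketch of where this machinery is used (OpenMP assumed enabled; names hypothetical): OpenMP directives, among others, wrap their associated statement in a CapturedStmt, so for code like the snippet below the captured variables 'n' and 'a' are stored into the 'agg.captured' record built by InitCapturedStruct and the loop body runs in an outlined helper generated along the lines of GenerateCapturedStmtFunction.

    void scale(int n, double *a) {
    #pragma omp parallel for
      for (int i = 0; i < n; ++i)
        a[i] *= 2.0;
    }
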
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:652
bool isAggregate() const
Definition: CGValue.h:53
const llvm::DataLayout & getDataLayout() const
Expr * getInc()
Definition: Stmt.h:2417
void EmitIndirectGotoStmt(const IndirectGotoStmt &S)
Definition: CGStmt.cpp:600
void EmitCoroutineBody(const CoroutineBodyStmt &S)
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S)
Emit the captured statement body.
This represents a GCC inline-assembly statement extension.
Definition: Stmt.h:2852
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S)
unsigned getNumInputs() const
Definition: Stmt.h:2764
SourceLocation getBeginLoc() const
Definition: Stmt.h:2669
unsigned getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it...
A (possibly-)qualified type.
Definition: Type.h:643
capture_init_iterator capture_init_begin()
Retrieve the first initialization argument.
Definition: Stmt.h:3524
void EmitSEHLeaveStmt(const SEHLeaveStmt &S)
llvm::Type * ConvertTypeForMem(QualType T)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
const CodeGenOptions & getCodeGenOpts() const
static CSFC_Result CollectStatementsForCase(const Stmt *S, const SwitchCase *Case, bool &FoundCase, SmallVectorImpl< const Stmt *> &ResultStmts)
Definition: CGStmt.cpp:1366
void EmitGotoStmt(const GotoStmt &S)
Definition: CGStmt.cpp:589
void EmitAttributedStmt(const AttributedStmt &S)
Definition: CGStmt.cpp:585
param_iterator param_begin() const
Retrieve an iterator pointing to the first parameter decl.
Definition: Decl.h:4196
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition: CGExpr.cpp:139
void enterFullExpression(const FullExpr *E)
Expr * getCond()
Definition: Stmt.h:2249
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
void EmitOMPDistributeDirective(const OMPDistributeDirective &S)
void EmitCXXTryStmt(const CXXTryStmt &S)
Stmt - This represents one statement.
Definition: Stmt.h:66
IfStmt - This represents an if/then/else.
Definition: Stmt.h:1812
SourceLocation getLocationOfByte(unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, const TargetInfo &Target, unsigned *StartToken=nullptr, unsigned *StartTokenByteOffset=nullptr) const
getLocationOfByte - Return a source location that points to the specified byte of this string literal...
Definition: Expr.cpp:1210
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
Definition: EHScopeStack.h:383
bool requiresCleanups() const
Determine whether this scope requires any cleanups.
StorageClass getStorageClass() const
Returns the storage class as written in the source.
Definition: Decl.h:1019
void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S)
unsigned getNumOutputs() const
Definition: Stmt.h:2742
Defines the PrettyStackTraceEntry class, which is used to make crashes give more contextual informati...
bool isNothrow() const
Definition: Decl.cpp:4641
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
Represents an attribute applied to a statement.
Definition: Stmt.h:1754
void EmitOMPOrderedDirective(const OMPOrderedDirective &S)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition: CGExpr.cpp:1925
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
Address GenerateCapturedStmtArgument(const CapturedStmt &S)
Definition: CGStmt.cpp:2380
bool validateInputConstraint(MutableArrayRef< ConstraintInfo > OutputConstraints, ConstraintInfo &info) const
Definition: TargetInfo.cpp:635
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1290
const RecordDecl * getCapturedRecordDecl() const
Retrieve the record declaration for captured variables.
Definition: Stmt.h:3468
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:378
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference...
Definition: CGExpr.cpp:4127
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant, or if it does but contains a label, return false.
Represents a point when we exit a loop.
Definition: ProgramPoint.h:713
bool empty() const
Determines whether the exception-scopes stack is empty.
Definition: EHScopeStack.h:344
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S)
Stmt * getSubStmt()
Definition: Stmt.h:1645
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
Represents a variable declaration or definition.
Definition: Decl.h:812
void EmitOMPCriticalDirective(const OMPCriticalDirective &S)
const VarDecl * getNRVOCandidate() const
Retrieve the variable that might be used for the named return value optimization. ...
Definition: Stmt.h:2652
uint64_t getProfileCount(const Stmt *S)
Get the profiler&#39;s count for the given statement.
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:54
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope, by being a (possibly-labelled) DeclStmt.
DiagnosticsEngine & getDiags() const
void EmitLabel(const LabelDecl *D)
EmitLabel - Emit the block for the given label.
Definition: CGStmt.cpp:520
Stmt * getThen()
Definition: Stmt.h:1899
void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S)
void SimplifyForwardingBlocks(llvm::BasicBlock *BB)
SimplifyForwardingBlocks - If the given basic block is only a branch to another basic block...
Definition: CGStmt.cpp:432
The collection of all-type qualifiers we support.
Definition: Type.h:137
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void EmitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &S)
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const
isObviouslyBranchWithoutCleanups - Return true if a branch to the specified destination obviously has...
Definition: CGCleanup.cpp:1019
LabelStmt - Represents a label, which has a substatement.
Definition: Stmt.h:1710
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S)
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
Definition: CGStmt.cpp:509
Represents a struct/union/class.
Definition: Decl.h:3634
const TargetInfo & getTarget() const
void EmitOMPSimdDirective(const OMPSimdDirective &S)
Stmt * getBody()
Definition: Stmt.h:2353
void setScopeDepth(EHScopeStack::stable_iterator depth)
Address getAddress() const
Definition: CGValue.h:326
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:160
StringRef getNormalizedGCCRegisterName(StringRef Name, bool ReturnCanonical=false) const
Returns the "normalized" GCC register name.
Definition: TargetInfo.cpp:497
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
Definition: CGExpr.cpp:593
FullExpr - Represents a "full-expression" node.
Definition: Expr.h:920
field_range fields() const
Definition: Decl.h:3849
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.cpp:274
Represents a member of a struct/union/class.
Definition: Decl.h:2615
ImplicitParamDecl * getContextParam() const
Retrieve the parameter containing captured variables.
Definition: Decl.h:4181
void rescopeLabels()
Change the cleanup scope of the labels in this lexical scope to match the scope of the enclosing cont...
Definition: CGStmt.cpp:558
static bool FindCaseStatementsForValue(const SwitchStmt &S, const llvm::APSInt &ConstantCondValue, SmallVectorImpl< const Stmt *> &ResultStmts, ASTContext &C, const SwitchCase *&ResultCase)
FindCaseStatementsForValue - Find the case statement being jumped to and then invoke CollectStatement...
Definition: CGStmt.cpp:1521
Stmt * getStmtExprResult()
Definition: Stmt.h:1402
bool isReferenceType() const
Definition: Type.h:6402
Stmt *const * const_body_iterator
Definition: Stmt.h:1352
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:513
void EmitForStmt(const ForStmt &S, ArrayRef< const Attr *> Attrs=None)
Definition: CGStmt.cpp:848
__DEVICE__ int max(int __a, int __b)
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:49
void EmitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &S)
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init)
Definition: CGClass.cpp:665
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition: CGExpr.cpp:194
IndirectGotoStmt - This represents an indirect goto.
Definition: Stmt.h:2494
Stmt * getBody() const override
getBody - If this Decl represents a declaration for a body of code, such as a function or method defi...
Definition: Decl.cpp:4638
static bool hasScalarEvaluationKind(QualType T)
bool hasMatchingInput() const
Return true if this output operand has a matching (tied) input operand.
Definition: TargetInfo.h:867
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition: Stmt.h:3511
void EmitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &S)
ForStmt - This represents a &#39;for (init;cond;inc)&#39; stmt.
Definition: Stmt.h:2384
bool assumeFunctionsAreConvergent() const
Definition: LangOptions.h:314
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:582
void pop()
End the current loop.
Definition: CGLoopInfo.cpp:758
LabelDecl * getDecl() const
Definition: Stmt.h:1727
SourceLocation getLBracLoc() const
Definition: Stmt.h:1417
void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S)
RAII for correct setting/restoring of CapturedStmtInfo.
const Expr * getOutputExpr(unsigned i) const
Definition: Stmt.cpp:388
param_iterator param_end() const
Retrieve an iterator one past the last parameter decl.
Definition: Decl.h:4198
Stmt * getBody()
Definition: Stmt.h:2418
void EmitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &S)
void EmitContinueStmt(const ContinueStmt &S)
Definition: CGStmt.cpp:1148
void EmitOMPTargetDirective(const OMPTargetDirective &S)
bool hasNormalCleanups() const
Determines whether there are any normal cleanups on the stack.
Definition: EHScopeStack.h:349
Stmt * getInit()
Definition: Stmt.h:2397
CXXForRangeStmt - This represents C++0x [stmt.ranged]&#39;s ranged for statement, represented as &#39;for (ra...
Definition: StmtCXX.h:134
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S)
void EmitSwitchStmt(const SwitchStmt &S)
Definition: CGStmt.cpp:1574
If a crash happens while one of these objects are live, the message is printed out along with the spe...
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect, bool ReadOnly, bool ReadNone, const AsmStmt &S, const std::vector< llvm::Type *> &ResultRegTypes, CodeGenFunction &CGF, std::vector< llvm::Value *> &RegResults)
Definition: CGStmt.cpp:1901
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:40
field_iterator field_begin() const
Definition: Decl.cpp:4317
CaseStmt - Represent a case statement.
Definition: Stmt.h:1478
Expr * getCond()
Definition: Stmt.h:2416
void EmitOMPParallelDirective(const OMPParallelDirective &S)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition: CGExpr.cpp:182
void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn)
Assign counters to regions and configure them for PGO of a given function.
Definition: CodeGenPGO.cpp:759
llvm::Function * EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K)
Generate an outlined function for the body of a CapturedStmt, store any captured variables into the c...
Definition: CGStmt.cpp:2365
void ForceCleanup(std::initializer_list< llvm::Value **> ValuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
void EmitDefaultStmt(const DefaultStmt &S)
Definition: CGStmt.cpp:1324
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
void EmitCaseStmtRange(const CaseStmt &S)
EmitCaseStmtRange - If case statement range is not too big then add multiple cases to switch instruct...
Definition: CGStmt.cpp:1163
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler&#39;s counter for the given statement by StepV.
uint64_t getCurrentProfileCount()
Get the profiler&#39;s current count.
Represents the body of a CapturedStmt, and serves as its DeclContext.
Definition: Decl.h:4116
VarDecl * getConditionVariable() const
Retrieve the variable declared in this "for" statement, if any.
Definition: Stmt.cpp:924
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:70
Stmt * getBody()
Definition: Stmt.h:2089
virtual bool isValidGCCRegisterName(StringRef Name) const
Returns whether the passed in string is a valid register name according to GCC.
Definition: TargetInfo.cpp:452
Stmt * getInit()
Definition: Stmt.h:1955
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
Definition: CGExpr.cpp:3983
bool isValid() const
Definition: Address.h:35
StringRef getString() const
Definition: Expr.h:1764
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1310
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S)
const TargetCodeGenInfo & getTargetCodeGenInfo()
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
Definition: CGExpr.cpp:223
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:38
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
bool isConstexpr() const
Definition: Stmt.h:1985
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx, SmallVectorImpl< PartialDiagnosticAt > *Diag=nullptr) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
std::string generateAsmString(const ASTContext &C) const
Assemble final IR asm string.
Definition: Stmt.cpp:372
Exposes information about the current target.
Definition: TargetInfo.h:161
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
EHScopeStack::stable_iterator getScopeDepth() const
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition: Decl.h:636
This represents one expression.
Definition: Expr.h:108
DeclStmt * getEndStmt()
Definition: StmtCXX.h:165
Emit only debug info necessary for generating line number tables (-gline-tables-only).
static Address invalid()
Definition: Address.h:34
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited...
void EmitCaseStmt(const CaseStmt &S)
Definition: CGStmt.cpp:1241
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:65
void EmitOMPTeamsDirective(const OMPTeamsDirective &S)
#define V(N, I)
Definition: ASTContext.h:2913
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition: CGStmt.cpp:368
LabelDecl * getConstantTarget()
getConstantTarget - Returns the fixed target of this indirect goto, if one exists.
Definition: Stmt.cpp:1065
Stmt * getBody()
Definition: Stmt.h:2261
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:43
void EmitSEHTryStmt(const SEHTryStmt &S)
Expr * getRHS()
Definition: Stmt.h:1579
static llvm::MDNode * getAsmSrcLocInfo(const StringLiteral *Str, CodeGenFunction &CGF)
getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline asm call instruction...
Definition: CGStmt.cpp:1874
llvm::LLVMContext & getLLVMContext()
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
Definition: CGCall.cpp:3681
llvm::BasicBlock * GetIndirectGotoBlock()
QualType getType() const
Definition: Expr.h:137
void EmitOMPMasterDirective(const OMPMasterDirective &S)
LabelDecl * getLabel() const
Definition: Stmt.h:2468
llvm::Function * GenerateCapturedStmtFunction(const CapturedStmt &S)
Creates the outlined function for a CapturedStmt.
Definition: CGStmt.cpp:2387
ReturnStmt - This represents a return, optionally of an expression: return; return 4;...
Definition: Stmt.h:2610
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
QualType getRecordType(const RecordDecl *Decl) const
SwitchCase * getSwitchCaseList()
Definition: Stmt.h:2146
void ResolveBranchFixups(llvm::BasicBlock *Target)
Definition: CGCleanup.cpp:378
SourceLocation getEnd() const
void EmitOMPBarrierDirective(const OMPBarrierDirective &S)
StringRef getClobber(unsigned i) const
Definition: Stmt.cpp:412
Expr * getCond()
Definition: Stmt.h:1887
ValueDecl * getDecl()
Definition: Expr.h:1217
const LangOptions & getLangOpts() const
ASTContext & getContext() const
bool isNull() const
Return true if this QualType doesn&#39;t point to a type yet.
Definition: Type.h:708
llvm::StoreInst * CreateFlagStore(bool Value, llvm::Value *Addr)
Emit a store to an i1 flag variable.
Definition: CGBuilder.h:135
const SourceManager & SM
Definition: Format.cpp:1609
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:40
void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D)
Emit simple code for OpenMP directives in Simd-only mode.
DoStmt - This represents a &#39;do/while&#39; stmt.
Definition: Stmt.h:2328
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition: Stmt.h:2693
void EmitDeclStmt(const DeclStmt &S)
Definition: CGStmt.cpp:1126
virtual bool hasBody() const
Returns true if this Decl represents a declaration for a body of code, such as a function or method d...
Definition: DeclBase.h:992
The l-value was considered opaque, so the alignment was determined from a type.
void EmitOMPFlushDirective(const OMPFlushDirective &S)
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:141
void disableSanitizerForGlobal(llvm::GlobalVariable *GV)
StringRef getInputConstraint(unsigned i) const
getInputConstraint - Return the specified input constraint.
Definition: Stmt.cpp:396
This captures a statement into a function.
Definition: Stmt.h:3350
ActionResult - This structure is used while parsing/acting on expressions, stmts, etc...
Definition: Ownership.h:153
StringRef getOutputConstraint(unsigned i) const
getOutputConstraint - Return the constraint string for the specified output operand.
Definition: Stmt.cpp:380
Encodes a location in the source.
void EmitDoStmt(const DoStmt &S, ArrayRef< const Attr *> Attrs=None)
Definition: CGStmt.cpp:786
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go...
body_range body()
Definition: Stmt.h:1343
Expr * getRetValue()
Definition: Stmt.h:2643
void EmitOMPForDirective(const OMPForDirective &S)
llvm::APSInt APSInt
A saved depth on the scope stack.
Definition: EHScopeStack.h:106
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition: CGExpr.cpp:164
Expr * getLHS()
Definition: Stmt.h:1567
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S)
Definition: CGObjC.cpp:1622
Stmt * getElse()
Definition: Stmt.h:1908
DeclStmt - Adaptor class for mixing declarations with statements and expressions. ...
Definition: Stmt.h:1203
Represents the declaration of a label.
Definition: Decl.h:468
An aggregate value slot.
Definition: CGValue.h:436
Expr * getCond()
Definition: Stmt.h:2077
void EmitStmt(const Stmt *S, ArrayRef< const Attr *> Attrs=None)
EmitStmt - Emit the code for the statement.
Definition: CGStmt.cpp:44
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S)
void EmitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &S)
bool validateOutputConstraint(ConstraintInfo &Info) const
Definition: TargetInfo.cpp:538
void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S)
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.h:1856
void EmitOMPSingleDirective(const OMPSingleDirective &S)
An aligned address.
Definition: Address.h:24
llvm::APInt APInt
Definition: Integral.h:27
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitOMPForSimdDirective(const OMPForSimdDirective &S)
void EmitOMPAtomicDirective(const OMPAtomicDirective &S)
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
void EmitOMPSectionDirective(const OMPSectionDirective &S)
void EmitOMPSectionsDirective(const OMPSectionsDirective &S)
VarDecl * getConditionVariable()
Retrieve the variable declared in this "switch" statement, if any.
Definition: Stmt.cpp:987
void Error(SourceLocation loc, StringRef error)
Emit a general error that something can&#39;t be done.
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S)
const CGFunctionInfo * CurFnInfo
void EmitDecl(const Decl &D)
EmitDecl - Emit a declaration.
Definition: CGDecl.cpp:42
const TargetCodeGenInfo & getTargetHooks() const
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:215
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type, returning the result.
void EmitWhileStmt(const WhileStmt &S, ArrayRef< const Attr *> Attrs=None)
Definition: CGStmt.cpp:698
FunctionArgList - Type for representing both the decl and type of parameters to a function...
Definition: CGCall.h:358
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:58
LabelStmt * getStmt() const
Definition: Decl.h:492
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value *> Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
Definition: CGCall.cpp:3749
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S)
Definition: CGObjC.cpp:1919
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S)
Dataflow Directional Tag Classes.
bool isVolatile() const
Definition: Stmt.h:2729
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S)
LValue InitCapturedStruct(const CapturedStmt &S)
Definition: CGStmt.cpp:2338
VarDecl * getConditionVariable()
Retrieve the variable declared in this "while" statement, if any.
Definition: Stmt.cpp:1043
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:580
void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S)
void EmitOMPParallelForDirective(const OMPParallelForDirective &S)
ArrayRef< const Attr * > getAttrs() const
Definition: Stmt.h:1790
CSFC_Result
CollectStatementsForCase - Given the body of a 'switch' statement and a constant value that is being ...
Definition: CGStmt.cpp:1365
void push(llvm::BasicBlock *Header, const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc)
Begin a new structured loop.
void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S)
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:69
StmtClass getStmtClass() const
Definition: Stmt.h:1087
void EmitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &S)
void EmitOMPCancelDirective(const OMPCancelDirective &S)
bool hasTiedOperand() const
Return true if this input operand is a matching constraint that ties it to an output operand...
Definition: TargetInfo.h:874
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:107
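Taken together with CreateLoad above, a typical simplified pattern inside a CodeGenFunction member is to materialize a temporary, store an emitted scalar into it, and load it back. This is only a sketch, assuming E is some scalar-typed Expr visible in the enclosing code:

  // Sketch only: spill a scalar expression through a temporary alloca.
  Address Tmp = CreateMemTemp(E->getType(), "tmp.spill");
  llvm::Value *V = EmitScalarExpr(E);        // compute the scalar value
  Builder.CreateStore(V, Tmp);               // store into the aligned temporary
  llvm::Value *Reloaded = Builder.CreateLoad(Tmp, "reload");
  (void)Reloaded;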
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.h:3542
llvm::Module & getModule() const
bool toIntegralConstant(APSInt &Result, QualType SrcTy, const ASTContext &Ctx) const
Try to convert this value to an integral constant.
Definition: APValue.cpp:687
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
Definition: CGObjC.cpp:1915
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext, providing only those that are of type SpecificDecl (or a class derived from it).
Definition: DeclBase.h:2048
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type. ...
Definition: CGExprAgg.cpp:1848
void EmitCoreturnStmt(const CoreturnStmt &S)
JumpDest ReturnBlock
ReturnBlock - Unified return block.
virtual StringRef getHelperName() const
Get the name of the capture helper.
static bool hasAggregateEvaluationKind(QualType T)
SwitchStmt - This represents a 'switch' stmt.
Definition: Stmt.h:2017
API for captured statement code generation.
static std::string SimplifyConstraint(const char *Constraint, const TargetInfo &Target, SmallVectorImpl< TargetInfo::ConstraintInfo > *OutCons=nullptr)
Definition: CGStmt.cpp:1723
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
When instrumenting to collect profile data, the counts for some blocks such as switch cases need to n...
bool resolveSymbolicName(const char *&Name, ArrayRef< ConstraintInfo > OutputConstraints, unsigned &Index) const
Definition: TargetInfo.cpp:612
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
T * getAttr() const
Definition: DeclBase.h:538
void EmitAsmStmt(const AsmStmt &S)
Definition: CGStmt.cpp:1950
static std::string AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, const TargetInfo &Target, CodeGenModule &CGM, const AsmStmt &Stmt, const bool EarlyClobber)
AddVariableConstraints - Look at AsmExpr and if it is a variable declared as using a particular regis...
Definition: CGStmt.cpp:1776
Stmt * getInit()
Definition: Stmt.h:2098
decl_range decls()
Definition: Stmt.h:1251
void SetInternalFunctionAttributes(GlobalDecl GD, llvm::Function *F, const CGFunctionInfo &FI)
Set the attributes on the LLVM function for the given decl and function info.
const Expr * getInputExpr(unsigned i) const
Definition: Stmt.cpp:404
Internal linkage, which indicates that the entity can be referred to from within the translation unit...
Definition: Linkage.h:31
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:454
void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S)
unsigned getNumClobbers() const
Definition: Stmt.h:2774
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2085
SourceManager & getSourceManager()
Definition: ASTContext.h:675
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S)
DeclStmt * getRangeStmt()
Definition: StmtCXX.h:161
void EmitStopPoint(const Stmt *S)
EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
Definition: CGStmt.cpp:34
SourceLocation getAsmLoc() const
Definition: Stmt.h:2723
GotoStmt - This represents a direct goto.
Definition: Stmt.h:2455
Expr * getTarget()
Definition: Stmt.h:2514
const SwitchCase * getNextSwitchCase() const
Definition: Stmt.h:1453
CapturedDecl * getCapturedDecl()
Retrieve the outlined function declaration.
Definition: Stmt.cpp:1291
void setCurrentStmt(const Stmt *S)
If the execution count for the current statement is known, record that as the current count...
Definition: CodeGenPGO.h:73
Expr * getCond()
Definition: Stmt.h:2346
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer...
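A small usage sketch for this constant-folding entry point (hedged; assumes an Expr *E and an ASTContext &Ctx are in scope):

  // Sketch only: try to fold E to an integer before emitting IR for it.
  Expr::EvalResult Result;
  if (E->EvaluateAsInt(Result, Ctx)) {
    llvm::APSInt Folded = Result.Val.getInt();  // the folded integer value
    // ... use Folded instead of emitting a runtime computation ...
  }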
VarDecl * getConditionVariable()
Retrieve the variable declared in this "if" statement, if any.
Definition: Stmt.cpp:886
ActionResult< Expr * > ExprResult
Definition: Ownership.h:263
void EmitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &S)
void EmitIfStmt(const IfStmt &S)
Definition: CGStmt.cpp:621
ContinueStmt - This represents a continue.
Definition: Stmt.h:2543
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block, taking care to avoid creation of branches from dummy blocks.
Definition: CGStmt.cpp:474
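For orientation, a deliberately simplified sketch of the block/branch idiom these helpers support, written as if inside a CodeGenFunction member with an IfStmt S in scope; real statement emitters also handle cleanups, profile counters, and constant-folded conditions, none of which is shown:

  // Sketch only: an if-like shape built from the helpers above.
  llvm::BasicBlock *ThenBB = createBasicBlock("sketch.then");
  llvm::BasicBlock *ContBB = createBasicBlock("sketch.end");
  llvm::Value *CondV = EvaluateExprAsBool(S.getCond());  // i1 condition
  Builder.CreateCondBr(CondV, ThenBB, ContBB);
  EmitBlock(ThenBB);              // start emitting into the "then" block
  EmitStmt(S.getThen());
  EmitBranch(ContBB);             // avoids branches from dummy blocks
  EmitBlock(ContBB);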
bool isNRVOVariable() const
Determine whether this local variable can be used with the named return value optimization (NRVO)...
Definition: Decl.h:1337
bool isVoidType() const
Definition: Type.h:6649
void EmitReturnStmt(const ReturnStmt &S)
EmitReturnStmt - Note that due to GCC extensions, this can have an operand if the function returns vo...
Definition: CGStmt.cpp:1042
llvm::Type * ConvertType(QualType T)
virtual llvm::Type * adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, StringRef Constraint, llvm::Type *Ty) const
Corrects the low-level LLVM type for a given constraint and "usual" type.
Definition: TargetInfo.h:127
WhileStmt - This represents a 'while' stmt.
Definition: Stmt.h:2200
LValue EmitLValue(const Expr *E)
EmitLValue - Emit code to compute a designator that specifies the location of the expression...
Definition: CGExpr.cpp:1243
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
void EmitLabelStmt(const LabelStmt &S)
Definition: CGStmt.cpp:580
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition: Stmt.cpp:262
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition: CGExpr.cpp:1772
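Combining EmitLValue with EmitLoadOfLValue, a hedged sketch of reading a scalar through an lvalue (assumes E is a scalar-typed Expr and the code sits inside a CodeGenFunction member):

  // Sketch only: compute the location of E, then load its scalar value.
  LValue LV = EmitLValue(E);
  RValue RV = EmitLoadOfLValue(LV, E->getExprLoc());
  llvm::Value *Scalar = RV.getScalarVal();  // the loaded scalar value
  (void)Scalar;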
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1681
Defines the clang::TargetInfo interface.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2516
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition: Decl.h:275
virtual void addReturnRegisterOutputs(CodeGen::CodeGenFunction &CGF, CodeGen::LValue ReturnValue, std::string &Constraints, std::vector< llvm::Type *> &ResultRegTypes, std::vector< llvm::Type *> &ResultTruncRegTypes, std::vector< CodeGen::LValue > &ResultRegDests, std::string &AsmString, unsigned NumOutputs) const
Adds constraints and types for result registers.
Definition: TargetInfo.h:134
CGCapturedStmtInfo * CapturedStmtInfo
void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S)
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
Definition: CGExprAgg.cpp:1911
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1141
static RValue get(llvm::Value *V)
Definition: CGValue.h:85
bool EmitSimpleStmt(const Stmt *S)
EmitSimpleStmt - Try to emit a "simple" statement which does not necessarily require an insertion poi...
Definition: CGStmt.cpp:345
BreakStmt - This represents a break.
Definition: Stmt.h:2573
void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S)
CapturedRegionKind
The different kinds of captured statement.
Definition: CapturedStmt.h:16
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
Definition: CGCleanup.cpp:1044
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
Definition: CGDebugInfo.h:759
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition: CGExpr.cpp:2870
Stmt * getSubStmt()
Definition: Stmt.h:1731
void EmitCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef< const Attr *> Attrs=None)
Definition: CGStmt.cpp:947
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
Definition: CGStmt.cpp:380
void EmitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &S)
DeclStmt * getLoopVarStmt()
Definition: StmtCXX.h:168
A trivial tuple used to represent a source range.
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
Definition: CGObjC.cpp:1911
LValue - This represents an lvalue reference.
Definition: CGValue.h:166
SanitizerMetadata * getSanitizerMetadata()
void EmitBlockAfterUses(llvm::BasicBlock *BB)
EmitBlockAfterUses - Emit the given block somewhere hopefully near its uses, and leave the insertion ...
Definition: CGStmt.cpp:491
APSInt & getInt()
Definition: APValue.h:336
const LangOptions & getLangOpts() const
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sam...
Definition: Expr.cpp:2987
DeclStmt * getBeginStmt()
Definition: StmtCXX.h:162
void EmitBreakStmt(const BreakStmt &S)
Definition: CGStmt.cpp:1136
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S)
Definition: CGObjC.cpp:3438
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
SourceLocation getBegin() const
capture_init_iterator capture_init_end()
Retrieve the iterator pointing one past the last initialization argument.
Definition: Stmt.h:3534
llvm::Value * getPointer() const
Definition: CGValue.h:322
This class handles loading and caching of source files into memory.
Stmt * getSubStmt()
Definition: Stmt.h:1794
bool haveRegionCounts() const
Whether or not we have PGO region data for the current function.
Definition: CodeGenPGO.h:50
Defines enum values for all the target-independent builtin functions.
void EmitOMPTaskDirective(const OMPTaskDirective &S)
SourceLocation getBodyRBrace() const
getBodyRBrace - Gets the right brace of the body, if a body exists.
Definition: DeclBase.cpp:896
CapturedRegionKind getCapturedRegionKind() const
Retrieve the captured region kind.
Definition: Stmt.cpp:1306
bool isScalar() const
Definition: CGValue.h:51
Attr - This represents one attribute.
Definition: Attr.h:45
SourceLocation getLocation() const
Definition: DeclBase.h:429
virtual std::string convertConstraint(const char *&Constraint) const
Definition: TargetInfo.h:974
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - Returns the integer QualType for the specified details: bitwidth, signed/unsigned.
void EmitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &S)
static OMPLinearClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef< Expr *> VL, ArrayRef< Expr *> PL, ArrayRef< Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit, Expr *PostUpdate)
Creates clause with a list of variables VL and a linear step Step.
virtual const char * getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
Stmt * getSubStmt()
Definition: Stmt.h:1597
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
Definition: CGCall.cpp:1541