clang  9.0.0svn
CGStmt.cpp
1 //===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code to emit Stmt nodes as LLVM code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CodeGenFunction.h"
14 #include "CGDebugInfo.h"
15 #include "CodeGenModule.h"
16 #include "TargetInfo.h"
17 #include "clang/AST/StmtVisitor.h"
18 #include "clang/Basic/Builtins.h"
19 #include "clang/Basic/PrettyStackTrace.h"
20 #include "clang/Basic/TargetInfo.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/IR/CallSite.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/InlineAsm.h"
25 #include "llvm/IR/Intrinsics.h"
26 #include "llvm/IR/MDBuilder.h"
27 
28 using namespace clang;
29 using namespace CodeGen;
30 
31 //===----------------------------------------------------------------------===//
32 // Statement Emission
33 //===----------------------------------------------------------------------===//
34 
35 void CodeGenFunction::EmitStopPoint(const Stmt *S) {
36  if (CGDebugInfo *DI = getDebugInfo()) {
37  SourceLocation Loc;
38  Loc = S->getBeginLoc();
39  DI->EmitLocation(Builder, Loc);
40 
41  LastStopPoint = Loc;
42  }
43 }
44 
45 void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
46  assert(S && "Null statement?");
47  PGO.setCurrentStmt(S);
48 
49  // These statements have their own debug info handling.
50  if (EmitSimpleStmt(S))
51  return;
52 
53  // Check if we are generating unreachable code.
54  if (!HaveInsertPoint()) {
55  // If so, and the statement doesn't contain a label, then we do not need to
56  // generate actual code. This is safe because (1) the current point is
57  // unreachable, so we don't need to execute the code, and (2) we've already
58  // handled the statements which update internal data structures (like the
59  // local variable map) which could be used by subsequent statements.
60  if (!ContainsLabel(S)) {
61  // Verify that any decl statements were handled as simple; they may be in
62  // scope of subsequent reachable statements.
63  assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
64  return;
65  }
66 
67  // Otherwise, make a new block to hold the code.
68  EnsureInsertPoint();
69  }
70 
71  // Generate a stoppoint if we are emitting debug info.
72  EmitStopPoint(S);
73 
74  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
75  // enabled.
76  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
77  if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
78  EmitSimpleOMPExecutableDirective(*D);
79  return;
80  }
81  }
82 
83  switch (S->getStmtClass()) {
84  case Stmt::NoStmtClass:
85  case Stmt::CXXCatchStmtClass:
86  case Stmt::SEHExceptStmtClass:
87  case Stmt::SEHFinallyStmtClass:
88  case Stmt::MSDependentExistsStmtClass:
89  llvm_unreachable("invalid statement class to emit generically");
90  case Stmt::NullStmtClass:
91  case Stmt::CompoundStmtClass:
92  case Stmt::DeclStmtClass:
93  case Stmt::LabelStmtClass:
94  case Stmt::AttributedStmtClass:
95  case Stmt::GotoStmtClass:
96  case Stmt::BreakStmtClass:
97  case Stmt::ContinueStmtClass:
98  case Stmt::DefaultStmtClass:
99  case Stmt::CaseStmtClass:
100  case Stmt::SEHLeaveStmtClass:
101  llvm_unreachable("should have emitted these statements as simple");
102 
103 #define STMT(Type, Base)
104 #define ABSTRACT_STMT(Op)
105 #define EXPR(Type, Base) \
106  case Stmt::Type##Class:
107 #include "clang/AST/StmtNodes.inc"
108  {
109  // Remember the block we came in on.
110  llvm::BasicBlock *incoming = Builder.GetInsertBlock();
111  assert(incoming && "expression emission must have an insertion point");
112 
113  EmitIgnoredExpr(cast<Expr>(S));
114 
115  llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
116  assert(outgoing && "expression emission cleared block!");
117 
118  // The expression emitters assume (reasonably!) that the insertion
119  // point is always set. To maintain that, the call-emission code
120  // for noreturn functions has to enter a new block with no
121  // predecessors. We want to kill that block and mark the current
122  // insertion point unreachable in the common case of a call like
123  // "exit();". Since expression emission doesn't otherwise create
124  // blocks with no predecessors, we can just test for that.
125  // However, we must be careful not to do this to our incoming
126  // block, because *statement* emission does sometimes create
127  // reachable blocks which will have no predecessors until later in
128  // the function. This occurs with, e.g., labels that are not
129  // reachable by fallthrough.
130  if (incoming != outgoing && outgoing->use_empty()) {
131  outgoing->eraseFromParent();
132  Builder.ClearInsertionPoint();
133  }
134  break;
135  }
136 
137  case Stmt::IndirectGotoStmtClass:
138  EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
139 
140  case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
141  case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
142  case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
143  case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;
144 
145  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
146 
147  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
148  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
149  case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
150  case Stmt::CoroutineBodyStmtClass:
151  EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
152  break;
153  case Stmt::CoreturnStmtClass:
154  EmitCoreturnStmt(cast<CoreturnStmt>(*S));
155  break;
156  case Stmt::CapturedStmtClass: {
157  const CapturedStmt *CS = cast<CapturedStmt>(S);
158  EmitCapturedStmt(*CS, CR_Default);
159  }
160  break;
161  case Stmt::ObjCAtTryStmtClass:
162  EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
163  break;
164  case Stmt::ObjCAtCatchStmtClass:
165  llvm_unreachable(
166  "@catch statements should be handled by EmitObjCAtTryStmt");
167  case Stmt::ObjCAtFinallyStmtClass:
168  llvm_unreachable(
169  "@finally statements should be handled by EmitObjCAtTryStmt");
170  case Stmt::ObjCAtThrowStmtClass:
171  EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
172  break;
173  case Stmt::ObjCAtSynchronizedStmtClass:
174  EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
175  break;
176  case Stmt::ObjCForCollectionStmtClass:
177  EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
178  break;
179  case Stmt::ObjCAutoreleasePoolStmtClass:
180  EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
181  break;
182 
183  case Stmt::CXXTryStmtClass:
184  EmitCXXTryStmt(cast<CXXTryStmt>(*S));
185  break;
186  case Stmt::CXXForRangeStmtClass:
187  EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
188  break;
189  case Stmt::SEHTryStmtClass:
190  EmitSEHTryStmt(cast<SEHTryStmt>(*S));
191  break;
192  case Stmt::OMPParallelDirectiveClass:
193  EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
194  break;
195  case Stmt::OMPSimdDirectiveClass:
196  EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
197  break;
198  case Stmt::OMPForDirectiveClass:
199  EmitOMPForDirective(cast<OMPForDirective>(*S));
200  break;
201  case Stmt::OMPForSimdDirectiveClass:
202  EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
203  break;
204  case Stmt::OMPSectionsDirectiveClass:
205  EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
206  break;
207  case Stmt::OMPSectionDirectiveClass:
208  EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
209  break;
210  case Stmt::OMPSingleDirectiveClass:
211  EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
212  break;
213  case Stmt::OMPMasterDirectiveClass:
214  EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
215  break;
216  case Stmt::OMPCriticalDirectiveClass:
217  EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
218  break;
219  case Stmt::OMPParallelForDirectiveClass:
220  EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
221  break;
222  case Stmt::OMPParallelForSimdDirectiveClass:
223  EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
224  break;
225  case Stmt::OMPParallelSectionsDirectiveClass:
226  EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
227  break;
228  case Stmt::OMPTaskDirectiveClass:
229  EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
230  break;
231  case Stmt::OMPTaskyieldDirectiveClass:
232  EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
233  break;
234  case Stmt::OMPBarrierDirectiveClass:
235  EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
236  break;
237  case Stmt::OMPTaskwaitDirectiveClass:
238  EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
239  break;
240  case Stmt::OMPTaskgroupDirectiveClass:
241  EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
242  break;
243  case Stmt::OMPFlushDirectiveClass:
244  EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
245  break;
246  case Stmt::OMPOrderedDirectiveClass:
247  EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
248  break;
249  case Stmt::OMPAtomicDirectiveClass:
250  EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
251  break;
252  case Stmt::OMPTargetDirectiveClass:
253  EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
254  break;
255  case Stmt::OMPTeamsDirectiveClass:
256  EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
257  break;
258  case Stmt::OMPCancellationPointDirectiveClass:
259  EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
260  break;
261  case Stmt::OMPCancelDirectiveClass:
262  EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
263  break;
264  case Stmt::OMPTargetDataDirectiveClass:
265  EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
266  break;
267  case Stmt::OMPTargetEnterDataDirectiveClass:
268  EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
269  break;
270  case Stmt::OMPTargetExitDataDirectiveClass:
271  EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
272  break;
273  case Stmt::OMPTargetParallelDirectiveClass:
274  EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
275  break;
276  case Stmt::OMPTargetParallelForDirectiveClass:
277  EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
278  break;
279  case Stmt::OMPTaskLoopDirectiveClass:
280  EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
281  break;
282  case Stmt::OMPTaskLoopSimdDirectiveClass:
283  EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
284  break;
285  case Stmt::OMPDistributeDirectiveClass:
286  EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
287  break;
288  case Stmt::OMPTargetUpdateDirectiveClass:
289  EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
290  break;
291  case Stmt::OMPDistributeParallelForDirectiveClass:
292  EmitOMPDistributeParallelForDirective(
293  cast<OMPDistributeParallelForDirective>(*S));
294  break;
295  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
296  EmitOMPDistributeParallelForSimdDirective(
297  cast<OMPDistributeParallelForSimdDirective>(*S));
298  break;
299  case Stmt::OMPDistributeSimdDirectiveClass:
300  EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
301  break;
302  case Stmt::OMPTargetParallelForSimdDirectiveClass:
303  EmitOMPTargetParallelForSimdDirective(
304  cast<OMPTargetParallelForSimdDirective>(*S));
305  break;
306  case Stmt::OMPTargetSimdDirectiveClass:
307  EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
308  break;
309  case Stmt::OMPTeamsDistributeDirectiveClass:
310  EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
311  break;
312  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
313  EmitOMPTeamsDistributeSimdDirective(
314  cast<OMPTeamsDistributeSimdDirective>(*S));
315  break;
316  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
317  EmitOMPTeamsDistributeParallelForSimdDirective(
318  cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
319  break;
320  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
321  EmitOMPTeamsDistributeParallelForDirective(
322  cast<OMPTeamsDistributeParallelForDirective>(*S));
323  break;
324  case Stmt::OMPTargetTeamsDirectiveClass:
325  EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
326  break;
327  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
328  EmitOMPTargetTeamsDistributeDirective(
329  cast<OMPTargetTeamsDistributeDirective>(*S));
330  break;
331  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
332  EmitOMPTargetTeamsDistributeParallelForDirective(
333  cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
334  break;
335  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
336  EmitOMPTargetTeamsDistributeParallelForSimdDirective(
337  cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
338  break;
339  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
340  EmitOMPTargetTeamsDistributeSimdDirective(
341  cast<OMPTargetTeamsDistributeSimdDirective>(*S));
342  break;
343  }
344 }
345 
346 bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
347  switch (S->getStmtClass()) {
348  default: return false;
349  case Stmt::NullStmtClass: break;
350  case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
351  case Stmt::DeclStmtClass: EmitDeclStmt(cast<DeclStmt>(*S)); break;
352  case Stmt::LabelStmtClass: EmitLabelStmt(cast<LabelStmt>(*S)); break;
353  case Stmt::AttributedStmtClass:
354  EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
355  case Stmt::GotoStmtClass: EmitGotoStmt(cast<GotoStmt>(*S)); break;
356  case Stmt::BreakStmtClass: EmitBreakStmt(cast<BreakStmt>(*S)); break;
357  case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
358  case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break;
359  case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break;
360  case Stmt::SEHLeaveStmtClass: EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S)); break;
361  }
362 
363  return true;
364 }
365 
366 /// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
367 /// this captures the expression result of the last sub-statement and returns it
368 /// (for use by the statement expression extension).
369 Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
370  AggValueSlot AggSlot) {
371  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
372  "LLVM IR generation of compound statement ('{}')");
373 
374  // Keep track of the current cleanup stack depth, including debug scopes.
375  LexicalScope Scope(*this, S.getSourceRange());
376 
377  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
378 }
379 
380 Address
381 CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
382  bool GetLast,
383  AggValueSlot AggSlot) {
384 
385  for (CompoundStmt::const_body_iterator I = S.body_begin(),
386  E = S.body_end()-GetLast; I != E; ++I)
387  EmitStmt(*I);
388 
389  Address RetAlloca = Address::invalid();
390  if (GetLast) {
391  // We have to special case labels here. They are statements, but when put
392  // at the end of a statement expression, they yield the value of their
393  // subexpression. Handle this by walking through all labels we encounter,
394  // emitting them before we evaluate the subexpr.
395  const Stmt *LastStmt = S.body_back();
396  while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
397  EmitLabel(LS->getDecl());
398  LastStmt = LS->getSubStmt();
399  }
400 
401  EnsureInsertPoint();
402 
403  QualType ExprTy = cast<Expr>(LastStmt)->getType();
404  if (hasAggregateEvaluationKind(ExprTy)) {
405  EmitAggExpr(cast<Expr>(LastStmt), AggSlot);
406  } else {
407  // We can't return an RValue here because there might be cleanups at
408  // the end of the StmtExpr. Because of that, we have to emit the result
409  // here into a temporary alloca.
410  RetAlloca = CreateMemTemp(ExprTy);
411  EmitAnyExprToMem(cast<Expr>(LastStmt), RetAlloca, Qualifiers(),
412  /*IsInit*/false);
413  }
414 
415  }
416 
417  return RetAlloca;
418 }
419 
420 void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
421  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
422 
423  // If there is a cleanup stack, then it isn't worth trying to
424  // simplify this block (we would need to remove it from the scope map
425  // and cleanup entry).
426  if (!EHStack.empty())
427  return;
428 
429  // Can only simplify direct branches.
430  if (!BI || !BI->isUnconditional())
431  return;
432 
433  // Can only simplify empty blocks.
434  if (BI->getIterator() != BB->begin())
435  return;
436 
437  BB->replaceAllUsesWith(BI->getSuccessor(0));
438  BI->eraseFromParent();
439  BB->eraseFromParent();
440 }
441 
442 void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
443  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
444 
445  // Fall out of the current block (if necessary).
446  EmitBranch(BB);
447 
448  if (IsFinished && BB->use_empty()) {
449  delete BB;
450  return;
451  }
452 
453  // Place the block after the current block, if possible, or else at
454  // the end of the function.
455  if (CurBB && CurBB->getParent())
456  CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
457  else
458  CurFn->getBasicBlockList().push_back(BB);
459  Builder.SetInsertPoint(BB);
460 }
461 
462 void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
463  // Emit a branch from the current block to the target one if this
464  // was a real block. If this was just a fall-through block after a
465  // terminator, don't emit it.
466  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
467 
468  if (!CurBB || CurBB->getTerminator()) {
469  // If there is no insert point or the previous block is already
470  // terminated, don't touch it.
471  } else {
472  // Otherwise, create a fall-through branch.
473  Builder.CreateBr(Target);
474  }
475 
476  Builder.ClearInsertionPoint();
477 }
478 
479 void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
480  bool inserted = false;
481  for (llvm::User *u : block->users()) {
482  if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
483  CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
484  block);
485  inserted = true;
486  break;
487  }
488  }
489 
490  if (!inserted)
491  CurFn->getBasicBlockList().push_back(block);
492 
493  Builder.SetInsertPoint(block);
494 }
495 
496 CodeGenFunction::JumpDest
497 CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
498  JumpDest &Dest = LabelMap[D];
499  if (Dest.isValid()) return Dest;
500 
501  // Create, but don't insert, the new block.
502  Dest = JumpDest(createBasicBlock(D->getName()),
503  EHScopeStack::stable_iterator::invalid(),
504  NextCleanupDestIndex++);
505  return Dest;
506 }
507 
508 void CodeGenFunction::EmitLabel(const LabelDecl *D) {
509  // Add this label to the current lexical scope if we're within any
510  // normal cleanups. Jumps "in" to this label --- when permitted by
511  // the language --- may need to be routed around such cleanups.
512  if (EHStack.hasNormalCleanups() && CurLexicalScope)
513  CurLexicalScope->addLabel(D);
514 
515  JumpDest &Dest = LabelMap[D];
516 
517  // If we didn't need a forward reference to this label, just go
518  // ahead and create a destination at the current scope.
519  if (!Dest.isValid()) {
520  Dest = getJumpDestInCurrentScope(D->getName());
521 
522  // Otherwise, we need to give this label a target depth and remove
523  // it from the branch-fixups list.
524  } else {
525  assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
526  Dest.setScopeDepth(EHStack.stable_begin());
527  ResolveBranchFixups(Dest.getBlock());
528  }
529 
530  EmitBlock(Dest.getBlock());
531  incrementProfileCounter(D->getStmt());
532 }
533 
534 /// Change the cleanup scope of the labels in this lexical scope to
535 /// match the scope of the enclosing context.
536 void CodeGenFunction::LexicalScope::rescopeLabels() {
537  assert(!Labels.empty());
538  EHScopeStack::stable_iterator innermostScope
539  = CGF.EHStack.getInnermostNormalCleanup();
540 
541  // Change the scope depth of all the labels.
542  for (SmallVectorImpl<const LabelDecl*>::const_iterator
543  i = Labels.begin(), e = Labels.end(); i != e; ++i) {
544  assert(CGF.LabelMap.count(*i));
545  JumpDest &dest = CGF.LabelMap.find(*i)->second;
546  assert(dest.getScopeDepth().isValid());
547  assert(innermostScope.encloses(dest.getScopeDepth()));
548  dest.setScopeDepth(innermostScope);
549  }
550 
551  // Reparent the labels if the new scope also has cleanups.
552  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
553  ParentScope->Labels.append(Labels.begin(), Labels.end());
554  }
555 }
556 
557 
558 void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
559  EmitLabel(S.getDecl());
560  EmitStmt(S.getSubStmt());
561 }
562 
563 void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
564  EmitStmt(S.getSubStmt(), S.getAttrs());
565 }
566 
567 void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
568  // If this code is reachable then emit a stop point (if generating
569  // debug info). We have to do this ourselves because we are on the
570  // "simple" statement path.
571  if (HaveInsertPoint())
572  EmitStopPoint(&S);
573 
574  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
575 }
576 
577 
578 void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
579  if (const LabelDecl *Target = S.getConstantTarget()) {
580  EmitBranchThroughCleanup(getJumpDestForLabel(Target));
581  return;
582  }
583 
584  // Ensure that we have an i8* for our PHI node.
585  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
586  Int8PtrTy, "addr");
587  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
588 
589  // Get the basic block for the indirect goto.
590  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
591 
592  // The first instruction in the block has to be the PHI for the switch dest,
593  // add an entry for this branch.
594  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
595 
596  EmitBranch(IndGotoBB);
597 }
598 
599 void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
600  // C99 6.8.4.1: The first substatement is executed if the expression compares
601  // unequal to 0. The condition must be a scalar type.
602  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
603 
604  if (S.getInit())
605  EmitStmt(S.getInit());
606 
607  if (S.getConditionVariable())
608  EmitDecl(*S.getConditionVariable());
609 
610  // If the condition constant folds and can be elided, try to avoid emitting
611  // the condition and the dead arm of the if/else.
612  bool CondConstant;
613  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
614  S.isConstexpr())) {
615  // Figure out which block (then or else) is executed.
616  const Stmt *Executed = S.getThen();
617  const Stmt *Skipped = S.getElse();
618  if (!CondConstant) // Condition false?
619  std::swap(Executed, Skipped);
620 
621  // If the skipped block has no labels in it, just emit the executed block.
622  // This avoids emitting dead code and simplifies the CFG substantially.
623  if (S.isConstexpr() || !ContainsLabel(Skipped)) {
624  if (CondConstant)
625  incrementProfileCounter(&S);
626  if (Executed) {
627  RunCleanupsScope ExecutedScope(*this);
628  EmitStmt(Executed);
629  }
630  return;
631  }
632  }
633 
634  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
635  // the conditional branch.
636  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
637  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
638  llvm::BasicBlock *ElseBlock = ContBlock;
639  if (S.getElse())
640  ElseBlock = createBasicBlock("if.else");
641 
642  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock,
643  getProfileCount(S.getThen()));
644 
645  // Emit the 'then' code.
646  EmitBlock(ThenBlock);
647  incrementProfileCounter(&S);
648  {
649  RunCleanupsScope ThenScope(*this);
650  EmitStmt(S.getThen());
651  }
652  EmitBranch(ContBlock);
653 
654  // Emit the 'else' code if present.
655  if (const Stmt *Else = S.getElse()) {
656  {
657  // There is no need to emit line number for an unconditional branch.
658  auto NL = ApplyDebugLocation::CreateEmpty(*this);
659  EmitBlock(ElseBlock);
660  }
661  {
662  RunCleanupsScope ElseScope(*this);
663  EmitStmt(Else);
664  }
665  {
666  // There is no need to emit line number for an unconditional branch.
667  auto NL = ApplyDebugLocation::CreateEmpty(*this);
668  EmitBranch(ContBlock);
669  }
670  }
671 
672  // Emit the continuation block for code after the if.
673  EmitBlock(ContBlock, true);
674 }
675 
676 void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
677  ArrayRef<const Attr *> WhileAttrs) {
678  // Emit the header for the loop, which will also become
679  // the continue target.
680  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
681  EmitBlock(LoopHeader.getBlock());
682 
683  const SourceRange &R = S.getSourceRange();
684  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), WhileAttrs,
685  SourceLocToDebugLoc(R.getBegin()),
686  SourceLocToDebugLoc(R.getEnd()));
687 
688  // Create an exit block for when the condition fails, which will
689  // also become the break target.
690  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
691 
692  // Store the blocks to use for break and continue.
693  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
694 
695  // C++ [stmt.while]p2:
696  // When the condition of a while statement is a declaration, the
697  // scope of the variable that is declared extends from its point
698  // of declaration (3.3.2) to the end of the while statement.
699  // [...]
700  // The object created in a condition is destroyed and created
701  // with each iteration of the loop.
702  RunCleanupsScope ConditionScope(*this);
703 
704  if (S.getConditionVariable())
705  EmitDecl(*S.getConditionVariable());
706 
707  // Evaluate the conditional in the while header. C99 6.8.5.1: The
708  // evaluation of the controlling expression takes place before each
709  // execution of the loop body.
710  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
711 
712  // while(1) is common, avoid extra exit blocks. Be sure
713  // to correctly handle break/continue though.
714  bool EmitBoolCondBranch = true;
715  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
716  if (C->isOne())
717  EmitBoolCondBranch = false;
718 
719  // As long as the condition is true, go to the loop body.
720  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
721  if (EmitBoolCondBranch) {
722  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
723  if (ConditionScope.requiresCleanups())
724  ExitBlock = createBasicBlock("while.exit");
725  Builder.CreateCondBr(
726  BoolCondVal, LoopBody, ExitBlock,
727  createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
728 
729  if (ExitBlock != LoopExit.getBlock()) {
730  EmitBlock(ExitBlock);
731  EmitBranchThroughCleanup(LoopExit);
732  }
733  }
734 
735  // Emit the loop body. We have to emit this in a cleanup scope
736  // because it might be a singleton DeclStmt.
737  {
738  RunCleanupsScope BodyScope(*this);
739  EmitBlock(LoopBody);
740  incrementProfileCounter(&S);
741  EmitStmt(S.getBody());
742  }
743 
744  BreakContinueStack.pop_back();
745 
746  // Immediately force cleanup.
747  ConditionScope.ForceCleanup();
748 
749  EmitStopPoint(&S);
750  // Branch to the loop header again.
751  EmitBranch(LoopHeader.getBlock());
752 
753  LoopStack.pop();
754 
755  // Emit the exit block.
756  EmitBlock(LoopExit.getBlock(), true);
757 
758  // The LoopHeader typically is just a branch if we skipped emitting
759  // a branch; try to erase it.
760  if (!EmitBoolCondBranch)
761  SimplifyForwardingBlocks(LoopHeader.getBlock());
762 }
763 
764 void CodeGenFunction::EmitDoStmt(const DoStmt &S,
765  ArrayRef<const Attr *> DoAttrs) {
766  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
767  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
768 
769  uint64_t ParentCount = getCurrentProfileCount();
770 
771  // Store the blocks to use for break and continue.
772  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
773 
774  // Emit the body of the loop.
775  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
776 
777  EmitBlockWithFallThrough(LoopBody, &S);
778  {
779  RunCleanupsScope BodyScope(*this);
780  EmitStmt(S.getBody());
781  }
782 
783  EmitBlock(LoopCond.getBlock());
784 
785  const SourceRange &R = S.getSourceRange();
786  LoopStack.push(LoopBody, CGM.getContext(), DoAttrs,
787  SourceLocToDebugLoc(R.getBegin()),
788  SourceLocToDebugLoc(R.getEnd()));
789 
790  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
791  // after each execution of the loop body."
792 
793  // Evaluate the conditional in the while header.
794  // C99 6.8.5p2/p4: The first substatement is executed if the expression
795  // compares unequal to 0. The condition must be a scalar type.
796  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
797 
798  BreakContinueStack.pop_back();
799 
800  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
801  // to correctly handle break/continue though.
802  bool EmitBoolCondBranch = true;
803  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
804  if (C->isZero())
805  EmitBoolCondBranch = false;
806 
807  // As long as the condition is true, iterate the loop.
808  if (EmitBoolCondBranch) {
809  uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
810  Builder.CreateCondBr(
811  BoolCondVal, LoopBody, LoopExit.getBlock(),
812  createProfileWeightsForLoop(S.getCond(), BackedgeCount));
813  }
814 
815  LoopStack.pop();
816 
817  // Emit the exit block.
818  EmitBlock(LoopExit.getBlock());
819 
820  // The DoCond block typically is just a branch if we skipped
821  // emitting a branch; try to erase it.
822  if (!EmitBoolCondBranch)
823  SimplifyForwardingBlocks(LoopCond.getBlock());
824 }
825 
826 void CodeGenFunction::EmitForStmt(const ForStmt &S,
827  ArrayRef<const Attr *> ForAttrs) {
828  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
829 
830  LexicalScope ForScope(*this, S.getSourceRange());
831 
832  // Evaluate the first part before the loop.
833  if (S.getInit())
834  EmitStmt(S.getInit());
835 
836  // Start the loop with a block that tests the condition.
837  // If there's an increment, the continue scope will be overwritten
838  // later.
839  JumpDest Continue = getJumpDestInCurrentScope("for.cond");
840  llvm::BasicBlock *CondBlock = Continue.getBlock();
841  EmitBlock(CondBlock);
842 
843  const SourceRange &R = S.getSourceRange();
844  LoopStack.push(CondBlock, CGM.getContext(), ForAttrs,
845  SourceLocToDebugLoc(R.getBegin()),
846  SourceLocToDebugLoc(R.getEnd()));
847 
848  // If the for loop doesn't have an increment we can just use the
849  // condition as the continue block. Otherwise we'll need to create
850  // a block for it (in the current scope, i.e. in the scope of the
851  // condition), and that block will become our continue block.
852  if (S.getInc())
853  Continue = getJumpDestInCurrentScope("for.inc");
854 
855  // Store the blocks to use for break and continue.
856  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
857 
858  // Create a cleanup scope for the condition variable cleanups.
859  LexicalScope ConditionScope(*this, S.getSourceRange());
860 
861  if (S.getCond()) {
862  // If the for statement has a condition scope, emit the local variable
863  // declaration.
864  if (S.getConditionVariable()) {
865  EmitDecl(*S.getConditionVariable());
866  }
867 
868  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
869  // If there are any cleanups between here and the loop-exit scope,
870  // create a block to stage a loop exit along.
871  if (ForScope.requiresCleanups())
872  ExitBlock = createBasicBlock("for.cond.cleanup");
873 
874  // As long as the condition is true, iterate the loop.
875  llvm::BasicBlock *ForBody = createBasicBlock("for.body");
876 
877  // C99 6.8.5p2/p4: The first substatement is executed if the expression
878  // compares unequal to 0. The condition must be a scalar type.
879  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
880  Builder.CreateCondBr(
881  BoolCondVal, ForBody, ExitBlock,
882  createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
883 
884  if (ExitBlock != LoopExit.getBlock()) {
885  EmitBlock(ExitBlock);
886  EmitBranchThroughCleanup(LoopExit);
887  }
888 
889  EmitBlock(ForBody);
890  } else {
891  // Treat it as a non-zero constant. Don't even create a new block for the
892  // body, just fall into it.
893  }
894  incrementProfileCounter(&S);
895 
896  {
897  // Create a separate cleanup scope for the body, in case it is not
898  // a compound statement.
899  RunCleanupsScope BodyScope(*this);
900  EmitStmt(S.getBody());
901  }
902 
903  // If there is an increment, emit it next.
904  if (S.getInc()) {
905  EmitBlock(Continue.getBlock());
906  EmitStmt(S.getInc());
907  }
908 
909  BreakContinueStack.pop_back();
910 
911  ConditionScope.ForceCleanup();
912 
913  EmitStopPoint(&S);
914  EmitBranch(CondBlock);
915 
916  ForScope.ForceCleanup();
917 
918  LoopStack.pop();
919 
920  // Emit the fall-through block.
921  EmitBlock(LoopExit.getBlock(), true);
922 }
923 
924 void
925 CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
926  ArrayRef<const Attr *> ForAttrs) {
927  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
928 
929  LexicalScope ForScope(*this, S.getSourceRange());
930 
931  // Evaluate the first pieces before the loop.
932  if (S.getInit())
933  EmitStmt(S.getInit());
934  EmitStmt(S.getRangeStmt());
935  EmitStmt(S.getBeginStmt());
936  EmitStmt(S.getEndStmt());
937 
938  // Start the loop with a block that tests the condition.
939  // If there's an increment, the continue scope will be overwritten
940  // later.
941  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
942  EmitBlock(CondBlock);
943 
944  const SourceRange &R = S.getSourceRange();
945  LoopStack.push(CondBlock, CGM.getContext(), ForAttrs,
946  SourceLocToDebugLoc(R.getBegin()),
947  SourceLocToDebugLoc(R.getEnd()));
948 
949  // If there are any cleanups between here and the loop-exit scope,
950  // create a block to stage a loop exit along.
951  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
952  if (ForScope.requiresCleanups())
953  ExitBlock = createBasicBlock("for.cond.cleanup");
954 
955  // The loop body, consisting of the specified body and the loop variable.
956  llvm::BasicBlock *ForBody = createBasicBlock("for.body");
957 
958  // The body is executed if the expression, contextually converted
959  // to bool, is true.
960  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
961  Builder.CreateCondBr(
962  BoolCondVal, ForBody, ExitBlock,
963  createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
964 
965  if (ExitBlock != LoopExit.getBlock()) {
966  EmitBlock(ExitBlock);
967  EmitBranchThroughCleanup(LoopExit);
968  }
969 
970  EmitBlock(ForBody);
971  incrementProfileCounter(&S);
972 
973  // Create a block for the increment. In case of a 'continue', we jump there.
974  JumpDest Continue = getJumpDestInCurrentScope("for.inc");
975 
976  // Store the blocks to use for break and continue.
977  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
978 
979  {
980  // Create a separate cleanup scope for the loop variable and body.
981  LexicalScope BodyScope(*this, S.getSourceRange());
982  EmitStmt(S.getLoopVarStmt());
983  EmitStmt(S.getBody());
984  }
985 
986  EmitStopPoint(&S);
987  // If there is an increment, emit it next.
988  EmitBlock(Continue.getBlock());
989  EmitStmt(S.getInc());
990 
991  BreakContinueStack.pop_back();
992 
993  EmitBranch(CondBlock);
994 
995  ForScope.ForceCleanup();
996 
997  LoopStack.pop();
998 
999  // Emit the fall-through block.
1000  EmitBlock(LoopExit.getBlock(), true);
1001 }
1002 
1003 void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
1004  if (RV.isScalar()) {
1005  Builder.CreateStore(RV.getScalarVal(), ReturnValue);
1006  } else if (RV.isAggregate()) {
1007  LValue Dest = MakeAddrLValue(ReturnValue, Ty);
1008  LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
1009  EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
1010  } else {
1011  EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
1012  /*init*/ true);
1013  }
1014  EmitBranchThroughCleanup(ReturnBlock);
1015 }
1016 
1017 /// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
1018 /// if the function returns void, or may be missing one if the function returns
1019 /// non-void. Fun stuff :).
1020 void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
1021  if (requiresReturnValueCheck()) {
1022  llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
1023  auto *SLocPtr =
1024  new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
1025  llvm::GlobalVariable::PrivateLinkage, SLoc);
1026  SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1027  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
1028  assert(ReturnLocation.isValid() && "No valid return location");
1029  Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
1030  ReturnLocation);
1031  }
1032 
1033  // Returning from an outlined SEH helper is UB, and we already warn on it.
1034  if (IsOutlinedSEHHelper) {
1035  Builder.CreateUnreachable();
1036  Builder.ClearInsertionPoint();
1037  }
1038 
1039  // Emit the result value, even if unused, to evaluate the side effects.
1040  const Expr *RV = S.getRetValue();
1041 
1042  // Treat block literals in a return expression as if they appeared
1043  // in their own scope. This permits a small, easily-implemented
1044  // exception to our over-conservative rules about not jumping to
1045  // statements following block literals with non-trivial cleanups.
1046  RunCleanupsScope cleanupScope(*this);
1047  if (const FullExpr *fe = dyn_cast_or_null<FullExpr>(RV)) {
1048  enterFullExpression(fe);
1049  RV = fe->getSubExpr();
1050  }
1051 
1052  // FIXME: Clean this up by using an LValue for ReturnTemp,
1053  // EmitStoreThroughLValue, and EmitAnyExpr.
1054  if (getLangOpts().ElideConstructors &&
1055  S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
1056  // Apply the named return value optimization for this return statement,
1057  // which means doing nothing: the appropriate result has already been
1058  // constructed into the NRVO variable.
1059 
1060  // If there is an NRVO flag for this variable, set it to 1 to indicate
1061  // that the cleanup code should not destroy the variable.
1062  if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
1063  Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
1064  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
1065  // Make sure not to return anything, but evaluate the expression
1066  // for side effects.
1067  if (RV)
1068  EmitAnyExpr(RV);
1069  } else if (!RV) {
1070  // Do nothing (return value is left uninitialized)
1071  } else if (FnRetTy->isReferenceType()) {
1072  // If this function returns a reference, take the address of the expression
1073  // rather than the value.
1074  RValue Result = EmitReferenceBindingToExpr(RV);
1075  Builder.CreateStore(Result.getScalarVal(), ReturnValue);
1076  } else {
1077  switch (getEvaluationKind(RV->getType())) {
1078  case TEK_Scalar:
1079  Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
1080  break;
1081  case TEK_Complex:
1082  EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
1083  /*isInit*/ true);
1084  break;
1085  case TEK_Aggregate:
1086  EmitAggExpr(RV, AggValueSlot::forAddr(
1087  ReturnValue, Qualifiers(),
1088  AggValueSlot::IsDestructed,
1089  AggValueSlot::DoesNotNeedGCBarriers,
1090  AggValueSlot::IsNotAliased,
1091  getOverlapForReturnValue()));
1092  break;
1093  }
1094  }
1095 
1096  ++NumReturnExprs;
1097  if (!RV || RV->isEvaluatable(getContext()))
1098  ++NumSimpleReturnExprs;
1099 
1100  cleanupScope.ForceCleanup();
1101  EmitBranchThroughCleanup(ReturnBlock);
1102 }
1103 
1104 void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
1105  // As long as debug info is modeled with instructions, we have to ensure we
1106  // have a place to insert here and write the stop point here.
1107  if (HaveInsertPoint())
1108  EmitStopPoint(&S);
1109 
1110  for (const auto *I : S.decls())
1111  EmitDecl(*I);
1112 }
1113 
1114 void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
1115  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
1116 
1117  // If this code is reachable then emit a stop point (if generating
1118  // debug info). We have to do this ourselves because we are on the
1119  // "simple" statement path.
1120  if (HaveInsertPoint())
1121  EmitStopPoint(&S);
1122 
1123  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
1124 }
1125 
1126 void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
1127  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1128 
1129  // If this code is reachable then emit a stop point (if generating
1130  // debug info). We have to do this ourselves because we are on the
1131  // "simple" statement path.
1132  if (HaveInsertPoint())
1133  EmitStopPoint(&S);
1134 
1135  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
1136 }
1137 
1138 /// EmitCaseStmtRange - If case statement range is not too big then
1139 /// add multiple cases to switch instruction, one for each value within
1140 /// the range. If range is too big then emit "if" condition check.
1141 void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
1142  assert(S.getRHS() && "Expected RHS value in CaseStmt");
1143 
1144  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1145  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1146 
1147  // Emit the code for this case. We do this first to make sure it is
1148  // properly chained from our predecessor before generating the
1149  // switch machinery to enter this block.
1150  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1151  EmitBlockWithFallThrough(CaseDest, &S);
1152  EmitStmt(S.getSubStmt());
1153 
1154  // If range is empty, do nothing.
1155  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1156  return;
1157 
1158  llvm::APInt Range = RHS - LHS;
1159  // FIXME: parameters such as this should not be hardcoded.
1160  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1161  // Range is small enough to add multiple switch instruction cases.
1162  uint64_t Total = getProfileCount(&S);
1163  unsigned NCases = Range.getZExtValue() + 1;
1164  // We only have one region counter for the entire set of cases here, so we
1165  // need to divide the weights evenly between the generated cases, ensuring
1166  // that the total weight is preserved. E.g., a weight of 5 over three cases
1167  // will be distributed as weights of 2, 2, and 1.
1168  uint64_t Weight = Total / NCases, Rem = Total % NCases;
1169  for (unsigned I = 0; I != NCases; ++I) {
1170  if (SwitchWeights)
1171  SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1172  if (Rem)
1173  Rem--;
1174  SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1175  ++LHS;
1176  }
1177  return;
1178  }
1179 
1180  // The range is too big. Emit "if" condition into a new block,
1181  // making sure to save and restore the current insertion point.
1182  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1183 
1184  // Push this test onto the chain of range checks (which terminates
1185  // in the default basic block). The switch's default will be changed
1186  // to the top of this chain after switch emission is complete.
1187  llvm::BasicBlock *FalseDest = CaseRangeBlock;
1188  CaseRangeBlock = createBasicBlock("sw.caserange");
1189 
1190  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
1191  Builder.SetInsertPoint(CaseRangeBlock);
1192 
1193  // Emit range check.
1194  llvm::Value *Diff =
1195  Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1196  llvm::Value *Cond =
1197  Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1198 
1199  llvm::MDNode *Weights = nullptr;
1200  if (SwitchWeights) {
1201  uint64_t ThisCount = getProfileCount(&S);
1202  uint64_t DefaultCount = (*SwitchWeights)[0];
1203  Weights = createProfileWeights(ThisCount, DefaultCount);
1204 
1205  // Since we're chaining the switch default through each large case range, we
1206  // need to update the weight for the default, i.e., the first case, to include
1207  // this case.
1208  (*SwitchWeights)[0] += ThisCount;
1209  }
1210  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1211 
1212  // Restore the appropriate insertion point.
1213  if (RestoreBB)
1214  Builder.SetInsertPoint(RestoreBB);
1215  else
1216  Builder.ClearInsertionPoint();
1217 }
1218 
1219 void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
1220  // If there is no enclosing switch instance that we're aware of, then this
1221  // case statement and its block can be elided. This situation only happens
1222  // when we've constant-folded the switch, are emitting the constant case,
1223  // and part of the constant case includes another case statement. For
1224  // instance: switch (4) { case 4: do { case 5: } while (1); }
1225  if (!SwitchInsn) {
1226  EmitStmt(S.getSubStmt());
1227  return;
1228  }
1229 
1230  // Handle case ranges.
1231  if (S.getRHS()) {
1232  EmitCaseStmtRange(S);
1233  return;
1234  }
1235 
1236  llvm::ConstantInt *CaseVal =
1237  Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1238 
1239  // If the body of the case is just a 'break', try to not emit an empty block.
1240  // If we're profiling or we're not optimizing, leave the block in for better
1241  // debug and coverage analysis.
1242  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1243  CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1244  isa<BreakStmt>(S.getSubStmt())) {
1245  JumpDest Block = BreakContinueStack.back().BreakBlock;
1246 
1247  // Only do this optimization if there are no cleanups that need emitting.
1248  if (isObviouslyBranchWithoutCleanups(Block)) {
1249  if (SwitchWeights)
1250  SwitchWeights->push_back(getProfileCount(&S));
1251  SwitchInsn->addCase(CaseVal, Block.getBlock());
1252 
1253  // If there was a fallthrough into this case, make sure to redirect it to
1254  // the end of the switch as well.
1255  if (Builder.GetInsertBlock()) {
1256  Builder.CreateBr(Block.getBlock());
1257  Builder.ClearInsertionPoint();
1258  }
1259  return;
1260  }
1261  }
1262 
1263  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1264  EmitBlockWithFallThrough(CaseDest, &S);
1265  if (SwitchWeights)
1266  SwitchWeights->push_back(getProfileCount(&S));
1267  SwitchInsn->addCase(CaseVal, CaseDest);
1268 
1269  // Recursively emitting the statement is acceptable, but is not wonderful for
1270  // code where we have many case statements nested together, i.e.:
1271  // case 1:
1272  // case 2:
1273  // case 3: etc.
1274  // Handling this recursively will create a new block for each case statement
1275  // that falls through to the next case which is IR intensive. It also causes
1276  // deep recursion which can run into stack depth limitations. Handle
1277  // sequential non-range case statements specially.
1278  const CaseStmt *CurCase = &S;
1279  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1280 
1281  // Otherwise, iteratively add consecutive cases to this switch stmt.
1282  while (NextCase && NextCase->getRHS() == nullptr) {
1283  CurCase = NextCase;
1284  llvm::ConstantInt *CaseVal =
1285  Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1286 
1287  if (SwitchWeights)
1288  SwitchWeights->push_back(getProfileCount(NextCase));
1289  if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1290  CaseDest = createBasicBlock("sw.bb");
1291  EmitBlockWithFallThrough(CaseDest, &S);
1292  }
1293 
1294  SwitchInsn->addCase(CaseVal, CaseDest);
1295  NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1296  }
1297 
1298  // Normal default recursion for non-cases.
1299  EmitStmt(CurCase->getSubStmt());
1300 }
1301 
1302 void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
1303  // If there is no enclosing switch instance that we're aware of, then this
1304  // default statement can be elided. This situation only happens when we've
1305  // constant-folded the switch.
1306  if (!SwitchInsn) {
1307  EmitStmt(S.getSubStmt());
1308  return;
1309  }
1310 
1311  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1312  assert(DefaultBlock->empty() &&
1313  "EmitDefaultStmt: Default block already defined?");
1314 
1315  EmitBlockWithFallThrough(DefaultBlock, &S);
1316 
1317  EmitStmt(S.getSubStmt());
1318 }
1319 
1320 /// CollectStatementsForCase - Given the body of a 'switch' statement and a
1321 /// constant value that is being switched on, see if we can dead code eliminate
1322 /// the body of the switch to a simple series of statements to emit. Basically,
1323 /// on a switch (5) we want to find these statements:
1324 /// case 5:
1325 /// printf(...); <--
1326 /// ++i; <--
1327 /// break;
1328 ///
1329 /// and add them to the ResultStmts vector. If it is unsafe to do this
1330 /// transformation (for example, one of the elided statements contains a label
1331 /// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1332 /// should include statements after it (e.g. the printf() line is a substmt of
1333 /// the case) then return CSFC_FallThrough. If we handled it and found a break
1334 /// statement, then return CSFC_Success.
1335 ///
1336 /// If Case is non-null, then we are looking for the specified case, checking
1337 /// that nothing we jump over contains labels. If Case is null, then we found
1338 /// the case and are looking for the break.
1339 ///
1340 /// If the recursive walk actually finds our Case, then we set FoundCase to
1341 /// true.
1342 ///
1343 enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1344 static CSFC_Result CollectStatementsForCase(const Stmt *S,
1345  const SwitchCase *Case,
1346  bool &FoundCase,
1347  SmallVectorImpl<const Stmt*> &ResultStmts) {
1348  // If this is a null statement, just succeed.
1349  if (!S)
1350  return Case ? CSFC_Success : CSFC_FallThrough;
1351 
1352  // If this is the switchcase (case 4: or default) that we're looking for, then
1353  // we're in business. Just add the substatement.
1354  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1355  if (S == Case) {
1356  FoundCase = true;
1357  return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1358  ResultStmts);
1359  }
1360 
1361  // Otherwise, this is some other case or default statement, just ignore it.
1362  return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1363  ResultStmts);
1364  }
1365 
1366  // If we are in the live part of the code and we found our break statement,
1367  // return a success!
1368  if (!Case && isa<BreakStmt>(S))
1369  return CSFC_Success;
1370 
1371  // If this is a compound statement, then it might contain the SwitchCase, the
1372  // break, or neither.
1373  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1374  // Handle this as two cases: we might be looking for the SwitchCase (if so
1375  // the skipped statements must be skippable) or we might already have it.
1376  CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1377  bool StartedInLiveCode = FoundCase;
1378  unsigned StartSize = ResultStmts.size();
1379 
1380  // If we've not found the case yet, scan through looking for it.
1381  if (Case) {
1382  // Keep track of whether we see a skipped declaration. The code could be
1383  // using the declaration even if it is skipped, so we can't optimize out
1384  // the decl if the kept statements might refer to it.
1385  bool HadSkippedDecl = false;
1386 
1387  // If we're looking for the case, just see if we can skip each of the
1388  // substatements.
1389  for (; Case && I != E; ++I) {
1390  HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1391 
1392  switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1393  case CSFC_Failure: return CSFC_Failure;
1394  case CSFC_Success:
1395  // A successful result means that either 1) the statement doesn't
1396  // have the case and is skippable, or 2) it does contain the case value
1397  // and also contains the break to exit the switch. In the latter case,
1398  // we just verify the rest of the statements are elidable.
1399  if (FoundCase) {
1400  // If we found the case and skipped declarations, we can't do the
1401  // optimization.
1402  if (HadSkippedDecl)
1403  return CSFC_Failure;
1404 
1405  for (++I; I != E; ++I)
1406  if (CodeGenFunction::ContainsLabel(*I, true))
1407  return CSFC_Failure;
1408  return CSFC_Success;
1409  }
1410  break;
1411  case CSFC_FallThrough:
1412  // If we have a fallthrough condition, then we must have found the
1413  // case and started to include statements. Consider the rest of the
1414  // statements in the compound statement as candidates for inclusion.
1415  assert(FoundCase && "Didn't find case but returned fallthrough?");
1416  // We recursively found Case, so we're not looking for it anymore.
1417  Case = nullptr;
1418 
1419  // If we found the case and skipped declarations, we can't do the
1420  // optimization.
1421  if (HadSkippedDecl)
1422  return CSFC_Failure;
1423  break;
1424  }
1425  }
1426 
1427  if (!FoundCase)
1428  return CSFC_Success;
1429 
1430  assert(!HadSkippedDecl && "fallthrough after skipping decl");
1431  }
1432 
1433  // If we have statements in our range, then we know that the statements are
1434  // live and need to be added to the set of statements we're tracking.
1435  bool AnyDecls = false;
1436  for (; I != E; ++I) {
1437  AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1438 
1439  switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1440  case CSFC_Failure: return CSFC_Failure;
1441  case CSFC_FallThrough:
1442  // A fallthrough result means that the statement was simple and just
1443  // included in ResultStmt, keep adding them afterwards.
1444  break;
1445  case CSFC_Success:
1446  // A successful result means that we found the break statement and
1447  // stopped statement inclusion. We just ensure that any leftover stmts
1448  // are skippable and return success ourselves.
1449  for (++I; I != E; ++I)
1450  if (CodeGenFunction::ContainsLabel(*I, true))
1451  return CSFC_Failure;
1452  return CSFC_Success;
1453  }
1454  }
1455 
1456  // If we're about to fall out of a scope without hitting a 'break;', we
1457  // can't perform the optimization if there were any decls in that scope
1458  // (we'd lose their end-of-lifetime).
1459  if (AnyDecls) {
1460  // If the entire compound statement was live, there's one more thing we
1461  // can try before giving up: emit the whole thing as a single statement.
1462  // We can do that unless the statement contains a 'break;'.
1463  // FIXME: Such a break must be at the end of a construct within this one.
1464  // We could emit this by just ignoring the BreakStmts entirely.
1465  if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1466  ResultStmts.resize(StartSize);
1467  ResultStmts.push_back(S);
1468  } else {
1469  return CSFC_Failure;
1470  }
1471  }
1472 
1473  return CSFC_FallThrough;
1474  }
1475 
1476  // Okay, this is some other statement that we don't handle explicitly, like a
1477  // for statement or increment etc. If we are skipping over this statement,
1478  // just verify it doesn't have labels, which would make it invalid to elide.
1479  if (Case) {
1480  if (CodeGenFunction::ContainsLabel(S, true))
1481  return CSFC_Failure;
1482  return CSFC_Success;
1483  }
1484 
1485  // Otherwise, we want to include this statement. Everything is cool with that
1486  // so long as it doesn't contain a break out of the switch we're in.
1487  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1488 
1489  // Otherwise, everything is great. Include the statement and tell the caller
1490  // that we fall through and include the next statement as well.
1491  ResultStmts.push_back(S);
1492  return CSFC_FallThrough;
1493 }
1494 
1495 /// FindCaseStatementsForValue - Find the case statement being jumped to and
1496 /// then invoke CollectStatementsForCase to find the list of statements to emit
1497 /// for a switch on constant. See the comment above CollectStatementsForCase
1498 /// for more details.
1499 static bool FindCaseStatementsForValue(const SwitchStmt &S,
1500  const llvm::APSInt &ConstantCondValue,
1501  SmallVectorImpl<const Stmt*> &ResultStmts,
1502  ASTContext &C,
1503  const SwitchCase *&ResultCase) {
1504  // First step, find the switch case that is being branched to. We can do this
1505  // efficiently by scanning the SwitchCase list.
1506  const SwitchCase *Case = S.getSwitchCaseList();
1507  const DefaultStmt *DefaultCase = nullptr;
1508 
1509  for (; Case; Case = Case->getNextSwitchCase()) {
1510  // It's either a default or case. Just remember the default statement in
1511  // case we're not jumping to any numbered cases.
1512  if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1513  DefaultCase = DS;
1514  continue;
1515  }
1516 
1517  // Check to see if this case is the one we're looking for.
1518  const CaseStmt *CS = cast<CaseStmt>(Case);
1519  // Don't handle case ranges yet.
1520  if (CS->getRHS()) return false;
1521 
1522  // If we found our case, remember it as 'case'.
1523  if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1524  break;
1525  }
1526 
1527  // If we didn't find a matching case, we use a default if it exists, or we
1528  // elide the whole switch body!
1529  if (!Case) {
1530  // It is safe to elide the body of the switch if it doesn't contain labels
1531  // etc. If it is safe, return successfully with an empty ResultStmts list.
1532  if (!DefaultCase)
1533  return !CodeGenFunction::ContainsLabel(&S);
1534  Case = DefaultCase;
1535  }
1536 
1537  // Ok, we know which case is being jumped to, try to collect all the
1538  // statements that follow it. This can fail for a variety of reasons. Also,
1539  // check to see that the recursive walk actually found our case statement.
1540  // Insane cases like this can fail to find it in the recursive walk since we
1541  // don't handle every stmt kind:
1542  // switch (4) {
1543  // while (1) {
1544  // case 4: ...
1545  bool FoundCase = false;
1546  ResultCase = Case;
1547  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1548  ResultStmts) != CSFC_Failure &&
1549  FoundCase;
1550 }
1551 
1552 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1553  // Handle nested switch statements.
1554  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1555  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
1556  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1557 
1558  // See if we can constant fold the condition of the switch and therefore only
1559  // emit the live case statement (if any) of the switch.
1560  llvm::APSInt ConstantCondValue;
1561  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1562  SmallVector<const Stmt*, 4> CaseStmts;
1563  const SwitchCase *Case = nullptr;
1564  if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1565  getContext(), Case)) {
1566  if (Case)
1568  RunCleanupsScope ExecutedScope(*this);
1569 
1570  if (S.getInit())
1571  EmitStmt(S.getInit());
1572 
1573  // Emit the condition variable if needed inside the entire cleanup scope
1574  // used by this special case for constant folded switches.
1575  if (S.getConditionVariable())
1576  EmitDecl(*S.getConditionVariable());
1577 
1578  // At this point, we are no longer "within" a switch instance, so
1579  // we can temporarily enforce this to ensure that any embedded case
1580  // statements are not emitted.
1581  SwitchInsn = nullptr;
1582 
1583  // Okay, we can dead code eliminate everything except this case. Emit the
1584  // specified series of statements and we're good.
1585  for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1586  EmitStmt(CaseStmts[i]);
1587  incrementProfileCounter(&S);
1588 
1589  // Now we want to restore the saved switch instance so that nested
1590  // switches continue to function properly
1591  SwitchInsn = SavedSwitchInsn;
1592 
1593  return;
1594  }
1595  }
1596 
1597  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1598 
1599  RunCleanupsScope ConditionScope(*this);
1600 
1601  if (S.getInit())
1602  EmitStmt(S.getInit());
1603 
1604  if (S.getConditionVariable())
1605  EmitDecl(*S.getConditionVariable());
1606  llvm::Value *CondV = EmitScalarExpr(S.getCond());
1607 
1608  // Create basic block to hold stuff that comes after switch
1609  // statement. We also need to create a default block now so that
1610  // explicit case ranges tests can have a place to jump to on
1611  // failure.
1612  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1613  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1614  if (PGO.haveRegionCounts()) {
1615  // Walk the SwitchCase list to find how many there are.
1616  uint64_t DefaultCount = 0;
1617  unsigned NumCases = 0;
1618  for (const SwitchCase *Case = S.getSwitchCaseList();
1619  Case;
1620  Case = Case->getNextSwitchCase()) {
1621  if (isa<DefaultStmt>(Case))
1622  DefaultCount = getProfileCount(Case);
1623  NumCases += 1;
1624  }
1625  SwitchWeights = new SmallVector<uint64_t, 16>();
1626  SwitchWeights->reserve(NumCases);
1627  // The default needs to be first. We store the edge count, so we already
1628  // know the right weight.
1629  SwitchWeights->push_back(DefaultCount);
1630  }
1631  CaseRangeBlock = DefaultBlock;
1632 
1633  // Clear the insertion point to indicate we are in unreachable code.
1634  Builder.ClearInsertionPoint();
1635 
1636  // All break statements jump to NextBlock. If BreakContinueStack is non-empty
1637  // then reuse last ContinueBlock.
1638  JumpDest OuterContinue;
1639  if (!BreakContinueStack.empty())
1640  OuterContinue = BreakContinueStack.back().ContinueBlock;
1641 
1642  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
1643 
1644  // Emit switch body.
1645  EmitStmt(S.getBody());
1646 
1647  BreakContinueStack.pop_back();
1648 
1649  // Update the default block in case explicit case range tests have
1650  // been chained on top.
1651  SwitchInsn->setDefaultDest(CaseRangeBlock);
1652 
1653  // If a default was never emitted:
1654  if (!DefaultBlock->getParent()) {
1655  // If we have cleanups, emit the default block so that there's a
1656  // place to jump through the cleanups from.
1657  if (ConditionScope.requiresCleanups()) {
1658  EmitBlock(DefaultBlock);
1659 
1660  // Otherwise, just forward the default block to the switch end.
1661  } else {
1662  DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
1663  delete DefaultBlock;
1664  }
1665  }
1666 
1667  ConditionScope.ForceCleanup();
1668 
1669  // Emit continuation.
1670  EmitBlock(SwitchExit.getBlock(), true);
1671  incrementProfileCounter(&S);
1672 
1673  // If the switch has a condition wrapped by __builtin_unpredictable,
1674  // create metadata that specifies that the switch is unpredictable.
1675  // Don't bother if not optimizing because that metadata would not be used.
1676  auto *Call = dyn_cast<CallExpr>(S.getCond());
1677  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1678  auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1679  if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1680  llvm::MDBuilder MDHelper(getLLVMContext());
1681  SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
1682  MDHelper.createUnpredictable());
1683  }
1684  }
1685 
1686  if (SwitchWeights) {
1687  assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
1688  "switch weights do not match switch cases");
1689  // If there's only one jump destination there's no sense weighting it.
1690  if (SwitchWeights->size() > 1)
1691  SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
1692  createProfileWeights(*SwitchWeights));
1693  delete SwitchWeights;
1694  }
1695  SwitchInsn = SavedSwitchInsn;
1696  SwitchWeights = SavedSwitchWeights;
1697  CaseRangeBlock = SavedCRBlock;
1698 }
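For reference, a minimal sketch (not from this file) of source that reaches the __builtin_unpredictable handling above; when optimizing, the emitted switch instruction is annotated with !unpredictable metadata:

// Sketch: wrapping the controlling expression in __builtin_unpredictable
// (a Clang builtin) marks the resulting switch as hard to predict.
long dispatch(long x) {
  switch (__builtin_unpredictable(x)) {
  case 0:  return 1;
  case 1:  return 2;
  default: return 0;
  }
}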
1699 
1700 static std::string
1701 SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
1702  SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
1703  std::string Result;
1704 
1705  while (*Constraint) {
1706  switch (*Constraint) {
1707  default:
1708  Result += Target.convertConstraint(Constraint);
1709  break;
1710  // Ignore these
1711  case '*':
1712  case '?':
1713  case '!':
1714  case '=': // Will see this and the following in mult-alt constraints.
1715  case '+':
1716  break;
1717  case '#': // Ignore the rest of the constraint alternative.
1718  while (Constraint[1] && Constraint[1] != ',')
1719  Constraint++;
1720  break;
1721  case '&':
1722  case '%':
1723  Result += *Constraint;
1724  while (Constraint[1] && Constraint[1] == *Constraint)
1725  Constraint++;
1726  break;
1727  case ',':
1728  Result += "|";
1729  break;
1730  case 'g':
1731  Result += "imr";
1732  break;
1733  case '[': {
1734  assert(OutCons &&
1735  "Must pass output names to constraints with a symbolic name");
1736  unsigned Index;
1737  bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
1738  assert(result && "Could not resolve symbolic name"); (void)result;
1739  Result += llvm::utostr(Index);
1740  break;
1741  }
1742  }
1743 
1744  Constraint++;
1745  }
1746 
1747  return Result;
1748 }
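As a hedged example of what this translation does (target and syntax are assumptions, x86 AT&T assumed): a GCC-style 'g' constraint is widened to "imr", and a ',' separating constraint alternatives becomes '|' in the LLVM constraint string, per the cases above.

// Hypothetical input exercising SimplifyConstraint: the "g" constraint on
// 'x' is simplified to "imr" before being handed to LLVM.
int copy_value(int x) {
  int r;
  __asm__("movl %1, %0" : "=r"(r) : "g"(x));
  return r;
}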
1749 
1750 /// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
1751 /// as using a particular register add that as a constraint that will be used
1752 /// in this asm stmt.
1753 static std::string
1754 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
1755  const TargetInfo &Target, CodeGenModule &CGM,
1756  const AsmStmt &Stmt, const bool EarlyClobber) {
1757  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
1758  if (!AsmDeclRef)
1759  return Constraint;
1760  const ValueDecl &Value = *AsmDeclRef->getDecl();
1761  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
1762  if (!Variable)
1763  return Constraint;
1764  if (Variable->getStorageClass() != SC_Register)
1765  return Constraint;
1766  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
1767  if (!Attr)
1768  return Constraint;
1769  StringRef Register = Attr->getLabel();
1770  assert(Target.isValidGCCRegisterName(Register));
1771  // We're using validateOutputConstraint here because we only care if
1772  // this is a register constraint.
1773  TargetInfo::ConstraintInfo Info(Constraint, "");
1774  if (Target.validateOutputConstraint(Info) &&
1775  !Info.allowsRegister()) {
1776  CGM.ErrorUnsupported(&Stmt, "__asm__");
1777  return Constraint;
1778  }
1779  // Canonicalize the register here before returning it.
1780  Register = Target.getNormalizedGCCRegisterName(Register);
1781  return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
1782 }
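A hedged example (x86 assumed) of the register-variable case this helper handles: a local declared with an asm register label causes the plain register constraint to be rewritten to an explicit "{eax}" (or "&{eax}" for an earlyclobber output).

// Hypothetical input: 'value' is pinned to %eax via an asm label, so the
// "=r" output constraint is narrowed to the specific register "{eax}".
int read_eax() {
  register int value __asm__("eax");
  __asm__("movl $42, %0" : "=r"(value));
  return value;
}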
1783 
1784 llvm::Value*
1785 CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
1786  LValue InputValue, QualType InputType,
1787  std::string &ConstraintStr,
1788  SourceLocation Loc) {
1789  llvm::Value *Arg;
1790  if (Info.allowsRegister() || !Info.allowsMemory()) {
1791  if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
1792  Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
1793  } else {
1794  llvm::Type *Ty = ConvertType(InputType);
1795  uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
1796  if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
1797  Ty = llvm::IntegerType::get(getLLVMContext(), Size);
1798  Ty = llvm::PointerType::getUnqual(Ty);
1799 
1800  Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
1801  Ty));
1802  } else {
1803  Arg = InputValue.getPointer();
1804  ConstraintStr += '*';
1805  }
1806  }
1807  } else {
1808  Arg = InputValue.getPointer();
1809  ConstraintStr += '*';
1810  }
1811 
1812  return Arg;
1813 }
1814 
1815 llvm::Value* CodeGenFunction::EmitAsmInput(
1816  const TargetInfo::ConstraintInfo &Info,
1817  const Expr *InputExpr,
1818  std::string &ConstraintStr) {
1819  // If this can't be a register or memory, i.e., has to be a constant
1820  // (immediate or symbolic), try to emit it as such.
1821  if (!Info.allowsRegister() && !Info.allowsMemory()) {
1822  if (Info.requiresImmediateConstant()) {
1823  llvm::APSInt AsmConst = InputExpr->EvaluateKnownConstInt(getContext());
1824  return llvm::ConstantInt::get(getLLVMContext(), AsmConst);
1825  }
1826 
1827  Expr::EvalResult Result;
1828  if (InputExpr->EvaluateAsInt(Result, getContext()))
1829  return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
1830  }
1831 
1832  if (Info.allowsRegister() || !Info.allowsMemory())
1833  if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
1834  return EmitScalarExpr(InputExpr);
1835  if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
1836  return EmitScalarExpr(InputExpr);
1837  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
1838  LValue Dest = EmitLValue(InputExpr);
1839  return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
1840  InputExpr->getExprLoc());
1841 }
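For context, a hedged example (x86 assumed; the constraint letter is an assumption about the target) of an operand that allows neither a register nor memory, which is why the code above folds such inputs to constants:

// Sketch: "N" is an x86 constraint requiring an 8-bit unsigned immediate,
// so the port number must evaluate to a compile-time constant.
void write_debug_port(unsigned char v) {
  __asm__ volatile("outb %0, %1" : : "a"(v), "N"(0x80));
}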
1842 
1843 /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
1844 /// asm call instruction. The !srcloc MDNode contains a list of constant
1845 /// integers which are the source locations of the start of each line in the
1846 /// asm.
1847 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
1848  CodeGenFunction &CGF) {
1849  SmallVector<llvm::Metadata *, 8> Locs;
1850  // Add the location of the first line to the MDNode.
1851  Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
1852  CGF.Int32Ty, Str->getBeginLoc().getRawEncoding())));
1853  StringRef StrVal = Str->getString();
1854  if (!StrVal.empty()) {
1855  const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
1856  const LangOptions &LangOpts = CGF.CGM.getLangOpts();
1857  unsigned StartToken = 0;
1858  unsigned ByteOffset = 0;
1859 
1860  // Add the location of the start of each subsequent line of the asm to the
1861  // MDNode.
1862  for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
1863  if (StrVal[i] != '\n') continue;
1864  SourceLocation LineLoc = Str->getLocationOfByte(
1865  i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
1866  Locs.push_back(llvm::ConstantAsMetadata::get(
1867  llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding())));
1868  }
1869  }
1870 
1871  return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
1872 }
1873 
1874 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
1875  // Assemble the final asm string.
1876  std::string AsmString = S.generateAsmString(getContext());
1877 
1878  // Get all the output and input constraints together.
1879  SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
1880  SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
1881 
1882  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
1883  StringRef Name;
1884  if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
1885  Name = GAS->getOutputName(i);
1886  TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
1887  bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
1888  assert(IsValid && "Failed to parse output constraint");
1889  OutputConstraintInfos.push_back(Info);
1890  }
1891 
1892  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
1893  StringRef Name;
1894  if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
1895  Name = GAS->getInputName(i);
1896  TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
1897  bool IsValid =
1898  getTarget().validateInputConstraint(OutputConstraintInfos, Info);
1899  assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
1900  InputConstraintInfos.push_back(Info);
1901  }
1902 
1903  std::string Constraints;
1904 
1905  std::vector<LValue> ResultRegDests;
1906  std::vector<QualType> ResultRegQualTys;
1907  std::vector<llvm::Type *> ResultRegTypes;
1908  std::vector<llvm::Type *> ResultTruncRegTypes;
1909  std::vector<llvm::Type *> ArgTypes;
1910  std::vector<llvm::Value*> Args;
1911 
1912  // Keep track of inout constraints.
1913  std::string InOutConstraints;
1914  std::vector<llvm::Value*> InOutArgs;
1915  std::vector<llvm::Type*> InOutArgTypes;
1916 
1917  // An inline asm can be marked readonly if it meets the following conditions:
1918  // - it doesn't have any sideeffects
1919  // - it doesn't clobber memory
1920  // - it doesn't return a value by-reference
1921  // It can be marked readnone if it doesn't have any input memory constraints
1922  // in addition to meeting the conditions listed above.
1923  bool ReadOnly = true, ReadNone = true;
1924 
1925  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
1926  TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
1927 
1928  // Simplify the output constraint.
1929  std::string OutputConstraint(S.getOutputConstraint(i));
1930  OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
1931  getTarget(), &OutputConstraintInfos);
1932 
1933  const Expr *OutExpr = S.getOutputExpr(i);
1934  OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
1935 
1936  OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
1937  getTarget(), CGM, S,
1938  Info.earlyClobber());
1939 
1940  LValue Dest = EmitLValue(OutExpr);
1941  if (!Constraints.empty())
1942  Constraints += ',';
1943 
1944  // If this is a register output, then make the inline asm return it
1945  // by-value. If this is a memory result, return the value by-reference.
1946  if (!Info.allowsMemory() && hasScalarEvaluationKind(OutExpr->getType())) {
1947  Constraints += "=" + OutputConstraint;
1948  ResultRegQualTys.push_back(OutExpr->getType());
1949  ResultRegDests.push_back(Dest);
1950  ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
1951  ResultTruncRegTypes.push_back(ResultRegTypes.back());
1952 
1953  // If this output is tied to an input, and if the input is larger, then
1954  // we need to set the actual result type of the inline asm node to be the
1955  // same as the input type.
1956  if (Info.hasMatchingInput()) {
1957  unsigned InputNo;
1958  for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
1959  TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
1960  if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
1961  break;
1962  }
1963  assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
1964 
1965  QualType InputTy = S.getInputExpr(InputNo)->getType();
1966  QualType OutputType = OutExpr->getType();
1967 
1968  uint64_t InputSize = getContext().getTypeSize(InputTy);
1969  if (getContext().getTypeSize(OutputType) < InputSize) {
1970  // Form the asm to return the value as a larger integer or fp type.
1971  ResultRegTypes.back() = ConvertType(InputTy);
1972  }
1973  }
1974  if (llvm::Type* AdjTy =
1975  getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
1976  ResultRegTypes.back()))
1977  ResultRegTypes.back() = AdjTy;
1978  else {
1979  CGM.getDiags().Report(S.getAsmLoc(),
1980  diag::err_asm_invalid_type_in_input)
1981  << OutExpr->getType() << OutputConstraint;
1982  }
1983 
1984  // Update largest vector width for any vector types.
1985  if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
1986  LargestVectorWidth = std::max(LargestVectorWidth,
1987  VT->getPrimitiveSizeInBits());
1988  } else {
1989  ArgTypes.push_back(Dest.getAddress().getType());
1990  Args.push_back(Dest.getPointer());
1991  Constraints += "=*";
1992  Constraints += OutputConstraint;
1993  ReadOnly = ReadNone = false;
1994  }
1995 
1996  if (Info.isReadWrite()) {
1997  InOutConstraints += ',';
1998 
1999  const Expr *InputExpr = S.getOutputExpr(i);
2000  llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
2001  InOutConstraints,
2002  InputExpr->getExprLoc());
2003 
2004  if (llvm::Type* AdjTy =
2005  getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2006  Arg->getType()))
2007  Arg = Builder.CreateBitCast(Arg, AdjTy);
2008 
2009  // Update largest vector width for any vector types.
2010  if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2011  LargestVectorWidth = std::max(LargestVectorWidth,
2012  VT->getPrimitiveSizeInBits());
2013  if (Info.allowsRegister())
2014  InOutConstraints += llvm::utostr(i);
2015  else
2016  InOutConstraints += OutputConstraint;
2017 
2018  InOutArgTypes.push_back(Arg->getType());
2019  InOutArgs.push_back(Arg);
2020  }
2021  }
2022 
2023  // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2024  // to the return value slot. Only do this when returning in registers.
2025  if (isa<MSAsmStmt>(&S)) {
2026  const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2027  if (RetAI.isDirect() || RetAI.isExtend()) {
2028  // Make a fake lvalue for the return value slot.
2029  LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
2030  CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2031  *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2032  ResultRegDests, AsmString, S.getNumOutputs());
2033  SawAsmBlock = true;
2034  }
2035  }
2036 
2037  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2038  const Expr *InputExpr = S.getInputExpr(i);
2039 
2040  TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2041 
2042  if (Info.allowsMemory())
2043  ReadNone = false;
2044 
2045  if (!Constraints.empty())
2046  Constraints += ',';
2047 
2048  // Simplify the input constraint.
2049  std::string InputConstraint(S.getInputConstraint(i));
2050  InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2051  &OutputConstraintInfos);
2052 
2053  InputConstraint = AddVariableConstraints(
2054  InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2055  getTarget(), CGM, S, false /* No EarlyClobber */);
2056 
2057  llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
2058 
2059  // If this input argument is tied to a larger output result, extend the
2060  // input to be the same size as the output. The LLVM backend wants to see
2061  // the input and output of a matching constraint be the same size. Note
2062  // that GCC does not define what the top bits are here. We use zext because
2063  // that is usually cheaper, but LLVM IR should really get an anyext someday.
2064  if (Info.hasTiedOperand()) {
2065  unsigned Output = Info.getTiedOperand();
2066  QualType OutputType = S.getOutputExpr(Output)->getType();
2067  QualType InputTy = InputExpr->getType();
2068 
2069  if (getContext().getTypeSize(OutputType) >
2070  getContext().getTypeSize(InputTy)) {
2071  // Use ptrtoint as appropriate so that we can do our extension.
2072  if (isa<llvm::PointerType>(Arg->getType()))
2073  Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2074  llvm::Type *OutputTy = ConvertType(OutputType);
2075  if (isa<llvm::IntegerType>(OutputTy))
2076  Arg = Builder.CreateZExt(Arg, OutputTy);
2077  else if (isa<llvm::PointerType>(OutputTy))
2078  Arg = Builder.CreateZExt(Arg, IntPtrTy);
2079  else {
2080  assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
2081  Arg = Builder.CreateFPExt(Arg, OutputTy);
2082  }
2083  }
2084  }
2085  if (llvm::Type* AdjTy =
2086  getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
2087  Arg->getType()))
2088  Arg = Builder.CreateBitCast(Arg, AdjTy);
2089  else
2090  CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2091  << InputExpr->getType() << InputConstraint;
2092 
2093  // Update largest vector width for any vector types.
2094  if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2095  LargestVectorWidth = std::max(LargestVectorWidth,
2096  VT->getPrimitiveSizeInBits());
2097 
2098  ArgTypes.push_back(Arg->getType());
2099  Args.push_back(Arg);
2100  Constraints += InputConstraint;
2101  }
2102 
2103  // Append the "input" part of inout constraints last.
2104  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2105  ArgTypes.push_back(InOutArgTypes[i]);
2106  Args.push_back(InOutArgs[i]);
2107  }
2108  Constraints += InOutConstraints;
2109 
2110  // Clobbers
2111  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2112  StringRef Clobber = S.getClobber(i);
2113 
2114  if (Clobber == "memory")
2115  ReadOnly = ReadNone = false;
2116  else if (Clobber != "cc")
2117  Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2118 
2119  if (!Constraints.empty())
2120  Constraints += ',';
2121 
2122  Constraints += "~{";
2123  Constraints += Clobber;
2124  Constraints += '}';
2125  }
2126 
2127  // Add machine specific clobbers
2128  std::string MachineClobbers = getTarget().getClobbers();
2129  if (!MachineClobbers.empty()) {
2130  if (!Constraints.empty())
2131  Constraints += ',';
2132  Constraints += MachineClobbers;
2133  }
2134 
2135  llvm::Type *ResultType;
2136  if (ResultRegTypes.empty())
2137  ResultType = VoidTy;
2138  else if (ResultRegTypes.size() == 1)
2139  ResultType = ResultRegTypes[0];
2140  else
2141  ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
2142 
2143  llvm::FunctionType *FTy =
2144  llvm::FunctionType::get(ResultType, ArgTypes, false);
2145 
2146  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
2147  llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
2148  llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
2149  llvm::InlineAsm *IA =
2150  llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
2151  /* IsAlignStack */ false, AsmDialect);
2152  llvm::CallInst *Result =
2153  Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
2154  Result->addAttribute(llvm::AttributeList::FunctionIndex,
2155  llvm::Attribute::NoUnwind);
2156 
2157  // Attach readnone and readonly attributes.
2158  if (!HasSideEffect) {
2159  if (ReadNone)
2160  Result->addAttribute(llvm::AttributeList::FunctionIndex,
2161  llvm::Attribute::ReadNone);
2162  else if (ReadOnly)
2163  Result->addAttribute(llvm::AttributeList::FunctionIndex,
2164  llvm::Attribute::ReadOnly);
2165  }
2166 
2167  // Slap the source location of the inline asm into a !srcloc metadata on the
2168  // call.
2169  if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S)) {
2170  Result->setMetadata("srcloc", getAsmSrcLocInfo(gccAsmStmt->getAsmString(),
2171  *this));
2172  } else {
2173  // At least put the line number on MS inline asm blobs.
2174  auto Loc = llvm::ConstantInt::get(Int32Ty, S.getAsmLoc().getRawEncoding());
2175  Result->setMetadata("srcloc",
2176  llvm::MDNode::get(getLLVMContext(),
2177  llvm::ConstantAsMetadata::get(Loc)));
2178  }
2179 
2180  if (getLangOpts().assumeFunctionsAreConvergent()) {
2181  // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2182  // convergent (meaning, they may call an intrinsically convergent op, such
2183  // as bar.sync, and so can't have certain optimizations applied around
2184  // them).
2185  Result->addAttribute(llvm::AttributeList::FunctionIndex,
2186  llvm::Attribute::Convergent);
2187  }
2188 
2189  // Extract all of the register value results from the asm.
2190  std::vector<llvm::Value*> RegResults;
2191  if (ResultRegTypes.size() == 1) {
2192  RegResults.push_back(Result);
2193  } else {
2194  for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2195  llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
2196  RegResults.push_back(Tmp);
2197  }
2198  }
2199 
2200  assert(RegResults.size() == ResultRegTypes.size());
2201  assert(RegResults.size() == ResultTruncRegTypes.size());
2202  assert(RegResults.size() == ResultRegDests.size());
2203  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2204  llvm::Value *Tmp = RegResults[i];
2205 
2206  // If the result type of the LLVM IR asm doesn't match the result type of
2207  // the expression, do the conversion.
2208  if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
2209  llvm::Type *TruncTy = ResultTruncRegTypes[i];
2210 
2211  // Truncate the integer result to the right size, note that TruncTy can be
2212  // a pointer.
2213  if (TruncTy->isFloatingPointTy())
2214  Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2215  else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2216  uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2217  Tmp = Builder.CreateTrunc(Tmp,
2218  llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
2219  Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2220  } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2221  uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2222  Tmp = Builder.CreatePtrToInt(Tmp,
2223  llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
2224  Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2225  } else if (TruncTy->isIntegerTy()) {
2226  Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2227  } else if (TruncTy->isVectorTy()) {
2228  Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2229  }
2230  }
2231 
2232  EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
2233  }
2234 }
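To tie the pieces together, a hedged end-to-end example (x86 AT&T syntax assumed) with a read-write operand, a plain input, and a clobber, exercising the output, inout, and clobber handling above:

// Hypothetical input: "+r" operands go through the inout path, "r"(b) is a
// plain input, and "cc" contributes a "~{cc}" entry to the constraint
// string assembled by EmitAsmStmt.
int mul_add(int a, int b, int c) {
  int acc = c;
  __asm__("imull %2, %1\n\t"
          "addl  %1, %0"
          : "+r"(acc), "+r"(a)
          : "r"(b)
          : "cc");
  return acc;
}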
2235 
2236 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2237  const RecordDecl *RD = S.getCapturedRecordDecl();
2238  QualType RecordTy = getContext().getRecordType(RD);
2239 
2240  // Initialize the captured struct.
2241  LValue SlotLV =
2242  MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2243 
2244  RecordDecl::field_iterator CurField = RD->field_begin();
2245  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
2246  E = S.capture_init_end();
2247  I != E; ++I, ++CurField) {
2248  LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2249  if (CurField->hasCapturedVLAType()) {
2250  auto VAT = CurField->getCapturedVLAType();
2251  EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
2252  } else {
2253  EmitInitializerForField(*CurField, LV, *I);
2254  }
2255  }
2256 
2257  return SlotLV;
2258 }
2259 
2260 /// Generate an outlined function for the body of a CapturedStmt, store any
2261 /// captured variables into the captured struct, and call the outlined function.
2262 llvm::Function *
2263 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2264  LValue CapStruct = InitCapturedStruct(S);
2265 
2266  // Emit the CapturedDecl
2267  CodeGenFunction CGF(CGM, true);
2268  CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
2269  llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2270  delete CGF.CapturedStmtInfo;
2271 
2272  // Emit call to the helper function.
2273  EmitCallOrInvoke(F, CapStruct.getPointer());
2274 
2275  return F;
2276 }
2277 
2278 Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2279  LValue CapStruct = InitCapturedStruct(S);
2280  return CapStruct.getAddress();
2281 }
2282 
2283 /// Creates the outlined function for a CapturedStmt.
2284 llvm::Function *
2285 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2286  assert(CapturedStmtInfo &&
2287  "CapturedStmtInfo should be set when generating the captured function");
2288  const CapturedDecl *CD = S.getCapturedDecl();
2289  const RecordDecl *RD = S.getCapturedRecordDecl();
2290  SourceLocation Loc = S.getBeginLoc();
2291  assert(CD->hasBody() && "missing CapturedDecl body");
2292 
2293  // Build the argument list.
2294  ASTContext &Ctx = CGM.getContext();
2295  FunctionArgList Args;
2296  Args.append(CD->param_begin(), CD->param_end());
2297 
2298  // Create the function declaration.
2299  const CGFunctionInfo &FuncInfo =
2300  CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
2301  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2302 
2303  llvm::Function *F =
2304  llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2305  CapturedStmtInfo->getHelperName(), &CGM.getModule());
2306  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2307  if (CD->isNothrow())
2308  F->addFnAttr(llvm::Attribute::NoUnwind);
2309 
2310  // Generate the function.
2311  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
2312  CD->getBody()->getBeginLoc());
2313  // Set the context parameter in CapturedStmtInfo.
2314  Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
2315  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2316 
2317  // Initialize variable-length arrays.
2318  LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2319  Ctx.getTagDeclType(RD));
2320  for (auto *FD : RD->fields()) {
2321  if (FD->hasCapturedVLAType()) {
2322  auto *ExprArg =
2323  EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
2324  .getScalarVal();
2325  auto VAT = FD->getCapturedVLAType();
2326  VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2327  }
2328  }
2329 
2330  // If 'this' is captured, load it into CXXThisValue.
2331  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
2332  FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
2333  LValue ThisLValue = EmitLValueForField(Base, FD);
2334  CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
2335  }
2336 
2337  PGO.assignRegionCounters(GlobalDecl(CD), F);
2338  CapturedStmtInfo->EmitBody(*this, CD->getBody());
2339  FinishFunction(CD->getBodyRBrace());
2340 
2341  return F;
2342 }
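CapturedStmt nodes are synthesized by the compiler rather than written directly; as a hedged illustration, an OpenMP construct is one way to produce a captured region whose body is outlined by the machinery above (assuming the file is compiled with -fopenmp):

// Hypothetical source: the parallel-for body becomes a CapturedStmt whose
// captures ('data', 'n', 'f') are packed into the generated capture record
// and passed to the outlined helper function.
void scale(float *data, long n, float f) {
  #pragma omp parallel for
  for (long i = 0; i < n; ++i)
    data[i] *= f;
}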
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:658
bool isAggregate() const
Definition: CGValue.h:53
const llvm::DataLayout & getDataLayout() const
Expr * getInc()
Definition: Stmt.h:2269
void EmitIndirectGotoStmt(const IndirectGotoStmt &S)
Definition: CGStmt.cpp:578
void EmitCoroutineBody(const CoroutineBodyStmt &S)
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S)
Emit the captured statement body.
This represents a GCC inline-assembly statement extension.
Definition: Stmt.h:2674
Stmt * body_back()
Definition: Stmt.h:1278
body_iterator body_end()
Definition: Stmt.h:1275
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S)
unsigned getNumInputs() const
Definition: Stmt.h:2590
SourceLocation getBeginLoc() const
Definition: Stmt.h:2501
unsigned getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it...
A (possibly-)qualified type.
Definition: Type.h:637
capture_init_iterator capture_init_begin()
Retrieve the first initialization argument.
Definition: Stmt.h:3278
void EmitSEHLeaveStmt(const SEHLeaveStmt &S)
llvm::Type * ConvertTypeForMem(QualType T)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
const CodeGenOptions & getCodeGenOpts() const
static CSFC_Result CollectStatementsForCase(const Stmt *S, const SwitchCase *Case, bool &FoundCase, SmallVectorImpl< const Stmt *> &ResultStmts)
Definition: CGStmt.cpp:1344
void EmitGotoStmt(const GotoStmt &S)
Definition: CGStmt.cpp:567
void EmitAttributedStmt(const AttributedStmt &S)
Definition: CGStmt.cpp:563
param_iterator param_begin() const
Retrieve an iterator pointing to the first parameter decl.
Definition: Decl.h:4123
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition: CGExpr.cpp:138
void enterFullExpression(const FullExpr *E)
Expr * getCond()
Definition: Stmt.h:2111
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
void EmitOMPDistributeDirective(const OMPDistributeDirective &S)
void EmitCXXTryStmt(const CXXTryStmt &S)
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer...
Stmt - This represents one statement.
Definition: Stmt.h:65
IfStmt - This represents an if/then/else.
Definition: Stmt.h:1686
SourceLocation getLocationOfByte(unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, const TargetInfo &Target, unsigned *StartToken=nullptr, unsigned *StartTokenByteOffset=nullptr) const
getLocationOfByte - Return a source location that points to the specified byte of this string literal...
Definition: Expr.cpp:1117
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
Definition: EHScopeStack.h:383
bool requiresCleanups() const
Determine whether this scope requires any cleanups.
StorageClass getStorageClass() const
Returns the storage class as written in the source.
Definition: Decl.h:1019
void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S)
unsigned getNumOutputs() const
Definition: Stmt.h:2568
Defines the PrettyStackTraceEntry class, which is used to make crashes give more contextual informati...
bool isNothrow() const
Definition: Decl.cpp:4468
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
Represents an attribute applied to a statement.
Definition: Stmt.h:1632
void EmitOMPOrderedDirective(const OMPOrderedDirective &S)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition: CGExpr.cpp:1911
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
Address GenerateCapturedStmtArgument(const CapturedStmt &S)
Definition: CGStmt.cpp:2278
bool validateInputConstraint(MutableArrayRef< ConstraintInfo > OutputConstraints, ConstraintInfo &info) const
Definition: TargetInfo.cpp:622
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1294
const RecordDecl * getCapturedRecordDecl() const
Retrieve the record declaration for captured variables.
Definition: Stmt.h:3222
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:378
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee, ArrayRef< llvm::Value *> Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
Definition: CGCall.cpp:3782
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference...
Definition: CGExpr.cpp:3982
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant, or if it does but contains a label, return false.
Represents a point when we exit a loop.
Definition: ProgramPoint.h:714
bool empty() const
Determines whether the exception-scopes stack is empty.
Definition: EHScopeStack.h:344
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S)
Stmt * getSubStmt()
Definition: Stmt.h:1554
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
Represents a variable declaration or definition.
Definition: Decl.h:812
void EmitOMPCriticalDirective(const OMPCriticalDirective &S)
const VarDecl * getNRVOCandidate() const
Retrieve the variable that might be used for the named return value optimization. ...
Definition: Stmt.h:2484
uint64_t getProfileCount(const Stmt *S)
Get the profiler&#39;s count for the given statement.
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:53
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope, by being a (possibly-labelled) DeclStmt.
DiagnosticsEngine & getDiags() const
void EmitLabel(const LabelDecl *D)
EmitLabel - Emit the block for the given label.
Definition: CGStmt.cpp:508
Stmt * getThen()
Definition: Stmt.h:1773
void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S)
void SimplifyForwardingBlocks(llvm::BasicBlock *BB)
SimplifyForwardingBlocks - If the given basic block is only a branch to another basic block...
Definition: CGStmt.cpp:420
The collection of all-type qualifiers we support.
Definition: Type.h:140
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void EmitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &S)
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const
isObviouslyBranchWithoutCleanups - Return true if a branch to the specified destination obviously has...
Definition: CGCleanup.cpp:1024
LabelStmt - Represents a label, which has a substatement.
Definition: Stmt.h:1592
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S)
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
Definition: CGStmt.cpp:497
Represents a struct/union/class.
Definition: Decl.h:3592
const TargetInfo & getTarget() const
void EmitOMPSimdDirective(const OMPSimdDirective &S)
Stmt * getBody()
Definition: Stmt.h:2209
void setScopeDepth(EHScopeStack::stable_iterator depth)
Address getAddress() const
Definition: CGValue.h:326
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:154
StringRef getNormalizedGCCRegisterName(StringRef Name, bool ReturnCanonical=false) const
Returns the "normalized" GCC register name.
Definition: TargetInfo.cpp:484
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
Definition: CGExpr.cpp:592
FullExpr - Represents a "full-expression" node.
Definition: Expr.h:883
field_range fields() const
Definition: Decl.h:3783
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.cpp:287
Represents a member of a struct/union/class.
Definition: Decl.h:2578
ImplicitParamDecl * getContextParam() const
Retrieve the parameter containing captured variables.
Definition: Decl.h:4108
void rescopeLabels()
Change the cleanup scope of the labels in this lexical scope to match the scope of the enclosing cont...
Definition: CGStmt.cpp:536
static bool FindCaseStatementsForValue(const SwitchStmt &S, const llvm::APSInt &ConstantCondValue, SmallVectorImpl< const Stmt *> &ResultStmts, ASTContext &C, const SwitchCase *&ResultCase)
FindCaseStatementsForValue - Find the case statement being jumped to and then invoke CollectStatement...
Definition: CGStmt.cpp:1499
bool isReferenceType() const
Definition: Type.h:6310
Stmt *const * const_body_iterator
Definition: Stmt.h:1287
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:513
void EmitForStmt(const ForStmt &S, ArrayRef< const Attr *> Attrs=None)
Definition: CGStmt.cpp:826
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:49
void EmitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &S)
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init)
Definition: CGClass.cpp:666
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition: CGExpr.cpp:193
IndirectGotoStmt - This represents an indirect goto.
Definition: Stmt.h:2338
Stmt * getBody() const override
getBody - If this Decl represents a declaration for a body of code, such as a function or method defi...
Definition: Decl.cpp:4465
static bool hasScalarEvaluationKind(QualType T)
bool hasMatchingInput() const
Return true if this output operand has a matching (tied) input operand.
Definition: TargetInfo.h:826
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition: Stmt.h:3265
void EmitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &S)
ForStmt - This represents a &#39;for (init;cond;inc)&#39; stmt.
Definition: Stmt.h:2236
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:573
void pop()
End the current loop.
Definition: CGLoopInfo.cpp:365
LabelDecl * getDecl() const
Definition: Stmt.h:1609
SourceLocation getLBracLoc() const
Definition: Stmt.h:1332
void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S)
RAII for correct setting/restoring of CapturedStmtInfo.
const Expr * getOutputExpr(unsigned i) const
Definition: Stmt.cpp:384
param_iterator param_end() const
Retrieve an iterator one past the last parameter decl.
Definition: Decl.h:4125
Stmt * getBody()
Definition: Stmt.h:2270
void EmitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &S)
void EmitContinueStmt(const ContinueStmt &S)
Definition: CGStmt.cpp:1126
void EmitOMPTargetDirective(const OMPTargetDirective &S)
bool hasNormalCleanups() const
Determines whether there are any normal cleanups on the stack.
Definition: EHScopeStack.h:349
Stmt * getInit()
Definition: Stmt.h:2249
CXXForRangeStmt - This represents C++0x [stmt.ranged]&#39;s ranged for statement, represented as &#39;for (ra...
Definition: StmtCXX.h:126
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S)
void EmitSwitchStmt(const SwitchStmt &S)
Definition: CGStmt.cpp:1552
If a crash happens while one of these objects are live, the message is printed out along with the spe...
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:40
field_iterator field_begin() const
Definition: Decl.cpp:4144
CaseStmt - Represent a case statement.
Definition: Stmt.h:1393
Expr * getCond()
Definition: Stmt.h:2268
void EmitOMPParallelDirective(const OMPParallelDirective &S)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition: CGExpr.cpp:181
void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn)
Assign counters to regions and configure them for PGO of a given function.
Definition: CodeGenPGO.cpp:759
llvm::Function * EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K)
Generate an outlined function for the body of a CapturedStmt, store any captured variables into the c...
Definition: CGStmt.cpp:2263
void ForceCleanup(std::initializer_list< llvm::Value **> ValuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
void EmitDefaultStmt(const DefaultStmt &S)
Definition: CGStmt.cpp:1302
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
void EmitCaseStmtRange(const CaseStmt &S)
EmitCaseStmtRange - If case statement range is not too big then add multiple cases to switch instruct...
Definition: CGStmt.cpp:1141
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler&#39;s counter for the given statement by StepV.
uint64_t getCurrentProfileCount()
Get the profiler&#39;s current count.
Represents the body of a CapturedStmt, and serves as its DeclContext.
Definition: Decl.h:4043
VarDecl * getConditionVariable() const
Retrieve the variable declared in this "for" statement, if any.
Definition: Stmt.cpp:901
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:70
Stmt * getBody()
Definition: Stmt.h:1957
virtual bool isValidGCCRegisterName(StringRef Name) const
Returns whether the passed in string is a valid register name according to GCC.
Definition: TargetInfo.cpp:439
Stmt * getInit()
Definition: Stmt.h:1829
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
Definition: CGExpr.cpp:3854
bool isValid() const
Definition: Address.h:35
StringRef getString() const
Definition: Expr.h:1655
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1240
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S)
const TargetCodeGenInfo & getTargetCodeGenInfo()
Expr * IgnoreParenNoopCasts(ASTContext &Ctx) LLVM_READONLY
IgnoreParenNoopCasts - Ignore parentheses and casts that do not change the value (including ptr->int ...
Definition: Expr.cpp:2725
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
Definition: CGExpr.cpp:222
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:38
bool isConstexpr() const
Definition: Stmt.h:1859
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx, SmallVectorImpl< PartialDiagnosticAt > *Diag=nullptr) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
std::string generateAsmString(const ASTContext &C) const
Assemble final IR asm string.
Definition: Stmt.cpp:368
Exposes information about the current target.
Definition: TargetInfo.h:53
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
EHScopeStack::stable_iterator getScopeDepth() const
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition: Decl.h:636
This represents one expression.
Definition: Expr.h:106
DeclStmt * getEndStmt()
Definition: StmtCXX.h:157
static Address invalid()
Definition: Address.h:34
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited...
void EmitCaseStmt(const CaseStmt &S)
Definition: CGStmt.cpp:1219
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:65
void EmitOMPTeamsDirective(const OMPTeamsDirective &S)
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition: CGStmt.cpp:369
LabelDecl * getConstantTarget()
getConstantTarget - Returns the fixed target of this indirect goto, if one exists.
Definition: Stmt.cpp:1042
Stmt * getBody()
Definition: Stmt.h:2123
void EmitSEHTryStmt(const SEHTryStmt &S)
Expr * getRHS()
Definition: Stmt.h:1494
static llvm::MDNode * getAsmSrcLocInfo(const StringLiteral *Str, CodeGenFunction &CGF)
getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline asm call instruction...
Definition: CGStmt.cpp:1847
llvm::LLVMContext & getLLVMContext()
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
Definition: CGCall.cpp:3712
llvm::BasicBlock * GetIndirectGotoBlock()
QualType getType() const
Definition: Expr.h:128
void EmitOMPMasterDirective(const OMPMasterDirective &S)
LabelDecl * getLabel() const
Definition: Stmt.h:2316
llvm::Function * GenerateCapturedStmtFunction(const CapturedStmt &S)
Creates the outlined function for a CapturedStmt.
Definition: CGStmt.cpp:2285
ReturnStmt - This represents a return, optionally of an expression: return; return 4;...
Definition: Stmt.h:2442
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
QualType getRecordType(const RecordDecl *Decl) const
SwitchCase * getSwitchCaseList()
Definition: Stmt.h:2014
void ResolveBranchFixups(llvm::BasicBlock *Target)
Definition: CGCleanup.cpp:384
SourceLocation getEnd() const
void EmitOMPBarrierDirective(const OMPBarrierDirective &S)
StringRef getClobber(unsigned i) const
Definition: Stmt.cpp:408
Expr * getCond()
Definition: Stmt.h:1761
ValueDecl * getDecl()
Definition: Expr.h:1120
const LangOptions & getLangOpts() const
ASTContext & getContext() const
llvm::StoreInst * CreateFlagStore(bool Value, llvm::Value *Addr)
Emit a store to an i1 flag variable.
Definition: CGBuilder.h:135
const SourceManager & SM
Definition: Format.cpp:1489
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:34
void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D)
Emit simple code for OpenMP directives in Simd-only mode.
DoStmt - This represents a &#39;do/while&#39; stmt.
Definition: Stmt.h:2184
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition: Stmt.h:2519
void EmitDeclStmt(const DeclStmt &S)
Definition: CGStmt.cpp:1104
virtual bool hasBody() const
Returns true if this Decl represents a declaration for a body of code, such as a function or method d...
Definition: DeclBase.h:984
The l-value was considered opaque, so the alignment was determined from a type.
void EmitOMPFlushDirective(const OMPFlushDirective &S)
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:141
void disableSanitizerForGlobal(llvm::GlobalVariable *GV)
StringRef getInputConstraint(unsigned i) const
getInputConstraint - Return the specified input constraint.
Definition: Stmt.cpp:392
This captures a statement into a function.
Definition: Stmt.h:3104
StringRef getOutputConstraint(unsigned i) const
getOutputConstraint - Return the constraint string for the specified output operand.
Definition: Stmt.cpp:376
Encodes a location in the source.
void EmitDoStmt(const DoStmt &S, ArrayRef< const Attr *> Attrs=None)
Definition: CGStmt.cpp:764
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go...
Expr * getRetValue()
Definition: Stmt.h:2475
void EmitOMPForDirective(const OMPForDirective &S)
A saved depth on the scope stack.
Definition: EHScopeStack.h:106
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition: CGExpr.cpp:163
AggValueSlot::Overlap_t overlapForReturnValue()
Determine whether a return value slot may overlap some other object.
Expr * getLHS()
Definition: Stmt.h:1482
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S)
Definition: CGObjC.cpp:1561
Stmt * getElse()
Definition: Stmt.h:1782
DeclStmt - Adaptor class for mixing declarations with statements and expressions. ...
Definition: Stmt.h:1142
Represents the declaration of a label.
Definition: Decl.h:468
An aggregate value slot.
Definition: CGValue.h:436
Expr * getCond()
Definition: Stmt.h:1945
void EmitStmt(const Stmt *S, ArrayRef< const Attr *> Attrs=None)
EmitStmt - Emit the code for the statement.
Definition: CGStmt.cpp:45
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S)
void EmitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &S)
bool validateOutputConstraint(ConstraintInfo &Info) const
Definition: TargetInfo.cpp:525
void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S)
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.h:1747
void EmitOMPSingleDirective(const OMPSingleDirective &S)
An aligned address.
Definition: Address.h:24
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitOMPForSimdDirective(const OMPForSimdDirective &S)
void EmitOMPAtomicDirective(const OMPAtomicDirective &S)
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
void EmitOMPSectionDirective(const OMPSectionDirective &S)
void EmitOMPSectionsDirective(const OMPSectionsDirective &S)
VarDecl * getConditionVariable()
Retrieve the variable declared in this "switch" statement, if any.
Definition: Stmt.cpp:964
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S)
const CGFunctionInfo * CurFnInfo
void EmitDecl(const Decl &D)
EmitDecl - Emit a declaration.
Definition: CGDecl.cpp:41
const TargetCodeGenInfo & getTargetHooks() const
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:214
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type, returning the result.
void EmitWhileStmt(const WhileStmt &S, ArrayRef< const Attr *> Attrs=None)
Definition: CGStmt.cpp:676
FunctionArgList - Type for representing both the decl and type of parameters to a function...
Definition: CGCall.h:355
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:58
LabelStmt * getStmt() const
Definition: Decl.h:492
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S)
Definition: CGObjC.cpp:1858
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn&#39;t support the specified stmt yet.
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S)
Dataflow Directional Tag Classes.
bool isVolatile() const
Definition: Stmt.h:2555
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S)
LValue InitCapturedStruct(const CapturedStmt &S)
Definition: CGStmt.cpp:2236
VarDecl * getConditionVariable()
Retrieve the variable declared in this "while" statement, if any.
Definition: Stmt.cpp:1020
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:571
void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S)
void EmitOMPParallelForDirective(const OMPParallelForDirective &S)
ArrayRef< const Attr * > getAttrs() const
Definition: Stmt.h:1668
CSFC_Result
CollectStatementsForCase - Given the body of a &#39;switch&#39; statement and a constant value that is being ...
Definition: CGStmt.cpp:1343
void push(llvm::BasicBlock *Header, const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc)
Begin a new structured loop.
void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S)
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:69
StmtClass getStmtClass() const
Definition: Stmt.h:1028
void EmitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &S)
void EmitOMPCancelDirective(const OMPCancelDirective &S)
bool hasTiedOperand() const
Return true if this input operand is a matching constraint that ties it to an output operand...
Definition: TargetInfo.h:833
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:107
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.h:3296
llvm::Module & getModule() const
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
Definition: CGObjC.cpp:1854
body_iterator body_begin()
Definition: Stmt.h:1274
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
bool hasProfileClangInstr() const
Check if Clang profile instrumenation is on.
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext, providing only those that are of type SpecificDecl (or a class derived from it).
Definition: DeclBase.h:2016
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type. ...
Definition: CGExprAgg.cpp:1821
void EmitCoreturnStmt(const CoreturnStmt &S)
JumpDest ReturnBlock
ReturnBlock - Unified return block.
virtual StringRef getHelperName() const
Get the name of the capture helper.
static bool hasAggregateEvaluationKind(QualType T)
SwitchStmt - This represents a &#39;switch&#39; stmt.
Definition: Stmt.h:1885
API for captured statement code generation.
static std::string SimplifyConstraint(const char *Constraint, const TargetInfo &Target, SmallVectorImpl< TargetInfo::ConstraintInfo > *OutCons=nullptr)
Definition: CGStmt.cpp:1701
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
When instrumenting to collect profile data, the counts for some blocks such as switch cases need to n...
bool resolveSymbolicName(const char *&Name, ArrayRef< ConstraintInfo > OutputConstraints, unsigned &Index) const
Definition: TargetInfo.cpp:599
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
T * getAttr() const
Definition: DeclBase.h:526
void EmitAsmStmt(const AsmStmt &S)
Definition: CGStmt.cpp:1874
static std::string AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, const TargetInfo &Target, CodeGenModule &CGM, const AsmStmt &Stmt, const bool EarlyClobber)
AddVariableConstraints - Look at AsmExpr and if it is a variable declared as using a particular regis...
Definition: CGStmt.cpp:1754
Stmt * getInit()
Definition: Stmt.h:1966
decl_range decls()
Definition: Stmt.h:1185
void SetInternalFunctionAttributes(GlobalDecl GD, llvm::Function *F, const CGFunctionInfo &FI)
Set the attributes on the LLVM function for the given decl and function info.
const Expr * getInputExpr(unsigned i) const
Definition: Stmt.cpp:400
Internal linkage, which indicates that the entity can be referred to from within the translation unit...
Definition: Linkage.h:31
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:442
void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S)
unsigned getNumClobbers() const
Definition: Stmt.h:2600
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2072
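A small hedged example of the bit-size query above, in the style of the operand-width checks done for inline asm in this file; the type and CGF variables are made up for illustration:

// Hypothetical fragment: ask the ASTContext for type widths in bits, e.g. to
// decide whether an inline-asm input is narrower than the tied output.
uint64_t InSize  = CGF.getContext().getTypeSize(InputTy);  // bits of InputTy
uint64_t OutSize = CGF.getContext().getTypeSize(OutputTy); // bits of OutputTy
bool InputIsNarrower = InSize < OutSize;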
SourceManager & getSourceManager()
Definition: ASTContext.h:661
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S)
DeclStmt * getRangeStmt()
Definition: StmtCXX.h:153
void EmitStopPoint(const Stmt *S)
EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
Definition: CGStmt.cpp:35
SourceLocation getAsmLoc() const
Definition: Stmt.h:2549
GotoStmt - This represents a direct goto.
Definition: Stmt.h:2303
Expr * getTarget()
Definition: Stmt.h:2358
const SwitchCase * getNextSwitchCase() const
Definition: Stmt.h:1368
CapturedDecl * getCapturedDecl()
Retrieve the outlined function declaration.
Definition: Stmt.cpp:1264
void setCurrentStmt(const Stmt *S)
If the execution count for the current statement is known, record that as the current count...
Definition: CodeGenPGO.h:73
Expr * getCond()
Definition: Stmt.h:2202
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
VarDecl * getConditionVariable()
Retrieve the variable declared in this "if" statement, if any.
Definition: Stmt.cpp:863
void EmitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &S)
void EmitIfStmt(const IfStmt &S)
Definition: CGStmt.cpp:599
ContinueStmt - This represents a continue.
Definition: Stmt.h:2383
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block, taking care to avoid creation of branches from dummy blocks.
Definition: CGStmt.cpp:462
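To show how EmitBlock and EmitBranch usually cooperate, here is a hedged sketch of a simple two-way branch; CondV and the block names are hypothetical, and createBasicBlock is the usual CodeGenFunction helper assumed from context:

// Hypothetical fragment: branch on CondV, emit the then-body, then rejoin.
llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
llvm::BasicBlock *ContBB = CGF.createBasicBlock("cont");
CGF.Builder.CreateCondBr(CondV, ThenBB, ContBB); // conditional branch
CGF.EmitBlock(ThenBB);    // switch emission into the "then" block
// ... emit the then-body here ...
CGF.EmitBranch(ContBB);   // branch to the join point (skipped from dummy blocks)
CGF.EmitBlock(ContBB);    // continue emitting after the join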
bool isNRVOVariable() const
Determine whether this local variable can be used with the named return value optimization (NRVO)...
Definition: Decl.h:1324
bool isVoidType() const
Definition: Type.h:6546
void EmitReturnStmt(const ReturnStmt &S)
EmitReturnStmt - Note that due to GCC extensions, this can have an operand if the function returns vo...
Definition: CGStmt.cpp:1020
llvm::Type * ConvertType(QualType T)
virtual llvm::Type * adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, StringRef Constraint, llvm::Type *Ty) const
Corrects the low-level LLVM type for a given constraint and "usual" type.
Definition: TargetInfo.h:127
WhileStmt - This represents a 'while' stmt.
Definition: Stmt.h:2062
LValue EmitLValue(const Expr *E)
EmitLValue - Emit code to compute a designator that specifies the location of the expression...
Definition: CGExpr.cpp:1235
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
void EmitLabelStmt(const LabelStmt &S)
Definition: CGStmt.cpp:558
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition: Stmt.cpp:275
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition: CGExpr.cpp:1757
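As a rough usage sketch tying the two entries above together (hedged, not taken from this file; E is an arbitrary expression and CGF the surrounding CodeGenFunction):

// Hypothetical fragment: compute the location of E, then load its value.
LValue LV = CGF.EmitLValue(E);                         // designator for E
RValue RV = CGF.EmitLoadOfLValue(LV, E->getExprLoc()); // read the stored value
llvm::Value *Scalar = RV.isScalar() ? RV.getScalarVal() : nullptr;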
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1572
Defines the clang::TargetInfo interface.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2402
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition: Decl.h:275
virtual void addReturnRegisterOutputs(CodeGen::CodeGenFunction &CGF, CodeGen::LValue ReturnValue, std::string &Constraints, std::vector< llvm::Type *> &ResultRegTypes, std::vector< llvm::Type *> &ResultTruncRegTypes, std::vector< CodeGen::LValue > &ResultRegDests, std::string &AsmString, unsigned NumOutputs) const
Adds constraints and types for result registers.
Definition: TargetInfo.h:134
CGCapturedStmtInfo * CapturedStmtInfo
__DEVICE__ int max(int __a, int __b)
void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S)
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
Definition: CGExprAgg.cpp:1867
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1047
static RValue get(llvm::Value *V)
Definition: CGValue.h:85
bool EmitSimpleStmt(const Stmt *S)
EmitSimpleStmt - Try to emit a "simple" statement which does not necessarily require an insertion poi...
Definition: CGStmt.cpp:346
BreakStmt - This represents a break.
Definition: Stmt.h:2409
void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S)
CapturedRegionKind
The different kinds of captured statement.
Definition: CapturedStmt.h:16
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
Definition: CGCleanup.cpp:1049
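A hedged sketch of how a cleanup-aware branch is typically requested, loosely modeled on the way break/continue are lowered in this file; BreakDest is a hypothetical JumpDest (for example, the enclosing loop's break destination):

// Hypothetical fragment: branching through the cleanup machinery runs any
// pending cleanups (destructors, etc.) between here and the target scope.
void emitBreakLike(clang::CodeGen::CodeGenFunction &CGF,
                   clang::CodeGen::CodeGenFunction::JumpDest BreakDest) {
  CGF.EmitBranchThroughCleanup(BreakDest);
}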
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
Definition: CGDebugInfo.h:731
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition: CGExpr.cpp:2788
Stmt * getSubStmt()
Definition: Stmt.h:1613
void EmitCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef< const Attr *> Attrs=None)
Definition: CGStmt.cpp:925
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
Definition: CGStmt.cpp:381
void EmitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &S)
DeclStmt * getLoopVarStmt()
Definition: StmtCXX.h:160
A trivial tuple used to represent a source range.
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
Definition: CGObjC.cpp:1850
LValue - This represents an lvalue reference.
Definition: CGValue.h:166
SanitizerMetadata * getSanitizerMetadata()
void EmitBlockAfterUses(llvm::BasicBlock *BB)
EmitBlockAfterUses - Emit the given block somewhere hopefully near its uses, and leave the insertion ...
Definition: CGStmt.cpp:479
APSInt & getInt()
Definition: APValue.h:257
const LangOptions & getLangOpts() const
DeclStmt * getBeginStmt()
Definition: StmtCXX.h:154
void EmitBreakStmt(const BreakStmt &S)
Definition: CGStmt.cpp:1114
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S)
Definition: CGObjC.cpp:3354
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
SourceLocation getBegin() const
capture_init_iterator capture_init_end()
Retrieve the iterator pointing one past the last initialization argument.
Definition: Stmt.h:3288
llvm::Value * getPointer() const
Definition: CGValue.h:322
This class handles loading and caching of source files into memory.
Stmt * getSubStmt()
Definition: Stmt.h:1672
bool haveRegionCounts() const
Whether or not we have PGO region data for the current function.
Definition: CodeGenPGO.h:50
Defines enum values for all the target-independent builtin functions.
void EmitOMPTaskDirective(const OMPTaskDirective &S)
SourceLocation getBodyRBrace() const
getBodyRBrace - Gets the right brace of the body, if a body exists.
Definition: DeclBase.cpp:905
CapturedRegionKind getCapturedRegionKind() const
Retrieve the captured region kind.
Definition: Stmt.cpp:1279
bool isScalar() const
Definition: CGValue.h:51
Attr - This represents one attribute.
Definition: Attr.h:43
SourceLocation getLocation() const
Definition: DeclBase.h:417
virtual std::string convertConstraint(const char *&Constraint) const
Definition: TargetInfo.h:931
void EmitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &S)
static OMPLinearClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef< Expr *> VL, ArrayRef< Expr *> PL, ArrayRef< Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit, Expr *PostUpdate)
Creates clause with a list of variables VL and a linear step Step.
virtual const char * getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
Stmt * getSubStmt()
Definition: Stmt.h:1512
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given function info.
Definition: CGCall.cpp:1549