1 //===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This coordinates the per-function state used while generating code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CodeGenFunction.h"
14 #include "CGBlocks.h"
15 #include "CGCleanup.h"
16 #include "CGCUDARuntime.h"
17 #include "CGCXXABI.h"
18 #include "CGDebugInfo.h"
19 #include "CGOpenMPRuntime.h"
20 #include "CodeGenModule.h"
21 #include "CodeGenPGO.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/ASTContext.h"
24 #include "clang/AST/ASTLambda.h"
25 #include "clang/AST/Decl.h"
26 #include "clang/AST/DeclCXX.h"
27 #include "clang/AST/StmtCXX.h"
28 #include "clang/AST/StmtObjC.h"
29 #include "clang/Basic/Builtins.h"
30 #include "clang/Basic/CodeGenOptions.h"
31 #include "clang/Basic/TargetInfo.h"
32 #include "clang/CodeGen/CGFunctionInfo.h"
33 #include "clang/Frontend/FrontendDiagnostic.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/Dominators.h"
36 #include "llvm/IR/Intrinsics.h"
37 #include "llvm/IR/MDBuilder.h"
38 #include "llvm/IR/Operator.h"
39 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
40 using namespace clang;
41 using namespace CodeGen;
42 
43 /// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
44 /// markers.
45 static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
46  const LangOptions &LangOpts) {
47  if (CGOpts.DisableLifetimeMarkers)
48  return false;
49 
50  // Disable lifetime markers in msan builds.
51  // FIXME: Remove this when msan works with lifetime markers.
52  if (LangOpts.Sanitize.has(SanitizerKind::Memory))
53  return false;
54 
55  // Asan uses markers for use-after-scope checks.
56  if (CGOpts.SanitizeAddressUseAfterScope)
57  return true;
58 
59  // For now, only in optimized builds.
60  return CGOpts.OptimizationLevel != 0;
61 }
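// Illustrative sketch (editor's note, not from this file): when the helper
// above returns true, local allocas are bracketed with lifetime intrinsics in
// the emitted IR, roughly:
//   %x = alloca i32, align 4
//   call void @llvm.lifetime.start.p0i8(i64 4, i8* %x.i8)
//   ...uses of %x...
//   call void @llvm.lifetime.end.p0i8(i64 4, i8* %x.i8)
// which lets later passes reuse stack slots and lets ASan catch
// use-after-scope.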
62 
63 CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
64  : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
65  Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
66  CGBuilderInserterTy(this)),
67  SanOpts(CGM.getLangOpts().Sanitize), DebugInfo(CGM.getModuleDebugInfo()),
68  PGO(cgm), ShouldEmitLifetimeMarkers(shouldEmitLifetimeMarkers(
69  CGM.getCodeGenOpts(), CGM.getLangOpts())) {
70  if (!suppressNewContext)
71  CGM.getCXXABI().getMangleContext().startNewFunction();
72
73  llvm::FastMathFlags FMF;
74  if (CGM.getLangOpts().FastMath)
75  FMF.setFast();
76  if (CGM.getLangOpts().FiniteMathOnly) {
77  FMF.setNoNaNs();
78  FMF.setNoInfs();
79  }
80  if (CGM.getCodeGenOpts().NoNaNsFPMath) {
81  FMF.setNoNaNs();
82  }
83  if (CGM.getCodeGenOpts().NoSignedZeros) {
84  FMF.setNoSignedZeros();
85  }
86  if (CGM.getCodeGenOpts().ReciprocalMath) {
87  FMF.setAllowReciprocal();
88  }
89  if (CGM.getCodeGenOpts().Reassociate) {
90  FMF.setAllowReassoc();
91  }
92  Builder.setFastMathFlags(FMF);
93 }
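// For illustration (editor's note): the mapping above means -ffast-math sets
// every flag via FMF.setFast(), while narrower options compose, e.g.
// -ffinite-math-only alone yields nnan+ninf and -freciprocal-math adds arcp;
// FP instructions later created through this Builder inherit those flags.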
94 
95 CodeGenFunction::~CodeGenFunction() {
96  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
97 
98  // If there are any unclaimed block infos, go ahead and destroy them
99  // now. This can happen if IR-gen gets clever and skips evaluating
100  // something.
101  if (FirstBlockInfo)
102  destroyBlockInfos(FirstBlockInfo);
103
104  if (getLangOpts().OpenMP && CurFn)
105  CGM.getOpenMPRuntime().functionFinished(*this);
106 }
107 
108 CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
109  LValueBaseInfo *BaseInfo,
110  TBAAAccessInfo *TBAAInfo) {
111  return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
112  /* forPointeeType= */ true);
113 }
114 
115 CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
116  LValueBaseInfo *BaseInfo,
117  TBAAAccessInfo *TBAAInfo,
118  bool forPointeeType) {
119  if (TBAAInfo)
120  *TBAAInfo = CGM.getTBAAAccessInfo(T);
121 
122  // Honor alignment typedef attributes even on incomplete types.
123  // We also honor them straight for C++ class types, even as pointees;
124  // there's an expressivity gap here.
125  if (auto TT = T->getAs<TypedefType>()) {
126  if (auto Align = TT->getDecl()->getMaxAlignment()) {
127  if (BaseInfo)
128  *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
129  return getContext().toCharUnitsFromBits(Align);
130  }
131  }
132 
133  if (BaseInfo)
134  *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
135
136  CharUnits Alignment;
137  if (T->isIncompleteType()) {
138  Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
139  } else {
140  // For C++ class pointees, we don't know whether we're pointing at a
141  // base or a complete object, so we generally need to use the
142  // non-virtual alignment.
143  const CXXRecordDecl *RD;
144  if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
145  Alignment = CGM.getClassPointerAlignment(RD);
146  } else {
147  Alignment = getContext().getTypeAlignInChars(T);
148  if (T.getQualifiers().hasUnaligned())
149  Alignment = CharUnits::One();
150  }
151 
152  // Cap to the global maximum type alignment unless the alignment
153  // was somehow explicit on the type.
154  if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
155  if (Alignment.getQuantity() > MaxAlign &&
156  !getContext().isAlignmentRequired(T))
157  Alignment = CharUnits::fromQuantity(MaxAlign);
158  }
159  }
160  return Alignment;
161 }
162 
163 LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
164  LValueBaseInfo BaseInfo;
165  TBAAAccessInfo TBAAInfo;
166  CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
167  return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
168  TBAAInfo);
169 }
170 
171 /// Given a value of type T* that may not be to a complete object,
172 /// construct an l-value with the natural pointee alignment of T.
173 LValue
174 CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
175  LValueBaseInfo BaseInfo;
176  TBAAAccessInfo TBAAInfo;
177  CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
178  /* forPointeeType= */ true);
179  return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
180 }
181 
182 
183 llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
184  return CGM.getTypes().ConvertTypeForMem(T);
185 }
186 
187 llvm::Type *CodeGenFunction::ConvertType(QualType T) {
188  return CGM.getTypes().ConvertType(T);
189 }
190 
191 TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
192  type = type.getCanonicalType();
193  while (true) {
194  switch (type->getTypeClass()) {
195 #define TYPE(name, parent)
196 #define ABSTRACT_TYPE(name, parent)
197 #define NON_CANONICAL_TYPE(name, parent) case Type::name:
198 #define DEPENDENT_TYPE(name, parent) case Type::name:
199 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
200 #include "clang/AST/TypeNodes.def"
201  llvm_unreachable("non-canonical or dependent type in IR-generation");
202 
203  case Type::Auto:
204  case Type::DeducedTemplateSpecialization:
205  llvm_unreachable("undeduced type in IR-generation");
206 
207  // Various scalar types.
208  case Type::Builtin:
209  case Type::Pointer:
210  case Type::BlockPointer:
211  case Type::LValueReference:
212  case Type::RValueReference:
213  case Type::MemberPointer:
214  case Type::Vector:
215  case Type::ExtVector:
216  case Type::FunctionProto:
217  case Type::FunctionNoProto:
218  case Type::Enum:
219  case Type::ObjCObjectPointer:
220  case Type::Pipe:
221  return TEK_Scalar;
222 
223  // Complexes.
224  case Type::Complex:
225  return TEK_Complex;
226 
227  // Arrays, records, and Objective-C objects.
228  case Type::ConstantArray:
229  case Type::IncompleteArray:
230  case Type::VariableArray:
231  case Type::Record:
232  case Type::ObjCObject:
233  case Type::ObjCInterface:
234  return TEK_Aggregate;
235 
236  // We operate on atomic values according to their underlying type.
237  case Type::Atomic:
238  type = cast<AtomicType>(type)->getValueType();
239  continue;
240  }
241  llvm_unreachable("unknown type kind!");
242  }
243 }
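// For example (illustrative): a builtin, pointer, or enum type such as 'int'
// or 'char *' is TEK_Scalar, '_Complex double' is TEK_Complex, a struct or a
// constant-size array is TEK_Aggregate, and '_Atomic(int)' loops around and
// is classified by its underlying 'int', i.e. TEK_Scalar.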
244 
245 llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
246  // For cleanliness, we try to avoid emitting the return block for
247  // simple cases.
248  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
249 
250  if (CurBB) {
251  assert(!CurBB->getTerminator() && "Unexpected terminated block.");
252 
253  // We have a valid insert point, reuse it if it is empty or there are no
254  // explicit jumps to the return block.
255  if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
256  ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
257  delete ReturnBlock.getBlock();
258  ReturnBlock = JumpDest();
259  } else
260  EmitBlock(ReturnBlock.getBlock());
261  return llvm::DebugLoc();
262  }
263 
264  // Otherwise, if the return block is the target of a single direct
265  // branch then we can just put the code in that block instead. This
266  // cleans up functions which started with a unified return block.
267  if (ReturnBlock.getBlock()->hasOneUse()) {
268  llvm::BranchInst *BI =
269  dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
270  if (BI && BI->isUnconditional() &&
271  BI->getSuccessor(0) == ReturnBlock.getBlock()) {
272  // Record/return the DebugLoc of the simple 'return' expression to be used
273  // later by the actual 'ret' instruction.
274  llvm::DebugLoc Loc = BI->getDebugLoc();
275  Builder.SetInsertPoint(BI->getParent());
276  BI->eraseFromParent();
277  delete ReturnBlock.getBlock();
278  ReturnBlock = JumpDest();
279  return Loc;
280  }
281  }
282 
283  // FIXME: We are at an unreachable point, there is no reason to emit the block
284  // unless it has uses. However, we still need a place to put the debug
285  // region.end for now.
286 
287  EmitBlock(ReturnBlock.getBlock());
288  return llvm::DebugLoc();
289 }
290 
291 static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
292  if (!BB) return;
293  if (!BB->use_empty())
294  return CGF.CurFn->getBasicBlockList().push_back(BB);
295  delete BB;
296 }
297 
298 void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
299  assert(BreakContinueStack.empty() &&
300  "mismatched push/pop in break/continue stack!");
301 
302  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
303  && NumSimpleReturnExprs == NumReturnExprs
304  && ReturnBlock.getBlock()->use_empty();
305  // Usually the return expression is evaluated before the cleanup
306  // code. If the function contains only a simple return statement,
307  // such as a constant, the location before the cleanup code becomes
308  // the last useful breakpoint in the function, because the simple
309  // return expression will be evaluated after the cleanup code. To be
310  // safe, set the debug location for cleanup code to the location of
311  // the return statement. Otherwise the cleanup code should be at the
312  // end of the function's lexical scope.
313  //
314  // If there are multiple branches to the return block, the branch
315  // instructions will get the location of the return statements and
316  // all will be fine.
317  if (CGDebugInfo *DI = getDebugInfo()) {
318  if (OnlySimpleReturnStmts)
319  DI->EmitLocation(Builder, LastStopPoint);
320  else
321  DI->EmitLocation(Builder, EndLoc);
322  }
323 
324  // Pop any cleanups that might have been associated with the
325  // parameters. Do this in whatever block we're currently in; it's
326  // important to do this before we enter the return block or return
327  // edges will be *really* confused.
328  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
329  bool HasOnlyLifetimeMarkers =
330  HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
331  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
332  if (HasCleanups) {
333  // Make sure the line table doesn't jump back into the body for
334  // the ret after it's been at EndLoc.
335  if (CGDebugInfo *DI = getDebugInfo())
336  if (OnlySimpleReturnStmts)
337  DI->EmitLocation(Builder, EndLoc);
338
339  PopCleanupBlocks(PrologueCleanupDepth);
340  }
341 
342  // Emit function epilog (to return).
343  llvm::DebugLoc Loc = EmitReturnBlock();
344 
345  if (ShouldInstrumentFunction()) {
346  if (CGM.getCodeGenOpts().InstrumentFunctions)
347  CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
348  if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
349  CurFn->addFnAttr("instrument-function-exit-inlined",
350  "__cyg_profile_func_exit");
351  }
352 
353  // Emit debug descriptor for function end.
354  if (CGDebugInfo *DI = getDebugInfo())
355  DI->EmitFunctionEnd(Builder, CurFn);
356 
357  // Reset the debug location to that of the simple 'return' expression, if
358  // any, rather than that of the end of the function's scope '}'.
359  ApplyDebugLocation AL(*this, Loc);
360  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
361  EmitEndEHSpec(CurCodeDecl);
362
363  assert(EHStack.empty() &&
364  "did not remove all scopes from cleanup stack!");
365 
366  // If someone did an indirect goto, emit the indirect goto block at the end of
367  // the function.
368  if (IndirectBranch) {
369  EmitBlock(IndirectBranch->getParent());
370  Builder.ClearInsertionPoint();
371  }
372 
373  // If some of our locals escaped, insert a call to llvm.localescape in the
374  // entry block.
375  if (!EscapedLocals.empty()) {
376  // Invert the map from local to index into a simple vector. There should be
377  // no holes.
378  SmallVector<llvm::Value *, 4> EscapeArgs;
379  EscapeArgs.resize(EscapedLocals.size());
380  for (auto &Pair : EscapedLocals)
381  EscapeArgs[Pair.second] = Pair.first;
382  llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
383  &CGM.getModule(), llvm::Intrinsic::localescape);
384  CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
385  }
386 
387  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
388  llvm::Instruction *Ptr = AllocaInsertPt;
389  AllocaInsertPt = nullptr;
390  Ptr->eraseFromParent();
391 
392  // If someone took the address of a label but never did an indirect goto, we
393  // made a zero entry PHI node, which is illegal, zap it now.
394  if (IndirectBranch) {
395  llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
396  if (PN->getNumIncomingValues() == 0) {
397  PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
398  PN->eraseFromParent();
399  }
400  }
401 
402  EmitIfUsed(*this, EHResumeBlock);
403  EmitIfUsed(*this, TerminateLandingPad);
404  EmitIfUsed(*this, TerminateHandler);
405  EmitIfUsed(*this, UnreachableBlock);
406 
407  for (const auto &FuncletAndParent : TerminateFunclets)
408  EmitIfUsed(*this, FuncletAndParent.second);
409 
410  if (CGM.getCodeGenOpts().EmitDeclMetadata)
411  EmitDeclMetadata();
412 
413  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
414  I = DeferredReplacements.begin(),
415  E = DeferredReplacements.end();
416  I != E; ++I) {
417  I->first->replaceAllUsesWith(I->second);
418  I->first->eraseFromParent();
419  }
420 
421  // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
422  // PHIs if the current function is a coroutine. We don't do it for all
423  // functions as it may result in slight increase in numbers of instructions
424  // if compiled with no optimizations. We do it for coroutine as the lifetime
425  // of CleanupDestSlot alloca make correct coroutine frame building very
426  // difficult.
427  if (NormalCleanupDest.isValid() && isCoroutine()) {
428  llvm::DominatorTree DT(*CurFn);
429  llvm::PromoteMemToReg(
430  cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
431  NormalCleanupDest = Address::invalid();
432  }
433 
434  // Scan function arguments for vector width.
435  for (llvm::Argument &A : CurFn->args())
436  if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
437  LargestVectorWidth = std::max(LargestVectorWidth,
438  VT->getPrimitiveSizeInBits());
439 
440  // Update vector width based on return type.
441  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
442  LargestVectorWidth = std::max(LargestVectorWidth,
443  VT->getPrimitiveSizeInBits());
444 
445  // Add the required-vector-width attribute. This contains the max width from:
446  // 1. min-vector-width attribute used in the source program.
447  // 2. Any builtins used that have a vector width specified.
448  // 3. Values passed in and out of inline assembly.
449  // 4. Width of vector arguments and return types for this function.
450  // 5. Width of vector arguments and return types for functions called by this
451  // function.
452  CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));
453 
454  // If we generated an unreachable return block, delete it now.
455  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
456  Builder.ClearInsertionPoint();
457  ReturnBlock.getBlock()->eraseFromParent();
458  }
459  if (ReturnValue.isValid()) {
460  auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
461  if (RetAlloca && RetAlloca->use_empty()) {
462  RetAlloca->eraseFromParent();
463  ReturnValue = Address::invalid();
464  }
465  }
466 }
467 
468 /// ShouldInstrumentFunction - Return true if the current function should be
469 /// instrumented with __cyg_profile_func_* calls.
470 bool CodeGenFunction::ShouldInstrumentFunction() {
471  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
472  !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
473  !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
474  return false;
475  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
476  return false;
477  return true;
478 }
479 
480 /// ShouldXRayInstrument - Return true if the current function should be
481 /// instrumented with XRay nop sleds.
482 bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
483  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
484 }
485 
486 /// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
487 /// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
488 bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
489  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
490  (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
493 }
494 
495 bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
496  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
497  (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
500 }
501 
502 llvm::Constant *
503 CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
504  llvm::Constant *Addr) {
505  // Addresses stored in prologue data can't require run-time fixups and must
506  // be PC-relative. Run-time fixups are undesirable because they necessitate
507  // writable text segments, which are unsafe. And absolute addresses are
508  // undesirable because they break PIE mode.
509 
510  // Add a layer of indirection through a private global. Taking its address
511  // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
512  auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
513  /*isConstant=*/true,
514  llvm::GlobalValue::PrivateLinkage, Addr);
515 
516  // Create a PC-relative address.
517  auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
518  auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
519  auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
520  return (IntPtrTy == Int32Ty)
521  ? PCRelAsInt
522  : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
523 }
524 
525 llvm::Value *
526 CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
527  llvm::Value *EncodedAddr) {
528  // Reconstruct the address of the global.
529  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
530  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
531  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
532  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");
533 
534  // Load the original pointer through the global.
535  return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
536  "decoded_addr");
537 }
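// Rough sketch of the encode/decode round trip implemented above (editor's
// illustration, not part of the original source):
//   encoded = trunc_i32((intptr_t)&GV - (intptr_t)F)   // computed at compile time
//   GV      = (i8**)((intptr_t)F + sext(encoded))      // rebuilt at run time
//   addr    = *GV                                      // the original Addr
// Only a function-relative offset ends up in the prologue, so no absolute
// address or run-time relocation is required.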
538 
539 void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
540  llvm::Function *Fn)
541 {
542  if (!FD->hasAttr<OpenCLKernelAttr>())
543  return;
544 
545  llvm::LLVMContext &Context = getLLVMContext();
546 
547  CGM.GenOpenCLArgMetadata(Fn, FD, this);
548 
549  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
550  QualType HintQTy = A->getTypeHint();
551  const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
552  bool IsSignedInteger =
553  HintQTy->isSignedIntegerType() ||
554  (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
555  llvm::Metadata *AttrMDArgs[] = {
556  llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
557  CGM.getTypes().ConvertType(A->getTypeHint()))),
558  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
559  llvm::IntegerType::get(Context, 32),
560  llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
561  Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
562  }
563 
564  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
565  llvm::Metadata *AttrMDArgs[] = {
566  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
567  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
568  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
569  Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
570  }
571 
572  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
573  llvm::Metadata *AttrMDArgs[] = {
574  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
575  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
576  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
577  Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
578  }
579 
580  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
581  FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
582  llvm::Metadata *AttrMDArgs[] = {
583  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
584  Fn->setMetadata("intel_reqd_sub_group_size",
585  llvm::MDNode::get(Context, AttrMDArgs));
586  }
587 }
588 
589 /// Determine whether the function F ends with a return stmt.
590 static bool endsWithReturn(const Decl* F) {
591  const Stmt *Body = nullptr;
592  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
593  Body = FD->getBody();
594  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
595  Body = OMD->getBody();
596 
597  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
598  auto LastStmt = CS->body_rbegin();
599  if (LastStmt != CS->body_rend())
600  return isa<ReturnStmt>(*LastStmt);
601  }
602  return false;
603 }
604 
605 void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
606  if (SanOpts.has(SanitizerKind::Thread)) {
607  Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
608  Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
609  }
610 }
611 
612 static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
613  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
614  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
615  !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
616  (MD->getNumParams() != 1 && MD->getNumParams() != 2))
617  return false;
618 
619  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
620  return false;
621 
622  if (MD->getNumParams() == 2) {
623  auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
624  if (!PT || !PT->isVoidPointerType() ||
625  !PT->getPointeeType().isConstQualified())
626  return false;
627  }
628 
629  return true;
630 }
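// For example (illustrative), this accepts member functions shaped like the
// standard allocator interface:
//   T *allocate(size_t n);
//   T *allocate(size_t n, const void *hint);
// but rejects, say, an overload whose first parameter is not size_t or whose
// hint parameter is not 'const void *'.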
631 
632 /// Return the UBSan prologue signature for \p FD if one is available.
633 static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
634  const FunctionDecl *FD) {
635  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
636  if (!MD->isStatic())
637  return nullptr;
638  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
639 }
640
641 void CodeGenFunction::StartFunction(GlobalDecl GD,
642  QualType RetTy,
643  llvm::Function *Fn,
644  const CGFunctionInfo &FnInfo,
645  const FunctionArgList &Args,
646  SourceLocation Loc,
647  SourceLocation StartLoc) {
648  assert(!CurFn &&
649  "Do not use a CodeGenFunction object for more than one function");
650 
651  const Decl *D = GD.getDecl();
652 
653  DidCallStackSave = false;
654  CurCodeDecl = D;
655  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
656  if (FD->usesSEHTry())
657  CurSEHParent = FD;
658  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
659  FnRetTy = RetTy;
660  CurFn = Fn;
661  CurFnInfo = &FnInfo;
662  assert(CurFn->isDeclaration() && "Function already has body?");
663 
664  // If this function has been blacklisted for any of the enabled sanitizers,
665  // disable the sanitizer for the function.
666  do {
667 #define SANITIZER(NAME, ID) \
668  if (SanOpts.empty()) \
669  break; \
670  if (SanOpts.has(SanitizerKind::ID)) \
671  if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc)) \
672  SanOpts.set(SanitizerKind::ID, false);
673 
674 #include "clang/Basic/Sanitizers.def"
675 #undef SANITIZER
676  } while (0);
677 
678  if (D) {
679  // Apply the no_sanitize* attributes to SanOpts.
680  for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
681  SanitizerMask mask = Attr->getMask();
682  SanOpts.Mask &= ~mask;
683  if (mask & SanitizerKind::Address)
684  SanOpts.set(SanitizerKind::KernelAddress, false);
685  if (mask & SanitizerKind::KernelAddress)
686  SanOpts.set(SanitizerKind::Address, false);
687  if (mask & SanitizerKind::HWAddress)
688  SanOpts.set(SanitizerKind::KernelHWAddress, false);
689  if (mask & SanitizerKind::KernelHWAddress)
690  SanOpts.set(SanitizerKind::HWAddress, false);
691  }
692  }
693 
694  // Apply sanitizer attributes to the function.
695  if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
696  Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
697  if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
698  Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
699  if (SanOpts.has(SanitizerKind::MemTag))
700  Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
701  if (SanOpts.has(SanitizerKind::Thread))
702  Fn->addFnAttr(llvm::Attribute::SanitizeThread);
703  if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
704  Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
705  if (SanOpts.has(SanitizerKind::SafeStack))
706  Fn->addFnAttr(llvm::Attribute::SafeStack);
707  if (SanOpts.has(SanitizerKind::ShadowCallStack))
708  Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
709 
710  // Apply fuzzing attribute to the function.
711  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
712  Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
713 
714  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
715  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run time.
716  if (SanOpts.has(SanitizerKind::Thread)) {
717  if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
718  IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
719  if (OMD->getMethodFamily() == OMF_dealloc ||
720  OMD->getMethodFamily() == OMF_initialize ||
721  (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
722  markAsIgnoreThreadCheckingAtRuntime(Fn);
723  }
724  }
725  }
726 
727  // Ignore unrelated casts in STL allocate() since the allocator must cast
728  // from void* to T* before object initialization completes. Don't match on the
729  // namespace because not all allocators are in std::
730  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
731  if (matchesStlAllocatorFn(D, getContext()))
732  SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
733  }
734 
735  // Ignore null checks in coroutine functions since the coroutines passes
736  // are not aware of how to move the extra UBSan instructions across the split
737  // coroutine boundaries.
738  if (D && SanOpts.has(SanitizerKind::Null))
739  if (const auto *FD = dyn_cast<FunctionDecl>(D))
740  if (FD->getBody() &&
741  FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
742  SanOpts.Mask &= ~SanitizerKind::Null;
743 
744  // Apply xray attributes to the function (as a string, for now)
745  if (D) {
746  if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
749  if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
750  Fn->addFnAttr("function-instrument", "xray-always");
751  if (XRayAttr->neverXRayInstrument())
752  Fn->addFnAttr("function-instrument", "xray-never");
753  if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
755  Fn->addFnAttr("xray-log-args",
756  llvm::utostr(LogArgs->getArgumentCount()));
757  }
758  } else {
760  Fn->addFnAttr(
761  "xray-instruction-threshold",
762  llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
763  }
764  }
765 
766  // Add no-jump-tables value.
767  Fn->addFnAttr("no-jump-tables",
768  llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));
769 
770  // Add profile-sample-accurate value.
771  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
772  Fn->addFnAttr("profile-sample-accurate");
773 
774  if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
775  Fn->addFnAttr("cfi-canonical-jump-table");
776 
777  if (getLangOpts().OpenCL) {
778  // Add metadata for a kernel function.
779  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
780  EmitOpenCLKernelMetadata(FD, Fn);
781  }
782 
783  // If we are checking function types, emit a function type signature as
784  // prologue data.
785  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
786  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
787  if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
788  // Remove any (C++17) exception specifications, to allow calling e.g. a
789  // noexcept function through a non-noexcept pointer.
790  auto ProtoTy =
791  getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
792  EST_None);
793  llvm::Constant *FTRTTIConst =
794  CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
795  llvm::Constant *FTRTTIConstEncoded =
796  EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
797  llvm::Constant *PrologueStructElems[] = {PrologueSig,
798  FTRTTIConstEncoded};
799  llvm::Constant *PrologueStructConst =
800  llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
801  Fn->setPrologueData(PrologueStructConst);
802  }
803  }
804  }
805 
806  // If we're checking nullability, we need to know whether we can check the
807  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
808  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
809  auto Nullability = FnRetTy->getNullability(getContext());
810  if (Nullability && *Nullability == NullabilityKind::NonNull) {
811  if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
812  CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
813  RetValNullabilityPrecondition =
814  llvm::ConstantInt::getTrue(getLLVMContext());
815  }
816  }
817 
818  // If we're in C++ mode and the function name is "main", it is guaranteed
819  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
820  // used within a program").
821  if (getLangOpts().CPlusPlus)
822  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
823  if (FD->isMain())
824  Fn->addFnAttr(llvm::Attribute::NoRecurse);
825 
826  // If a custom alignment is used, force realigning to this alignment on
827  // any main function which certainly will need it.
828  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
829  if ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
830  CGM.getCodeGenOpts().StackAlignment)
831  Fn->addFnAttr("stackrealign");
832 
833  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
834 
835  // Create a marker to make it easy to insert allocas into the entry block
836  // later. Don't create this with the builder, because we don't want it
837  // folded.
838  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
839  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);
840 
841  ReturnBlock = getJumpDestInCurrentScope("return");
842
843  Builder.SetInsertPoint(EntryBB);
844 
845  // If we're checking the return value, allocate space for a pointer to a
846  // precise source location of the checked return statement.
847  if (requiresReturnValueCheck()) {
848  ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
849  InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
850  }
851 
852  // Emit subprogram debug descriptor.
853  if (CGDebugInfo *DI = getDebugInfo()) {
854  // Reconstruct the type from the argument list so that implicit parameters,
855  // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
856  // convention.
857  CallingConv CC = CallingConv::CC_C;
858  if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
859  if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
860  CC = SrcFnTy->getCallConv();
861  SmallVector<QualType, 16> ArgTypes;
862  for (const VarDecl *VD : Args)
863  ArgTypes.push_back(VD->getType());
864  QualType FnType = getContext().getFunctionType(
865  RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
866  DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
867  Builder);
868  }
869 
870  if (ShouldInstrumentFunction()) {
871  if (CGM.getCodeGenOpts().InstrumentFunctions)
872  CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
873  if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
874  CurFn->addFnAttr("instrument-function-entry-inlined",
875  "__cyg_profile_func_enter");
876  if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
877  CurFn->addFnAttr("instrument-function-entry-inlined",
878  "__cyg_profile_func_enter_bare");
879  }
880 
881  // Since emitting the mcount call here impacts optimizations such as function
882  // inlining, we just add an attribute to insert a mcount call in backend.
883  // The attribute "counting-function" is set to mcount function name which is
884  // architecture dependent.
885  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
886  // Calls to fentry/mcount should not be generated if function has
887  // the no_instrument_function attribute.
888  if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
889  if (CGM.getCodeGenOpts().CallFEntry)
890  Fn->addFnAttr("fentry-call", "true");
891  else {
892  Fn->addFnAttr("instrument-function-entry-inlined",
893  getTarget().getMCountName());
894  }
895  }
896  }
897 
898  if (RetTy->isVoidType()) {
899  // Void type; nothing to return.
900  ReturnValue = Address::invalid();
901
902  // Count the implicit return.
903  if (!endsWithReturn(D))
904  ++NumReturnExprs;
905  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
906  // Indirect return; emit returned value directly into sret slot.
907  // This reduces code size, and affects correctness in C++.
908  auto AI = CurFn->arg_begin();
909  if (CurFnInfo->getReturnInfo().isSRetAfterThis())
910  ++AI;
918  }
921  // Load the sret pointer from the argument struct and return into that.
922  unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
923  llvm::Function::arg_iterator EI = CurFn->arg_end();
924  --EI;
925  llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
927  Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
929  } else {
930  ReturnValue = CreateIRTemp(RetTy, "retval");
931 
932  // Tell the epilog emitter to autorelease the result. We do this
933  // now so that various specialized functions can suppress it
934  // during their IR-generation.
935  if (getLangOpts().ObjCAutoRefCount &&
936  !CurFnInfo->isReturnsRetained() &&
937  RetTy->isObjCRetainableType())
938  AutoreleaseResult = true;
939  }
940 
941  EmitStartEHSpec(CurCodeDecl);
942
943  PrologueCleanupDepth = EHStack.stable_begin();
944
945  // Emit OpenMP specific initialization of the device functions.
946  if (getLangOpts().OpenMP && CurCodeDecl)
947  CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
948 
950 
951  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
952  CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
953  const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
954  if (MD->getParent()->isLambda() &&
955  MD->getOverloadedOperator() == OO_Call) {
956  // We're in a lambda; figure out the captures.
957  MD->getParent()->getCaptureFields(LambdaCaptureFields,
958  LambdaThisCaptureField);
959  if (LambdaThisCaptureField) {
960  // If the lambda captures the object referred to by '*this' - either by
961  // value or by reference, make sure CXXThisValue points to the correct
962  // object.
963 
964  // Get the lvalue for the field (which is a copy of the enclosing object
965  // or contains the address of the enclosing object).
966  LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
967  if (!LambdaThisCaptureField->getType()->isPointerType()) {
968  // If the enclosing object was captured by value, just use its address.
969  CXXThisValue = ThisFieldLValue.getAddress().getPointer();
970  } else {
971  // Load the lvalue pointed to by the field, since '*this' was captured
972  // by reference.
973  CXXThisValue =
974  EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
975  }
976  }
977  for (auto *FD : MD->getParent()->fields()) {
978  if (FD->hasCapturedVLAType()) {
979  auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
980  SourceLocation()).getScalarVal();
981  auto VAT = FD->getCapturedVLAType();
982  VLASizeMap[VAT->getSizeExpr()] = ExprArg;
983  }
984  }
985  } else {
986  // Not in a lambda; just use 'this' from the method.
987  // FIXME: Should we generate a new load for each use of 'this'? The
988  // fast register allocator would be happier...
989  CXXThisValue = CXXABIThisValue;
990  }
991 
992  // Check the 'this' pointer once per function, if it's available.
993  if (CXXABIThisValue) {
994  SanitizerSet SkippedChecks;
995  SkippedChecks.set(SanitizerKind::ObjectSize, true);
996  QualType ThisTy = MD->getThisType();
997 
998  // If this is the call operator of a lambda with no capture-default, it
999  // may have a static invoker function, which may call this operator with
1000  // a null 'this' pointer.
1001  if (isLambdaCallOperator(MD) &&
1002  MD->getParent()->getLambdaCaptureDefault() == LCD_None)
1003  SkippedChecks.set(SanitizerKind::Null, true);
1004 
1005  EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
1006  : TCK_MemberCall,
1007  Loc, CXXABIThisValue, ThisTy,
1008  getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
1009  SkippedChecks);
1010  }
1011  }
1012 
1013  // If any of the arguments have a variably modified type, make sure to
1014  // emit the type size.
1015  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1016  i != e; ++i) {
1017  const VarDecl *VD = *i;
1018 
1019  // Dig out the type as written from ParmVarDecls; it's unclear whether
1020  // the standard (C99 6.9.1p10) requires this, but we're following the
1021  // precedent set by gcc.
1022  QualType Ty;
1023  if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1024  Ty = PVD->getOriginalType();
1025  else
1026  Ty = VD->getType();
1027 
1028  if (Ty->isVariablyModifiedType())
1029  EmitVariablyModifiedType(Ty);
1030  }
1031  // Emit a location at the end of the prologue.
1032  if (CGDebugInfo *DI = getDebugInfo())
1033  DI->EmitLocation(Builder, StartLoc);
1034 
1035  // TODO: Do we need to handle this in two places like we do with
1036  // target-features/target-cpu?
1037  if (CurFuncDecl)
1038  if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1039  LargestVectorWidth = VecWidth->getVectorWidth();
1040 }
1041 
1042 void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
1043  incrementProfileCounter(Body);
1044  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
1045  EmitCompoundStmtWithoutScope(*S);
1046  else
1047  EmitStmt(Body);
1048 }
1049 
1050 /// When instrumenting to collect profile data, the counts for some blocks
1051 /// such as switch cases need to not include the fall-through counts, so
1052 /// emit a branch around the instrumentation code. When not instrumenting,
1053 /// this just calls EmitBlock().
1054 void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
1055  const Stmt *S) {
1056  llvm::BasicBlock *SkipCountBB = nullptr;
1058  // When instrumenting for profiling, the fallthrough to certain
1059  // statements needs to skip over the instrumentation code so that we
1060  // get an accurate count.
1061  SkipCountBB = createBasicBlock("skipcount");
1062  EmitBranch(SkipCountBB);
1063  }
1064  EmitBlock(BB);
1065  uint64_t CurrentCount = getCurrentProfileCount();
1068  if (SkipCountBB)
1069  EmitBlock(SkipCountBB);
1070 }
1071 
1072 /// Tries to mark the given function nounwind based on the
1073 /// non-existence of any throwing calls within it. We believe this is
1074 /// lightweight enough to do at -O0.
1075 static void TryMarkNoThrow(llvm::Function *F) {
1076  // LLVM treats 'nounwind' on a function as part of the type, so we
1077  // can't do this on functions that can be overwritten.
1078  if (F->isInterposable()) return;
1079 
1080  for (llvm::BasicBlock &BB : *F)
1081  for (llvm::Instruction &I : BB)
1082  if (I.mayThrow())
1083  return;
1084 
1085  F->setDoesNotThrow();
1086 }
1087 
1088 QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
1089  FunctionArgList &Args) {
1090  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1091  QualType ResTy = FD->getReturnType();
1092 
1093  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
1094  if (MD && MD->isInstance()) {
1095  if (CGM.getCXXABI().HasThisReturn(GD))
1096  ResTy = MD->getThisType();
1097  else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
1098  ResTy = CGM.getContext().VoidPtrTy;
1099  CGM.getCXXABI().buildThisParam(*this, Args);
1100  }
1101 
1102  // The base version of an inheriting constructor whose constructed base is a
1103  // virtual base is not passed any arguments (because it doesn't actually call
1104  // the inherited constructor).
1105  bool PassedParams = true;
1106  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
1107  if (auto Inherited = CD->getInheritedConstructor())
1108  PassedParams =
1109  getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
1110 
1111  if (PassedParams) {
1112  for (auto *Param : FD->parameters()) {
1113  Args.push_back(Param);
1114  if (!Param->hasAttr<PassObjectSizeAttr>())
1115  continue;
1116 
1117  auto *Implicit = ImplicitParamDecl::Create(
1118  getContext(), Param->getDeclContext(), Param->getLocation(),
1119  /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
1120  SizeArguments[Param] = Implicit;
1121  Args.push_back(Implicit);
1122  }
1123  }
1124 
1125  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
1126  CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
1127 
1128  return ResTy;
1129 }
1130 
1131 static bool
1132 shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
1133  const ASTContext &Context) {
1134  QualType T = FD->getReturnType();
1135  // Avoid the optimization for functions that return a record type with a
1136  // trivial destructor or another trivially copyable type.
1137  if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
1138  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1139  return !ClassDecl->hasTrivialDestructor();
1140  }
1141  return !T.isTriviallyCopyableType(Context);
1142 }
1143 
1144 void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1145  const CGFunctionInfo &FnInfo) {
1146  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1147  CurGD = GD;
1148 
1149  FunctionArgList Args;
1150  QualType ResTy = BuildFunctionArgList(GD, Args);
1151 
1152  // Check if we should generate debug info for this function.
1153  if (FD->hasAttr<NoDebugAttr>())
1154  DebugInfo = nullptr; // disable debug info indefinitely for this function
1155 
1156  // The function might not have a body if we're generating thunks for a
1157  // function declaration.
1158  SourceRange BodyRange;
1159  if (Stmt *Body = FD->getBody())
1160  BodyRange = Body->getSourceRange();
1161  else
1162  BodyRange = FD->getLocation();
1163  CurEHLocation = BodyRange.getEnd();
1164 
1165  // Use the location of the start of the function to determine where
1166  // the function definition is located. By default use the location
1167  // of the declaration as the location for the subprogram. A function
1168  // may lack a declaration in the source code if it is created by code
1169  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1170  SourceLocation Loc = FD->getLocation();
1171 
1172  // If this is a function specialization then use the pattern body
1173  // as the location for the function.
1174  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1175  if (SpecDecl->hasBody(SpecDecl))
1176  Loc = SpecDecl->getLocation();
1177 
1178  Stmt *Body = FD->getBody();
1179 
1180  // Initialize helper which will detect jumps which can cause invalid lifetime
1181  // markers.
1182  if (Body && ShouldEmitLifetimeMarkers)
1183  Bypasses.Init(Body);
1184 
1185  // Emit the standard function prologue.
1186  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1187 
1188  // Generate the body of the function.
1189  PGO.assignRegionCounters(GD, CurFn);
1190  if (isa<CXXDestructorDecl>(FD))
1191  EmitDestructorBody(Args);
1192  else if (isa<CXXConstructorDecl>(FD))
1193  EmitConstructorBody(Args);
1194  else if (getLangOpts().CUDA &&
1195  !getLangOpts().CUDAIsDevice &&
1196  FD->hasAttr<CUDAGlobalAttr>())
1197  CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1198  else if (isa<CXXMethodDecl>(FD) &&
1199  cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1200  // The lambda static invoker function is special, because it forwards or
1201  // clones the body of the function call operator (but is actually static).
1202  EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1203  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
1204  (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1205  cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
1206  // Implicit copy-assignment gets the same special treatment as implicit
1207  // copy-constructors.
1208  emitImplicitAssignmentOperatorBody(Args);
1209  } else if (Body) {
1210  EmitFunctionBody(Body);
1211  } else
1212  llvm_unreachable("no definition for emitted function");
1213 
1214  // C++11 [stmt.return]p2:
1215  // Flowing off the end of a function [...] results in undefined behavior in
1216  // a value-returning function.
1217  // C11 6.9.1p12:
1218  // If the '}' that terminates a function is reached, and the value of the
1219  // function call is used by the caller, the behavior is undefined.
1220  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1221  !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1222  bool ShouldEmitUnreachable =
1223  CGM.getCodeGenOpts().StrictReturn ||
1224  shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
1225  if (SanOpts.has(SanitizerKind::Return)) {
1226  SanitizerScope SanScope(this);
1227  llvm::Value *IsFalse = Builder.getFalse();
1228  EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
1229  SanitizerHandler::MissingReturn,
1230  EmitCheckSourceLocation(FD->getLocation()), None);
1231  } else if (ShouldEmitUnreachable) {
1232  if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1233  EmitTrapCall(llvm::Intrinsic::trap);
1234  }
1235  if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
1236  Builder.CreateUnreachable();
1237  Builder.ClearInsertionPoint();
1238  }
1239  }
1240 
1241  // Emit the standard function epilogue.
1242  FinishFunction(BodyRange.getEnd());
1243 
1244  // If we haven't marked the function nothrow through other means, do
1245  // a quick pass now to see if we can.
1246  if (!CurFn->doesNotThrow())
1247  TryMarkNoThrow(CurFn);
1248 }
1249 
1250 /// ContainsLabel - Return true if the statement contains a label in it. If
1251 /// this statement is not executed normally, the absence of a label means
1252 /// that we can just remove the code.
1253 bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1254  // Null statement, not a label!
1255  if (!S) return false;
1256 
1257  // If this is a label, we have to emit the code, consider something like:
1258  // if (0) { ... foo: bar(); } goto foo;
1259  //
1260  // TODO: If anyone cared, we could track __label__'s, since we know that you
1261  // can't jump to one from outside their declared region.
1262  if (isa<LabelStmt>(S))
1263  return true;
1264 
1265  // If this is a case/default statement, and we haven't seen a switch, we have
1266  // to emit the code.
1267  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1268  return true;
1269 
1270  // If this is a switch statement, we want to ignore cases below it.
1271  if (isa<SwitchStmt>(S))
1272  IgnoreCaseStmts = true;
1273 
1274  // Scan subexpressions for verboten labels.
1275  for (const Stmt *SubStmt : S->children())
1276  if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1277  return true;
1278 
1279  return false;
1280 }
1281 
1282 /// containsBreak - Return true if the statement contains a break out of it.
1283 /// If the statement (recursively) contains a switch or loop with a break
1284 /// inside of it, this is fine.
1285 bool CodeGenFunction::containsBreak(const Stmt *S) {
1286  // Null statement, not a label!
1287  if (!S) return false;
1288 
1289  // If this is a switch or loop that defines its own break scope, then we can
1290  // include it and anything inside of it.
1291  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1292  isa<ForStmt>(S))
1293  return false;
1294 
1295  if (isa<BreakStmt>(S))
1296  return true;
1297 
1298  // Scan subexpressions for verboten breaks.
1299  for (const Stmt *SubStmt : S->children())
1300  if (containsBreak(SubStmt))
1301  return true;
1302 
1303  return false;
1304 }
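// For example (illustrative): "if (x) break;" contains a break that escapes
// the statement, so this returns true, while "while (x) { break; }" returns
// false because the loop owns its own break scope.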
1305 
1306 bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1307  if (!S) return false;
1308 
1309  // Some statement kinds add a scope and thus never add a decl to the current
1310  // scope. Note, this list is longer than the list of statements that might
1311  // have an unscoped decl nested within them, but this way is conservatively
1312  // correct even if more statement kinds are added.
1313  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
1314  isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
1315  isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1316  isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1317  return false;
1318 
1319  if (isa<DeclStmt>(S))
1320  return true;
1321 
1322  for (const Stmt *SubStmt : S->children())
1323  if (mightAddDeclToScope(SubStmt))
1324  return true;
1325 
1326  return false;
1327 }
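// For example (illustrative): "int i = 0;" is a DeclStmt in the current
// scope, so this returns true; "{ int i = 0; }" returns false because the
// compound statement introduces its own scope.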
1328 
1329 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1330 /// to a constant, or if it does but contains a label, return false. If it
1331 /// constant folds return true and set the boolean result in Result.
1332 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1333  bool &ResultBool,
1334  bool AllowLabels) {
1335  llvm::APSInt ResultInt;
1336  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1337  return false;
1338 
1339  ResultBool = ResultInt.getBoolValue();
1340  return true;
1341 }
1342 
1343 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1344 /// to a constant, or if it does but contains a label, return false. If it
1345 /// constant folds return true and set the folded value.
1346 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1347  llvm::APSInt &ResultInt,
1348  bool AllowLabels) {
1349  // FIXME: Rename and handle conversion of other evaluatable things
1350  // to bool.
1351  Expr::EvalResult Result;
1352  if (!Cond->EvaluateAsInt(Result, getContext()))
1353  return false; // Not foldable, not integer or not fully evaluatable.
1354 
1355  llvm::APSInt Int = Result.Val.getInt();
1356  if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1357  return false; // Contains a label.
1358 
1359  ResultInt = Int;
1360  return true;
1361 }
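// For example (illustrative): a condition like "sizeof(int) == 4" or "1 && 2"
// folds to a constant here, while "x > 0" for a non-constant x does not, and
// a folded condition that still contains a label is rejected so the labelled
// code is not silently dropped.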
1362 
1363 
1364 
1365 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1366 /// statement) to the specified blocks. Based on the condition, this might try
1367 /// to simplify the codegen of the conditional based on the branch.
1368 ///
1369 void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
1370  llvm::BasicBlock *TrueBlock,
1371  llvm::BasicBlock *FalseBlock,
1372  uint64_t TrueCount) {
1373  Cond = Cond->IgnoreParens();
1374 
1375  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1376 
1377  // Handle X && Y in a condition.
1378  if (CondBOp->getOpcode() == BO_LAnd) {
1379  // If we have "1 && X", simplify the code. "0 && X" would have constant
1380  // folded if the case was simple enough.
1381  bool ConstantBool = false;
1382  if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1383  ConstantBool) {
1384  // br(1 && X) -> br(X).
1385  incrementProfileCounter(CondBOp);
1386  return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1387  TrueCount);
1388  }
1389 
1390  // If we have "X && 1", simplify the code to use an uncond branch.
1391  // "X && 0" would have been constant folded to 0.
1392  if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1393  ConstantBool) {
1394  // br(X && 1) -> br(X).
1395  return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1396  TrueCount);
1397  }
1398 
1399  // Emit the LHS as a conditional. If the LHS conditional is false, we
1400  // want to jump to the FalseBlock.
1401  llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1402  // The counter tells us how often we evaluate RHS, and all of TrueCount
1403  // can be propagated to that branch.
1404  uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1405 
1406  ConditionalEvaluation eval(*this);
1407  {
1408  ApplyDebugLocation DL(*this, Cond);
1409  EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
1410  EmitBlock(LHSTrue);
1411  }
1412 
1413  incrementProfileCounter(CondBOp);
1414  setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1415 
1416  // Any temporaries created here are conditional.
1417  eval.begin(*this);
1418  EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
1419  eval.end(*this);
1420 
1421  return;
1422  }
1423 
1424  if (CondBOp->getOpcode() == BO_LOr) {
1425  // If we have "0 || X", simplify the code. "1 || X" would have constant
1426  // folded if the case was simple enough.
1427  bool ConstantBool = false;
1428  if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1429  !ConstantBool) {
1430  // br(0 || X) -> br(X).
1431  incrementProfileCounter(CondBOp);
1432  return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1433  TrueCount);
1434  }
1435 
1436  // If we have "X || 0", simplify the code to use an uncond branch.
1437  // "X || 1" would have been constant folded to 1.
1438  if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1439  !ConstantBool) {
1440  // br(X || 0) -> br(X).
1441  return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1442  TrueCount);
1443  }
1444 
1445  // Emit the LHS as a conditional. If the LHS conditional is true, we
1446  // want to jump to the TrueBlock.
1447  llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1448  // We have the count for entry to the RHS and for the whole expression
1449  // being true, so we can divvy up True count between the short circuit and
1450  // the RHS.
1451  uint64_t LHSCount =
1452  getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1453  uint64_t RHSCount = TrueCount - LHSCount;
1454 
1455  ConditionalEvaluation eval(*this);
1456  {
1457  ApplyDebugLocation DL(*this, Cond);
1458  EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
1459  EmitBlock(LHSFalse);
1460  }
1461 
1462  incrementProfileCounter(CondBOp);
1463  setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1464 
1465  // Any temporaries created here are conditional.
1466  eval.begin(*this);
1467  EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);
1468 
1469  eval.end(*this);
1470 
1471  return;
1472  }
1473  }
1474 
1475  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1476  // br(!x, t, f) -> br(x, f, t)
1477  if (CondUOp->getOpcode() == UO_LNot) {
1478  // Negate the count.
1479  uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1480  // Negate the condition and swap the destination blocks.
1481  return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1482  FalseCount);
1483  }
1484  }
1485 
1486  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1487  // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1488  llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1489  llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1490 
1491  ConditionalEvaluation cond(*this);
1492  EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1493  getProfileCount(CondOp));
1494 
1495  // When computing PGO branch weights, we only know the overall count for
1496  // the true block. This code is essentially doing tail duplication of the
1497  // naive code-gen, introducing new edges for which counts are not
1498  // available. Divide the counts proportionally between the LHS and RHS of
1499  // the conditional operator.
1500  uint64_t LHSScaledTrueCount = 0;
1501  if (TrueCount) {
1502  double LHSRatio =
1503  getProfileCount(CondOp) / (double)getCurrentProfileCount();
1504  LHSScaledTrueCount = TrueCount * LHSRatio;
1505  }
1506 
1507  cond.begin(*this);
1508  EmitBlock(LHSBlock);
1509  incrementProfileCounter(CondOp);
1510  {
1511  ApplyDebugLocation DL(*this, Cond);
1512  EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1513  LHSScaledTrueCount);
1514  }
1515  cond.end(*this);
1516 
1517  cond.begin(*this);
1518  EmitBlock(RHSBlock);
1519  EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1520  TrueCount - LHSScaledTrueCount);
1521  cond.end(*this);
1522 
1523  return;
1524  }
1525 
1526  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1527  // Conditional operator handling can give us a throw expression as a
1528  // condition for a case like:
1529  // br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f)
1530  // Fold this to:
1531  // br(c, throw x, br(y, t, f))
1532  EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1533  return;
1534  }
1535 
1536  // If the branch has a condition wrapped by __builtin_unpredictable,
1537  // create metadata that specifies that the branch is unpredictable.
1538  // Don't bother if not optimizing because that metadata would not be used.
1539  llvm::MDNode *Unpredictable = nullptr;
1540  auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1541  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1542  auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1543  if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1544  llvm::MDBuilder MDHelper(getLLVMContext());
1545  Unpredictable = MDHelper.createUnpredictable();
1546  }
1547  }
1548 
1549  // Create branch weights based on the number of times we get here and the
1550  // number of times the condition should be true.
1551  uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
1552  llvm::MDNode *Weights =
1553  createProfileWeights(TrueCount, CurrentCount - TrueCount);
1554 
1555  // Emit the code with the fully general case.
1556  llvm::Value *CondV;
1557  {
1558  ApplyDebugLocation DL(*this, Cond);
1559  CondV = EvaluateExprAsBool(Cond);
1560  }
1561  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
1562 }
1563 
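As a point of reference, a small hypothetical translation unit (function and variable names are illustrative) exercising the folds above: the first condition takes the UO_LNot swap, the second the ConditionalOperator tail duplication, and the third the __builtin_unpredictable metadata path (metadata is only attached when optimizing).

    int demo(int a, int b, bool c) {
      if (!c)                               // UO_LNot: condition negated, destination blocks swapped
        return 1;
      if (c ? a > 0 : b > 0)                // ConditionalOperator: each arm branches to t/f separately
        return 2;
      if (__builtin_unpredictable(a == b))  // branch tagged with !unpredictable metadata at -O1 and above
        return 3;
      return 0;
    }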
1564 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1565 /// specified stmt yet.
1566 void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
1567  CGM.ErrorUnsupported(S, Type);
1568 }
1569 
1570 /// emitNonZeroVLAInit - Emit the "zero" initialization of a
1571 /// variable-length array whose elements have a non-zero bit-pattern.
1572 ///
1573 /// \param baseType the inner-most element type of the array
1574 /// \param src - a char* pointing to the bit-pattern for a single
1575 /// base element of the array
1576 /// \param sizeInChars - the total size of the VLA, in chars
1577 static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
1578  Address dest, Address src,
1579  llvm::Value *sizeInChars) {
1580  CGBuilderTy &Builder = CGF.Builder;
1581 
1582  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
1583  llvm::Value *baseSizeInChars
1584  = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
1585 
1586  Address begin =
1587  Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
1588  llvm::Value *end =
1589  Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");
1590 
1591  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
1592  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
1593  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
1594 
1595  // Make a loop over the VLA. C99 guarantees that the VLA element
1596  // count must be nonzero.
1597  CGF.EmitBlock(loopBB);
1598 
1599  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
1600  cur->addIncoming(begin.getPointer(), originBB);
1601 
1602  CharUnits curAlign =
1603  dest.getAlignment().alignmentOfArrayElement(baseSize);
1604 
1605  // memcpy the individual element bit-pattern.
1606  Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
1607  /*volatile*/ false);
1608 
1609  // Go to the next element.
1610  llvm::Value *next =
1611  Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
1612 
1613  // Leave if that's the end of the VLA.
1614  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
1615  Builder.CreateCondBr(done, contBB, loopBB);
1616  cur->addIncoming(next, loopBB);
1617 
1618  CGF.EmitBlock(contBB);
1619 }
1620 
1621 void
1622 CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
1623  // Ignore empty classes in C++.
1624  if (getLangOpts().CPlusPlus) {
1625  if (const RecordType *RT = Ty->getAs<RecordType>()) {
1626  if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
1627  return;
1628  }
1629  }
1630 
1631  // Cast the dest ptr to the appropriate i8 pointer type.
1632  if (DestPtr.getElementType() != Int8Ty)
1633  DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1634 
1635  // Get size and alignment info for this aggregate.
1636  CharUnits size = getContext().getTypeSizeInChars(Ty);
1637 
1638  llvm::Value *SizeVal;
1639  const VariableArrayType *vla;
1640 
1641  // Don't bother emitting a zero-byte memset.
1642  if (size.isZero()) {
1643  // But note that getTypeInfo returns 0 for a VLA.
1644  if (const VariableArrayType *vlaType =
1645  dyn_cast_or_null<VariableArrayType>(
1646  getContext().getAsArrayType(Ty))) {
1647  auto VlaSize = getVLASize(vlaType);
1648  SizeVal = VlaSize.NumElts;
1649  CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1650  if (!eltSize.isOne())
1651  SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
1652  vla = vlaType;
1653  } else {
1654  return;
1655  }
1656  } else {
1657  SizeVal = CGM.getSize(size);
1658  vla = nullptr;
1659  }
1660 
1661  // If the type contains a pointer to data member we can't memset it to zero.
1662  // Instead, create a null constant and copy it to the destination.
1663  // TODO: there are other patterns besides zero that we can usefully memset,
1664  // like -1, which happens to be the pattern used by member-pointers.
1665  if (!CGM.getTypes().isZeroInitializable(Ty)) {
1666  // For a VLA, emit a single element, then splat that over the VLA.
1667  if (vla) Ty = getContext().getBaseElementType(vla);
1668 
1669  llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
1670 
1671  llvm::GlobalVariable *NullVariable =
1672  new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
1673  /*isConstant=*/true,
1674  llvm::GlobalVariable::PrivateLinkage,
1675  NullConstant, Twine());
1676  CharUnits NullAlign = DestPtr.getAlignment();
1677  NullVariable->setAlignment(NullAlign.getQuantity());
1678  Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
1679  NullAlign);
1680 
1681  if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
1682 
1683  // Get and call the appropriate llvm.memcpy overload.
1684  Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
1685  return;
1686  }
1687 
1688  // Otherwise, just memset the whole thing to zero. This is legal
1689  // because in LLVM, all default initializers (other than the ones we just
1690  // handled above) are guaranteed to have a bit pattern of all zeros.
1691  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
1692 }
1693 
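A sketch of why the memcpy path above exists (struct and function names are hypothetical): under the Itanium C++ ABI a null pointer to data member is the all-ones pattern, so a type containing one cannot be "zeroed" with a memset of 0 and instead gets its null constant copied from a private global.

    struct HasMemPtr {
      int HasMemPtr::*field;   // null value is represented as -1, not 0
    };

    HasMemPtr make() {
      return HasMemPtr();      // null-initialization must produce the non-zero bit pattern
    }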
1694 llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
1695  // Make sure that there is a block for the indirect goto.
1696  if (!IndirectBranch)
1697  GetIndirectGotoBlock();
1698 
1699  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
1700 
1701  // Make sure the indirect branch includes all of the address-taken blocks.
1702  IndirectBranch->addDestination(BB);
1703  return llvm::BlockAddress::get(CurFn, BB);
1704 }
1705 
1706 llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
1707  // If we already made the indirect branch for indirect goto, return its block.
1708  if (IndirectBranch) return IndirectBranch->getParent();
1709 
1710  CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
1711 
1712  // Create the PHI node that indirect gotos will add entries to.
1713  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
1714  "indirect.goto.dest");
1715 
1716  // Create the indirect branch instruction.
1717  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
1718  return IndirectBranch->getParent();
1719 }
1720 
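Both functions above exist for the GNU "labels as values" extension; a hedged example of the source form that reaches them (identifiers are illustrative):

    void dispatch(int op) {
      static void *table[] = { &&handle_add, &&handle_sub };  // each &&label goes through GetAddrOfLabel
      goto *table[op & 1];                                     // lowered via the shared indirectbr block
    handle_add:
      return;
    handle_sub:
      return;
    }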
1721 /// Computes the length of an array in elements, as well as the base
1722 /// element type and a properly-typed first element pointer.
1723 llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
1724  QualType &baseType,
1725  Address &addr) {
1726  const ArrayType *arrayType = origArrayType;
1727 
1728  // If it's a VLA, we have to load the stored size. Note that
1729  // this is the size of the VLA in bytes, not its size in elements.
1730  llvm::Value *numVLAElements = nullptr;
1731  if (isa<VariableArrayType>(arrayType)) {
1732  numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
1733 
1734  // Walk into all VLAs. This doesn't require changes to addr,
1735  // which has type T* where T is the first non-VLA element type.
1736  do {
1737  QualType elementType = arrayType->getElementType();
1738  arrayType = getContext().getAsArrayType(elementType);
1739 
1740  // If we only have VLA components, 'addr' requires no adjustment.
1741  if (!arrayType) {
1742  baseType = elementType;
1743  return numVLAElements;
1744  }
1745  } while (isa<VariableArrayType>(arrayType));
1746 
1747  // We get out here only if we find a constant array type
1748  // inside the VLA.
1749  }
1750 
1751  // We have some number of constant-length arrays, so addr should
1752  // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
1753  // down to the first element of addr.
1754  SmallVector<llvm::Value*, 8> gepIndices;
1755 
1756  // GEP down to the array type.
1757  llvm::ConstantInt *zero = Builder.getInt32(0);
1758  gepIndices.push_back(zero);
1759 
1760  uint64_t countFromCLAs = 1;
1761  QualType eltType;
1762 
1763  llvm::ArrayType *llvmArrayType =
1764  dyn_cast<llvm::ArrayType>(addr.getElementType());
1765  while (llvmArrayType) {
1766  assert(isa<ConstantArrayType>(arrayType));
1767  assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
1768  == llvmArrayType->getNumElements());
1769 
1770  gepIndices.push_back(zero);
1771  countFromCLAs *= llvmArrayType->getNumElements();
1772  eltType = arrayType->getElementType();
1773 
1774  llvmArrayType =
1775  dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
1776  arrayType = getContext().getAsArrayType(arrayType->getElementType());
1777  assert((!llvmArrayType || arrayType) &&
1778  "LLVM and Clang types are out-of-synch");
1779  }
1780 
1781  if (arrayType) {
1782  // From this point onwards, the Clang array type has been emitted
1783  // as some other type (probably a packed struct). Compute the array
1784  // size, and just emit the 'begin' expression as a bitcast.
1785  while (arrayType) {
1786  countFromCLAs *=
1787  cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
1788  eltType = arrayType->getElementType();
1789  arrayType = getContext().getAsArrayType(eltType);
1790  }
1791 
1792  llvm::Type *baseType = ConvertType(eltType);
1793  addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
1794  } else {
1795  // Create the actual GEP.
1796  addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
1797  gepIndices, "array.begin"),
1798  addr.getAlignment());
1799  }
1800 
1801  baseType = eltType;
1802 
1803  llvm::Value *numElements
1804  = llvm::ConstantInt::get(SizeTy, countFromCLAs);
1805 
1806  // If we had any VLA dimensions, factor them in.
1807  if (numVLAElements)
1808  numElements = Builder.CreateNUWMul(numVLAElements, numElements);
1809 
1810  return numElements;
1811 }
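Roughly, the shapes of array the walk above has to cope with (a sketch, not tied to one particular caller): constant dimensions are folded into countFromCLAs, while leading VLA dimensions contribute the run-time numVLAElements factor.

    void fill(int n) {
      int fixed[2][3] = {};   // purely constant: 2 * 3 = 6 elements behind a GEP to the first int
      int mixed[n][4];        // VLA outer dimension: n at run time, times 4 constant elements
      for (int i = 0; i < 4; ++i)
        mixed[0][i] = i;
      (void)fixed;
    }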
1812 
1813 CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
1814  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1815  assert(vla && "type was not a variable array type!");
1816  return getVLASize(vla);
1817 }
1818 
1819 CodeGenFunction::VlaSizePair
1820 CodeGenFunction::getVLASize(const VariableArrayType *type) {
1821  // The number of elements so far; always size_t.
1822  llvm::Value *numElements = nullptr;
1823 
1824  QualType elementType;
1825  do {
1826  elementType = type->getElementType();
1827  llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
1828  assert(vlaSize && "no size for VLA!");
1829  assert(vlaSize->getType() == SizeTy);
1830 
1831  if (!numElements) {
1832  numElements = vlaSize;
1833  } else {
1834  // It's undefined behavior if this wraps around, so mark it that way.
1835  // FIXME: Teach -fsanitize=undefined to trap this.
1836  numElements = Builder.CreateNUWMul(numElements, vlaSize);
1837  }
1838  } while ((type = getContext().getAsVariableArrayType(elementType)));
1839 
1840  return { numElements, elementType };
1841 }
1842 
1843 CodeGenFunction::VlaSizePair
1844 CodeGenFunction::getVLAElements1D(QualType type) {
1845  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1846  assert(vla && "type was not a variable array type!");
1847  return getVLAElements1D(vla);
1848 }
1849 
1850 CodeGenFunction::VlaSizePair
1851 CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
1852  llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
1853  assert(VlaSize && "no size for VLA!");
1854  assert(VlaSize->getType() == SizeTy);
1855  return { VlaSize, Vla->getElementType() };
1856 }
1857 
1858 void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
1859  assert(type->isVariablyModifiedType() &&
1860  "Must pass variably modified type to EmitVLASizes!");
1861 
1862  EnsureInsertPoint();
1863 
1864  // We're going to walk down into the type and look for VLA
1865  // expressions.
1866  do {
1867  assert(type->isVariablyModifiedType());
1868 
1869  const Type *ty = type.getTypePtr();
1870  switch (ty->getTypeClass()) {
1871 
1872 #define TYPE(Class, Base)
1873 #define ABSTRACT_TYPE(Class, Base)
1874 #define NON_CANONICAL_TYPE(Class, Base)
1875 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
1876 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
1877 #include "clang/AST/TypeNodes.def"
1878  llvm_unreachable("unexpected dependent type!");
1879 
1880  // These types are never variably-modified.
1881  case Type::Builtin:
1882  case Type::Complex:
1883  case Type::Vector:
1884  case Type::ExtVector:
1885  case Type::Record:
1886  case Type::Enum:
1887  case Type::Elaborated:
1888  case Type::TemplateSpecialization:
1889  case Type::ObjCTypeParam:
1890  case Type::ObjCObject:
1891  case Type::ObjCInterface:
1892  case Type::ObjCObjectPointer:
1893  llvm_unreachable("type class is never variably-modified!");
1894 
1895  case Type::Adjusted:
1896  type = cast<AdjustedType>(ty)->getAdjustedType();
1897  break;
1898 
1899  case Type::Decayed:
1900  type = cast<DecayedType>(ty)->getPointeeType();
1901  break;
1902 
1903  case Type::Pointer:
1904  type = cast<PointerType>(ty)->getPointeeType();
1905  break;
1906 
1907  case Type::BlockPointer:
1908  type = cast<BlockPointerType>(ty)->getPointeeType();
1909  break;
1910 
1911  case Type::LValueReference:
1912  case Type::RValueReference:
1913  type = cast<ReferenceType>(ty)->getPointeeType();
1914  break;
1915 
1916  case Type::MemberPointer:
1917  type = cast<MemberPointerType>(ty)->getPointeeType();
1918  break;
1919 
1920  case Type::ConstantArray:
1921  case Type::IncompleteArray:
1922  // Losing element qualification here is fine.
1923  type = cast<ArrayType>(ty)->getElementType();
1924  break;
1925 
1926  case Type::VariableArray: {
1927  // Losing element qualification here is fine.
1928  const VariableArrayType *vat = cast<VariableArrayType>(ty);
1929 
1930  // Unknown size indication requires no size computation.
1931  // Otherwise, evaluate and record it.
1932  if (const Expr *size = vat->getSizeExpr()) {
1933  // It's possible that we might have emitted this already,
1934  // e.g. with a typedef and a pointer to it.
1935  llvm::Value *&entry = VLASizeMap[size];
1936  if (!entry) {
1937  llvm::Value *Size = EmitScalarExpr(size);
1938 
1939  // C11 6.7.6.2p5:
1940  // If the size is an expression that is not an integer constant
1941  // expression [...] each time it is evaluated it shall have a value
1942  // greater than zero.
1943  if (SanOpts.has(SanitizerKind::VLABound) &&
1944  size->getType()->isSignedIntegerType()) {
1945  SanitizerScope SanScope(this);
1946  llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
1947  llvm::Constant *StaticArgs[] = {
1948  EmitCheckSourceLocation(size->getBeginLoc()),
1949  EmitCheckTypeDescriptor(size->getType())};
1950  EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
1951  SanitizerKind::VLABound),
1952  SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
1953  }
1954 
1955  // Always zexting here would be wrong if it weren't
1956  // undefined behavior to have a negative bound.
1957  entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
1958  }
1959  }
1960  type = vat->getElementType();
1961  break;
1962  }
1963 
1964  case Type::FunctionProto:
1965  case Type::FunctionNoProto:
1966  type = cast<FunctionType>(ty)->getReturnType();
1967  break;
1968 
1969  case Type::Paren:
1970  case Type::TypeOf:
1971  case Type::UnaryTransform:
1972  case Type::Attributed:
1973  case Type::SubstTemplateTypeParm:
1974  case Type::PackExpansion:
1975  case Type::MacroQualified:
1976  // Keep walking after single level desugaring.
1977  type = type.getSingleStepDesugaredType(getContext());
1978  break;
1979 
1980  case Type::Typedef:
1981  case Type::Decltype:
1982  case Type::Auto:
1983  case Type::DeducedTemplateSpecialization:
1984  // Stop walking: nothing to do.
1985  return;
1986 
1987  case Type::TypeOfExpr:
1988  // Stop walking: emit typeof expression.
1989  EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
1990  return;
1991 
1992  case Type::Atomic:
1993  type = cast<AtomicType>(ty)->getValueType();
1994  break;
1995 
1996  case Type::Pipe:
1997  type = cast<PipeType>(ty)->getElementType();
1998  break;
1999  }
2000  } while (type->isVariablyModifiedType());
2001 }
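A hedged illustration of the walk above (names are illustrative): each VLA bound is evaluated once and cached in VLASizeMap keyed on the size expression, so reusing a variably modified typedef does not re-evaluate it, and with -fsanitize=vla-bound a "size > 0" check is emitted for signed bounds.

    void use_rows(int n) {
      typedef int row_t[n * 2];   // the size expression 'n * 2' is emitted and recorded here
      row_t a;                    // reuses the recorded size
      row_t b;                    // likewise; no second evaluation of 'n * 2'
      a[0] = b[0] = 0;
    }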
2002 
2003 Address CodeGenFunction::EmitVAListRef(const Expr* E) {
2004  if (getContext().getBuiltinVaListType()->isArrayType())
2005  return EmitPointerWithAlignment(E);
2006  return EmitLValue(E).getAddress();
2007 }
2008 
2009 Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2010  return EmitLValue(E).getAddress();
2011 }
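For reference, a variadic function whose va_arg uses reach EmitVAListRef: on targets where __builtin_va_list is an array type (x86-64 SysV, for instance) the reference decays to a pointer via EmitPointerWithAlignment, otherwise it is emitted as an ordinary l-value. Names below are illustrative.

    #include <stdarg.h>

    int first_int(int count, ...) {
      va_list ap;
      va_start(ap, count);           // uses of 'ap' below are emitted through EmitVAListRef
      int value = va_arg(ap, int);
      va_end(ap);
      return value;
    }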
2012 
2013 void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2014  const APValue &Init) {
2015  assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2016  if (CGDebugInfo *Dbg = getDebugInfo())
2017  if (CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo)
2018  Dbg->EmitGlobalVariable(E->getDecl(), Init);
2019 }
2020 
2021 CodeGenFunction::PeepholeProtection
2022 CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2023  // At the moment, the only aggressive peephole we do in IR gen
2024  // is trunc(zext) folding, but if we add more, we can easily
2025  // extend this protection.
2026 
2027  if (!rvalue.isScalar()) return PeepholeProtection();
2028  llvm::Value *value = rvalue.getScalarVal();
2029  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2030 
2031  // Just make an extra bitcast.
2032  assert(HaveInsertPoint());
2033  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2034  Builder.GetInsertBlock());
2035 
2036  PeepholeProtection protection;
2037  protection.Inst = inst;
2038  return protection;
2039 }
2040 
2041 void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2042  if (!protection.Inst) return;
2043 
2044  // In theory, we could try to duplicate the peepholes now, but whatever.
2045  protection.Inst->eraseFromParent();
2046 }
2047 
2048 void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
2049  QualType Ty, SourceLocation Loc,
2050  SourceLocation AssumptionLoc,
2051  llvm::Value *Alignment,
2052  llvm::Value *OffsetValue) {
2053  llvm::Value *TheCheck;
2054  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2055  CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
2056  if (SanOpts.has(SanitizerKind::Alignment)) {
2057  EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2058  OffsetValue, TheCheck, Assumption);
2059  }
2060 }
2061 
2062 void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
2063  QualType Ty, SourceLocation Loc,
2064  SourceLocation AssumptionLoc,
2065  unsigned Alignment,
2066  llvm::Value *OffsetValue) {
2067  llvm::Value *TheCheck;
2068  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2069  CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
2070  if (SanOpts.has(SanitizerKind::Alignment)) {
2071  llvm::Value *AlignmentVal = llvm::ConstantInt::get(IntPtrTy, Alignment);
2072  EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, AlignmentVal,
2073  OffsetValue, TheCheck, Assumption);
2074  }
2075 }
2076 
2077 void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
2078  const Expr *E,
2079  SourceLocation AssumptionLoc,
2080  unsigned Alignment,
2081  llvm::Value *OffsetValue) {
2082  if (auto *CE = dyn_cast<CastExpr>(E))
2083  E = CE->getSubExprAsWritten();
2084  QualType Ty = E->getType();
2085  SourceLocation Loc = E->getExprLoc();
2086 
2087  EmitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2088  OffsetValue);
2089 }
2090 
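The usual source-level entry points for these overloads are __builtin_assume_aligned and the assume_aligned attribute; a small sketch (function name hypothetical). With -fsanitize=alignment the assumed alignment is additionally verified at run time by EmitAlignmentAssumptionCheck further down.

    float *aligned_view(float *p) {
      float *q = (float *)__builtin_assume_aligned(p, 64);       // alignment only
      float *r = (float *)__builtin_assume_aligned(p, 64, 16);   // alignment plus a byte offset
      return q + (r - q);
    }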
2091 llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2092  llvm::Value *AnnotatedVal,
2093  StringRef AnnotationStr,
2094  SourceLocation Location) {
2095  llvm::Value *Args[4] = {
2096  AnnotatedVal,
2097  Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
2098  Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
2099  CGM.EmitAnnotationLineNo(Location)
2100  };
2101  return Builder.CreateCall(AnnotationFn, Args);
2102 }
2103 
2104 void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2105  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2106  // FIXME We create a new bitcast for every annotation because that's what
2107  // llvm-gcc was doing.
2108  for (const auto *I : D->specific_attrs<AnnotateAttr>())
2109  EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
2110  Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
2111  I->getAnnotation(), D->getLocation());
2112 }
2113 
2114 Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2115  Address Addr) {
2116  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2117  llvm::Value *V = Addr.getPointer();
2118  llvm::Type *VTy = V->getType();
2119  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2120  CGM.Int8PtrTy);
2121 
2122  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2123  // FIXME Always emit the cast inst so we can differentiate between
2124  // annotation on the first field of a struct and annotation on the struct
2125  // itself.
2126  if (VTy != CGM.Int8PtrTy)
2127  V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
2128  V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
2129  V = Builder.CreateBitCast(V, VTy);
2130  }
2131 
2132  return Address(V, Addr.getAlignment());
2133 }
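The annotate attribute is what feeds these helpers: annotations on local variables lower to llvm.var.annotation and annotations on fields to llvm.ptr.annotation, roughly as in this sketch (type and field names hypothetical):

    struct Packet {
      int flags __attribute__((annotate("checked")));    // EmitFieldAnnotations / llvm.ptr.annotation
    };

    int read_flags(struct Packet *p) {
      int scratch __attribute__((annotate("temp"))) = 0; // EmitVarAnnotations / llvm.var.annotation
      return p->flags + scratch;
    }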
2134 
2135 CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2136 
2137 CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2138  : CGF(CGF) {
2139  assert(!CGF->IsSanitizerScope);
2140  CGF->IsSanitizerScope = true;
2141 }
2142 
2143 CodeGenFunction::SanitizerScope::~SanitizerScope() {
2144  CGF->IsSanitizerScope = false;
2145 }
2146 
2147 void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2148  const llvm::Twine &Name,
2149  llvm::BasicBlock *BB,
2150  llvm::BasicBlock::iterator InsertPt) const {
2151  LoopStack.InsertHelper(I, Name, BB, InsertPt);
2152  if (IsSanitizerScope)
2153  CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
2154 }
2155 
2156 void CGBuilderInserter::InsertHelper(
2157  llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2158  llvm::BasicBlock::iterator InsertPt) const {
2159  llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2160  if (CGF)
2161  CGF->InsertHelper(I, Name, BB, InsertPt);
2162 }
2163 
2164 static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
2165  CodeGenModule &CGM, const FunctionDecl *FD,
2166  std::string &FirstMissing) {
2167  // If there aren't any required features listed then go ahead and return.
2168  if (ReqFeatures.empty())
2169  return false;
2170 
2171  // Now build up the set of caller features and verify that all the required
2172  // features are there.
2173  llvm::StringMap<bool> CallerFeatureMap;
2174  CGM.getFunctionFeatureMap(CallerFeatureMap, GlobalDecl().getWithDecl(FD));
2175 
2176  // If we have at least one of the features in the feature list return
2177  // true, otherwise return false.
2178  return std::all_of(
2179  ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
2180  SmallVector<StringRef, 1> OrFeatures;
2181  Feature.split(OrFeatures, '|');
2182  return llvm::any_of(OrFeatures, [&](StringRef Feature) {
2183  if (!CallerFeatureMap.lookup(Feature)) {
2184  FirstMissing = Feature.str();
2185  return false;
2186  }
2187  return true;
2188  });
2189  });
2190 }
2191 
2192 // Emits an error if we don't have a valid set of target features for the
2193 // called function.
2194 void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2195  const FunctionDecl *TargetDecl) {
2196  return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2197 }
2198 
2199 // Emits an error if we don't have a valid set of target features for the
2200 // called function.
2201 void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2202  const FunctionDecl *TargetDecl) {
2203  // Early exit if this is an indirect call.
2204  if (!TargetDecl)
2205  return;
2206 
2207  // Get the current enclosing function if it exists. If it doesn't
2208  // we can't check the target features anyhow.
2209  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl);
2210  if (!FD)
2211  return;
2212 
2213  // Grab the required features for the call. For a builtin this is listed in
2214  // the td file with the default cpu, for an always_inline function this is any
2215  // listed cpu and any listed features.
2216  unsigned BuiltinID = TargetDecl->getBuiltinID();
2217  std::string MissingFeature;
2218  if (BuiltinID) {
2219  SmallVector<StringRef, 1> ReqFeatures;
2220  const char *FeatureList =
2221  CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2222  // Return if the builtin doesn't have any required features.
2223  if (!FeatureList || StringRef(FeatureList) == "")
2224  return;
2225  StringRef(FeatureList).split(ReqFeatures, ',');
2226  if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2227  CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2228  << TargetDecl->getDeclName()
2229  << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2230 
2231  } else if (TargetDecl->hasAttr<TargetAttr>() ||
2232  TargetDecl->hasAttr<CPUSpecificAttr>()) {
2233  // Get the required features for the callee.
2234 
2235  const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2236  TargetAttr::ParsedTargetAttr ParsedAttr = CGM.filterFunctionTargetAttrs(TD);
2237 
2238  SmallVector<StringRef, 1> ReqFeatures;
2239  llvm::StringMap<bool> CalleeFeatureMap;
2240  CGM.getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2241 
2242  for (const auto &F : ParsedAttr.Features) {
2243  if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2244  ReqFeatures.push_back(StringRef(F).substr(1));
2245  }
2246 
2247  for (const auto &F : CalleeFeatureMap) {
2248  // Only positive features are "required".
2249  if (F.getValue())
2250  ReqFeatures.push_back(F.getKey());
2251  }
2252  if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2253  CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2254  << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2255  }
2256 }
2257 
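A hedged example of the diagnostics driven by the check above (names hypothetical): calling a target-specific builtin, or an always_inline function carrying a target attribute, from a caller that lacks the required features yields err_builtin_needs_feature or err_function_needs_feature respectively.

    __attribute__((target("avx2"), always_inline))
    static inline int wide_op(int x) { return x + 1; }

    int caller(int x) {
      return wide_op(x);   // rejected unless the caller (or -mavx2) provides the 'avx2' feature
    }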
2258 void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2259  if (!CGM.getCodeGenOpts().SanitizeStats)
2260  return;
2261 
2262  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2263  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2264  CGM.getSanStats().create(IRB, SSK);
2265 }
2266 
2267 llvm::Value *
2268 CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
2269  llvm::Value *Condition = nullptr;
2270 
2271  if (!RO.Conditions.Architecture.empty())
2272  Condition = EmitX86CpuIs(RO.Conditions.Architecture);
2273 
2274  if (!RO.Conditions.Features.empty()) {
2275  llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
2276  Condition =
2277  Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2278  }
2279  return Condition;
2280 }
2281 
2282 static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2283  llvm::Function *Resolver,
2284  CGBuilderTy &Builder,
2285  llvm::Function *FuncToReturn,
2286  bool SupportsIFunc) {
2287  if (SupportsIFunc) {
2288  Builder.CreateRet(FuncToReturn);
2289  return;
2290  }
2291 
2292  llvm::SmallVector<llvm::Value *, 10> Args;
2293  llvm::for_each(Resolver->args(),
2294  [&](llvm::Argument &Arg) { Args.push_back(&Arg); });
2295 
2296  llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2297  Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2298 
2299  if (Resolver->getReturnType()->isVoidTy())
2300  Builder.CreateRetVoid();
2301  else
2302  Builder.CreateRet(Result);
2303 }
2304 
2305 void CodeGenFunction::EmitMultiVersionResolver(
2306  llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2307  assert((getContext().getTargetInfo().getTriple().getArch() ==
2308  llvm::Triple::x86 ||
2309  getContext().getTargetInfo().getTriple().getArch() ==
2310  llvm::Triple::x86_64) &&
2311  "Only implemented for x86 targets");
2312 
2313  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2314 
2315  // Main function's basic block.
2316  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2317  Builder.SetInsertPoint(CurBlock);
2318  EmitX86CpuInit();
2319 
2320  for (const MultiVersionResolverOption &RO : Options) {
2321  Builder.SetInsertPoint(CurBlock);
2322  llvm::Value *Condition = FormResolverCondition(RO);
2323 
2324  // The 'default' or 'generic' case.
2325  if (!Condition) {
2326  assert(&RO == Options.end() - 1 &&
2327  "Default or Generic case must be last");
2328  CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2329  SupportsIFunc);
2330  return;
2331  }
2332 
2333  llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2334  CGBuilderTy RetBuilder(*this, RetBlock);
2335  CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2336  SupportsIFunc);
2337  CurBlock = createBasicBlock("resolver_else", Resolver);
2338  Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2339  }
2340 
2341  // If no generic/default, emit an unreachable.
2342  Builder.SetInsertPoint(CurBlock);
2343  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2344  TrapCall->setDoesNotReturn();
2345  TrapCall->setDoesNotThrow();
2346  Builder.CreateUnreachable();
2347  Builder.ClearInsertionPoint();
2348 }
2349 
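For context, the x86 function-multiversioning source form that produces such a resolver (a sketch; function names illustrative): each target-specific definition plus the mandatory default becomes one MultiVersionResolverOption, and dispatch goes through an ifunc where supported, otherwise through the musttail forwarding calls built above.

    __attribute__((target("avx2")))    int work(void) { return 2; }
    __attribute__((target("sse4.2")))  int work(void) { return 1; }
    __attribute__((target("default"))) int work(void) { return 0; }

    int run(void) {
      return work();   // resolved via ifunc at load time, or per call through the resolver
    }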
2350 // Loc - where the diagnostic will point, where in the source code this
2351 // alignment has failed.
2352 // SecondaryLoc - if present (will be present if sufficiently different from
2353 // Loc), the diagnostic will additionally point a "Note:" to this location.
2354 // It should be the location where the __attribute__((assume_aligned))
2355 // was written, e.g. on the function declaration.
2356 void CodeGenFunction::EmitAlignmentAssumptionCheck(
2357  llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2358  SourceLocation SecondaryLoc, llvm::Value *Alignment,
2359  llvm::Value *OffsetValue, llvm::Value *TheCheck,
2360  llvm::Instruction *Assumption) {
2361  assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2362  cast<llvm::CallInst>(Assumption)->getCalledValue() ==
2363  llvm::Intrinsic::getDeclaration(
2364  Builder.GetInsertBlock()->getParent()->getParent(),
2365  llvm::Intrinsic::assume) &&
2366  "Assumption should be a call to llvm.assume().");
2367  assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2368  "Assumption should be the last instruction of the basic block, "
2369  "since the basic block is still being generated.");
2370 
2371  if (!SanOpts.has(SanitizerKind::Alignment))
2372  return;
2373 
2374  // Don't check pointers to volatile data. The behavior here is implementation-
2375  // defined.
2376  if (Ty->getPointeeType().isVolatileQualified())
2377  return;
2378 
2379  // We need to temporarily remove the assumption so we can insert the
2380  // sanitizer check before it, else the check will be dropped by optimizations.
2381  Assumption->removeFromParent();
2382 
2383  {
2384  SanitizerScope SanScope(this);
2385 
2386  if (!OffsetValue)
2387  OffsetValue = Builder.getInt1(0); // no offset.
2388 
2389  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2390  EmitCheckSourceLocation(SecondaryLoc),
2391  EmitCheckTypeDescriptor(Ty)};
2392  llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2393  EmitCheckValue(Alignment),
2394  EmitCheckValue(OffsetValue)};
2395  EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2396  SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2397  }
2398 
2399  // We are now in the (new, empty) "cont" basic block.
2400  // Reintroduce the assumption.
2401  Builder.Insert(Assumption);
2402  // FIXME: Assumption still has its original basic block as its Parent.
2403 }
2404 
2405 llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
2406  if (CGDebugInfo *DI = getDebugInfo())
2407  return DI->SourceLocToDebugLoc(Location);
2408 
2409  return llvm::DebugLoc();
2410 }
const llvm::DataLayout & getDataLayout() const
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:178
Defines the clang::ASTContext interface.
Represents a function declaration or definition.
Definition: Decl.h:1748
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural ...
Other implicit parameter.
Definition: Decl.h:1524
no exception specification
if(T->getSizeExpr()) TRY_TO(TraverseStmt(T -> getSizeExpr()))
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2557
CanQualType VoidPtrTy
Definition: ASTContext.h:1030
A (possibly-)qualified type.
Definition: Type.h:643
llvm::Type * ConvertTypeForMem(QualType T)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
const CodeGenOptions & getCodeGenOpts() const
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler...
Definition: CGExpr.cpp:2785
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EncodeAddrForUseInPrologue(llvm::Function *F, llvm::Constant *Addr)
Encode an address into a form suitable for use in a function prologue.
XRayInstrMask Mask
Definition: XRayInstr.h:64
Specialize PointerLikeTypeTraits to allow LazyGenerationalUpdatePtr to be placed into a PointerUnion...
Definition: Dominators.h:30
virtual void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy, FunctionArgList &Params)=0
Insert any ABI-specific implicit parameters into the parameter list for a function.
CharUnits getClassPointerAlignment(const CXXRecordDecl *CD)
Returns the assumed alignment of an opaque pointer to the given class.
Definition: CGClass.cpp:36
Stmt - This represents one statement.
Definition: Stmt.h:66
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition: Type.h:3375
SanitizerSet Sanitize
Set of enabled sanitizers.
Definition: LangOptions.h:184
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee...
Definition: Type.cpp:505
bool isMain() const
Determines whether this function is "main", which is the entry point into an executable program...
Definition: Decl.cpp:2837
static bool endsWithReturn(const Decl *F)
Determine whether the function F ends with a return stmt.
QualType getThisType() const
Return the type of the this pointer.
Definition: DeclCXX.cpp:2275
Checking the &#39;this&#39; pointer for a constructor call.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition: CGExpr.cpp:1037
constexpr XRayInstrMask Typed
Definition: XRayInstr.h:40
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:88
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD)
Definition: CGClass.cpp:2920
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask >> Checked, SanitizerHandler Check, ArrayRef< llvm::Constant *> StaticArgs, ArrayRef< llvm::Value *> DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition: CGExpr.cpp:2999
The base class of the type hierarchy.
Definition: Type.h:1418
bool ShouldInstrumentFunction()
ShouldInstrumentFunction - Return true if the current function should be instrumented with __cyg_prof...
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1297
bool hasValue() const
Definition: APValue.h:315
bool usesSEHTry() const
Indicates the function uses __try.
Definition: Decl.h:2141
void GenerateCode(GlobalDecl GD, llvm::Function *Fn, const CGFunctionInfo &FnInfo)
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:2832
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:115
static bool hasRequiredFeatures(const SmallVectorImpl< StringRef > &ReqFeatures, CodeGenModule &CGM, const FunctionDecl *FD, std::string &FirstMissing)
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:378
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:693
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const
CGBuilder insert helper.
bool imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc, StringRef Category=StringRef()) const
Imbue XRay attributes to a function, applying the always/never attribute lists in the process...
constexpr XRayInstrMask Function
Definition: XRayInstr.h:38
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2574
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant, or if it does but contains a label, return false.
QualType getElementType() const
Definition: Type.h:2867
bool empty() const
Determines whether the exception-scopes stack is empty.
Definition: EHScopeStack.h:344
This file provides some common utility functions for processing Lambda related AST Constructs...
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
Represents a variable declaration or definition.
Definition: Decl.h:812
QualType getReturnType() const
Definition: Decl.h:2329
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
const T * getAs() const
Member-template getAs<specific type>&#39;.
Definition: Type.h:6821
Extra information about a function prototype.
Definition: Type.h:3787
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts, const LangOptions &LangOpts)
shouldEmitLifetimeMarkers - Decide whether we need emit the life-time markers.
uint64_t getProfileCount(const Stmt *S)
Get the profiler&#39;s count for the given statement.
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:54
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope, by being a (possibly-labelled) DeclStmt.
DiagnosticsEngine & getDiags() const
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified...
Definition: CGExpr.cpp:3296
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
void setCurrentProfileCount(uint64_t Count)
Set the profiler&#39;s current count.
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
llvm::Value * getPointer() const
Definition: Address.h:37
virtual llvm::Constant * getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const
Return a constant used by UBSan as a signature to identify functions possessing type information...
Definition: TargetInfo.h:168
void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn)
Annotate the function with an attribute that disables TSan checking at runtime.
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
Defines the Objective-C statement AST node classes.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1049
bool supportsIFunc() const
Identify whether this target supports IFuncs.
Definition: TargetInfo.h:1135
Represents a parameter to a function.
Definition: Decl.h:1564
static void destroyBlockInfos(CGBlockInfo *info)
Destroy a chain of block layouts.
Definition: CGBlocks.cpp:890
void emitImplicitAssignmentOperatorBody(FunctionArgList &Args)
Definition: CGClass.cpp:1530
EHScopeStack::stable_iterator PrologueCleanupDepth
PrologueCleanupDepth - The cleanup depth enclosing all the cleanups associated with the parameters...
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
Definition: CGStmt.cpp:509
llvm::DenseMap< const VarDecl *, FieldDecl * > LambdaCaptureFields
const TargetInfo & getTarget() const
An object to manage conditionally-evaluated expressions.
PeepholeProtection protectFromPeepholes(RValue rvalue)
protectFromPeepholes - Protect a value that we&#39;re intending to store to the side, but which will prob...
DeclarationName getDeclName() const
Get the actual, stored name of the declaration, which may be a special name.
Definition: Decl.h:297
void EmitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue, llvm::Value *TheCheck, llvm::Instruction *Assumption)
One of these records is kept for each identifier that is lexed.
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
CGBlockInfo * FirstBlockInfo
FirstBlockInfo - The head of a singly-linked-list of block layouts.
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Definition: CGCall.cpp:2793
static void CreateMultiVersionResolverReturn(CodeGenModule &CGM, llvm::Function *Resolver, CGBuilderTy &Builder, llvm::Function *FuncToReturn, bool SupportsIFunc)
Address getAddress() const
Definition: CGValue.h:326
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:154
LValue EmitLValueForLambdaField(const FieldDecl *Field)
Given that we are currently emitting a lambda, emit an l-value for one of its members.
Definition: CGExpr.cpp:3892
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition: Sanitizers.h:158
field_range fields() const
Definition: Decl.h:3817
Represents a member of a struct/union/class.
Definition: Decl.h:2607
SanitizerMask Mask
Bitmask of enabled sanitizers.
Definition: Sanitizers.h:173
__DEVICE__ int max(int __a, int __b)
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:49
void InitTempAlloca(Address Alloca, llvm::Value *Value)
InitTempAlloca - Provide an initial value for the given alloca which will be observable at all locati...
Definition: CGExpr.cpp:126
void disableSanitizerForInstruction(llvm::Instruction *I)
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
ArrayRef< ParmVarDecl * > parameters() const
Definition: Decl.h:2289
Address CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
Definition: CGExpr.cpp:134
virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF)=0
Emit the ABI-specific prolog for the function.
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
static bool hasScalarEvaluationKind(QualType T)
Stmt * getBody(const FunctionDecl *&Definition) const
Retrieve the body (definition) of the function.
Definition: Decl.cpp:2808
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
Definition: CGBuilder.h:156
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:37
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:582
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition: Type.cpp:2236
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
bool isOne() const
isOne - Test whether the quantity equals one.
Definition: CharUnits.h:118
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type...
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:66
child_range children()
Definition: Stmt.cpp:212
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
Definition: CGCXXABI.h:108
unsigned getInAllocaFieldIndex() const
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3404
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition: Type.h:6200
The l-value was considered opaque, so the alignment was determined from a type, but that type was an ...
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:79
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:274
bool containsOnlyLifetimeMarkers(stable_iterator Old) const
Definition: CGCleanup.cpp:141
bool isLambda() const
Determine whether this class describes a lambda function object.
Definition: DeclCXX.h:1198
Values of this type can never be null.
Expr * getSizeExpr() const
Definition: Type.h:3011
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition: Type.h:6130
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition: CGExpr.cpp:182
void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn)
Assign counters to regions and configure them for PGO of a given function.
Definition: CodeGenPGO.cpp:759
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
bool isInstance() const
Definition: DeclCXX.h:2140
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
bool isAlignmentRequired(const Type *T) const
Determine if the alignment the type has was required using an alignment attribute.
llvm::SanitizerStatReport & getSanStats()
llvm::DebugLoc EmitReturnBlock()
Emit the unified return block, trying to avoid its emission when possible.
bool isLambdaCallOperator(const CXXMethodDecl *MD)
Definition: ASTLambda.h:27
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler&#39;s counter for the given statement by StepV.
uint64_t getCurrentProfileCount()
Get the profiler&#39;s current count.
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition: Decl.cpp:3077
Address NormalCleanupDest
i32s containing the indexes of the cleanup destinations.
virtual void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args)=0
Emits a kernel launch stub.
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
Definition: CGExpr.cpp:2828
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point...
Definition: Expr.cpp:2944
Checking the &#39;this&#39; pointer for a call to a non-static member function.
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition: Decl.cpp:4557
void InsertHelper(llvm::Instruction *I) const
Function called by the CodeGenFunction when an instruction is created.
Definition: CGLoopInfo.cpp:764
bool hasAttr() const
Definition: DeclBase.h:542
ConditionalOperator - The ?: ternary operator.
Definition: Expr.h:3702
CanQualType getReturnType() const
bool isValid() const
Definition: Address.h:35
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:57
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1310
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1636
const TargetCodeGenInfo & getTargetCodeGenInfo()
OverloadedOperatorKind getOverloadedOperator() const
getOverloadedOperator - Which C++ overloaded operator this function represents, if any...
Definition: Decl.cpp:3383
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:38
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB)
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:178
llvm::Value * DecodeAddrUsedInPrologue(llvm::Value *F, llvm::Value *EncodedAddr)
Decode an address used in a function prologue, encoded by EncodeAddrForUseInPrologue.
Address CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition: CGExpr.cpp:119
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns &#39;th...
Definition: CGCXXABI.h:106
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
llvm::Constant * EmitAnnotationUnit(SourceLocation Loc)
Emit the annotation&#39;s translation unit.
llvm::BasicBlock * EHResumeBlock
EHResumeBlock - Unified block containing a call to llvm.eh.resume.
This represents one expression.
Definition: Expr.h:108
Emit only debug info necessary for generating line number tables (-gline-tables-only).
bool isDefaulted() const
Whether this function is defaulted per C++0x.
Definition: Decl.h:2048
static Address invalid()
Definition: Address.h:34
bool isObjCRetainableType() const
Definition: Type.cpp:4035
#define V(N, I)
Definition: ASTContext.h:2898
static void TryMarkNoThrow(llvm::Function *F)
Tries to mark the given function nounwind based on the non-existence of any throwing calls within it...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements, of a variable length array type, plus that largest non-variably-sized element type.
const char * getRequiredFeatures(unsigned ID) const
Definition: Builtins.h:210
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:62
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
llvm::LLVMContext & getLLVMContext()
llvm::BasicBlock * GetIndirectGotoBlock()
void GenOpenCLArgMetadata(llvm::Function *FN, const FunctionDecl *FD=nullptr, CodeGenFunction *CGF=nullptr)
OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument information in the program executab...
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char, signed char, short, int, long..], or an enum decl which has a signed representation.
Definition: Type.cpp:1875
QualType getType() const
Definition: Expr.h:137
void EmitConstructorBody(FunctionArgList &Args)
EmitConstructorBody - Emits the body of the current constructor.
Definition: CGClass.cpp:818
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V, QualType Type, CharUnits Alignment=CharUnits::Zero(), SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Emit a check that V is the address of storage of the appropriate size and alignment for an object of ...
Definition: CGExpr.cpp:653
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:196
llvm::Constant * EmitAnnotationString(StringRef Str)
Emit an annotation string.
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:296
SourceLocation getEnd() const
QualType getFunctionTypeWithExceptionSpec(QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI)
Get a function type and produce the equivalent function type with the specified exception specificati...
An object which temporarily prevents a value from being destroyed by aggressive peephole optimization...
UnaryOperator - This represents the unary-expression&#39;s (except sizeof and alignof), the postinc/postdec operators from postfix-expression, and various extensions.
Definition: Expr.h:2016
QualType getFunctionType(QualType ResultTy, ArrayRef< QualType > Args, const FunctionProtoType::ExtProtoInfo &EPI) const
Return a normal function type with a typed argument list.
Definition: ASTContext.h:1373
ValueDecl * getDecl()
Definition: Expr.h:1217
const LangOptions & getLangOpts() const
ASTContext & getContext() const
virtual void startNewFunction()
Definition: Mangle.h:75
CallingConv
CallingConv - Specifies the calling convention that a function uses.
Definition: Specifiers.h:264
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:40
The l-value was considered opaque, so the alignment was determined from a type.
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, std::initializer_list< llvm::Value **> ValuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
Definition: CGCleanup.cpp:417
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition: Sanitizers.h:161
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:141
QualType getCanonicalType() const
Definition: Type.h:6169
Encodes a location in the source.
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go...
QualType getSingleStepDesugaredType(const ASTContext &Context) const
Return the specified type with one level of "sugar" removed from the type.
Definition: Type.h:956
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition: CGExpr.cpp:164
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2109
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition: CGCall.cpp:295
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
QualType getElementType() const
Definition: Type.h:3223
const Decl * getDecl() const
Definition: GlobalDecl.h:77
Represents the declaration of a label.
Definition: Decl.h:468
ParsedAttr - Represents a syntactic attribute.
Definition: ParsedAttr.h:116
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:711
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const
This forwards to CodeGenFunction::InsertHelper.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
TargetAttr::ParsedTargetAttr filterFunctionTargetAttrs(const TargetAttr *TD)
Parses the target attributes passed in, and returns only the ones that are valid feature names...
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2114
void EmitStmt(const Stmt *S, ArrayRef< const Attr *> Attrs=None)
EmitStmt - Emit the code for the statement.
Definition: CGStmt.cpp:44
SanitizerSet SanOpts
Sanitizers enabled for this function.
constexpr XRayInstrMask Custom
Definition: XRayInstr.h:39
llvm::Constant * GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH=false)
Get the address of the RTTI descriptor for the given type.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
An aligned address.
Definition: Address.h:24
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
Address ReturnValuePointer
ReturnValuePointer - The temporary alloca to hold a pointer to sret.
XRayInstrSet XRayInstrumentationBundle
Set of XRay instrumentation kinds to emit.
TypeClass getTypeClass() const
Definition: Type.h:1824
MangleContext & getMangleContext()
Gets the mangle context.
Definition: CGCXXABI.h:96
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
llvm::Constant * EmitAnnotationLineNo(SourceLocation L)
Emit the annotation line number.
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
bool hasImplicitReturnZero() const
Whether falling off this function implicitly returns null/zero.
Definition: Decl.h:2066
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression, because a __builtin_ms_va_list is a pointer to a char.
void getFunctionFeatureMap(llvm::StringMap< bool > &FeatureMap, GlobalDecl GD)
const CGFunctionInfo * CurFnInfo
void EmitStartEHSpec(const Decl *D)
EmitStartEHSpec - Emit the start of the exception spec.
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
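A hedged sketch of the overall emission pattern these entry points form; the argument values (GD, RetTy, Fn, FnInfo, Args, Loc, StartLoc, Body, EndLoc) are placeholders assumed to be set up the way the real driver code prepares them.
    // Set up the prologue, emit the body, then emit the epilogue/return block.
    StartFunction(GD, RetTy, Fn, FnInfo, Args, Loc, StartLoc);
    EmitFunctionBody(Body);
    FinishFunction(EndLoc);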
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic expression.
Definition: Expr.cpp:215
This is an IRBuilder insertion helper that forwards to CodeGenFunction::InsertHelper, which adds necessary metadata to instructions.
Definition: CGBuilder.h:25
Address EmitVAListRef(const Expr *E)
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type, returning the result.
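As a hedged illustration of the common pattern, the scalar is computed and then stored through the Address-based builder helpers; Init and DestAddr are hypothetical names for an Expr* and an Address already in scope.
    // Evaluate the initializer as an LLVM scalar and store it at DestAddr.
    llvm::Value *V = EmitScalarExpr(Init);
    Builder.CreateStore(V, DestAddr, /*IsVolatile=*/false);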
FunctionArgList - Type for representing both the decl and type of parameters to a function...
Definition: CGCall.h:358
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:58
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType, Address dest, Address src, llvm::Value *sizeInChars)
emitNonZeroVLAInit - Emit the "zero" initialization of a variable-length array whose elements have a non-zero bit pattern.
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location)
Emit an annotation call (intrinsic).
Dataflow Directional Tag Classes.
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:580
static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx)
void Init(const Stmt *Body)
Clear the object and pre-process for the given statement, usually function body statement.
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:90
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:69
FunctionDecl * getTemplateInstantiationPattern() const
Retrieve the function declaration from which this function could be instantiated, if it is an instantiation.
Definition: Decl.cpp:3494
StmtClass getStmtClass() const
Definition: Stmt.h:1087
void EmitFunctionBody(const Stmt *Body)
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined...
Definition: DeclCXX.h:2237
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks, lambdas, etc.
Definition: DeclBase.cpp:991
bool isMSVCRTEntryPoint() const
Determines whether this function is a MSVCRT user defined entry point.
Definition: Decl.cpp:2845
void buildThisParam(CodeGenFunction &CGF, FunctionArgList &Params)
Build a parameter variable suitable for 'this'.
Definition: CGCXXABI.cpp:121
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e. a null expression of the given type.
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type *> Tys=None)
llvm::SmallVector< char, 256 > LifetimeExtendedCleanupStack
bool has(XRayInstrMask K) const
Definition: XRayInstr.h:46
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:107
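A small, assumed example of the Address-based load/store pair; SrcAddr and DstAddr are hypothetical Address values of the same element type.
    // Copy one scalar element; alignment is carried by the Address values
    // themselves, so no explicit alignment argument is needed.
    llvm::Value *Tmp = Builder.CreateLoad(SrcAddr, "sketch.tmp");
    Builder.CreateStore(Tmp, DstAddr);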
llvm::Module & getModule() const
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
JumpDest ReturnBlock
ReturnBlock - Unified return block.
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/classes.
Definition: Type.h:4426
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
When instrumenting to collect profile data, the counts for some blocks such as switch cases need to not include the fall-through counts, so emit a branch around the instrumentation code.
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
CodeGenTypes & getTypes() const
CharUnits getIndirectAlign() const
T * getAttr() const
Definition: DeclBase.h:538
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:51
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert allocas.
ExtVectorType - Extended vector type.
Definition: Type.h:3307
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
CodeGenOptions - Track various options which control how the code is optimized and passed to the backend.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:454
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
Optional< NullabilityKind > getNullability(const ASTContext &context) const
Determine the nullability of the given type.
Definition: Type.cpp:3803
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat]...
Definition: APValue.h:76
void getCaptureFields(llvm::DenseMap< const VarDecl *, FieldDecl *> &Captures, FieldDecl *&ThisCapture) const
For a closure type, retrieve the mapping from captured variables and this to the non-static data members that capture them.
Definition: DeclCXX.cpp:1417
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types, and incomplete types.
Definition: Type.cpp:2062
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition: DeclBase.h:524
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate.h) and friends (in DeclFriend.h).
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer, using any crazy technique that we want to.
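A hedged sketch of the usual fold-if-constant pattern; E is an assumed Expr*, and getContext() is assumed to be CodeGenFunction's ASTContext accessor.
    // Try to fold E to an integer constant; fall back to normal emission if not.
    Expr::EvalResult Result;
    if (E->EvaluateAsInt(Result, getContext())) {
      llvm::APSInt Value = Result.Val.getInt();
      // ... use the folded Value directly ...
    }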
void unprotectFromPeepholes(PeepholeProtection protection)
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
bool hasUnaligned() const
Definition: Type.h:293
Represents a C++ struct/union/class.
Definition: DeclCXX.h:300
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block, taking care to avoid creation of branches from dummy blocks.
Definition: CGStmt.cpp:474
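A minimal, assumed usage showing how EmitBranch pairs with EmitBlock when splitting emission across basic blocks; the block name is illustrative.
    // Close out the current block and continue in a fresh one; EmitBranch
    // takes care not to create branches from dummy blocks.
    llvm::BasicBlock *ContBB = createBasicBlock("sketch.cont");
    EmitBranch(ContBB);
    EmitBlock(ContBB);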
bool isVoidType() const
Definition: Type.h:6613
llvm::Type * ConvertType(QualType T)
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function...
Definition: CGCall.cpp:2222
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:6157
static llvm::Constant * getPrologueSignature(CodeGenModule &CGM, const FunctionDecl *FD)
Return the UBSan prologue signature for FD if one is available.
LambdaCaptureDefault getLambdaCaptureDefault() const
Definition: DeclCXX.h:1230
LValue EmitLValue(const Expr *E)
EmitLValue - Emit code to compute a designator that specifies the location of the expression...
Definition: CGExpr.cpp:1244
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
Builtin::Context & BuiltinInfo
Definition: ASTContext.h:571
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address of the lvalue, then loads the result as an rvalue, returning the rvalue.
Definition: CGExpr.cpp:1773
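A hedged sketch combining this with MakeAddrLValue and getScalarVal; Addr, Ty and Loc are assumed to be an Address, a QualType and a SourceLocation already in scope.
    // Wrap a raw Address in an LValue, load it as an RValue, and extract the
    // scalar llvm::Value.
    LValue LV = MakeAddrLValue(Addr, Ty);
    RValue RV = EmitLoadOfLValue(LV, Loc);
    llvm::Value *Scalar = RV.getScalarVal();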
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:152
Defines the clang::TargetInfo interface.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2516
void EmitMultiVersionResolver(llvm::Function *Resolver, ArrayRef< MultiVersionResolverOption > Options)
CGCXXABI & getCXXABI() const
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2435
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1141
static bool shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD, const ASTContext &Context)
void EmitDestructorBody(FunctionArgList &Args)
EmitDestructorBody - Emits the body of the current destructor.
Definition: CGClass.cpp:1417
bool isPointerType() const
Definition: Type.h:6354
This structure provides a set of types that are commonly used during IR emission.
void EmitEndEHSpec(const Decl *D)
EmitEndEHSpec - Emit the end of the exception spec.
struct clang::CodeGen::CodeGenFunction::MultiVersionResolverOption::Conds Conditions
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
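A hedged example of turning an AST-level size into an IR constant; Ty is an assumed QualType, and getSize is taken to be the CodeGenModule helper listed earlier on this page.
    // Size of the complete type in characters, then as a size_t-typed constant.
    CharUnits Size = getContext().getTypeSizeInChars(Ty);
    llvm::ConstantInt *SizeVal = CGM.getSize(Size);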
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handler.
Definition: CGExpr.cpp:2866
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
Definition: CGStmt.cpp:380
void EmitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
QualType getType() const
Definition: Decl.h:647
A trivial tuple used to represent a source range.
LValue - This represents an lvalue reference.
Definition: CGValue.h:166
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.cpp:1522
void EmitVarAnnotations(const VarDecl *D, llvm::Value *V)
Emit local annotations for the local variable V, declared by D.
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:2991
SanitizerMetadata * getSanitizerMetadata()
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
APSInt & getInt()
Definition: APValue.h:336
const LangOptions & getLangOpts() const
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:163
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base element type.
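An assumed usage sketch; ArrTy is a hypothetical ArrayType* and Addr a hypothetical Address of the array object.
    // Computes the total element count (a constant, or a runtime value for
    // VLAs) and rewrites BaseTy/Addr to describe the innermost element.
    QualType BaseTy;
    llvm::Value *NumElts = emitArrayLength(ArrTy, BaseTy, Addr);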
static LValue MakeAddr(Address address, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:366
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
SourceLocation getBegin() const
bool isZeroInitializable(QualType T)
IsZeroInitializable - Return whether a type can be zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
Defines enum values for all the target-independent builtin functions.
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
bool isScalar() const
Definition: CGValue.h:51
Attr - This represents one attribute.
Definition: Attr.h:43
SourceLocation getLocation() const
Definition: DeclBase.h:429
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null. If the type contains data member pointers, they will be initialized to -1 in accordance with the Itanium C++ ABI.
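A minimal, assumed example; DestPtr is a hypothetical Address of a local object whose type is Ty.
    // Value-initialize the object at DestPtr; for types that are not simply
    // zero-initializable this emits the appropriate non-zero stores.
    EmitNullInitialization(DestPtr, Ty);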
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point...
Definition: Expr.cpp:2956
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.