1 //===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This coordinates the per-function state used while generating code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CodeGenFunction.h"
14 #include "CGBlocks.h"
15 #include "CGCleanup.h"
16 #include "CGCUDARuntime.h"
17 #include "CGCXXABI.h"
18 #include "CGDebugInfo.h"
19 #include "CGOpenMPRuntime.h"
20 #include "CodeGenModule.h"
21 #include "CodeGenPGO.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/ASTContext.h"
24 #include "clang/AST/ASTLambda.h"
25 #include "clang/AST/Decl.h"
26 #include "clang/AST/DeclCXX.h"
27 #include "clang/AST/StmtCXX.h"
28 #include "clang/AST/StmtObjC.h"
29 #include "clang/Basic/Builtins.h"
30 #include "clang/Basic/CodeGenOptions.h"
31 #include "clang/Basic/TargetInfo.h"
32 #include "clang/CodeGen/CGFunctionInfo.h"
33 #include "clang/Frontend/FrontendDiagnostic.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/Dominators.h"
36 #include "llvm/IR/Intrinsics.h"
37 #include "llvm/IR/MDBuilder.h"
38 #include "llvm/IR/Operator.h"
39 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
40 using namespace clang;
41 using namespace CodeGen;
42 
43 /// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
44 /// markers.
45 static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
46  const LangOptions &LangOpts) {
47  if (CGOpts.DisableLifetimeMarkers)
48  return false;
49 
50  // Disable lifetime markers in msan builds.
51  // FIXME: Remove this when msan works with lifetime markers.
52  if (LangOpts.Sanitize.has(SanitizerKind::Memory))
53  return false;
54 
55  // Asan uses markers for use-after-scope checks.
56  if (CGOpts.SanitizeAddressUseAfterScope)
57  return true;
58 
59  // For now, only in optimized builds.
60  return CGOpts.OptimizationLevel != 0;
61 }
62 
63 CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
64  : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
65  Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
66  CGBuilderInserterTy(this)),
67  SanOpts(CGM.getLangOpts().Sanitize), DebugInfo(CGM.getModuleDebugInfo()),
68  PGO(cgm), ShouldEmitLifetimeMarkers(shouldEmitLifetimeMarkers(
69  CGM.getCodeGenOpts(), CGM.getLangOpts())) {
70  if (!suppressNewContext)
71  CGM.getCXXABI().getMangleContext();
72 
73  llvm::FastMathFlags FMF;
74  if (CGM.getLangOpts().FastMath)
75  FMF.setFast();
76  if (CGM.getLangOpts().FiniteMathOnly) {
77  FMF.setNoNaNs();
78  FMF.setNoInfs();
79  }
80  if (CGM.getCodeGenOpts().NoNaNsFPMath) {
81  FMF.setNoNaNs();
82  }
83  if (CGM.getCodeGenOpts().NoSignedZeros) {
84  FMF.setNoSignedZeros();
85  }
86  if (CGM.getCodeGenOpts().ReciprocalMath) {
87  FMF.setAllowReciprocal();
88  }
89  if (CGM.getCodeGenOpts().Reassociate) {
90  FMF.setAllowReassoc();
91  }
92  Builder.setFastMathFlags(FMF);
93 }
94 
95 CodeGenFunction::~CodeGenFunction() {
96  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
97 
98  // If there are any unclaimed block infos, go ahead and destroy them
99  // now. This can happen if IR-gen gets clever and skips evaluating
100  // something.
101  if (FirstBlockInfo)
102  destroyBlockInfos(FirstBlockInfo);
103 
104  if (getLangOpts().OpenMP && CurFn)
105  CGM.getOpenMPRuntime().functionFinished(*this);
106 }
107 
108 CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
109  LValueBaseInfo *BaseInfo,
110  TBAAAccessInfo *TBAAInfo) {
111  return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
112  /* forPointeeType= */ true);
113 }
114 
115 CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
116  LValueBaseInfo *BaseInfo,
117  TBAAAccessInfo *TBAAInfo,
118  bool forPointeeType) {
119  if (TBAAInfo)
120  *TBAAInfo = CGM.getTBAAAccessInfo(T);
121 
122  // Honor alignment typedef attributes even on incomplete types.
123  // We also honor them straight for C++ class types, even as pointees;
124  // there's an expressivity gap here.
125  if (auto TT = T->getAs<TypedefType>()) {
126  if (auto Align = TT->getDecl()->getMaxAlignment()) {
127  if (BaseInfo)
128  *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
129  return getContext().toCharUnitsFromBits(Align);
130  }
131  }
132 
133  if (BaseInfo)
134  *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
135 
136  CharUnits Alignment;
137  if (T->isIncompleteType()) {
138  Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
139  } else {
140  // For C++ class pointees, we don't know whether we're pointing at a
141  // base or a complete object, so we generally need to use the
142  // non-virtual alignment.
143  const CXXRecordDecl *RD;
144  if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
145  Alignment = CGM.getClassPointerAlignment(RD);
146  } else {
147  Alignment = getContext().getTypeAlignInChars(T);
148  if (T.getQualifiers().hasUnaligned())
149  Alignment = CharUnits::One();
150  }
151 
152  // Cap to the global maximum type alignment unless the alignment
153  // was somehow explicit on the type.
154  if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
155  if (Alignment.getQuantity() > MaxAlign &&
156  !getContext().isAlignmentRequired(T))
157  Alignment = CharUnits::fromQuantity(MaxAlign);
158  }
159  }
160  return Alignment;
161 }
162 
163 LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
164  LValueBaseInfo BaseInfo;
165  TBAAAccessInfo TBAAInfo;
166  CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
167  return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
168  TBAAInfo);
169 }
170 
171 /// Given a value of type T* that may not be to a complete object,
172 /// construct an l-value with the natural pointee alignment of T.
173 LValue
174 CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
175  LValueBaseInfo BaseInfo;
176  TBAAAccessInfo TBAAInfo;
177  CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
178  /* forPointeeType= */ true);
179  return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
180 }
181 
182 
183 llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
184  return CGM.getTypes().ConvertTypeForMem(T);
185 }
186 
187 llvm::Type *CodeGenFunction::ConvertType(QualType T) {
188  return CGM.getTypes().ConvertType(T);
189 }
190 
191 TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
192  type = type.getCanonicalType();
193  while (true) {
194  switch (type->getTypeClass()) {
195 #define TYPE(name, parent)
196 #define ABSTRACT_TYPE(name, parent)
197 #define NON_CANONICAL_TYPE(name, parent) case Type::name:
198 #define DEPENDENT_TYPE(name, parent) case Type::name:
199 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
200 #include "clang/AST/TypeNodes.def"
201  llvm_unreachable("non-canonical or dependent type in IR-generation");
202 
203  case Type::Auto:
204  case Type::DeducedTemplateSpecialization:
205  llvm_unreachable("undeduced type in IR-generation");
206 
207  // Various scalar types.
208  case Type::Builtin:
209  case Type::Pointer:
210  case Type::BlockPointer:
211  case Type::LValueReference:
212  case Type::RValueReference:
213  case Type::MemberPointer:
214  case Type::Vector:
215  case Type::ExtVector:
216  case Type::FunctionProto:
217  case Type::FunctionNoProto:
218  case Type::Enum:
219  case Type::ObjCObjectPointer:
220  case Type::Pipe:
221  return TEK_Scalar;
222 
223  // Complexes.
224  case Type::Complex:
225  return TEK_Complex;
226 
227  // Arrays, records, and Objective-C objects.
228  case Type::ConstantArray:
229  case Type::IncompleteArray:
230  case Type::VariableArray:
231  case Type::Record:
232  case Type::ObjCObject:
233  case Type::ObjCInterface:
234  return TEK_Aggregate;
235 
236  // We operate on atomic values according to their underlying type.
237  case Type::Atomic:
238  type = cast<AtomicType>(type)->getValueType();
239  continue;
240  }
241  llvm_unreachable("unknown type kind!");
242  }
243 }
244 
245 llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
246  // For cleanliness, we try to avoid emitting the return block for
247  // simple cases.
248  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
249 
250  if (CurBB) {
251  assert(!CurBB->getTerminator() && "Unexpected terminated block.");
252 
253  // We have a valid insert point, reuse it if it is empty or there are no
254  // explicit jumps to the return block.
255  if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
256  ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
257  delete ReturnBlock.getBlock();
258  ReturnBlock = JumpDest();
259  } else
260  EmitBlock(ReturnBlock.getBlock());
261  return llvm::DebugLoc();
262  }
263 
264  // Otherwise, if the return block is the target of a single direct
265  // branch then we can just put the code in that block instead. This
266  // cleans up functions which started with a unified return block.
267  if (ReturnBlock.getBlock()->hasOneUse()) {
268  llvm::BranchInst *BI =
269  dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
270  if (BI && BI->isUnconditional() &&
271  BI->getSuccessor(0) == ReturnBlock.getBlock()) {
272  // Record/return the DebugLoc of the simple 'return' expression to be used
273  // later by the actual 'ret' instruction.
274  llvm::DebugLoc Loc = BI->getDebugLoc();
275  Builder.SetInsertPoint(BI->getParent());
276  BI->eraseFromParent();
277  delete ReturnBlock.getBlock();
278  ReturnBlock = JumpDest();
279  return Loc;
280  }
281  }
282 
283  // FIXME: We are at an unreachable point, there is no reason to emit the block
284  // unless it has uses. However, we still need a place to put the debug
285  // region.end for now.
286 
287  EmitBlock(ReturnBlock.getBlock());
288  return llvm::DebugLoc();
289 }
290 
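/// EmitIfUsed - Append the given block to the current function if it has any
/// uses; otherwise delete it.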
291 static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
292  if (!BB) return;
293  if (!BB->use_empty())
294  return CGF.CurFn->getBasicBlockList().push_back(BB);
295  delete BB;
296 }
297 
298 void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
299  assert(BreakContinueStack.empty() &&
300  "mismatched push/pop in break/continue stack!");
301 
302  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
303  && NumSimpleReturnExprs == NumReturnExprs
304  && ReturnBlock.getBlock()->use_empty();
305  // Usually the return expression is evaluated before the cleanup
306  // code. If the function contains only a simple return statement,
307  // such as a constant, the location before the cleanup code becomes
308  // the last useful breakpoint in the function, because the simple
309  // return expression will be evaluated after the cleanup code. To be
310  // safe, set the debug location for cleanup code to the location of
311  // the return statement. Otherwise the cleanup code should be at the
312  // end of the function's lexical scope.
313  //
314  // If there are multiple branches to the return block, the branch
315  // instructions will get the location of the return statements and
316  // all will be fine.
317  if (CGDebugInfo *DI = getDebugInfo()) {
318  if (OnlySimpleReturnStmts)
319  DI->EmitLocation(Builder, LastStopPoint);
320  else
321  DI->EmitLocation(Builder, EndLoc);
322  }
323 
324  // Pop any cleanups that might have been associated with the
325  // parameters. Do this in whatever block we're currently in; it's
326  // important to do this before we enter the return block or return
327  // edges will be *really* confused.
328  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
329  bool HasOnlyLifetimeMarkers =
330  HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
331  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
332  if (HasCleanups) {
333  // Make sure the line table doesn't jump back into the body for
334  // the ret after it's been at EndLoc.
335  if (CGDebugInfo *DI = getDebugInfo())
336  if (OnlySimpleReturnStmts)
337  DI->EmitLocation(Builder, EndLoc);
338 
339  PopCleanupBlocks(PrologueCleanupDepth);
340  }
341 
342  // Emit function epilog (to return).
343  llvm::DebugLoc Loc = EmitReturnBlock();
344 
345  if (ShouldInstrumentFunction()) {
346  if (CGM.getCodeGenOpts().InstrumentFunctions)
347  CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
348  if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
349  CurFn->addFnAttr("instrument-function-exit-inlined",
350  "__cyg_profile_func_exit");
351  }
352 
353  // Emit debug descriptor for function end.
354  if (CGDebugInfo *DI = getDebugInfo())
355  DI->EmitFunctionEnd(Builder, CurFn);
356 
357  // Reset the debug location to that of the simple 'return' expression, if
358  // any, rather than that of the end of the function's scope '}'.
359  ApplyDebugLocation AL(*this, Loc);
360  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
361  EmitEndEHSpec(CurCodeDecl);
362 
363  assert(EHStack.empty() &&
364  "did not remove all scopes from cleanup stack!");
365 
366  // If someone did an indirect goto, emit the indirect goto block at the end of
367  // the function.
368  if (IndirectBranch) {
369  EmitBlock(IndirectBranch->getParent());
370  Builder.ClearInsertionPoint();
371  }
372 
373  // If some of our locals escaped, insert a call to llvm.localescape in the
374  // entry block.
375  if (!EscapedLocals.empty()) {
376  // Invert the map from local to index into a simple vector. There should be
377  // no holes.
378  SmallVector<llvm::Value *, 4> EscapeArgs;
379  EscapeArgs.resize(EscapedLocals.size());
380  for (auto &Pair : EscapedLocals)
381  EscapeArgs[Pair.second] = Pair.first;
382  llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
383  &CGM.getModule(), llvm::Intrinsic::localescape);
384  CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
385  }
386 
387  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
388  llvm::Instruction *Ptr = AllocaInsertPt;
389  AllocaInsertPt = nullptr;
390  Ptr->eraseFromParent();
391 
392  // If someone took the address of a label but never did an indirect goto, we
393  // made a zero entry PHI node, which is illegal, zap it now.
394  if (IndirectBranch) {
395  llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
396  if (PN->getNumIncomingValues() == 0) {
397  PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
398  PN->eraseFromParent();
399  }
400  }
401 
402  EmitIfUsed(*this, EHResumeBlock);
403  EmitIfUsed(*this, TerminateLandingPad);
404  EmitIfUsed(*this, TerminateHandler);
405  EmitIfUsed(*this, UnreachableBlock);
406 
407  for (const auto &FuncletAndParent : TerminateFunclets)
408  EmitIfUsed(*this, FuncletAndParent.second);
409 
410  if (CGM.getCodeGenOpts().EmitDeclMetadata)
411  EmitDeclMetadata();
412 
413  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
414  I = DeferredReplacements.begin(),
415  E = DeferredReplacements.end();
416  I != E; ++I) {
417  I->first->replaceAllUsesWith(I->second);
418  I->first->eraseFromParent();
419  }
420 
421  // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
422  // PHIs if the current function is a coroutine. We don't do it for all
423  // functions as it may result in slight increase in numbers of instructions
424  // if compiled with no optimizations. We do it for coroutine as the lifetime
425  // of CleanupDestSlot alloca make correct coroutine frame building very
426  // difficult.
427  if (NormalCleanupDest.isValid() && isCoroutine()) {
428  llvm::DominatorTree DT(*CurFn);
429  llvm::PromoteMemToReg(
430  cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
431  NormalCleanupDest = Address::invalid();
432  }
433 
434  // Scan function arguments for vector width.
435  for (llvm::Argument &A : CurFn->args())
436  if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
437  LargestVectorWidth = std::max(LargestVectorWidth,
438  VT->getPrimitiveSizeInBits());
439 
440  // Update vector width based on return type.
441  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
442  LargestVectorWidth = std::max(LargestVectorWidth,
443  VT->getPrimitiveSizeInBits());
444 
445  // Add the required-vector-width attribute. This contains the max width from:
446  // 1. min-vector-width attribute used in the source program.
447  // 2. Any builtins used that have a vector width specified.
448  // 3. Values passed in and out of inline assembly.
449  // 4. Width of vector arguments and return types for this function.
450  // 5. Width of vector arguments and return types for functions called by this
451  // function.
452  CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));
453 
454  // If we generated an unreachable return block, delete it now.
455  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
456  Builder.ClearInsertionPoint();
457  ReturnBlock.getBlock()->eraseFromParent();
458  }
459  if (ReturnValue.isValid()) {
460  auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
461  if (RetAlloca && RetAlloca->use_empty()) {
462  RetAlloca->eraseFromParent();
463  ReturnValue = Address::invalid();
464  }
465  }
466 }
467 
468 /// ShouldInstrumentFunction - Return true if the current function should be
469 /// instrumented with __cyg_profile_func_* calls
470 bool CodeGenFunction::ShouldInstrumentFunction() {
471  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
472  !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
473  !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
474  return false;
475  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
476  return false;
477  return true;
478 }
479 
480 /// ShouldXRayInstrument - Return true if the current function should be
481 /// instrumented with XRay nop sleds.
482 bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
483  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
484 }
485 
486 /// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
487 /// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
488 bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
489  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
490  (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
491  CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
492  XRayInstrKind::Custom);
493 }
494 
495 bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
496  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
497  (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
498  CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
499  XRayInstrKind::Typed);
500 }
501 
502 llvm::Constant *
503 CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
504  llvm::Constant *Addr) {
505  // Addresses stored in prologue data can't require run-time fixups and must
506  // be PC-relative. Run-time fixups are undesirable because they necessitate
507  // writable text segments, which are unsafe. And absolute addresses are
508  // undesirable because they break PIE mode.
509 
510  // Add a layer of indirection through a private global. Taking its address
511  // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
512  auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
513  /*isConstant=*/true,
514  llvm::GlobalValue::PrivateLinkage, Addr);
515 
516  // Create a PC-relative address.
517  auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
518  auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
519  auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
520  return (IntPtrTy == Int32Ty)
521  ? PCRelAsInt
522  : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
523 }
524 
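/// DecodeAddrUsedInPrologue - Reverse the transformation done by
/// EncodeAddrForUseInPrologue: recompute the private global's address from
/// the PC-relative offset and load the original pointer back through it.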
525 llvm::Value *
526 CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
527  llvm::Value *EncodedAddr) {
528  // Reconstruct the address of the global.
529  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
530  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
531  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
532  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");
533 
534  // Load the original pointer through the global.
535  return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
536  "decoded_addr");
537 }
538 
539 void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
540  llvm::Function *Fn)
541 {
542  if (!FD->hasAttr<OpenCLKernelAttr>())
543  return;
544 
545  llvm::LLVMContext &Context = getLLVMContext();
546 
547  CGM.GenOpenCLArgMetadata(Fn, FD, this);
548 
549  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
550  QualType HintQTy = A->getTypeHint();
551  const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
552  bool IsSignedInteger =
553  HintQTy->isSignedIntegerType() ||
554  (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
555  llvm::Metadata *AttrMDArgs[] = {
556  llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
557  CGM.getTypes().ConvertType(A->getTypeHint()))),
558  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
559  llvm::IntegerType::get(Context, 32),
560  llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
561  Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
562  }
563 
564  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
565  llvm::Metadata *AttrMDArgs[] = {
566  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
567  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
568  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
569  Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
570  }
571 
572  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
573  llvm::Metadata *AttrMDArgs[] = {
574  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
575  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
576  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
577  Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
578  }
579 
580  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
581  FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
582  llvm::Metadata *AttrMDArgs[] = {
583  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
584  Fn->setMetadata("intel_reqd_sub_group_size",
585  llvm::MDNode::get(Context, AttrMDArgs));
586  }
587 }
588 
589 /// Determine whether the function F ends with a return stmt.
590 static bool endsWithReturn(const Decl* F) {
591  const Stmt *Body = nullptr;
592  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
593  Body = FD->getBody();
594  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
595  Body = OMD->getBody();
596 
597  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
598  auto LastStmt = CS->body_rbegin();
599  if (LastStmt != CS->body_rend())
600  return isa<ReturnStmt>(*LastStmt);
601  }
602  return false;
603 }
604 
605 void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
606  if (SanOpts.has(SanitizerKind::Thread)) {
607  Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
608  Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
609  }
610 }
611 
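/// matchesStlAllocatorFn - Return true if D looks like an STL allocator's
/// allocate() member: a method named "allocate" taking a size_t and,
/// optionally, a 'const void *' hint. Used below to relax the CFI
/// unrelated-cast check for such allocators.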
612 static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
613  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
614  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
615  !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
616  (MD->getNumParams() != 1 && MD->getNumParams() != 2))
617  return false;
618 
619  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
620  return false;
621 
622  if (MD->getNumParams() == 2) {
623  auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
624  if (!PT || !PT->isVoidPointerType() ||
625  !PT->getPointeeType().isConstQualified())
626  return false;
627  }
628 
629  return true;
630 }
631 
632 /// Return the UBSan prologue signature for \p FD if one is available.
633 static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
634  const FunctionDecl *FD) {
635  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
636  if (!MD->isStatic())
637  return nullptr;
638  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
639 }
640 
641 void CodeGenFunction::StartFunction(GlobalDecl GD,
642  QualType RetTy,
643  llvm::Function *Fn,
644  const CGFunctionInfo &FnInfo,
645  const FunctionArgList &Args,
646  SourceLocation Loc,
647  SourceLocation StartLoc) {
648  assert(!CurFn &&
649  "Do not use a CodeGenFunction object for more than one function");
650 
651  const Decl *D = GD.getDecl();
652 
653  DidCallStackSave = false;
654  CurCodeDecl = D;
655  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
656  if (FD->usesSEHTry())
657  CurSEHParent = FD;
658  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
659  FnRetTy = RetTy;
660  CurFn = Fn;
661  CurFnInfo = &FnInfo;
662  assert(CurFn->isDeclaration() && "Function already has body?");
663 
664  // If this function has been blacklisted for any of the enabled sanitizers,
665  // disable the sanitizer for the function.
666  do {
667 #define SANITIZER(NAME, ID) \
668  if (SanOpts.empty()) \
669  break; \
670  if (SanOpts.has(SanitizerKind::ID)) \
671  if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc)) \
672  SanOpts.set(SanitizerKind::ID, false);
673 
674 #include "clang/Basic/Sanitizers.def"
675 #undef SANITIZER
676  } while (0);
677 
678  if (D) {
679  // Apply the no_sanitize* attributes to SanOpts.
680  for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
681  SanitizerMask mask = Attr->getMask();
682  SanOpts.Mask &= ~mask;
683  if (mask & SanitizerKind::Address)
684  SanOpts.set(SanitizerKind::KernelAddress, false);
685  if (mask & SanitizerKind::KernelAddress)
686  SanOpts.set(SanitizerKind::Address, false);
687  if (mask & SanitizerKind::HWAddress)
688  SanOpts.set(SanitizerKind::KernelHWAddress, false);
689  if (mask & SanitizerKind::KernelHWAddress)
690  SanOpts.set(SanitizerKind::HWAddress, false);
691  }
692  }
693 
694  // Apply sanitizer attributes to the function.
695  if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
696  Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
697  if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
698  Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
699  if (SanOpts.has(SanitizerKind::Thread))
700  Fn->addFnAttr(llvm::Attribute::SanitizeThread);
701  if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
702  Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
703  if (SanOpts.has(SanitizerKind::SafeStack))
704  Fn->addFnAttr(llvm::Attribute::SafeStack);
705  if (SanOpts.has(SanitizerKind::ShadowCallStack))
706  Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
707 
708  // Apply fuzzing attribute to the function.
709  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
710  Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
711 
712  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
713  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run time.
714  if (SanOpts.has(SanitizerKind::Thread)) {
715  if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
716  IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
717  if (OMD->getMethodFamily() == OMF_dealloc ||
718  OMD->getMethodFamily() == OMF_initialize ||
719  (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
720  markAsIgnoreThreadCheckingAtRuntime(Fn);
721  }
722  }
723  }
724 
725  // Ignore unrelated casts in STL allocate() since the allocator must cast
726  // from void* to T* before object initialization completes. Don't match on the
727  // namespace because not all allocators are in std::
728  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
729  if (matchesStlAllocatorFn(D, getContext()))
730  SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
731  }
732 
733  // Apply xray attributes to the function (as a string, for now)
734  if (D) {
735  if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
738  if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
739  Fn->addFnAttr("function-instrument", "xray-always");
740  if (XRayAttr->neverXRayInstrument())
741  Fn->addFnAttr("function-instrument", "xray-never");
742  if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
744  Fn->addFnAttr("xray-log-args",
745  llvm::utostr(LogArgs->getArgumentCount()));
746  }
747  } else {
748  if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
749  Fn->addFnAttr(
750  "xray-instruction-threshold",
751  llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
752  }
753  }
754 
755  // Add no-jump-tables value.
756  Fn->addFnAttr("no-jump-tables",
757  llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));
758 
759  // Add profile-sample-accurate value.
760  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
761  Fn->addFnAttr("profile-sample-accurate");
762 
763  if (getLangOpts().OpenCL) {
764  // Add metadata for a kernel function.
765  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
766  EmitOpenCLKernelMetadata(FD, Fn);
767  }
768 
769  // If we are checking function types, emit a function type signature as
770  // prologue data.
771  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
772  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
773  if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
774  // Remove any (C++17) exception specifications, to allow calling e.g. a
775  // noexcept function through a non-noexcept pointer.
776  auto ProtoTy =
777  getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
778  EST_None);
779  llvm::Constant *FTRTTIConst =
780  CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
781  llvm::Constant *FTRTTIConstEncoded =
782  EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
783  llvm::Constant *PrologueStructElems[] = {PrologueSig,
784  FTRTTIConstEncoded};
785  llvm::Constant *PrologueStructConst =
786  llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
787  Fn->setPrologueData(PrologueStructConst);
788  }
789  }
790  }
791 
792  // If we're checking nullability, we need to know whether we can check the
793  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
794  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
795  auto Nullability = FnRetTy->getNullability(getContext());
796  if (Nullability && *Nullability == NullabilityKind::NonNull) {
797  if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
798  CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
799  RetValNullabilityPrecondition =
800  llvm::ConstantInt::getTrue(getLLVMContext());
801  }
802  }
803 
804  // If we're in C++ mode and the function name is "main", it is guaranteed
805  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
806  // used within a program").
807  if (getLangOpts().CPlusPlus)
808  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
809  if (FD->isMain())
810  Fn->addFnAttr(llvm::Attribute::NoRecurse);
811 
812  // If a custom alignment is used, force realigning to this alignment on
813  // any main function which certainly will need it.
814  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
815  if ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
816  CGM.getCodeGenOpts().StackAlignment)
817  Fn->addFnAttr("stackrealign");
818 
819  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
820 
821  // Create a marker to make it easy to insert allocas into the entryblock
822  // later. Don't create this with the builder, because we don't want it
823  // folded.
824  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
825  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);
826 
827  ReturnBlock = getJumpDestInCurrentScope("return");
828 
829  Builder.SetInsertPoint(EntryBB);
830 
831  // If we're checking the return value, allocate space for a pointer to a
832  // precise source location of the checked return statement.
833  if (requiresReturnValueCheck()) {
834  ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
835  InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
836  }
837 
838  // Emit subprogram debug descriptor.
839  if (CGDebugInfo *DI = getDebugInfo()) {
840  // Reconstruct the type from the argument list so that implicit parameters,
841  // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
842  // convention.
843  CallingConv CC = CallingConv::CC_C;
844  if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
845  if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
846  CC = SrcFnTy->getCallConv();
847  SmallVector<QualType, 16> ArgTypes;
848  for (const VarDecl *VD : Args)
849  ArgTypes.push_back(VD->getType());
850  QualType FnType = getContext().getFunctionType(
851  RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
852  DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
853  Builder);
854  }
855 
856  if (ShouldInstrumentFunction()) {
857  if (CGM.getCodeGenOpts().InstrumentFunctions)
858  CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
859  if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
860  CurFn->addFnAttr("instrument-function-entry-inlined",
861  "__cyg_profile_func_enter");
862  if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
863  CurFn->addFnAttr("instrument-function-entry-inlined",
864  "__cyg_profile_func_enter_bare");
865  }
866 
867  // Since emitting the mcount call here impacts optimizations such as function
868  // inlining, we just add an attribute to insert an mcount call in the backend.
869  // The attribute is set to the mcount function name, which is architecture
870  // dependent.
871  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
872  // Calls to fentry/mcount should not be generated if function has
873  // the no_instrument_function attribute.
874  if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
875  if (CGM.getCodeGenOpts().CallFEntry)
876  Fn->addFnAttr("fentry-call", "true");
877  else {
878  Fn->addFnAttr("instrument-function-entry-inlined",
879  getTarget().getMCountName());
880  }
881  }
882  }
883 
884  if (RetTy->isVoidType()) {
885  // Void type; nothing to return.
886  ReturnValue = Address::invalid();
887 
888  // Count the implicit return.
889  if (!endsWithReturn(D))
890  ++NumReturnExprs;
891  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
892  // Indirect return; emit returned value directly into sret slot.
893  // This reduces code size, and affects correctness in C++.
894  auto AI = CurFn->arg_begin();
895  if (CurFnInfo->getReturnInfo().isSRetAfterThis())
896  ++AI;
897  ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
898  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
899  !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
900  // Load the sret pointer from the argument struct and return into that.
901  unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
902  llvm::Function::arg_iterator EI = CurFn->arg_end();
903  --EI;
904  llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
905  Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
906  ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
907  } else {
908  ReturnValue = CreateIRTemp(RetTy, "retval");
909 
910  // Tell the epilog emitter to autorelease the result. We do this
911  // now so that various specialized functions can suppress it
912  // during their IR-generation.
913  if (getLangOpts().ObjCAutoRefCount &&
914  !CurFnInfo->isReturnsRetained() &&
915  RetTy->isObjCRetainableType())
916  AutoreleaseResult = true;
917  }
918 
919  EmitStartEHSpec(CurCodeDecl);
920 
921  PrologueCleanupDepth = EHStack.stable_begin();
922 
923  // Emit OpenMP specific initialization of the device functions.
924  if (getLangOpts().OpenMP && CurCodeDecl)
925  CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
926 
927  EmitFunctionProlog(*CurFnInfo, CurFn, Args);
928 
929  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
930  CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
931  const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
932  if (MD->getParent()->isLambda() &&
933  MD->getOverloadedOperator() == OO_Call) {
934  // We're in a lambda; figure out the captures.
935  MD->getParent()->getCaptureFields(LambdaCaptureFields,
936  LambdaThisCaptureField);
937  if (LambdaThisCaptureField) {
938  // If the lambda captures the object referred to by '*this' - either by
939  // value or by reference, make sure CXXThisValue points to the correct
940  // object.
941 
942  // Get the lvalue for the field (which is a copy of the enclosing object
943  // or contains the address of the enclosing object).
944  LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
945  if (!LambdaThisCaptureField->getType()->isPointerType()) {
946  // If the enclosing object was captured by value, just use its address.
947  CXXThisValue = ThisFieldLValue.getAddress().getPointer();
948  } else {
949  // Load the lvalue pointed to by the field, since '*this' was captured
950  // by reference.
951  CXXThisValue =
952  EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
953  }
954  }
955  for (auto *FD : MD->getParent()->fields()) {
956  if (FD->hasCapturedVLAType()) {
957  auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
958  SourceLocation()).getScalarVal();
959  auto VAT = FD->getCapturedVLAType();
960  VLASizeMap[VAT->getSizeExpr()] = ExprArg;
961  }
962  }
963  } else {
964  // Not in a lambda; just use 'this' from the method.
965  // FIXME: Should we generate a new load for each use of 'this'? The
966  // fast register allocator would be happier...
967  CXXThisValue = CXXABIThisValue;
968  }
969 
970  // Check the 'this' pointer once per function, if it's available.
971  if (CXXABIThisValue) {
972  SanitizerSet SkippedChecks;
973  SkippedChecks.set(SanitizerKind::ObjectSize, true);
974  QualType ThisTy = MD->getThisType();
975 
976  // If this is the call operator of a lambda with no capture-default, it
977  // may have a static invoker function, which may call this operator with
978  // a null 'this' pointer.
979  if (isLambdaCallOperator(MD) &&
980  MD->getParent()->getLambdaCaptureDefault() == LCD_None)
981  SkippedChecks.set(SanitizerKind::Null, true);
982 
983  EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
984  : TCK_MemberCall,
985  Loc, CXXABIThisValue, ThisTy,
986  getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
987  SkippedChecks);
988  }
989  }
990 
991  // If any of the arguments have a variably modified type, make sure to
992  // emit the type size.
993  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
994  i != e; ++i) {
995  const VarDecl *VD = *i;
996 
997  // Dig out the type as written from ParmVarDecls; it's unclear whether
998  // the standard (C99 6.9.1p10) requires this, but we're following the
999  // precedent set by gcc.
1000  QualType Ty;
1001  if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1002  Ty = PVD->getOriginalType();
1003  else
1004  Ty = VD->getType();
1005 
1006  if (Ty->isVariablyModifiedType())
1007  EmitVariablyModifiedType(Ty);
1008  }
1009  // Emit a location at the end of the prologue.
1010  if (CGDebugInfo *DI = getDebugInfo())
1011  DI->EmitLocation(Builder, StartLoc);
1012 
1013  // TODO: Do we need to handle this in two places like we do with
1014  // target-features/target-cpu?
1015  if (CurFuncDecl)
1016  if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1017  LargestVectorWidth = VecWidth->getVectorWidth();
1018 }
1019 
1020 void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
1021  incrementProfileCounter(Body);
1022  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
1023  EmitCompoundStmtWithoutScope(*S);
1024  else
1025  EmitStmt(Body);
1026 }
1027 
1028 /// When instrumenting to collect profile data, the counts for some blocks
1029 /// such as switch cases need to not include the fall-through counts, so
1030 /// emit a branch around the instrumentation code. When not instrumenting,
1031 /// this just calls EmitBlock().
1032 void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
1033  const Stmt *S) {
1034  llvm::BasicBlock *SkipCountBB = nullptr;
1035  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
1036  // When instrumenting for profiling, the fallthrough to certain
1037  // statements needs to skip over the instrumentation code so that we
1038  // get an accurate count.
1039  SkipCountBB = createBasicBlock("skipcount");
1040  EmitBranch(SkipCountBB);
1041  }
1042  EmitBlock(BB);
1043  uint64_t CurrentCount = getCurrentProfileCount();
1044  incrementProfileCounter(S);
1045  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
1046  if (SkipCountBB)
1047  EmitBlock(SkipCountBB);
1048 }
1049 
1050 /// Tries to mark the given function nounwind based on the
1051 /// non-existence of any throwing calls within it. We believe this is
1052 /// lightweight enough to do at -O0.
1053 static void TryMarkNoThrow(llvm::Function *F) {
1054  // LLVM treats 'nounwind' on a function as part of the type, so we
1055  // can't do this on functions that can be overwritten.
1056  if (F->isInterposable()) return;
1057 
1058  for (llvm::BasicBlock &BB : *F)
1059  for (llvm::Instruction &I : BB)
1060  if (I.mayThrow())
1061  return;
1062 
1063  F->setDoesNotThrow();
1064 }
1065 
1066 QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
1067  FunctionArgList &Args) {
1068  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1069  QualType ResTy = FD->getReturnType();
1070 
1071  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
1072  if (MD && MD->isInstance()) {
1073  if (CGM.getCXXABI().HasThisReturn(GD))
1074  ResTy = MD->getThisType();
1075  else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
1076  ResTy = CGM.getContext().VoidPtrTy;
1077  CGM.getCXXABI().buildThisParam(*this, Args);
1078  }
1079 
1080  // The base version of an inheriting constructor whose constructed base is a
1081  // virtual base is not passed any arguments (because it doesn't actually call
1082  // the inherited constructor).
1083  bool PassedParams = true;
1084  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
1085  if (auto Inherited = CD->getInheritedConstructor())
1086  PassedParams =
1087  getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
1088 
1089  if (PassedParams) {
1090  for (auto *Param : FD->parameters()) {
1091  Args.push_back(Param);
1092  if (!Param->hasAttr<PassObjectSizeAttr>())
1093  continue;
1094 
1095  auto *Implicit = ImplicitParamDecl::Create(
1096  getContext(), Param->getDeclContext(), Param->getLocation(),
1097  /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
1098  SizeArguments[Param] = Implicit;
1099  Args.push_back(Implicit);
1100  }
1101  }
1102 
1103  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
1104  CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
1105 
1106  return ResTy;
1107 }
1108 
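/// shouldUseUndefinedBehaviorReturnOptimization - Return true when flowing
/// off the end of this value-returning function may be treated as
/// unreachable even when StrictReturn is off; the optimization is avoided
/// for trivially copyable return types (see below).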
1109 static bool
1110 shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
1111  const ASTContext &Context) {
1112  QualType T = FD->getReturnType();
1113  // Avoid the optimization for functions that return a record type with a
1114  // trivial destructor or another trivially copyable type.
1115  if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
1116  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1117  return !ClassDecl->hasTrivialDestructor();
1118  }
1119  return !T.isTriviallyCopyableType(Context);
1120 }
1121 
1122 void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1123  const CGFunctionInfo &FnInfo) {
1124  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1125  CurGD = GD;
1126 
1127  FunctionArgList Args;
1128  QualType ResTy = BuildFunctionArgList(GD, Args);
1129 
1130  // Check if we should generate debug info for this function.
1131  if (FD->hasAttr<NoDebugAttr>())
1132  DebugInfo = nullptr; // disable debug info indefinitely for this function
1133 
1134  // The function might not have a body if we're generating thunks for a
1135  // function declaration.
1136  SourceRange BodyRange;
1137  if (Stmt *Body = FD->getBody())
1138  BodyRange = Body->getSourceRange();
1139  else
1140  BodyRange = FD->getLocation();
1141  CurEHLocation = BodyRange.getEnd();
1142 
1143  // Use the location of the start of the function to determine where
1144  // the function definition is located. By default use the location
1145  // of the declaration as the location for the subprogram. A function
1146  // may lack a declaration in the source code if it is created by code
1147  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1148  SourceLocation Loc = FD->getLocation();
1149 
1150  // If this is a function specialization then use the pattern body
1151  // as the location for the function.
1152  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1153  if (SpecDecl->hasBody(SpecDecl))
1154  Loc = SpecDecl->getLocation();
1155 
1156  Stmt *Body = FD->getBody();
1157 
1158  // Initialize helper which will detect jumps which can cause invalid lifetime
1159  // markers.
1160  if (Body && ShouldEmitLifetimeMarkers)
1161  Bypasses.Init(Body);
1162 
1163  // Emit the standard function prologue.
1164  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1165 
1166  // Generate the body of the function.
1167  PGO.assignRegionCounters(GD, CurFn);
1168  if (isa<CXXDestructorDecl>(FD))
1169  EmitDestructorBody(Args);
1170  else if (isa<CXXConstructorDecl>(FD))
1171  EmitConstructorBody(Args);
1172  else if (getLangOpts().CUDA &&
1173  !getLangOpts().CUDAIsDevice &&
1174  FD->hasAttr<CUDAGlobalAttr>())
1175  CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1176  else if (isa<CXXMethodDecl>(FD) &&
1177  cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1178  // The lambda static invoker function is special, because it forwards or
1179  // clones the body of the function call operator (but is actually static).
1180  EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1181  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
1182  (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1183  cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
1184  // Implicit copy-assignment gets the same special treatment as implicit
1185  // copy-constructors.
1186  emitImplicitAssignmentOperatorBody(Args);
1187  } else if (Body) {
1188  EmitFunctionBody(Body);
1189  } else
1190  llvm_unreachable("no definition for emitted function");
1191 
1192  // C++11 [stmt.return]p2:
1193  // Flowing off the end of a function [...] results in undefined behavior in
1194  // a value-returning function.
1195  // C11 6.9.1p12:
1196  // If the '}' that terminates a function is reached, and the value of the
1197  // function call is used by the caller, the behavior is undefined.
1198  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1199  !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1200  bool ShouldEmitUnreachable =
1201  CGM.getCodeGenOpts().StrictReturn ||
1202  shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
1203  if (SanOpts.has(SanitizerKind::Return)) {
1204  SanitizerScope SanScope(this);
1205  llvm::Value *IsFalse = Builder.getFalse();
1206  EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
1207  SanitizerHandler::MissingReturn,
1208  EmitCheckSourceLocation(FD->getLocation()), None);
1209  } else if (ShouldEmitUnreachable) {
1210  if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1211  EmitTrapCall(llvm::Intrinsic::trap);
1212  }
1213  if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
1214  Builder.CreateUnreachable();
1215  Builder.ClearInsertionPoint();
1216  }
1217  }
1218 
1219  // Emit the standard function epilogue.
1220  FinishFunction(BodyRange.getEnd());
1221 
1222  // If we haven't marked the function nothrow through other means, do
1223  // a quick pass now to see if we can.
1224  if (!CurFn->doesNotThrow())
1225  TryMarkNoThrow(CurFn);
1226 }
1227 
1228 /// ContainsLabel - Return true if the statement contains a label in it. If
1229 /// this statement is not executed normally, the absence of a label means
1230 /// that we can just remove the code.
1231 bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1232  // Null statement, not a label!
1233  if (!S) return false;
1234 
1235  // If this is a label, we have to emit the code, consider something like:
1236  // if (0) { ... foo: bar(); } goto foo;
1237  //
1238  // TODO: If anyone cared, we could track __label__'s, since we know that you
1239  // can't jump to one from outside their declared region.
1240  if (isa<LabelStmt>(S))
1241  return true;
1242 
1243  // If this is a case/default statement, and we haven't seen a switch, we have
1244  // to emit the code.
1245  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1246  return true;
1247 
1248  // If this is a switch statement, we want to ignore cases below it.
1249  if (isa<SwitchStmt>(S))
1250  IgnoreCaseStmts = true;
1251 
1252  // Scan subexpressions for verboten labels.
1253  for (const Stmt *SubStmt : S->children())
1254  if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1255  return true;
1256 
1257  return false;
1258 }
1259 
1260 /// containsBreak - Return true if the statement contains a break out of it.
1261 /// If the statement (recursively) contains a switch or loop with a break
1262 /// inside of it, this is fine.
1263 bool CodeGenFunction::containsBreak(const Stmt *S) {
1264  // Null statement, not a label!
1265  if (!S) return false;
1266 
1267  // If this is a switch or loop that defines its own break scope, then we can
1268  // include it and anything inside of it.
1269  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1270  isa<ForStmt>(S))
1271  return false;
1272 
1273  if (isa<BreakStmt>(S))
1274  return true;
1275 
1276  // Scan subexpressions for verboten breaks.
1277  for (const Stmt *SubStmt : S->children())
1278  if (containsBreak(SubStmt))
1279  return true;
1280 
1281  return false;
1282 }
1283 
1284 bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1285  if (!S) return false;
1286 
1287  // Some statement kinds add a scope and thus never add a decl to the current
1288  // scope. Note, this list is longer than the list of statements that might
1289  // have an unscoped decl nested within them, but this way is conservatively
1290  // correct even if more statement kinds are added.
1291  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
1292  isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
1293  isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1294  isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1295  return false;
1296 
1297  if (isa<DeclStmt>(S))
1298  return true;
1299 
1300  for (const Stmt *SubStmt : S->children())
1301  if (mightAddDeclToScope(SubStmt))
1302  return true;
1303 
1304  return false;
1305 }
1306 
1307 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1308 /// to a constant, or if it does but contains a label, return false. If it
1309 /// constant folds return true and set the boolean result in Result.
1310 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1311  bool &ResultBool,
1312  bool AllowLabels) {
1313  llvm::APSInt ResultInt;
1314  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1315  return false;
1316 
1317  ResultBool = ResultInt.getBoolValue();
1318  return true;
1319 }
1320 
1321 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1322 /// to a constant, or if it does but contains a label, return false. If it
1323 /// constant folds return true and set the folded value.
1324 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1325  llvm::APSInt &ResultInt,
1326  bool AllowLabels) {
1327  // FIXME: Rename and handle conversion of other evaluatable things
1328  // to bool.
1329  Expr::EvalResult Result;
1330  if (!Cond->EvaluateAsInt(Result, getContext()))
1331  return false; // Not foldable, not integer or not fully evaluatable.
1332 
1333  llvm::APSInt Int = Result.Val.getInt();
1334  if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1335  return false; // Contains a label.
1336 
1337  ResultInt = Int;
1338  return true;
1339 }
1340 
1341 
1342 
1343 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1344 /// statement) to the specified blocks. Based on the condition, this might try
1345 /// to simplify the codegen of the conditional based on the branch.
1346 ///
1347 void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
1348  llvm::BasicBlock *TrueBlock,
1349  llvm::BasicBlock *FalseBlock,
1350  uint64_t TrueCount) {
1351  Cond = Cond->IgnoreParens();
1352 
1353  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1354 
1355  // Handle X && Y in a condition.
1356  if (CondBOp->getOpcode() == BO_LAnd) {
1357  // If we have "1 && X", simplify the code. "0 && X" would have constant
1358  // folded if the case was simple enough.
1359  bool ConstantBool = false;
1360  if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1361  ConstantBool) {
1362  // br(1 && X) -> br(X).
1363  incrementProfileCounter(CondBOp);
1364  return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1365  TrueCount);
1366  }
1367 
1368  // If we have "X && 1", simplify the code to use an uncond branch.
1369  // "X && 0" would have been constant folded to 0.
1370  if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1371  ConstantBool) {
1372  // br(X && 1) -> br(X).
1373  return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1374  TrueCount);
1375  }
1376 
1377  // Emit the LHS as a conditional. If the LHS conditional is false, we
1378  // want to jump to the FalseBlock.
1379  llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1380  // The counter tells us how often we evaluate RHS, and all of TrueCount
1381  // can be propagated to that branch.
1382  uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1383 
1384  ConditionalEvaluation eval(*this);
1385  {
1386  ApplyDebugLocation DL(*this, Cond);
1387  EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
1388  EmitBlock(LHSTrue);
1389  }
1390 
1391  incrementProfileCounter(CondBOp);
1392  setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1393 
1394  // Any temporaries created here are conditional.
1395  eval.begin(*this);
1396  EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
1397  eval.end(*this);
1398 
1399  return;
1400  }
1401 
1402  if (CondBOp->getOpcode() == BO_LOr) {
1403  // If we have "0 || X", simplify the code. "1 || X" would have constant
1404  // folded if the case was simple enough.
1405  bool ConstantBool = false;
1406  if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1407  !ConstantBool) {
1408  // br(0 || X) -> br(X).
1409  incrementProfileCounter(CondBOp);
1410  return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1411  TrueCount);
1412  }
1413 
1414  // If we have "X || 0", simplify the code to use an uncond branch.
1415  // "X || 1" would have been constant folded to 1.
1416  if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1417  !ConstantBool) {
1418  // br(X || 0) -> br(X).
1419  return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1420  TrueCount);
1421  }
1422 
1423  // Emit the LHS as a conditional. If the LHS conditional is true, we
1424  // want to jump to the TrueBlock.
1425  llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1426  // We have the count for entry to the RHS and for the whole expression
1427  // being true, so we can divvy up the True count between the short circuit and
1428  // the RHS.
1429  uint64_t LHSCount =
1430  getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1431  uint64_t RHSCount = TrueCount - LHSCount;
1432 
1433  ConditionalEvaluation eval(*this);
1434  {
1435  ApplyDebugLocation DL(*this, Cond);
1436  EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
1437  EmitBlock(LHSFalse);
1438  }
1439 
1440  incrementProfileCounter(CondBOp);
1441  setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1442 
1443  // Any temporaries created here are conditional.
1444  eval.begin(*this);
1445  EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);
1446 
1447  eval.end(*this);
1448 
1449  return;
1450  }
1451  }
1452 
1453  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1454  // br(!x, t, f) -> br(x, f, t)
1455  if (CondUOp->getOpcode() == UO_LNot) {
1456  // Negate the count.
1457  uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1458  // Negate the condition and swap the destination blocks.
1459  return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1460  FalseCount);
1461  }
1462  }
1463 
1464  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1465  // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1466  llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1467  llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1468 
1469  ConditionalEvaluation cond(*this);
1470  EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1471  getProfileCount(CondOp));
1472 
1473  // When computing PGO branch weights, we only know the overall count for
1474  // the true block. This code is essentially doing tail duplication of the
1475  // naive code-gen, introducing new edges for which counts are not
1476  // available. Divide the counts proportionally between the LHS and RHS of
1477  // the conditional operator.
1478  uint64_t LHSScaledTrueCount = 0;
1479  if (TrueCount) {
1480  double LHSRatio =
1481  getProfileCount(CondOp) / (double)getCurrentProfileCount();
1482  LHSScaledTrueCount = TrueCount * LHSRatio;
1483  }
1484 
1485  cond.begin(*this);
1486  EmitBlock(LHSBlock);
1487  incrementProfileCounter(CondOp);
1488  {
1489  ApplyDebugLocation DL(*this, Cond);
1490  EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1491  LHSScaledTrueCount);
1492  }
1493  cond.end(*this);
1494 
1495  cond.begin(*this);
1496  EmitBlock(RHSBlock);
1497  EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1498  TrueCount - LHSScaledTrueCount);
1499  cond.end(*this);
1500 
1501  return;
1502  }
1503 
1504  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1505  // Conditional operator handling can give us a throw expression as a
1506  // condition for a case like:
1507  // br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
1508  // Fold this to:
1509  // br(c, throw x, br(y, t, f))
1510  EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1511  return;
1512  }
1513 
1514  // If the branch has a condition wrapped by __builtin_unpredictable,
1515  // create metadata that specifies that the branch is unpredictable.
1516  // Don't bother if not optimizing because that metadata would not be used.
1517  llvm::MDNode *Unpredictable = nullptr;
1518  auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1519  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1520  auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1521  if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1522  llvm::MDBuilder MDHelper(getLLVMContext());
1523  Unpredictable = MDHelper.createUnpredictable();
1524  }
1525  }
1526 
1527  // Create branch weights based on the number of times we get here and the
1528  // number of times the condition should be true.
1529  uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
1530  llvm::MDNode *Weights =
1531  createProfileWeights(TrueCount, CurrentCount - TrueCount);
1532 
1533  // Emit the code with the fully general case.
1534  llvm::Value *CondV;
1535  {
1536  ApplyDebugLocation DL(*this, Cond);
1537  CondV = EvaluateExprAsBool(Cond);
1538  }
1539  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
1540 }
1541 
1542 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1543 /// specified stmt yet.
1544 void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
1545  CGM.ErrorUnsupported(S, Type);
1546 }
1547 
1548 /// emitNonZeroVLAInit - Emit the "zero" initialization of a
1549 /// variable-length array whose elements have a non-zero bit-pattern.
1550 ///
1551 /// \param baseType the inner-most element type of the array
1552 /// \param src - a char* pointing to the bit-pattern for a single
1553 /// base element of the array
1554 /// \param sizeInChars - the total size of the VLA, in chars
1555 static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
1556  Address dest, Address src,
1557  llvm::Value *sizeInChars) {
1558  CGBuilderTy &Builder = CGF.Builder;
1559 
1560  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
1561  llvm::Value *baseSizeInChars
1562  = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
1563 
1564  Address begin =
1565  Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
1566  llvm::Value *end =
1567  Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");
1568 
1569  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
1570  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
1571  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
1572 
1573  // Make a loop over the VLA. C99 guarantees that the VLA element
1574  // count must be nonzero.
1575  CGF.EmitBlock(loopBB);
1576 
1577  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
1578  cur->addIncoming(begin.getPointer(), originBB);
1579 
1580  CharUnits curAlign =
1581  dest.getAlignment().alignmentOfArrayElement(baseSize);
1582 
1583  // memcpy the individual element bit-pattern.
1584  Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
1585  /*volatile*/ false);
1586 
1587  // Go to the next element.
1588  llvm::Value *next =
1589  Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
1590 
1591  // Leave if that's the end of the VLA.
1592  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
1593  Builder.CreateCondBr(done, contBB, loopBB);
1594  cur->addIncoming(next, loopBB);
1595 
1596  CGF.EmitBlock(contBB);
1597 }
1598 
1599 void
1600 CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
1601  // Ignore empty classes in C++.
1602  if (getLangOpts().CPlusPlus) {
1603  if (const RecordType *RT = Ty->getAs<RecordType>()) {
1604  if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
1605  return;
1606  }
1607  }
1608 
1609  // Cast the dest ptr to the appropriate i8 pointer type.
1610  if (DestPtr.getElementType() != Int8Ty)
1611  DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1612 
1613  // Get size and alignment info for this aggregate.
1614  CharUnits size = getContext().getTypeSizeInChars(Ty);
1615 
1616  llvm::Value *SizeVal;
1617  const VariableArrayType *vla;
1618 
1619  // Don't bother emitting a zero-byte memset.
1620  if (size.isZero()) {
1621  // But note that getTypeInfo returns 0 for a VLA.
1622  if (const VariableArrayType *vlaType =
1623  dyn_cast_or_null<VariableArrayType>(
1624  getContext().getAsArrayType(Ty))) {
1625  auto VlaSize = getVLASize(vlaType);
1626  SizeVal = VlaSize.NumElts;
1627  CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1628  if (!eltSize.isOne())
1629  SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
1630  vla = vlaType;
1631  } else {
1632  return;
1633  }
1634  } else {
1635  SizeVal = CGM.getSize(size);
1636  vla = nullptr;
1637  }
1638 
1639  // If the type contains a pointer to data member we can't memset it to zero.
1640  // Instead, create a null constant and copy it to the destination.
1641  // TODO: there are other patterns besides zero that we can usefully memset,
1642  // like -1, which happens to be the pattern used by member-pointers.
1643  if (!CGM.getTypes().isZeroInitializable(Ty)) {
1644  // For a VLA, emit a single element, then splat that over the VLA.
1645  if (vla) Ty = getContext().getBaseElementType(vla);
1646 
1647  llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
1648 
1649  llvm::GlobalVariable *NullVariable =
1650  new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
1651  /*isConstant=*/true,
1652  llvm::GlobalVariable::PrivateLinkage,
1653  NullConstant, Twine());
1654  CharUnits NullAlign = DestPtr.getAlignment();
1655  NullVariable->setAlignment(NullAlign.getQuantity());
1656  Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
1657  NullAlign);
1658 
1659  if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
1660 
1661  // Get and call the appropriate llvm.memcpy overload.
1662  Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
1663  return;
1664  }
1665 
1666  // Otherwise, just memset the whole thing to zero. This is legal
1667  // because in LLVM, all default initializers (other than the ones we just
1668  // handled above) are guaranteed to have a bit pattern of all zeros.
1669  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
1670 }
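// Illustrative case (assumed user code, not part of this file): under the
// Itanium C++ ABI a null pointer-to-data-member is encoded as -1, so for
//   struct S { int S::*mp; };
// isZeroInitializable(S) is false and EmitNullInitialization copies the
// private "null constant" global above instead of emitting a memset of zero.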
1671 
1672 llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
1673  // Make sure that there is a block for the indirect goto.
1674  if (!IndirectBranch)
1675  GetIndirectGotoBlock();
1676 
1677  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
1678 
1679  // Make sure the indirect branch includes all of the address-taken blocks.
1680  IndirectBranch->addDestination(BB);
1681  return llvm::BlockAddress::get(CurFn, BB);
1682 }
1683 
1684 llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
1685  // If we already made the indirect branch for indirect goto, return its block.
1686  if (IndirectBranch) return IndirectBranch->getParent();
1687 
1688  CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
1689 
1690  // Create the PHI node that indirect gotos will add entries to.
1691  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
1692  "indirect.goto.dest");
1693 
1694  // Create the indirect branch instruction.
1695  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
1696  return IndirectBranch->getParent();
1697 }
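// Illustrative GNU C (assumed example) exercising GetAddrOfLabel and
// GetIndirectGotoBlock via the labels-as-values extension:
//   void f(int i) {
//     static void *tgt[] = { &&even, &&odd };
//     goto *tgt[i & 1];
//   even: return;
//   odd:  return;
//   }
// Each address-taken label becomes a destination of the shared indirectbr.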
1698 
1699 /// Computes the length of an array in elements, as well as the base
1700 /// element type and a properly-typed first element pointer.
1701 llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
1702  QualType &baseType,
1703  Address &addr) {
1704  const ArrayType *arrayType = origArrayType;
1705 
1706  // If it's a VLA, we have to load the stored size. Note that
1707  // this is the size of the VLA in bytes, not its size in elements.
1708  llvm::Value *numVLAElements = nullptr;
1709  if (isa<VariableArrayType>(arrayType)) {
1710  numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
1711 
1712  // Walk into all VLAs. This doesn't require changes to addr,
1713  // which has type T* where T is the first non-VLA element type.
1714  do {
1715  QualType elementType = arrayType->getElementType();
1716  arrayType = getContext().getAsArrayType(elementType);
1717 
1718  // If we only have VLA components, 'addr' requires no adjustment.
1719  if (!arrayType) {
1720  baseType = elementType;
1721  return numVLAElements;
1722  }
1723  } while (isa<VariableArrayType>(arrayType));
1724 
1725  // We get out here only if we find a constant array type
1726  // inside the VLA.
1727  }
1728 
1729  // We have some number of constant-length arrays, so addr should
1730  // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
1731  // down to the first element of addr.
1732  SmallVector<llvm::Value*, 8> gepIndices;
1733 
1734  // GEP down to the array type.
1735  llvm::ConstantInt *zero = Builder.getInt32(0);
1736  gepIndices.push_back(zero);
1737 
1738  uint64_t countFromCLAs = 1;
1739  QualType eltType;
1740 
1741  llvm::ArrayType *llvmArrayType =
1742  dyn_cast<llvm::ArrayType>(addr.getElementType());
1743  while (llvmArrayType) {
1744  assert(isa<ConstantArrayType>(arrayType));
1745  assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
1746  == llvmArrayType->getNumElements());
1747 
1748  gepIndices.push_back(zero);
1749  countFromCLAs *= llvmArrayType->getNumElements();
1750  eltType = arrayType->getElementType();
1751 
1752  llvmArrayType =
1753  dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
1754  arrayType = getContext().getAsArrayType(arrayType->getElementType());
1755  assert((!llvmArrayType || arrayType) &&
1756  "LLVM and Clang types are out-of-synch");
1757  }
1758 
1759  if (arrayType) {
1760  // From this point onwards, the Clang array type has been emitted
1761  // as some other type (probably a packed struct). Compute the array
1762  // size, and just emit the 'begin' expression as a bitcast.
1763  while (arrayType) {
1764  countFromCLAs *=
1765  cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
1766  eltType = arrayType->getElementType();
1767  arrayType = getContext().getAsArrayType(eltType);
1768  }
1769 
1770  llvm::Type *baseType = ConvertType(eltType);
1771  addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
1772  } else {
1773  // Create the actual GEP.
1774  addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
1775  gepIndices, "array.begin"),
1776  addr.getAlignment());
1777  }
1778 
1779  baseType = eltType;
1780 
1781  llvm::Value *numElements
1782  = llvm::ConstantInt::get(SizeTy, countFromCLAs);
1783 
1784  // If we had any VLA dimensions, factor them in.
1785  if (numVLAElements)
1786  numElements = Builder.CreateNUWMul(numVLAElements, numElements);
1787 
1788  return numElements;
1789 }
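// Worked example (hypothetical declaration): for `int a[n][4][5]` the VLA walk
// above yields numVLAElements == n, the constant-array loop yields
// countFromCLAs == 4 * 5 == 20, and the function returns the NUW product
// n * 20, with `addr` left pointing at the first `int` element.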
1790 
1791 CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
1792  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1793  assert(vla && "type was not a variable array type!");
1794  return getVLASize(vla);
1795 }
1796 
1797 CodeGenFunction::VlaSizePair
1798 CodeGenFunction::getVLASize(const VariableArrayType *type) {
1799  // The number of elements so far; always size_t.
1800  llvm::Value *numElements = nullptr;
1801 
1802  QualType elementType;
1803  do {
1804  elementType = type->getElementType();
1805  llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
1806  assert(vlaSize && "no size for VLA!");
1807  assert(vlaSize->getType() == SizeTy);
1808 
1809  if (!numElements) {
1810  numElements = vlaSize;
1811  } else {
1812  // It's undefined behavior if this wraps around, so mark it that way.
1813  // FIXME: Teach -fsanitize=undefined to trap this.
1814  numElements = Builder.CreateNUWMul(numElements, vlaSize);
1815  }
1816  } while ((type = getContext().getAsVariableArrayType(elementType)));
1817 
1818  return { numElements, elementType };
1819 }
1820 
1821 CodeGenFunction::VlaSizePair
1822 CodeGenFunction::getVLAElements1D(QualType type) {
1823  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1824  assert(vla && "type was not a variable array type!");
1825  return getVLAElements1D(vla);
1826 }
1827 
1828 CodeGenFunction::VlaSizePair
1829 CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
1830  llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
1831  assert(VlaSize && "no size for VLA!");
1832  assert(VlaSize->getType() == SizeTy);
1833  return { VlaSize, Vla->getElementType() };
1834 }
1835 
1836 void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
1837  assert(type->isVariablyModifiedType() &&
1838  "Must pass variably modified type to EmitVLASizes!");
1839 
1840  EnsureInsertPoint();
1841 
1842  // We're going to walk down into the type and look for VLA
1843  // expressions.
1844  do {
1845  assert(type->isVariablyModifiedType());
1846 
1847  const Type *ty = type.getTypePtr();
1848  switch (ty->getTypeClass()) {
1849 
1850 #define TYPE(Class, Base)
1851 #define ABSTRACT_TYPE(Class, Base)
1852 #define NON_CANONICAL_TYPE(Class, Base)
1853 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
1854 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
1855 #include "clang/AST/TypeNodes.def"
1856  llvm_unreachable("unexpected dependent type!");
1857 
1858  // These types are never variably-modified.
1859  case Type::Builtin:
1860  case Type::Complex:
1861  case Type::Vector:
1862  case Type::ExtVector:
1863  case Type::Record:
1864  case Type::Enum:
1865  case Type::Elaborated:
1866  case Type::TemplateSpecialization:
1867  case Type::ObjCTypeParam:
1868  case Type::ObjCObject:
1869  case Type::ObjCInterface:
1870  case Type::ObjCObjectPointer:
1871  llvm_unreachable("type class is never variably-modified!");
1872 
1873  case Type::Adjusted:
1874  type = cast<AdjustedType>(ty)->getAdjustedType();
1875  break;
1876 
1877  case Type::Decayed:
1878  type = cast<DecayedType>(ty)->getPointeeType();
1879  break;
1880 
1881  case Type::Pointer:
1882  type = cast<PointerType>(ty)->getPointeeType();
1883  break;
1884 
1885  case Type::BlockPointer:
1886  type = cast<BlockPointerType>(ty)->getPointeeType();
1887  break;
1888 
1889  case Type::LValueReference:
1890  case Type::RValueReference:
1891  type = cast<ReferenceType>(ty)->getPointeeType();
1892  break;
1893 
1894  case Type::MemberPointer:
1895  type = cast<MemberPointerType>(ty)->getPointeeType();
1896  break;
1897 
1898  case Type::ConstantArray:
1899  case Type::IncompleteArray:
1900  // Losing element qualification here is fine.
1901  type = cast<ArrayType>(ty)->getElementType();
1902  break;
1903 
1904  case Type::VariableArray: {
1905  // Losing element qualification here is fine.
1906  const VariableArrayType *vat = cast<VariableArrayType>(ty);
1907 
1908  // Unknown size indication requires no size computation.
1909  // Otherwise, evaluate and record it.
1910  if (const Expr *size = vat->getSizeExpr()) {
1911  // It's possible that we might have emitted this already,
1912  // e.g. with a typedef and a pointer to it.
1913  llvm::Value *&entry = VLASizeMap[size];
1914  if (!entry) {
1915  llvm::Value *Size = EmitScalarExpr(size);
1916 
1917  // C11 6.7.6.2p5:
1918  // If the size is an expression that is not an integer constant
1919  // expression [...] each time it is evaluated it shall have a value
1920  // greater than zero.
1921  if (SanOpts.has(SanitizerKind::VLABound) &&
1922  size->getType()->isSignedIntegerType()) {
1923  SanitizerScope SanScope(this);
1924  llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
1925  llvm::Constant *StaticArgs[] = {
1926  EmitCheckSourceLocation(size->getBeginLoc()),
1927  EmitCheckTypeDescriptor(size->getType())};
1928  EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
1929  SanitizerKind::VLABound),
1930  SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
1931  }
1932 
1933  // Always zexting here would be wrong if it weren't
1934  // undefined behavior to have a negative bound.
1935  entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
1936  }
1937  }
1938  type = vat->getElementType();
1939  break;
1940  }
1941 
1942  case Type::FunctionProto:
1943  case Type::FunctionNoProto:
1944  type = cast<FunctionType>(ty)->getReturnType();
1945  break;
1946 
1947  case Type::Paren:
1948  case Type::TypeOf:
1949  case Type::UnaryTransform:
1950  case Type::Attributed:
1951  case Type::SubstTemplateTypeParm:
1952  case Type::PackExpansion:
1953  case Type::MacroQualified:
1954  // Keep walking after single level desugaring.
1955  type = type.getSingleStepDesugaredType(getContext());
1956  break;
1957 
1958  case Type::Typedef:
1959  case Type::Decltype:
1960  case Type::Auto:
1961  case Type::DeducedTemplateSpecialization:
1962  // Stop walking: nothing to do.
1963  return;
1964 
1965  case Type::TypeOfExpr:
1966  // Stop walking: emit typeof expression.
1967  EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
1968  return;
1969 
1970  case Type::Atomic:
1971  type = cast<AtomicType>(ty)->getValueType();
1972  break;
1973 
1974  case Type::Pipe:
1975  type = cast<PipeType>(ty)->getElementType();
1976  break;
1977  }
1978  } while (type->isVariablyModifiedType());
1979 }
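// Illustrative walk (assumed example): for a declaration such as
//   int (*p)[n];
// the loop above goes Pointer -> VariableArray, emits the size expression `n`
// once, converts (zero-extends) it to SizeTy, and caches it in VLASizeMap so
// later uses of the same type reuse the already-computed value.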
1980 
1981 Address CodeGenFunction::EmitVAListRef(const Expr* E) {
1982  if (getContext().getBuiltinVaListType()->isArrayType())
1983  return EmitPointerWithAlignment(E);
1984  return EmitLValue(E).getAddress();
1985 }
1986 
1987 Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
1988  return EmitLValue(E).getAddress();
1989 }
1990 
1991 void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
1992  const APValue &Init) {
1993  assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
1994  if (CGDebugInfo *Dbg = getDebugInfo())
1995  if (CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo)
1996  Dbg->EmitGlobalVariable(E->getDecl(), Init);
1997 }
1998 
1999 CodeGenFunction::PeepholeProtection
2000 CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2001  // At the moment, the only aggressive peephole we do in IR gen
2002  // is trunc(zext) folding, but if we add more, we can easily
2003  // extend this protection.
2004 
2005  if (!rvalue.isScalar()) return PeepholeProtection();
2006  llvm::Value *value = rvalue.getScalarVal();
2007  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2008 
2009  // Just make an extra bitcast.
2010  assert(HaveInsertPoint());
2011  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2012  Builder.GetInsertBlock());
2013 
2014  PeepholeProtection protection;
2015  protection.Inst = inst;
2016  return protection;
2017 }
2018 
2019 void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2020  if (!protection.Inst) return;
2021 
2022  // In theory, we could try to duplicate the peepholes now, but whatever.
2023  protection.Inst->eraseFromParent();
2024 }
2025 
2026 void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
2027  QualType Ty, SourceLocation Loc,
2028  SourceLocation AssumptionLoc,
2029  llvm::Value *Alignment,
2030  llvm::Value *OffsetValue) {
2031  llvm::Value *TheCheck;
2032  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2033  CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
2034  if (SanOpts.has(SanitizerKind::Alignment)) {
2035  EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2036  OffsetValue, TheCheck, Assumption);
2037  }
2038 }
2039 
2040 void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
2041  QualType Ty, SourceLocation Loc,
2042  SourceLocation AssumptionLoc,
2043  unsigned Alignment,
2044  llvm::Value *OffsetValue) {
2045  llvm::Value *TheCheck;
2046  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2047  CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
2048  if (SanOpts.has(SanitizerKind::Alignment)) {
2049  llvm::Value *AlignmentVal = llvm::ConstantInt::get(IntPtrTy, Alignment);
2050  EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, AlignmentVal,
2051  OffsetValue, TheCheck, Assumption);
2052  }
2053 }
2054 
2055 void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
2056  const Expr *E,
2057  SourceLocation AssumptionLoc,
2058  unsigned Alignment,
2059  llvm::Value *OffsetValue) {
2060  if (auto *CE = dyn_cast<CastExpr>(E))
2061  E = CE->getSubExprAsWritten();
2062  QualType Ty = E->getType();
2063  SourceLocation Loc = E->getExprLoc();
2064 
2065  EmitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2066  OffsetValue);
2067 }
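// Illustrative caller (assumed): lowering `__builtin_assume_aligned(p, 32)`
// funnels into the EmitAlignmentAssumption overloads above, which attach an
// llvm.assume-based alignment assumption to the emitted pointer value.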
2068 
2069 llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2070  llvm::Value *AnnotatedVal,
2071  StringRef AnnotationStr,
2072  SourceLocation Location) {
2073  llvm::Value *Args[4] = {
2074  AnnotatedVal,
2075  CGM.EmitAnnotationString(AnnotationStr),
2076  CGM.EmitAnnotationUnit(Location),
2077  CGM.EmitAnnotationLineNo(Location)
2078  };
2079  return Builder.CreateCall(AnnotationFn, Args);
2080 }
2081 
2082 void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2083  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2084  // FIXME We create a new bitcast for every annotation because that's what
2085  // llvm-gcc was doing.
2086  for (const auto *I : D->specific_attrs<AnnotateAttr>())
2087  EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
2088  Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
2089  I->getAnnotation(), D->getLocation());
2090 }
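// Illustrative source (assumed) that reaches EmitVarAnnotations:
//   int buf[16] __attribute__((annotate("my_tag")));
// Each annotate attribute lowers to a call to the llvm.var.annotation
// intrinsic on the variable's address, bitcast to i8*.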
2091 
2092 Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2093  Address Addr) {
2094  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2095  llvm::Value *V = Addr.getPointer();
2096  llvm::Type *VTy = V->getType();
2097  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2098  CGM.Int8PtrTy);
2099 
2100  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2101  // FIXME Always emit the cast inst so we can differentiate between
2102  // annotation on the first field of a struct and annotation on the struct
2103  // itself.
2104  if (VTy != CGM.Int8PtrTy)
2105  V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
2106  V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
2107  V = Builder.CreateBitCast(V, VTy);
2108  }
2109 
2110  return Address(V, Addr.getAlignment());
2111 }
2112 
2114 
2115 CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2116  : CGF(CGF) {
2117  assert(!CGF->IsSanitizerScope);
2118  CGF->IsSanitizerScope = true;
2119 }
2120 
2121 CodeGenFunction::SanitizerScope::~SanitizerScope() {
2122  CGF->IsSanitizerScope = false;
2123 }
2124 
2125 void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2126  const llvm::Twine &Name,
2127  llvm::BasicBlock *BB,
2128  llvm::BasicBlock::iterator InsertPt) const {
2129  LoopStack.InsertHelper(I);
2130  if (IsSanitizerScope)
2131  CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
2132 }
2133 
2134 void CGBuilderInserter::InsertHelper(
2135  llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2136  llvm::BasicBlock::iterator InsertPt) const {
2137  llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2138  if (CGF)
2139  CGF->InsertHelper(I, Name, BB, InsertPt);
2140 }
2141 
2142 static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
2143  CodeGenModule &CGM, const FunctionDecl *FD,
2144  std::string &FirstMissing) {
2145  // If there aren't any required features listed then go ahead and return.
2146  if (ReqFeatures.empty())
2147  return false;
2148 
2149  // Now build up the set of caller features and verify that all the required
2150  // features are there.
2151  llvm::StringMap<bool> CallerFeatureMap;
2152  CGM.getFunctionFeatureMap(CallerFeatureMap, GlobalDecl().getWithDecl(FD));
2153 
2154  // Return true only when every required feature is present in the caller;
2155  // a '|'-separated feature group is satisfied by any one of its alternatives.
2156  return std::all_of(
2157  ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
2158  SmallVector<StringRef, 1> OrFeatures;
2159  Feature.split(OrFeatures, '|');
2160  return llvm::any_of(OrFeatures, [&](StringRef Feature) {
2161  if (!CallerFeatureMap.lookup(Feature)) {
2162  FirstMissing = Feature.str();
2163  return false;
2164  }
2165  return true;
2166  });
2167  });
2168 }
2169 
2170 // Emits an error if we don't have a valid set of target features for the
2171 // called function.
2172 void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2173  const FunctionDecl *TargetDecl) {
2174  // Early exit if this is an indirect call.
2175  if (!TargetDecl)
2176  return;
2177 
2178  // Get the current enclosing function if it exists. If it doesn't
2179  // we can't check the target features anyhow.
2180  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl);
2181  if (!FD)
2182  return;
2183 
2184  // Grab the required features for the call. For a builtin this is listed in
2185  // the td file with the default cpu, for an always_inline function this is any
2186  // listed cpu and any listed features.
2187  unsigned BuiltinID = TargetDecl->getBuiltinID();
2188  std::string MissingFeature;
2189  if (BuiltinID) {
2190  SmallVector<StringRef, 1> ReqFeatures;
2191  const char *FeatureList =
2192  CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2193  // Return if the builtin doesn't have any required features.
2194  if (!FeatureList || StringRef(FeatureList) == "")
2195  return;
2196  StringRef(FeatureList).split(ReqFeatures, ',');
2197  if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2198  CGM.getDiags().Report(E->getBeginLoc(), diag::err_builtin_needs_feature)
2199  << TargetDecl->getDeclName()
2200  << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2201 
2202  } else if (TargetDecl->hasAttr<TargetAttr>() ||
2203  TargetDecl->hasAttr<CPUSpecificAttr>()) {
2204  // Get the required features for the callee.
2205 
2206  const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2207  TargetAttr::ParsedTargetAttr ParsedAttr = CGM.filterFunctionTargetAttrs(TD);
2208 
2209  SmallVector<StringRef, 1> ReqFeatures;
2210  llvm::StringMap<bool> CalleeFeatureMap;
2211  CGM.getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2212 
2213  for (const auto &F : ParsedAttr.Features) {
2214  if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2215  ReqFeatures.push_back(StringRef(F).substr(1));
2216  }
2217 
2218  for (const auto &F : CalleeFeatureMap) {
2219  // Only positive features are "required".
2220  if (F.getValue())
2221  ReqFeatures.push_back(F.getKey());
2222  }
2223  if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2224  CGM.getDiags().Report(E->getBeginLoc(), diag::err_function_needs_feature)
2225  << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2226  }
2227 }
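// Illustrative scenario (assumed): calling an x86 builtin whose entry in
// BuiltinsX86.def lists a required feature such as "avx" from a caller that
// was not compiled with that feature is rejected above with
// err_builtin_needs_feature; a callee carrying a target or cpu_specific
// attribute that demands features the caller lacks is rejected with
// err_function_needs_feature.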
2228 
2229 void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2230  if (!CGM.getCodeGenOpts().SanitizeStats)
2231  return;
2232 
2233  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2234  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2235  CGM.getSanStats().create(IRB, SSK);
2236 }
2237 
2238 llvm::Value *
2239 CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
2240  llvm::Value *Condition = nullptr;
2241 
2242  if (!RO.Conditions.Architecture.empty())
2243  Condition = EmitX86CpuIs(RO.Conditions.Architecture);
2244 
2245  if (!RO.Conditions.Features.empty()) {
2246  llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
2247  Condition =
2248  Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2249  }
2250  return Condition;
2251 }
2252 
2253 static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2254  llvm::Function *Resolver,
2255  CGBuilderTy &Builder,
2256  llvm::Function *FuncToReturn,
2257  bool SupportsIFunc) {
2258  if (SupportsIFunc) {
2259  Builder.CreateRet(FuncToReturn);
2260  return;
2261  }
2262 
2263  SmallVector<llvm::Value *, 10> Args;
2264  llvm::for_each(Resolver->args(),
2265  [&](llvm::Argument &Arg) { Args.push_back(&Arg); });
2266 
2267  llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2268  Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2269 
2270  if (Resolver->getReturnType()->isVoidTy())
2271  Builder.CreateRetVoid();
2272  else
2273  Builder.CreateRet(Result);
2274 }
2275 
2276 void CodeGenFunction::EmitMultiVersionResolver(
2277  llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2278  assert((getContext().getTargetInfo().getTriple().getArch() ==
2279  llvm::Triple::x86 ||
2280  getContext().getTargetInfo().getTriple().getArch() ==
2281  llvm::Triple::x86_64) &&
2282  "Only implemented for x86 targets");
2283 
2284  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2285 
2286  // Main function's basic block.
2287  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2288  Builder.SetInsertPoint(CurBlock);
2289  EmitX86CpuInit();
2290 
2291  for (const MultiVersionResolverOption &RO : Options) {
2292  Builder.SetInsertPoint(CurBlock);
2293  llvm::Value *Condition = FormResolverCondition(RO);
2294 
2295  // The 'default' or 'generic' case.
2296  if (!Condition) {
2297  assert(&RO == Options.end() - 1 &&
2298  "Default or Generic case must be last");
2299  CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2300  SupportsIFunc);
2301  return;
2302  }
2303 
2304  llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2305  CGBuilderTy RetBuilder(*this, RetBlock);
2306  CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2307  SupportsIFunc);
2308  CurBlock = createBasicBlock("resolver_else", Resolver);
2309  Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2310  }
2311 
2312  // If no generic/default, emit an unreachable.
2313  Builder.SetInsertPoint(CurBlock);
2314  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2315  TrapCall->setDoesNotReturn();
2316  TrapCall->setDoesNotThrow();
2317  Builder.CreateUnreachable();
2318  Builder.ClearInsertionPoint();
2319 }
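// Illustrative multiversioned source (assumed) for which a resolver like the
// one above is emitted on x86:
//   __attribute__((target("avx2")))    int f(void) { return 2; }
//   __attribute__((target("default"))) int f(void) { return 0; }
// With ifunc support the resolver simply returns the selected function;
// otherwise it forwards its arguments to that function with a musttail call.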
2320 
2321 // Loc - where the diagnostic will point, where in the source code this
2322 // alignment has failed.
2323 // SecondaryLoc - if present (will be present if sufficiently different from
2324 // Loc), the diagnostic will additionally point a "Note:" to this location.
2325 // It should be the location where the __attribute__((assume_aligned))
2326 // was written e.g.
2327 void CodeGenFunction::EmitAlignmentAssumptionCheck(
2328  llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2329  SourceLocation SecondaryLoc, llvm::Value *Alignment,
2330  llvm::Value *OffsetValue, llvm::Value *TheCheck,
2331  llvm::Instruction *Assumption) {
2332  assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2333  cast<llvm::CallInst>(Assumption)->getCalledValue() ==
2334  llvm::Intrinsic::getDeclaration(
2335  Builder.GetInsertBlock()->getParent()->getParent(),
2336  llvm::Intrinsic::assume) &&
2337  "Assumption should be a call to llvm.assume().");
2338  assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2339  "Assumption should be the last instruction of the basic block, "
2340  "since the basic block is still being generated.");
2341 
2342  if (!SanOpts.has(SanitizerKind::Alignment))
2343  return;
2344 
2345  // Don't check pointers to volatile data. The behavior here is implementation-
2346  // defined.
2347  if (Ty->getPointeeType().isVolatileQualified())
2348  return;
2349 
2350  // We need to temporarily remove the assumption so we can insert the
2351  // sanitizer check before it, else the check will be dropped by optimizations.
2352  Assumption->removeFromParent();
2353 
2354  {
2355  SanitizerScope SanScope(this);
2356 
2357  if (!OffsetValue)
2358  OffsetValue = Builder.getInt1(0); // no offset.
2359 
2360  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2361  EmitCheckSourceLocation(SecondaryLoc),
2362  EmitCheckTypeDescriptor(Ty)};
2363  llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2364  EmitCheckValue(Alignment),
2365  EmitCheckValue(OffsetValue)};
2366  EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2367  SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2368  }
2369 
2370  // We are now in the (new, empty) "cont" basic block.
2371  // Reintroduce the assumption.
2372  Builder.Insert(Assumption);
2373  // FIXME: Assumption still has its original basic block as its Parent.
2374 }
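// Illustrative interaction (assumed): with -fsanitize=alignment, source like
//   double *d = (double *)__builtin_assume_aligned(p, 64);
// gets the UBSan AlignmentAssumption check above emitted immediately before
// the llvm.assume, diagnosing at run time when `p` is not 64-byte aligned.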
2375 
2376 llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
2377  if (CGDebugInfo *DI = getDebugInfo())
2378  return DI->SourceLocToDebugLoc(Location);
2379 
2380  return llvm::DebugLoc();
2381 }
const llvm::DataLayout & getDataLayout() const
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:178
Defines the clang::ASTContext interface.
Represents a function declaration or definition.
Definition: Decl.h:1748
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural ...
Other implicit parameter.
Definition: Decl.h:1524
no exception specification
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2554
CanQualType VoidPtrTy
Definition: ASTContext.h:1039
A (possibly-)qualified type.
Definition: Type.h:643
llvm::Type * ConvertTypeForMem(QualType T)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
const CodeGenOptions & getCodeGenOpts() const
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler...
Definition: CGExpr.cpp:2777
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EncodeAddrForUseInPrologue(llvm::Function *F, llvm::Constant *Addr)
Encode an address into a form suitable for use in a function prologue.
XRayInstrMask Mask
Definition: XRayInstr.h:64
DominatorTree GraphTraits specialization so the DominatorTree can be iterable by generic graph iterat...
Definition: Dominators.h:29
virtual void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy, FunctionArgList &Params)=0
Insert any ABI-specific implicit parameters into the parameter list for a function.
CharUnits getClassPointerAlignment(const CXXRecordDecl *CD)
Returns the assumed alignment of an opaque pointer to the given class.
Definition: CGClass.cpp:36
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer...
Stmt - This represents one statement.
Definition: Stmt.h:66
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition: Type.h:3372
SanitizerSet Sanitize
Set of enabled sanitizers.
Definition: LangOptions.h:184
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee...
Definition: Type.cpp:505
bool isMain() const
Determines whether this function is "main", which is the entry point into an executable program...
Definition: Decl.cpp:2837
static bool endsWithReturn(const Decl *F)
Determine whether the function F ends with a return stmt.
QualType getThisType() const
Returns the type of the this pointer.
Definition: DeclCXX.cpp:2253
Checking the &#39;this&#39; pointer for a constructor call.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition: CGExpr.cpp:1036
constexpr XRayInstrMask Typed
Definition: XRayInstr.h:40
__v8hi zero
Definition: emmintrin.h:1397
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:88
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD)
Definition: CGClass.cpp:2910
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask >> Checked, SanitizerHandler Check, ArrayRef< llvm::Constant *> StaticArgs, ArrayRef< llvm::Value *> DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition: CGExpr.cpp:2991
The base class of the type hierarchy.
Definition: Type.h:1418
bool ShouldInstrumentFunction()
ShouldInstrumentFunction - Return true if the current function should be instrumented with __cyg_prof...
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1296
bool hasValue() const
Definition: APValue.h:311
bool usesSEHTry() const
Indicates the function uses __try.
Definition: Decl.h:2141
void GenerateCode(GlobalDecl GD, llvm::Function *Fn, const CGFunctionInfo &FnInfo)
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:2829
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:115
static bool hasRequiredFeatures(const SmallVectorImpl< StringRef > &ReqFeatures, CodeGenModule &CGM, const FunctionDecl *FD, std::string &FirstMissing)
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:378
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:690
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const
CGBuilder insert helper.
bool imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc, StringRef Category=StringRef()) const
Imbue XRay attributes to a function, applying the always/never attribute lists in the process...
constexpr XRayInstrMask Function
Definition: XRayInstr.h:38
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2561
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant, or if it does but contains a label, return false.
QualType getElementType() const
Definition: Type.h:2864
bool empty() const
Determines whether the exception-scopes stack is empty.
Definition: EHScopeStack.h:344
This file provides some common utility functions for processing Lambda related AST Constructs...
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
Represents a variable declaration or definition.
Definition: Decl.h:812
QualType getReturnType() const
Definition: Decl.h:2329
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
const T * getAs() const
Member-template getAs<specific type>&#39;.
Definition: Type.h:6818
Extra information about a function prototype.
Definition: Type.h:3784
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts, const LangOptions &LangOpts)
shouldEmitLifetimeMarkers - Decide whether we need emit the life-time markers.
uint64_t getProfileCount(const Stmt *S)
Get the profiler&#39;s count for the given statement.
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:54
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope, by being a (possibly-labelled) DeclStmt.
DiagnosticsEngine & getDiags() const
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified...
Definition: CGExpr.cpp:3288
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
void setCurrentProfileCount(uint64_t Count)
Set the profiler&#39;s current count.
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
llvm::Value * getPointer() const
Definition: Address.h:37
virtual llvm::Constant * getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const
Return a constant used by UBSan as a signature to identify functions possessing type information...
Definition: TargetInfo.h:168
void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn)
Annotate the function with an attribute that disables TSan checking at runtime.
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
Defines the Objective-C statement AST node classes.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1044
bool supportsIFunc() const
Identify whether this target supports IFuncs.
Definition: TargetInfo.h:1127
Represents a parameter to a function.
Definition: Decl.h:1564
static void destroyBlockInfos(CGBlockInfo *info)
Destroy a chain of block layouts.
Definition: CGBlocks.cpp:890
void emitImplicitAssignmentOperatorBody(FunctionArgList &Args)
Definition: CGClass.cpp:1524
long i
Definition: xmmintrin.h:1456
EHScopeStack::stable_iterator PrologueCleanupDepth
PrologueCleanupDepth - The cleanup depth enclosing all the cleanups associated with the parameters...
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
Definition: CGStmt.cpp:505
llvm::DenseMap< const VarDecl *, FieldDecl * > LambdaCaptureFields
const TargetInfo & getTarget() const
An object to manage conditionally-evaluated expressions.
PeepholeProtection protectFromPeepholes(RValue rvalue)
protectFromPeepholes - Protect a value that we&#39;re intending to store to the side, but which will prob...
DeclarationName getDeclName() const
Get the actual, stored name of the declaration, which may be a special name.
Definition: Decl.h:297
void EmitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue, llvm::Value *TheCheck, llvm::Instruction *Assumption)
One of these records is kept for each identifier that is lexed.
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
CGBlockInfo * FirstBlockInfo
FirstBlockInfo - The head of a singly-linked-list of block layouts.
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Definition: CGCall.cpp:2790
static void CreateMultiVersionResolverReturn(CodeGenModule &CGM, llvm::Function *Resolver, CGBuilderTy &Builder, llvm::Function *FuncToReturn, bool SupportsIFunc)
Address getAddress() const
Definition: CGValue.h:326
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:154
LValue EmitLValueForLambdaField(const FieldDecl *Field)
Given that we are currently emitting a lambda, emit an l-value for one of its members.
Definition: CGExpr.cpp:3866
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition: Sanitizers.h:158
field_range fields() const
Definition: Decl.h:3810
Represents a member of a struct/union/class.
Definition: Decl.h:2605
SanitizerMask Mask
Bitmask of enabled sanitizers.
Definition: Sanitizers.h:173
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:49
void InitTempAlloca(Address Alloca, llvm::Value *Value)
InitTempAlloca - Provide an initial value for the given alloca which will be observable at all locati...
Definition: CGExpr.cpp:125
void disableSanitizerForInstruction(llvm::Instruction *I)
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
ArrayRef< ParmVarDecl * > parameters() const
Definition: Decl.h:2289
Address CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
Definition: CGExpr.cpp:133
virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF)=0
Emit the ABI-specific prolog for the function.
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
static bool hasScalarEvaluationKind(QualType T)
Stmt * getBody(const FunctionDecl *&Definition) const
Retrieve the body (definition) of the function.
Definition: Decl.cpp:2808
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
Definition: CGBuilder.h:156
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:37
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:582
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition: Type.cpp:2236
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
bool isOne() const
isOne - Test whether the quantity equals one.
Definition: CharUnits.h:118
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type...
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:66
child_range children()
Definition: Stmt.cpp:212
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
Definition: CGCXXABI.h:108
unsigned getInAllocaFieldIndex() const
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3338
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition: Type.h:6197
The l-value was considered opaque, so the alignment was determined from a type, but that type was an ...
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:78
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:274
bool containsOnlyLifetimeMarkers(stable_iterator Old) const
Definition: CGCleanup.cpp:141
bool isLambda() const
Determine whether this class describes a lambda function object.
Definition: DeclCXX.h:1196
Values of this type can never be null.
Expr * getSizeExpr() const
Definition: Type.h:3008
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition: Type.h:6127
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition: CGExpr.cpp:181
void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn)
Assign counters to regions and configure them for PGO of a given function.
Definition: CodeGenPGO.cpp:759
__v16qu mask
Definition: emmintrin.h:2133
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
bool isInstance() const
Definition: DeclCXX.h:2135
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
bool isAlignmentRequired(const Type *T) const
Determine if the alignment the type has was required using an alignment attribute.
llvm::SanitizerStatReport & getSanStats()
llvm::DebugLoc EmitReturnBlock()
Emit the unified return block, trying to avoid its emission when possible.
bool isLambdaCallOperator(const CXXMethodDecl *MD)
Definition: ASTLambda.h:27
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler&#39;s counter for the given statement by StepV.
uint64_t getCurrentProfileCount()
Get the profiler&#39;s current count.
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition: Decl.cpp:3077
Address NormalCleanupDest
i32s containing the indexes of the cleanup destinations.
virtual void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args)=0
Emits a kernel launch stub.
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
Definition: CGExpr.cpp:2820
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point...
Definition: Expr.cpp:2832
Checking the &#39;this&#39; pointer for a call to a non-static member function.
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition: Decl.cpp:4524
void InsertHelper(llvm::Instruction *I) const
Function called by the CodeGenFunction when an instruction is created.
Definition: CGLoopInfo.cpp:728
bool hasAttr() const
Definition: DeclBase.h:542
ConditionalOperator - The ?: ternary operator.
Definition: Expr.h:3636
CanQualType getReturnType() const
bool isValid() const
Definition: Address.h:35
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:57
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1290
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1636
const TargetCodeGenInfo & getTargetCodeGenInfo()
OverloadedOperatorKind getOverloadedOperator() const
getOverloadedOperator - Which C++ overloaded operator this function represents, if any...
Definition: Decl.cpp:3383
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:38
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB)
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:178
llvm::Value * DecodeAddrUsedInPrologue(llvm::Value *F, llvm::Value *EncodedAddr)
Decode an address used in a function prologue, encoded by EncodeAddrForUseInPrologue.
Address CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition: CGExpr.cpp:118
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns &#39;th...
Definition: CGCXXABI.h:106
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
llvm::Constant * EmitAnnotationUnit(SourceLocation Loc)
Emit the annotation&#39;s translation unit.
llvm::BasicBlock * EHResumeBlock
EHResumeBlock - Unified block containing a call to llvm.eh.resume.
This represents one expression.
Definition: Expr.h:108
Emit only debug info necessary for generating line number tables (-gline-tables-only).
bool isDefaulted() const
Whether this function is defaulted per C++0x.
Definition: Decl.h:2048
static Address invalid()
Definition: Address.h:34
bool isObjCRetainableType() const
Definition: Type.cpp:4028
#define V(N, I)
Definition: ASTContext.h:2905
static void TryMarkNoThrow(llvm::Function *F)
Tries to mark the given function nounwind based on the non-existence of any throwing calls within it...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements, of a variable length array type, plus that largest non-variably-sized element type.
const char * getRequiredFeatures(unsigned ID) const
Definition: Builtins.h:210
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:62
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
llvm::LLVMContext & getLLVMContext()
llvm::BasicBlock * GetIndirectGotoBlock()
void GenOpenCLArgMetadata(llvm::Function *FN, const FunctionDecl *FD=nullptr, CodeGenFunction *CGF=nullptr)
OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument information in the program executab...
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char, signed char, short, int, long..], or an enum decl which has a signed representation.
Definition: Type.cpp:1875
QualType getType() const
Definition: Expr.h:137
void EmitConstructorBody(FunctionArgList &Args)
EmitConstructorBody - Emits the body of the current constructor.
Definition: CGClass.cpp:815
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V, QualType Type, CharUnits Alignment=CharUnits::Zero(), SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Emit a check that V is the address of storage of the appropriate size and alignment for an object of ...
Definition: CGExpr.cpp:652
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:196
llvm::Constant * EmitAnnotationString(StringRef Str)
Emit an annotation string.
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:296
SourceLocation getEnd() const
QualType getFunctionTypeWithExceptionSpec(QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI)
Get a function type and produce the equivalent function type with the specified exception specificati...
An object which temporarily prevents a value from being destroyed by aggressive peephole optimization...
UnaryOperator - This represents the unary-expression&#39;s (except sizeof and alignof), the postinc/postdec operators from postfix-expression, and various extensions.
Definition: Expr.h:1949
QualType getFunctionType(QualType ResultTy, ArrayRef< QualType > Args, const FunctionProtoType::ExtProtoInfo &EPI) const
Return a normal function type with a typed argument list.
Definition: ASTContext.h:1379
ValueDecl * getDecl()
Definition: Expr.h:1157
const LangOptions & getLangOpts() const
ASTContext & getContext() const
virtual void startNewFunction()
Definition: Mangle.h:75
CallingConv
CallingConv - Specifies the calling convention that a function uses.
Definition: Specifiers.h:264
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:40
The l-value was considered opaque, so the alignment was determined from a type.
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, std::initializer_list< llvm::Value **> ValuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
Definition: CGCleanup.cpp:417
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition: Sanitizers.h:161
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:141
QualType getCanonicalType() const
Definition: Type.h:6166
Encodes a location in the source.
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go...
QualType getSingleStepDesugaredType(const ASTContext &Context) const
Return the specified type with one level of "sugar" removed from the type.
Definition: Type.h:956
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition: CGExpr.cpp:163
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2109
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition: CGCall.cpp:295
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
QualType getElementType() const
Definition: Type.h:3220
const Decl * getDecl() const
Definition: GlobalDecl.h:76
Represents the declaration of a label.
Definition: Decl.h:468
ParsedAttr - Represents a syntactic attribute.
Definition: ParsedAttr.h:116
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:697
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const
This forwards to CodeGenFunction::InsertHelper.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
TargetAttr::ParsedTargetAttr filterFunctionTargetAttrs(const TargetAttr *TD)
Parses the target attributes passed in, and returns only the ones that are valid feature names...
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2109
void EmitStmt(const Stmt *S, ArrayRef< const Attr *> Attrs=None)
EmitStmt - Emit the code for the statement.
Definition: CGStmt.cpp:44
SanitizerSet SanOpts
Sanitizers enabled for this function.
constexpr XRayInstrMask Custom
Definition: XRayInstr.h:39
llvm::Constant * GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH=false)
Get the address of the RTTI descriptor for the given type.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
An aligned address.
Definition: Address.h:24
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
XRayInstrSet XRayInstrumentationBundle
Set of XRay instrumentation kinds to emit.
TypeClass getTypeClass() const
Definition: Type.h:1824
MangleContext & getMangleContext()
Gets the mangle context.
Definition: CGCXXABI.h:96
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
llvm::Constant * EmitAnnotationLineNo(SourceLocation L)
Emit the annotation line number.
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
bool hasImplicitReturnZero() const
Whether falling off this function implicitly returns null/zero.
Definition: Decl.h:2066
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression, because a __builtin_ms_va_list is a pointer to a char.
void getFunctionFeatureMap(llvm::StringMap< bool > &FeatureMap, GlobalDecl GD)
const CGFunctionInfo * CurFnInfo
void EmitStartEHSpec(const Decl *D)
EmitStartEHSpec - Emit the start of the exception spec.
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:215
This is an IRBuilder insertion helper that forwards to CodeGenFunction::InsertHelper, which adds necessary metadata to instructions.
Definition: CGBuilder.h:25
Address EmitVAListRef(const Expr *E)
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type, returning the result.
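For illustration only, a sketch (hypothetical helper, not from this file) pairing EmitScalarExpr with createBasicBlock/EmitBlock/EmitBranch to emit a statement guarded by a condition of integer or pointer type:
static void emitIfNonZero(CodeGenFunction &CGF, const Expr *Cond, const Stmt *Body) {
  llvm::Value *CondV = CGF.EmitScalarExpr(Cond);   // scalar LLVM value for Cond
  llvm::BasicBlock *ThenBB = CGF.createBasicBlock("guard.then");
  llvm::BasicBlock *ContBB = CGF.createBasicBlock("guard.cont");
  // CreateIsNotNull compares against zero; assumes Cond is not floating-point.
  CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(CondV), ThenBB, ContBB);
  CGF.EmitBlock(ThenBB);
  CGF.EmitStmt(Body);
  CGF.EmitBranch(ContBB);   // avoids branches from dummy blocks
  CGF.EmitBlock(ContBB);
}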
FunctionArgList - Type for representing both the decl and type of parameters to a function...
Definition: CGCall.h:358
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:58
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType, Address dest, Address src, llvm::Value *sizeInChars)
emitNonZeroVLAInit - Emit the "zero" initialization of a variable-length array whose elements have a non-zero bit pattern.
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location)
Emit an annotation call (intrinsic).
Dataflow Directional Tag Classes.
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:580
static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx)
void Init(const Stmt *Body)
Clear the object and pre-process for the given statement, usually function body statement.
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:90
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:69
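A small, hypothetical contrast of the two load helpers (Addr, RawPtr and CGF are placeholder names): the Address overload carries its own alignment, while the raw-pointer overload must be told the alignment explicitly.
llvm::Value *V1 = CGF.Builder.CreateLoad(Addr, "val");   // Addr: clang::CodeGen::Address
llvm::Value *V2 = CGF.Builder.CreateAlignedLoad(RawPtr,  // RawPtr: llvm::Value*
                                                CharUnits::fromQuantity(8), "raw.val");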
FunctionDecl * getTemplateInstantiationPattern() const
Retrieve the function declaration from which this function could be instantiated, if it is an instantiation.
Definition: Decl.cpp:3494
void EmitFunctionBody(const Stmt *Body)
const CXXRecordDecl * getParent() const
Returns the parent of this method declaration, which is the class in which this method is defined...
Definition: DeclCXX.h:2232
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks, lambdas, etc.
Definition: DeclBase.cpp:990
bool isMSVCRTEntryPoint() const
Determines whether this function is a MSVCRT user defined entry point.
Definition: Decl.cpp:2845
void buildThisParam(CodeGenFunction &CGF, FunctionArgList &Params)
Build a parameter variable suitable for 'this'.
Definition: CGCXXABI.cpp:121
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type *> Tys=None)
llvm::SmallVector< char, 256 > LifetimeExtendedCleanupStack
bool has(XRayInstrMask K) const
Definition: XRayInstr.h:46
llvm::Module & getModule() const
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
JumpDest ReturnBlock
ReturnBlock - Unified return block.
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/classes.
Definition: Type.h:4423
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
When instrumenting to collect profile data, the counts for some blocks such as switch cases need to not include the fall-through counts, so emit a branch around the instrumentation code.
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
CodeGenTypes & getTypes() const
CharUnits getIndirectAlign() const
T * getAttr() const
Definition: DeclBase.h:538
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:51
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert allocas.
ExtVectorType - Extended vector type.
Definition: Type.h:3304
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
CodeGenOptions - Track various options which control how the code is optimized and passed to the backend.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:450
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
Optional< NullabilityKind > getNullability(const ASTContext &context) const
Determine the nullability of the given type.
Definition: Type.cpp:3799
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat]...
Definition: APValue.h:76
void getCaptureFields(llvm::DenseMap< const VarDecl *, FieldDecl *> &Captures, FieldDecl *&ThisCapture) const
For a closure type, retrieve the mapping from captured variables and this to the non-static data members that capture them.
Definition: DeclCXX.cpp:1404
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types, and incomplete types.
Definition: Type.cpp:2062
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition: DeclBase.h:524
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate.h) and friends (in DeclFriend.h).
void unprotectFromPeepholes(PeepholeProtection protection)
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
bool hasUnaligned() const
Definition: Type.h:293
Represents a C++ struct/union/class.
Definition: DeclCXX.h:300
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block, taking care to avoid creation of branches from dummy blocks.
Definition: CGStmt.cpp:470
bool isVoidType() const
Definition: Type.h:6610
llvm::Type * ConvertType(QualType T)
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function...
Definition: CGCall.cpp:2219
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:6154
static llvm::Constant * getPrologueSignature(CodeGenModule &CGM, const FunctionDecl *FD)
Return the UBSan prologue signature for FD if one is available.
LambdaCaptureDefault getLambdaCaptureDefault() const
Definition: DeclCXX.h:1228
LValue EmitLValue(const Expr *E)
EmitLValue - Emit code to compute a designator that specifies the location of the expression...
Definition: CGExpr.cpp:1243
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
Builtin::Context & BuiltinInfo
Definition: ASTContext.h:568
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address of the lvalue, then loads the result as an rvalue, returning the rvalue.
Definition: CGExpr.cpp:1772
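Sketch only (hypothetical fragment; Addr, Ty and Loc are placeholders): wrap an Address in an LValue with MakeAddrLValue, load it with EmitLoadOfLValue, and unwrap the scalar result.
LValue LV = CGF.MakeAddrLValue(Addr, Ty);    // Ty: QualType stored at Addr
RValue RV = CGF.EmitLoadOfLValue(LV, Loc);   // Loc: SourceLocation of the access
llvm::Value *Scalar = RV.getScalarVal();     // assumes Ty is a scalar type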
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:152
Defines the clang::TargetInfo interface.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2449
void EmitMultiVersionResolver(llvm::Function *Resolver, ArrayRef< MultiVersionResolverOption > Options)
CGCXXABI & getCXXABI() const
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2441
__DEVICE__ int max(int __a, int __b)
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1081
static bool shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD, const ASTContext &Context)
void EmitDestructorBody(FunctionArgList &Args)
EmitDestructorBody - Emits the body of the current destructor.
Definition: CGClass.cpp:1414
bool isPointerType() const
Definition: Type.h:6351
This structure provides a set of types that are commonly used during IR emission. ...
void EmitEndEHSpec(const Decl *D)
EmitEndEHSpec - Emit the end of the exception spec.
struct clang::CodeGen::CodeGenFunction::MultiVersionResolverOption::Conds Conditions
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
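For example, a hypothetical fragment that queries a complete type's size and alignment in CharUnits and materializes the size as a size_t constant via CodeGenModule::getSize:
ASTContext &Ctx = CGF.getContext();
CharUnits Size  = Ctx.getTypeSizeInChars(Ty);                    // Ty must be complete
CharUnits Align = Ctx.toCharUnitsFromBits(Ctx.getTypeAlign(Ty));
llvm::ConstantInt *SizeV = CGF.CGM.getSize(Size);                // size_t-typed constant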
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handler.
Definition: CGExpr.cpp:2858
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
Definition: CGStmt.cpp:380
void EmitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
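A hedged sketch of the overload documented above (Ptr, PtrTy, Loc and AssumeLoc are placeholder names): assert to the optimizer that Ptr is 16-byte aligned.
llvm::Value *Align16 = CGF.Builder.getInt64(16);
CGF.EmitAlignmentAssumption(Ptr, PtrTy, Loc, AssumeLoc, Align16);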
QualType getType() const
Definition: Decl.h:647
A trivial tuple used to represent a source range.
LValue - This represents an lvalue reference.
Definition: CGValue.h:166
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.cpp:1430
void EmitVarAnnotations(const VarDecl *D, llvm::Value *V)
Emit local annotations for the local variable V, declared by D.
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:2988
SanitizerMetadata * getSanitizerMetadata()
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
APSInt & getInt()
Definition: APValue.h:332
const LangOptions & getLangOpts() const
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base element type.
static LValue MakeAddr(Address address, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:366
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
SourceLocation getBegin() const
bool isZeroInitializable(QualType T)
IsZeroInitializable - Return whether a type can be zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
Defines enum values for all the target-independent builtin functions.
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
bool isScalar() const
Definition: CGValue.h:51
Attr - This represents one attribute.
Definition: Attr.h:43
SourceLocation getLocation() const
Definition: DeclBase.h:429
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null. If the type contains data member pointers, they will be initialized to -1 in accordance with the Itanium C++ ABI.
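Sketch only (hypothetical fragment): value-initialize a fresh temporary of type Ty; EmitNullInitialization covers both the all-zero case and non-zero "null" representations such as member pointers.
Address Tmp = CGF.CreateMemTemp(Ty, "vinit.tmp");
CGF.EmitNullInitialization(Tmp, Ty);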
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point...
Definition: Expr.cpp:2844
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.