CodeGenFunction.cpp
1 //===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This coordinates the per-function state used while generating code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CodeGenFunction.h"
14 #include "CGBlocks.h"
15 #include "CGCleanup.h"
16 #include "CGCUDARuntime.h"
17 #include "CGCXXABI.h"
18 #include "CGDebugInfo.h"
19 #include "CGOpenMPRuntime.h"
20 #include "CodeGenModule.h"
21 #include "CodeGenPGO.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/ASTContext.h"
24 #include "clang/AST/ASTLambda.h"
25 #include "clang/AST/Decl.h"
26 #include "clang/AST/DeclCXX.h"
27 #include "clang/AST/StmtCXX.h"
28 #include "clang/AST/StmtObjC.h"
29 #include "clang/Basic/Builtins.h"
31 #include "clang/Basic/TargetInfo.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/Dominators.h"
36 #include "llvm/IR/Intrinsics.h"
37 #include "llvm/IR/MDBuilder.h"
38 #include "llvm/IR/Operator.h"
39 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
40 using namespace clang;
41 using namespace CodeGen;
42 
43 /// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
44 /// markers.
45 static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
46  const LangOptions &LangOpts) {
47  if (CGOpts.DisableLifetimeMarkers)
48  return false;
49 
50  // Disable lifetime markers in msan builds.
51  // FIXME: Remove this when msan works with lifetime markers.
52  if (LangOpts.Sanitize.has(SanitizerKind::Memory))
53  return false;
54 
55  // Asan uses markers for use-after-scope checks.
56  if (CGOpts.SanitizeAddressUseAfterScope)
57  return true;
58 
59  // For now, only in optimized builds.
60  return CGOpts.OptimizationLevel != 0;
61 }
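
// For illustration (not emitted by this file directly), when the function
// above returns true the alloca for a local such as 'int x' is bracketed in
// the generated IR roughly like this, which lets the optimizer reuse stack
// slots and lets ASan's use-after-scope check poison the region:
//
//   %x = alloca i32, align 4
//   %0 = bitcast i32* %x to i8*
//   call void @llvm.lifetime.start.p0i8(i64 4, i8* %0)
//   ...uses of %x...
//   call void @llvm.lifetime.end.p0i8(i64 4, i8* %0)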
62 
63 CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
64  : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
65  Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
66  CGBuilderInserterTy(this)),
67  SanOpts(CGM.getLangOpts().Sanitize), DebugInfo(CGM.getModuleDebugInfo()),
68  PGO(cgm), ShouldEmitLifetimeMarkers(shouldEmitLifetimeMarkers(
69  CGM.getCodeGenOpts(), CGM.getLangOpts())) {
70  if (!suppressNewContext)
71  CGM.getCXXABI().getMangleContext().startNewFunction();
72 
73  llvm::FastMathFlags FMF;
74  if (CGM.getLangOpts().FastMath)
75  FMF.setFast();
76  if (CGM.getLangOpts().FiniteMathOnly) {
77  FMF.setNoNaNs();
78  FMF.setNoInfs();
79  }
80  if (CGM.getCodeGenOpts().NoNaNsFPMath) {
81  FMF.setNoNaNs();
82  }
83  if (CGM.getCodeGenOpts().NoSignedZeros) {
84  FMF.setNoSignedZeros();
85  }
86  if (CGM.getCodeGenOpts().ReciprocalMath) {
87  FMF.setAllowReciprocal();
88  }
89  if (CGM.getCodeGenOpts().Reassociate) {
90  FMF.setAllowReassoc();
91  }
92  Builder.setFastMathFlags(FMF);
93 }
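
// For illustration, a rough mapping from the driver-level options to the
// fast-math flags configured above (assuming no conflicting options):
//
//   -ffast-math          -> FMF.setFast()   (implies all flags below)
//   -ffinite-math-only   -> nnan, ninf
//   -fno-signed-zeros    -> nsz
//   -freciprocal-math    -> arcp
//   -fassociative-math   -> reassoc
//
// Any floating-point instruction later created through this Builder that
// supports fast-math flags picks these up automatically.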
94 
95 CodeGenFunction::~CodeGenFunction() {
96  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
97 
98  // If there are any unclaimed block infos, go ahead and destroy them
99  // now. This can happen if IR-gen gets clever and skips evaluating
100  // something.
101  if (FirstBlockInfo)
102  destroyBlockInfos(FirstBlockInfo);
103 
104  if (getLangOpts().OpenMP && CurFn)
105  CGM.getOpenMPRuntime().functionFinished(*this);
106 }
107 
108 CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
109  LValueBaseInfo *BaseInfo,
110  TBAAAccessInfo *TBAAInfo) {
111  return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
112  /* forPointeeType= */ true);
113 }
114 
115 CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
116  LValueBaseInfo *BaseInfo,
117  TBAAAccessInfo *TBAAInfo,
118  bool forPointeeType) {
119  if (TBAAInfo)
120  *TBAAInfo = CGM.getTBAAAccessInfo(T);
121 
122  // Honor alignment typedef attributes even on incomplete types.
123  // We also honor them straight for C++ class types, even as pointees;
124  // there's an expressivity gap here.
125  if (auto TT = T->getAs<TypedefType>()) {
126  if (auto Align = TT->getDecl()->getMaxAlignment()) {
127  if (BaseInfo)
128  *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
129  return getContext().toCharUnitsFromBits(Align);
130  }
131  }
132 
133  if (BaseInfo)
134  *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
135 
136  CharUnits Alignment;
137  if (T->isIncompleteType()) {
138  Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
139  } else {
140  // For C++ class pointees, we don't know whether we're pointing at a
141  // base or a complete object, so we generally need to use the
142  // non-virtual alignment.
143  const CXXRecordDecl *RD;
144  if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
145  Alignment = CGM.getClassPointerAlignment(RD);
146  } else {
147  Alignment = getContext().getTypeAlignInChars(T);
148  if (T.getQualifiers().hasUnaligned())
149  Alignment = CharUnits::One();
150  }
151 
152  // Cap to the global maximum type alignment unless the alignment
153  // was somehow explicit on the type.
154  if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
155  if (Alignment.getQuantity() > MaxAlign &&
156  !getContext().isAlignmentRequired(T))
157  Alignment = CharUnits::fromQuantity(MaxAlign);
158  }
159  }
160  return Alignment;
161 }
162 
163 LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
164  LValueBaseInfo BaseInfo;
165  TBAAAccessInfo TBAAInfo;
166  CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
167  return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
168  TBAAInfo);
169 }
170 
171 /// Given a value of type T* that may not be to a complete object,
172 /// construct an l-value with the natural pointee alignment of T.
173 LValue
174 CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
175  LValueBaseInfo BaseInfo;
176  TBAAAccessInfo TBAAInfo;
177  CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
178  /* forPointeeType= */ true);
179  return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
180 }
181 
182 
183 llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
184  return CGM.getTypes().ConvertTypeForMem(T);
185 }
186 
187 llvm::Type *CodeGenFunction::ConvertType(QualType T) {
188  return CGM.getTypes().ConvertType(T);
189 }
190 
191 TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
192  type = type.getCanonicalType();
193  while (true) {
194  switch (type->getTypeClass()) {
195 #define TYPE(name, parent)
196 #define ABSTRACT_TYPE(name, parent)
197 #define NON_CANONICAL_TYPE(name, parent) case Type::name:
198 #define DEPENDENT_TYPE(name, parent) case Type::name:
199 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
200 #include "clang/AST/TypeNodes.def"
201  llvm_unreachable("non-canonical or dependent type in IR-generation");
202 
203  case Type::Auto:
204  case Type::DeducedTemplateSpecialization:
205  llvm_unreachable("undeduced type in IR-generation");
206 
207  // Various scalar types.
208  case Type::Builtin:
209  case Type::Pointer:
210  case Type::BlockPointer:
211  case Type::LValueReference:
212  case Type::RValueReference:
213  case Type::MemberPointer:
214  case Type::Vector:
215  case Type::ExtVector:
216  case Type::FunctionProto:
217  case Type::FunctionNoProto:
218  case Type::Enum:
219  case Type::ObjCObjectPointer:
220  case Type::Pipe:
221  return TEK_Scalar;
222 
223  // Complexes.
224  case Type::Complex:
225  return TEK_Complex;
226 
227  // Arrays, records, and Objective-C objects.
228  case Type::ConstantArray:
229  case Type::IncompleteArray:
230  case Type::VariableArray:
231  case Type::Record:
232  case Type::ObjCObject:
233  case Type::ObjCInterface:
234  return TEK_Aggregate;
235 
236  // We operate on atomic values according to their underlying type.
237  case Type::Atomic:
238  type = cast<AtomicType>(type)->getValueType();
239  continue;
240  }
241  llvm_unreachable("unknown type kind!");
242  }
243 }
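
// A few concrete examples of the classification above, for illustration:
//
//   int, float, int *, int S::*, enum E   -> TEK_Scalar
//   _Complex double                       -> TEK_Complex
//   struct S { int a, b; }, int[10]       -> TEK_Aggregate
//   _Atomic(int)                          -> classified by its value type,
//                                            i.e. TEK_Scalar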
244 
245 llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
246  // For cleanliness, we try to avoid emitting the return block for
247  // simple cases.
248  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
249 
250  if (CurBB) {
251  assert(!CurBB->getTerminator() && "Unexpected terminated block.");
252 
253  // We have a valid insert point, reuse it if it is empty or there are no
254  // explicit jumps to the return block.
255  if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
256  ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
257  delete ReturnBlock.getBlock();
258  ReturnBlock = JumpDest();
259  } else
260  EmitBlock(ReturnBlock.getBlock());
261  return llvm::DebugLoc();
262  }
263 
264  // Otherwise, if the return block is the target of a single direct
265  // branch then we can just put the code in that block instead. This
266  // cleans up functions which started with a unified return block.
267  if (ReturnBlock.getBlock()->hasOneUse()) {
268  llvm::BranchInst *BI =
269  dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
270  if (BI && BI->isUnconditional() &&
271  BI->getSuccessor(0) == ReturnBlock.getBlock()) {
272  // Record/return the DebugLoc of the simple 'return' expression to be used
273  // later by the actual 'ret' instruction.
274  llvm::DebugLoc Loc = BI->getDebugLoc();
275  Builder.SetInsertPoint(BI->getParent());
276  BI->eraseFromParent();
277  delete ReturnBlock.getBlock();
278  ReturnBlock = JumpDest();
279  return Loc;
280  }
281  }
282 
283  // FIXME: We are at an unreachable point, there is no reason to emit the block
284  // unless it has uses. However, we still need a place to put the debug
285  // region.end for now.
286 
287  EmitBlock(ReturnBlock.getBlock());
288  return llvm::DebugLoc();
289 }
290 
291 static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
292  if (!BB) return;
293  if (!BB->use_empty())
294  return CGF.CurFn->getBasicBlockList().push_back(BB);
295  delete BB;
296 }
297 
298 void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
299  assert(BreakContinueStack.empty() &&
300  "mismatched push/pop in break/continue stack!");
301 
302  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
303  && NumSimpleReturnExprs == NumReturnExprs
304  && ReturnBlock.getBlock()->use_empty();
305  // Usually the return expression is evaluated before the cleanup
306  // code. If the function contains only a simple return statement,
307  // such as a constant, the location before the cleanup code becomes
308  // the last useful breakpoint in the function, because the simple
309  // return expression will be evaluated after the cleanup code. To be
310  // safe, set the debug location for cleanup code to the location of
311  // the return statement. Otherwise the cleanup code should be at the
312  // end of the function's lexical scope.
313  //
314  // If there are multiple branches to the return block, the branch
315  // instructions will get the location of the return statements and
316  // all will be fine.
317  if (CGDebugInfo *DI = getDebugInfo()) {
318  if (OnlySimpleReturnStmts)
319  DI->EmitLocation(Builder, LastStopPoint);
320  else
321  DI->EmitLocation(Builder, EndLoc);
322  }
323 
324  // Pop any cleanups that might have been associated with the
325  // parameters. Do this in whatever block we're currently in; it's
326  // important to do this before we enter the return block or return
327  // edges will be *really* confused.
328  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
329  bool HasOnlyLifetimeMarkers =
330  HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
331  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
332  if (HasCleanups) {
333  // Make sure the line table doesn't jump back into the body for
334  // the ret after it's been at EndLoc.
335  if (CGDebugInfo *DI = getDebugInfo())
336  if (OnlySimpleReturnStmts)
337  DI->EmitLocation(Builder, EndLoc);
338 
339  PopCleanupBlocks(PrologueCleanupDepth);
340  }
341 
342  // Emit function epilog (to return).
343  llvm::DebugLoc Loc = EmitReturnBlock();
344 
345  if (ShouldInstrumentFunction()) {
346  if (CGM.getCodeGenOpts().InstrumentFunctions)
347  CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
348  if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
349  CurFn->addFnAttr("instrument-function-exit-inlined",
350  "__cyg_profile_func_exit");
351  }
352 
353  // Emit debug descriptor for function end.
354  if (CGDebugInfo *DI = getDebugInfo())
355  DI->EmitFunctionEnd(Builder, CurFn);
356 
357  // Reset the debug location to that of the simple 'return' expression, if any
358  // rather than that of the end of the function's scope '}'.
359  ApplyDebugLocation AL(*this, Loc);
360  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
361  EmitEndEHSpec(CurCodeDecl);
362 
363  assert(EHStack.empty() &&
364  "did not remove all scopes from cleanup stack!");
365 
366  // If someone did an indirect goto, emit the indirect goto block at the end of
367  // the function.
368  if (IndirectBranch) {
369  EmitBlock(IndirectBranch->getParent());
370  Builder.ClearInsertionPoint();
371  }
372 
373  // If some of our locals escaped, insert a call to llvm.localescape in the
374  // entry block.
375  if (!EscapedLocals.empty()) {
376  // Invert the map from local to index into a simple vector. There should be
377  // no holes.
378  SmallVector<llvm::Value *, 4> EscapeArgs;
379  EscapeArgs.resize(EscapedLocals.size());
380  for (auto &Pair : EscapedLocals)
381  EscapeArgs[Pair.second] = Pair.first;
382  llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
383  &CGM.getModule(), llvm::Intrinsic::localescape);
384  CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
385  }
386 
387  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
388  llvm::Instruction *Ptr = AllocaInsertPt;
389  AllocaInsertPt = nullptr;
390  Ptr->eraseFromParent();
391 
392  // If someone took the address of a label but never did an indirect goto, we
393  // made a zero entry PHI node, which is illegal, zap it now.
394  if (IndirectBranch) {
395  llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
396  if (PN->getNumIncomingValues() == 0) {
397  PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
398  PN->eraseFromParent();
399  }
400  }
401 
402  EmitIfUsed(*this, EHResumeBlock);
403  EmitIfUsed(*this, TerminateLandingPad);
404  EmitIfUsed(*this, TerminateHandler);
405  EmitIfUsed(*this, UnreachableBlock);
406 
407  for (const auto &FuncletAndParent : TerminateFunclets)
408  EmitIfUsed(*this, FuncletAndParent.second);
409 
410  if (CGM.getCodeGenOpts().EmitDeclMetadata)
411  EmitDeclMetadata();
412 
413  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
414  I = DeferredReplacements.begin(),
415  E = DeferredReplacements.end();
416  I != E; ++I) {
417  I->first->replaceAllUsesWith(I->second);
418  I->first->eraseFromParent();
419  }
420 
421  // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
422  // PHIs if the current function is a coroutine. We don't do it for all
423  // functions as it may result in slight increase in numbers of instructions
424  // if compiled with no optimizations. We do it for coroutine as the lifetime
425  // of CleanupDestSlot alloca make correct coroutine frame building very
426  // difficult.
427  if (NormalCleanupDest.isValid() && isCoroutine()) {
428  llvm::DominatorTree DT(*CurFn);
429  llvm::PromoteMemToReg(
430  cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
431  NormalCleanupDest = Address::invalid();
432  }
433 
434  // Scan function arguments for vector width.
435  for (llvm::Argument &A : CurFn->args())
436  if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
437  LargestVectorWidth = std::max(LargestVectorWidth,
438  VT->getPrimitiveSizeInBits());
439 
440  // Update vector width based on return type.
441  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
442  LargestVectorWidth = std::max(LargestVectorWidth,
443  VT->getPrimitiveSizeInBits());
444 
445  // Add the required-vector-width attribute. This contains the max width from:
446  // 1. min-vector-width attribute used in the source program.
447  // 2. Any builtins used that have a vector width specified.
448  // 3. Values passed in and out of inline assembly.
449  // 4. Width of vector arguments and return types for this function.
450  // 5. Width of vector arguments and return types for functions called by this
451  // function.
452  CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));
453 
454  // If we generated an unreachable return block, delete it now.
455  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
456  Builder.ClearInsertionPoint();
457  ReturnBlock.getBlock()->eraseFromParent();
458  }
459  if (ReturnValue.isValid()) {
460  auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
461  if (RetAlloca && RetAlloca->use_empty()) {
462  RetAlloca->eraseFromParent();
463  ReturnValue = Address::invalid();
464  }
465  }
466 }
467 
468 /// ShouldInstrumentFunction - Return true if the current function should be
469 /// instrumented with __cyg_profile_func_* calls
470 bool CodeGenFunction::ShouldInstrumentFunction() {
471  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
472  !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
473  !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
474  return false;
475  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
476  return false;
477  return true;
478 }
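
// For illustration, with -finstrument-functions the attributes added in
// StartFunction/FinishFunction make the backend insert calls to hooks the
// user provides, roughly as if each function had been written as:
//
//   void f() {
//     __cyg_profile_func_enter((void *)&f, __builtin_return_address(0));
//     ...body...
//     __cyg_profile_func_exit((void *)&f, __builtin_return_address(0));
//   }
//
// Functions marked __attribute__((no_instrument_function)) are skipped,
// which is what the CurFuncDecl check above implements.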
479 
480 /// ShouldXRayInstrument - Return true if the current function should be
481 /// instrumented with XRay nop sleds.
482 bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
483  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
484 }
485 
486 /// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
487 /// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
488 bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
489  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
490  (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
491  CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
492  XRayInstrKind::Custom));
493 }
494 
495 bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
496  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
497  (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
498  CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
499  XRayInstrKind::Typed));
500 }
501 
502 llvm::Constant *
503 CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
504  llvm::Constant *Addr) {
505  // Addresses stored in prologue data can't require run-time fixups and must
506  // be PC-relative. Run-time fixups are undesirable because they necessitate
507  // writable text segments, which are unsafe. And absolute addresses are
508  // undesirable because they break PIE mode.
509 
510  // Add a layer of indirection through a private global. Taking its address
511  // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
512  auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
513  /*isConstant=*/true,
514  llvm::GlobalValue::PrivateLinkage, Addr);
515 
516  // Create a PC-relative address.
517  auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
518  auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
519  auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
520  return (IntPtrTy == Int32Ty)
521  ? PCRelAsInt
522  : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
523 }
524 
525 llvm::Value *
526 CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
527  llvm::Value *EncodedAddr) {
528  // Reconstruct the address of the global.
529  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
530  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
531  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
532  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");
533 
534  // Load the original pointer through the global.
535  return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
536  "decoded_addr");
537 }
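
// For illustration, the encode/decode pair above amounts to the following
// (pseudo-code; the subtraction folds to a constant at compile time):
//
//   encoded = (i32)(ptrtoint(GV) - ptrtoint(F))   // PC-relative slot in the
//                                                 // prologue data
//   GV      = inttoptr(sext(encoded) + ptrtoint(F))
//   Addr    = load GV                             // the original pointer
//
// Keeping the stored value PC-relative avoids load-time relocations against
// the function's prologue data.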
538 
539 void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
540  llvm::Function *Fn)
541 {
542  if (!FD->hasAttr<OpenCLKernelAttr>())
543  return;
544 
545  llvm::LLVMContext &Context = getLLVMContext();
546 
547  CGM.GenOpenCLArgMetadata(Fn, FD, this);
548 
549  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
550  QualType HintQTy = A->getTypeHint();
551  const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
552  bool IsSignedInteger =
553  HintQTy->isSignedIntegerType() ||
554  (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
555  llvm::Metadata *AttrMDArgs[] = {
556  llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
557  CGM.getTypes().ConvertType(A->getTypeHint()))),
558  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
559  llvm::IntegerType::get(Context, 32),
560  llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
561  Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
562  }
563 
564  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
565  llvm::Metadata *AttrMDArgs[] = {
566  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
567  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
568  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
569  Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
570  }
571 
572  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
573  llvm::Metadata *AttrMDArgs[] = {
574  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
575  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
576  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
577  Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
578  }
579 
580  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
581  FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
582  llvm::Metadata *AttrMDArgs[] = {
583  llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
584  Fn->setMetadata("intel_reqd_sub_group_size",
585  llvm::MDNode::get(Context, AttrMDArgs));
586  }
587 }
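
// For illustration, a kernel declared as
//
//   __attribute__((vec_type_hint(float4)))
//   __attribute__((reqd_work_group_size(8, 4, 1)))
//   __kernel void k(__global float *p) { ... }
//
// ends up with function metadata along the lines of
//
//   !vec_type_hint !{<4 x float> undef, i32 0}
//   !reqd_work_group_size !{i32 8, i32 4, i32 1}
//
// where the trailing i32 of vec_type_hint records whether the hinted type is
// a signed integer (1) or not (0).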
588 
589 /// Determine whether the function F ends with a return stmt.
590 static bool endsWithReturn(const Decl* F) {
591  const Stmt *Body = nullptr;
592  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
593  Body = FD->getBody();
594  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
595  Body = OMD->getBody();
596 
597  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
598  auto LastStmt = CS->body_rbegin();
599  if (LastStmt != CS->body_rend())
600  return isa<ReturnStmt>(*LastStmt);
601  }
602  return false;
603 }
604 
605 void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
606  if (SanOpts.has(SanitizerKind::Thread)) {
607  Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
608  Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
609  }
610 }
611 
612 static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
613  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
614  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
615  !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
616  (MD->getNumParams() != 1 && MD->getNumParams() != 2))
617  return false;
618 
619  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
620  return false;
621 
622  if (MD->getNumParams() == 2) {
623  auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
624  if (!PT || !PT->isVoidPointerType() ||
625  !PT->getPointeeType().isConstQualified())
626  return false;
627  }
628 
629  return true;
630 }
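
// For illustration, the member functions this predicate is meant to match
// follow the standard allocator shape:
//
//   T *allocate(size_t n);
//   T *allocate(size_t n, const void *hint);
//
// Both cast the raw storage from void* to T* before any T is constructed,
// which is why the CFI unrelated-cast check is relaxed for them below.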
631 
632 /// Return the UBSan prologue signature for \p FD if one is available.
633 static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
634  const FunctionDecl *FD) {
635  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
636  if (!MD->isStatic())
637  return nullptr;
638  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
639 }
640 
641 void CodeGenFunction::StartFunction(GlobalDecl GD,
642  QualType RetTy,
643  llvm::Function *Fn,
644  const CGFunctionInfo &FnInfo,
645  const FunctionArgList &Args,
646  SourceLocation Loc,
647  SourceLocation StartLoc) {
648  assert(!CurFn &&
649  "Do not use a CodeGenFunction object for more than one function");
650 
651  const Decl *D = GD.getDecl();
652 
653  DidCallStackSave = false;
654  CurCodeDecl = D;
655  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
656  if (FD->usesSEHTry())
657  CurSEHParent = FD;
658  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
659  FnRetTy = RetTy;
660  CurFn = Fn;
661  CurFnInfo = &FnInfo;
662  assert(CurFn->isDeclaration() && "Function already has body?");
663 
664  // If this function has been blacklisted for any of the enabled sanitizers,
665  // disable the sanitizer for the function.
666  do {
667 #define SANITIZER(NAME, ID) \
668  if (SanOpts.empty()) \
669  break; \
670  if (SanOpts.has(SanitizerKind::ID)) \
671  if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc)) \
672  SanOpts.set(SanitizerKind::ID, false);
673 
674 #include "clang/Basic/Sanitizers.def"
675 #undef SANITIZER
676  } while (0);
677 
678  if (D) {
679  // Apply the no_sanitize* attributes to SanOpts.
680  for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
681  SanitizerMask mask = Attr->getMask();
682  SanOpts.Mask &= ~mask;
683  if (mask & SanitizerKind::Address)
684  SanOpts.set(SanitizerKind::KernelAddress, false);
685  if (mask & SanitizerKind::KernelAddress)
686  SanOpts.set(SanitizerKind::Address, false);
687  if (mask & SanitizerKind::HWAddress)
688  SanOpts.set(SanitizerKind::KernelHWAddress, false);
689  if (mask & SanitizerKind::KernelHWAddress)
690  SanOpts.set(SanitizerKind::HWAddress, false);
691  }
692  }
693 
694  // Apply sanitizer attributes to the function.
695  if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
696  Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
697  if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
698  Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
699  if (SanOpts.has(SanitizerKind::Thread))
700  Fn->addFnAttr(llvm::Attribute::SanitizeThread);
701  if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
702  Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
703  if (SanOpts.has(SanitizerKind::SafeStack))
704  Fn->addFnAttr(llvm::Attribute::SafeStack);
705  if (SanOpts.has(SanitizerKind::ShadowCallStack))
706  Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
707 
708  // Apply fuzzing attribute to the function.
709  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
710  Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
711 
712  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
713  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run time.
714  if (SanOpts.has(SanitizerKind::Thread)) {
715  if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
716  IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
717  if (OMD->getMethodFamily() == OMF_dealloc ||
718  OMD->getMethodFamily() == OMF_initialize ||
719  (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
720  markAsIgnoreThreadCheckingAtRuntime(Fn);
721  }
722  }
723  }
724 
725  // Ignore unrelated casts in STL allocate() since the allocator must cast
726  // from void* to T* before object initialization completes. Don't match on the
727  // namespace because not all allocators are in std::
728  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
729  if (matchesStlAllocatorFn(D, getContext()))
730  SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
731  }
732 
733  // Apply xray attributes to the function (as a string, for now)
734  if (D) {
735  if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
736  if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
737  XRayInstrKind::Function)) {
738  if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
739  Fn->addFnAttr("function-instrument", "xray-always");
740  if (XRayAttr->neverXRayInstrument())
741  Fn->addFnAttr("function-instrument", "xray-never");
742  if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
743  if (ShouldXRayInstrumentFunction())
744  Fn->addFnAttr("xray-log-args",
745  llvm::utostr(LogArgs->getArgumentCount()));
746  }
747  } else {
748  if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
749  Fn->addFnAttr(
750  "xray-instruction-threshold",
751  llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
752  }
753  }
754 
755  // Add no-jump-tables value.
756  Fn->addFnAttr("no-jump-tables",
757  llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));
758 
759  // Add profile-sample-accurate value.
760  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
761  Fn->addFnAttr("profile-sample-accurate");
762 
763  if (getLangOpts().OpenCL) {
764  // Add metadata for a kernel function.
765  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
766  EmitOpenCLKernelMetadata(FD, Fn);
767  }
768 
769  // If we are checking function types, emit a function type signature as
770  // prologue data.
771  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
772  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
773  if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
774  // Remove any (C++17) exception specifications, to allow calling e.g. a
775  // noexcept function through a non-noexcept pointer.
776  auto ProtoTy =
777  getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
778  EST_None);
779  llvm::Constant *FTRTTIConst =
780  CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
781  llvm::Constant *FTRTTIConstEncoded =
782  EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
783  llvm::Constant *PrologueStructElems[] = {PrologueSig,
784  FTRTTIConstEncoded};
785  llvm::Constant *PrologueStructConst =
786  llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
787  Fn->setPrologueData(PrologueStructConst);
788  }
789  }
790  }
791 
792  // If we're checking nullability, we need to know whether we can check the
793  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
794  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
795  auto Nullability = FnRetTy->getNullability(getContext());
796  if (Nullability && *Nullability == NullabilityKind::NonNull) {
797  if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
798  CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
799  RetValNullabilityPrecondition =
800  llvm::ConstantInt::getTrue(getLLVMContext());
801  }
802  }
803 
804  // If we're in C++ mode and the function name is "main", it is guaranteed
805  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
806  // used within a program").
807  if (getLangOpts().CPlusPlus)
808  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
809  if (FD->isMain())
810  Fn->addFnAttr(llvm::Attribute::NoRecurse);
811 
812  // If a custom alignment is used, force realigning to this alignment on
813  // any main function which certainly will need it.
814  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
815  if ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
816  CGM.getCodeGenOpts().StackAlignment)
817  Fn->addFnAttr("stackrealign");
818 
819  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
820 
821  // Create a marker to make it easy to insert allocas into the entryblock
822  // later. Don't create this with the builder, because we don't want it
823  // folded.
824  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
825  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);
826 
826 
827  ReturnBlock = getJumpDestInCurrentScope("return");
828 
829  Builder.SetInsertPoint(EntryBB);
830 
831  // If we're checking the return value, allocate space for a pointer to a
832  // precise source location of the checked return statement.
833  if (requiresReturnValueCheck()) {
834  ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
835  InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
836  }
837 
838  // Emit subprogram debug descriptor.
839  if (CGDebugInfo *DI = getDebugInfo()) {
840  // Reconstruct the type from the argument list so that implicit parameters,
841  // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
842  // convention.
843  CallingConv CC = CallingConv::CC_C;
844  if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
845  if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
846  CC = SrcFnTy->getCallConv();
847  SmallVector<QualType, 16> ArgTypes;
848  for (const VarDecl *VD : Args)
849  ArgTypes.push_back(VD->getType());
850  QualType FnType = getContext().getFunctionType(
851  RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
852  DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
853  Builder);
854  }
855 
856  if (ShouldInstrumentFunction()) {
857  if (CGM.getCodeGenOpts().InstrumentFunctions)
858  CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
859  if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
860  CurFn->addFnAttr("instrument-function-entry-inlined",
861  "__cyg_profile_func_enter");
862  if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
863  CurFn->addFnAttr("instrument-function-entry-inlined",
864  "__cyg_profile_func_enter_bare");
865  }
866 
867  // Since emitting the mcount call here impacts optimizations such as function
868  // inlining, we just add an attribute to insert a mcount call in backend.
869  // The attribute "counting-function" is set to mcount function name which is
870  // architecture dependent.
871  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
872  // Calls to fentry/mcount should not be generated if function has
873  // the no_instrument_function attribute.
874  if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
875  if (CGM.getCodeGenOpts().CallFEntry)
876  Fn->addFnAttr("fentry-call", "true");
877  else {
878  Fn->addFnAttr("instrument-function-entry-inlined",
879  getTarget().getMCountName());
880  }
881  }
882  }
883 
884  if (RetTy->isVoidType()) {
885  // Void type; nothing to return.
886  ReturnValue = Address::invalid();
887 
888  // Count the implicit return.
889  if (!endsWithReturn(D))
890  ++NumReturnExprs;
891  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
892  // Indirect return; emit returned value directly into sret slot.
893  // This reduces code size, and affects correctness in C++.
894  auto AI = CurFn->arg_begin();
895  if (CurFnInfo->getReturnInfo().isSRetAfterThis())
896  ++AI;
897  ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
898  if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
899  ReturnValuePointer =
900  CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
901  Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
902  ReturnValue.getPointer(), Int8PtrTy),
903  ReturnValuePointer);
904  }
905  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
906  !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
907  // Load the sret pointer from the argument struct and return into that.
908  unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
909  llvm::Function::arg_iterator EI = CurFn->arg_end();
910  --EI;
911  llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
912  ReturnValuePointer = Address(Addr, getPointerAlign());
913  Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
914  ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
915  } else {
916  ReturnValue = CreateIRTemp(RetTy, "retval");
917 
918  // Tell the epilog emitter to autorelease the result. We do this
919  // now so that various specialized functions can suppress it
920  // during their IR-generation.
921  if (getLangOpts().ObjCAutoRefCount &&
922  !CurFnInfo->isReturnsRetained() &&
923  RetTy->isObjCRetainableType())
924  AutoreleaseResult = true;
925  }
926 
927  EmitStartEHSpec(CurCodeDecl);
928 
929  PrologueCleanupDepth = EHStack.stable_begin();
930 
931  // Emit OpenMP specific initialization of the device functions.
932  if (getLangOpts().OpenMP && CurCodeDecl)
933  CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
934 
935  EmitFunctionProlog(*CurFnInfo, CurFn, Args);
936 
937  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
938  CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
939  const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
940  if (MD->getParent()->isLambda() &&
941  MD->getOverloadedOperator() == OO_Call) {
942  // We're in a lambda; figure out the captures.
943  MD->getParent()->getCaptureFields(LambdaCaptureFields,
944  LambdaThisCaptureField);
945  if (LambdaThisCaptureField) {
946  // If the lambda captures the object referred to by '*this' - either by
947  // value or by reference, make sure CXXThisValue points to the correct
948  // object.
949 
950  // Get the lvalue for the field (which is a copy of the enclosing object
951  // or contains the address of the enclosing object).
952  LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
953  if (!LambdaThisCaptureField->getType()->isPointerType()) {
954  // If the enclosing object was captured by value, just use its address.
955  CXXThisValue = ThisFieldLValue.getAddress().getPointer();
956  } else {
957  // Load the lvalue pointed to by the field, since '*this' was captured
958  // by reference.
959  CXXThisValue =
960  EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
961  }
962  }
963  for (auto *FD : MD->getParent()->fields()) {
964  if (FD->hasCapturedVLAType()) {
965  auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
966  SourceLocation()).getScalarVal();
967  auto VAT = FD->getCapturedVLAType();
968  VLASizeMap[VAT->getSizeExpr()] = ExprArg;
969  }
970  }
971  } else {
972  // Not in a lambda; just use 'this' from the method.
973  // FIXME: Should we generate a new load for each use of 'this'? The
974  // fast register allocator would be happier...
975  CXXThisValue = CXXABIThisValue;
976  }
977 
978  // Check the 'this' pointer once per function, if it's available.
979  if (CXXABIThisValue) {
980  SanitizerSet SkippedChecks;
981  SkippedChecks.set(SanitizerKind::ObjectSize, true);
982  QualType ThisTy = MD->getThisType();
983 
984  // If this is the call operator of a lambda with no capture-default, it
985  // may have a static invoker function, which may call this operator with
986  // a null 'this' pointer.
987  if (isLambdaCallOperator(MD) &&
988  MD->getParent()->getLambdaCaptureDefault() == LCD_None)
989  SkippedChecks.set(SanitizerKind::Null, true);
990 
991  EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
992  : TCK_MemberCall,
993  Loc, CXXABIThisValue, ThisTy,
994  getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
995  SkippedChecks);
996  }
997  }
998 
999  // If any of the arguments have a variably modified type, make sure to
1000  // emit the type size.
1001  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1002  i != e; ++i) {
1003  const VarDecl *VD = *i;
1004 
1005  // Dig out the type as written from ParmVarDecls; it's unclear whether
1006  // the standard (C99 6.9.1p10) requires this, but we're following the
1007  // precedent set by gcc.
1008  QualType Ty;
1009  if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1010  Ty = PVD->getOriginalType();
1011  else
1012  Ty = VD->getType();
1013 
1014  if (Ty->isVariablyModifiedType())
1015  EmitVariablyModifiedType(Ty);
1016  }
1017  // Emit a location at the end of the prologue.
1018  if (CGDebugInfo *DI = getDebugInfo())
1019  DI->EmitLocation(Builder, StartLoc);
1020 
1021  // TODO: Do we need to handle this in two places like we do with
1022  // target-features/target-cpu?
1023  if (CurFuncDecl)
1024  if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1025  LargestVectorWidth = VecWidth->getVectorWidth();
1026 }
1027 
1028 void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
1029  incrementProfileCounter(Body);
1030  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
1031  EmitCompoundStmtWithoutScope(*S);
1032  else
1033  EmitStmt(Body);
1034 }
1035 
1036 /// When instrumenting to collect profile data, the counts for some blocks
1037 /// such as switch cases need to not include the fall-through counts, so
1038 /// emit a branch around the instrumentation code. When not instrumenting,
1039 /// this just calls EmitBlock().
1040 void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
1041  const Stmt *S) {
1042  llvm::BasicBlock *SkipCountBB = nullptr;
1043  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
1044  // When instrumenting for profiling, the fallthrough to certain
1045  // statements needs to skip over the instrumentation code so that we
1046  // get an accurate count.
1047  SkipCountBB = createBasicBlock("skipcount");
1048  EmitBranch(SkipCountBB);
1049  }
1050  EmitBlock(BB);
1051  uint64_t CurrentCount = getCurrentProfileCount();
1052  incrementProfileCounter(S);
1053  setCurrentProfileCount(CurrentCount);
1054  if (SkipCountBB)
1055  EmitBlock(SkipCountBB);
1056 }
1057 
1058 /// Tries to mark the given function nounwind based on the
1059 /// non-existence of any throwing calls within it. We believe this is
1060 /// lightweight enough to do at -O0.
1061 static void TryMarkNoThrow(llvm::Function *F) {
1062  // LLVM treats 'nounwind' on a function as part of the type, so we
1063  // can't do this on functions that can be overwritten.
1064  if (F->isInterposable()) return;
1065 
1066  for (llvm::BasicBlock &BB : *F)
1067  for (llvm::Instruction &I : BB)
1068  if (I.mayThrow())
1069  return;
1070 
1071  F->setDoesNotThrow();
1072 }
1073 
1074 QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
1075  FunctionArgList &Args) {
1076  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1077  QualType ResTy = FD->getReturnType();
1078 
1079  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
1080  if (MD && MD->isInstance()) {
1081  if (CGM.getCXXABI().HasThisReturn(GD))
1082  ResTy = MD->getThisType();
1083  else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
1084  ResTy = CGM.getContext().VoidPtrTy;
1085  CGM.getCXXABI().buildThisParam(*this, Args);
1086  }
1087 
1088  // The base version of an inheriting constructor whose constructed base is a
1089  // virtual base is not passed any arguments (because it doesn't actually call
1090  // the inherited constructor).
1091  bool PassedParams = true;
1092  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
1093  if (auto Inherited = CD->getInheritedConstructor())
1094  PassedParams =
1095  getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
1096 
1097  if (PassedParams) {
1098  for (auto *Param : FD->parameters()) {
1099  Args.push_back(Param);
1100  if (!Param->hasAttr<PassObjectSizeAttr>())
1101  continue;
1102 
1103  auto *Implicit = ImplicitParamDecl::Create(
1104  getContext(), Param->getDeclContext(), Param->getLocation(),
1105  /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
1106  SizeArguments[Param] = Implicit;
1107  Args.push_back(Implicit);
1108  }
1109  }
1110 
1111  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
1112  CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
1113 
1114  return ResTy;
1115 }
1116 
1117 static bool
1118 shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
1119  const ASTContext &Context) {
1120  QualType T = FD->getReturnType();
1121  // Avoid the optimization for functions that return a record type with a
1122  // trivial destructor or another trivially copyable type.
1123  if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
1124  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1125  return !ClassDecl->hasTrivialDestructor();
1126  }
1127  return !T.isTriviallyCopyableType(Context);
1128 }
1129 
1130 void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1131  const CGFunctionInfo &FnInfo) {
1132  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1133  CurGD = GD;
1134 
1135  FunctionArgList Args;
1136  QualType ResTy = BuildFunctionArgList(GD, Args);
1137 
1138  // Check if we should generate debug info for this function.
1139  if (FD->hasAttr<NoDebugAttr>())
1140  DebugInfo = nullptr; // disable debug info indefinitely for this function
1141 
1142  // The function might not have a body if we're generating thunks for a
1143  // function declaration.
1144  SourceRange BodyRange;
1145  if (Stmt *Body = FD->getBody())
1146  BodyRange = Body->getSourceRange();
1147  else
1148  BodyRange = FD->getLocation();
1149  CurEHLocation = BodyRange.getEnd();
1150 
1151  // Use the location of the start of the function to determine where
1152  // the function definition is located. By default use the location
1153  // of the declaration as the location for the subprogram. A function
1154  // may lack a declaration in the source code if it is created by code
1155  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1156  SourceLocation Loc = FD->getLocation();
1157 
1158  // If this is a function specialization then use the pattern body
1159  // as the location for the function.
1160  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1161  if (SpecDecl->hasBody(SpecDecl))
1162  Loc = SpecDecl->getLocation();
1163 
1164  Stmt *Body = FD->getBody();
1165 
1166  // Initialize helper which will detect jumps which can cause invalid lifetime
1167  // markers.
1168  if (Body && ShouldEmitLifetimeMarkers)
1169  Bypasses.Init(Body);
1170 
1171  // Emit the standard function prologue.
1172  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1173 
1174  // Generate the body of the function.
1175  PGO.assignRegionCounters(GD, CurFn);
1176  if (isa<CXXDestructorDecl>(FD))
1177  EmitDestructorBody(Args);
1178  else if (isa<CXXConstructorDecl>(FD))
1179  EmitConstructorBody(Args);
1180  else if (getLangOpts().CUDA &&
1181  !getLangOpts().CUDAIsDevice &&
1182  FD->hasAttr<CUDAGlobalAttr>())
1183  CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1184  else if (isa<CXXMethodDecl>(FD) &&
1185  cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1186  // The lambda static invoker function is special, because it forwards or
1187  // clones the body of the function call operator (but is actually static).
1188  EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1189  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
1190  (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1191  cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
1192  // Implicit copy-assignment gets the same special treatment as implicit
1193  // copy-constructors.
1194  emitImplicitAssignmentOperatorBody(Args);
1195  } else if (Body) {
1196  EmitFunctionBody(Body);
1197  } else
1198  llvm_unreachable("no definition for emitted function");
1199 
1200  // C++11 [stmt.return]p2:
1201  // Flowing off the end of a function [...] results in undefined behavior in
1202  // a value-returning function.
1203  // C11 6.9.1p12:
1204  // If the '}' that terminates a function is reached, and the value of the
1205  // function call is used by the caller, the behavior is undefined.
1206  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1207  !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1208  bool ShouldEmitUnreachable =
1209  CGM.getCodeGenOpts().StrictReturn ||
1210  shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
1211  if (SanOpts.has(SanitizerKind::Return)) {
1212  SanitizerScope SanScope(this);
1213  llvm::Value *IsFalse = Builder.getFalse();
1214  EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
1215  SanitizerHandler::MissingReturn,
1216  EmitCheckSourceLocation(FD->getLocation()), None);
1217  } else if (ShouldEmitUnreachable) {
1218  if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1219  EmitTrapCall(llvm::Intrinsic::trap);
1220  }
1221  if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
1222  Builder.CreateUnreachable();
1223  Builder.ClearInsertionPoint();
1224  }
1225  }
1226 
1227  // Emit the standard function epilogue.
1228  FinishFunction(BodyRange.getEnd());
1229 
1230  // If we haven't marked the function nothrow through other means, do
1231  // a quick pass now to see if we can.
1232  if (!CurFn->doesNotThrow())
1233  TryMarkNoThrow(CurFn);
1234 }
1235 
1236 /// ContainsLabel - Return true if the statement contains a label in it. If
1237 /// this statement is not executed normally, it not containing a label means
1238 /// that we can just remove the code.
1239 bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1240  // Null statement, not a label!
1241  if (!S) return false;
1242 
1243  // If this is a label, we have to emit the code, consider something like:
1244  // if (0) { ... foo: bar(); } goto foo;
1245  //
1246  // TODO: If anyone cared, we could track __label__'s, since we know that you
1247  // can't jump to one from outside their declared region.
1248  if (isa<LabelStmt>(S))
1249  return true;
1250 
1251  // If this is a case/default statement, and we haven't seen a switch, we have
1252  // to emit the code.
1253  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1254  return true;
1255 
1256  // If this is a switch statement, we want to ignore cases below it.
1257  if (isa<SwitchStmt>(S))
1258  IgnoreCaseStmts = true;
1259 
1260  // Scan subexpressions for verboten labels.
1261  for (const Stmt *SubStmt : S->children())
1262  if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1263  return true;
1264 
1265  return false;
1266 }
1267 
1268 /// containsBreak - Return true if the statement contains a break out of it.
1269 /// If the statement (recursively) contains a switch or loop with a break
1270 /// inside of it, this is fine.
1271 bool CodeGenFunction::containsBreak(const Stmt *S) {
1272  // Null statement, not a label!
1273  if (!S) return false;
1274 
1275  // If this is a switch or loop that defines its own break scope, then we can
1276  // include it and anything inside of it.
1277  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1278  isa<ForStmt>(S))
1279  return false;
1280 
1281  if (isa<BreakStmt>(S))
1282  return true;
1283 
1284  // Scan subexpressions for verboten breaks.
1285  for (const Stmt *SubStmt : S->children())
1286  if (containsBreak(SubStmt))
1287  return true;
1288 
1289  return false;
1290 }
1291 
1292 bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1293  if (!S) return false;
1294 
1295  // Some statement kinds add a scope and thus never add a decl to the current
1296  // scope. Note, this list is longer than the list of statements that might
1297  // have an unscoped decl nested within them, but this way is conservatively
1298  // correct even if more statement kinds are added.
1299  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
1300  isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
1301  isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1302  isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1303  return false;
1304 
1305  if (isa<DeclStmt>(S))
1306  return true;
1307 
1308  for (const Stmt *SubStmt : S->children())
1309  if (mightAddDeclToScope(SubStmt))
1310  return true;
1311 
1312  return false;
1313 }
1314 
1315 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1316 /// to a constant, or if it does but contains a label, return false. If it
1317 /// constant folds return true and set the boolean result in Result.
1318 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1319  bool &ResultBool,
1320  bool AllowLabels) {
1321  llvm::APSInt ResultInt;
1322  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1323  return false;
1324 
1325  ResultBool = ResultInt.getBoolValue();
1326  return true;
1327 }
1328 
1329 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1330 /// to a constant, or if it does but contains a label, return false. If it
1331 /// constant folds return true and set the folded value.
1332 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1333  llvm::APSInt &ResultInt,
1334  bool AllowLabels) {
1335  // FIXME: Rename and handle conversion of other evaluatable things
1336  // to bool.
1337  Expr::EvalResult Result;
1338  if (!Cond->EvaluateAsInt(Result, getContext()))
1339  return false; // Not foldable, not integer or not fully evaluatable.
1340 
1341  llvm::APSInt Int = Result.Val.getInt();
1342  if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1343  return false; // Contains a label.
1344 
1345  ResultInt = Int;
1346  return true;
1347 }
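
// For illustration, conditions like these are folded here and never reach
// the general branch-emission path (assuming the value is a compile-time
// constant for the target):
//
//   if (sizeof(void *) == 8) { ... }   // folds to a constant branch
//   if (0) { lbl: f(); }               // folds only if ContainsLabel says no
//                                      // label inside is a potential target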
1348 
1349 
1350 
1351 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1352 /// statement) to the specified blocks. Based on the condition, this might try
1353 /// to simplify the codegen of the conditional based on the branch.
1354 ///
1355 void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
1356  llvm::BasicBlock *TrueBlock,
1357  llvm::BasicBlock *FalseBlock,
1358  uint64_t TrueCount) {
1359  Cond = Cond->IgnoreParens();
1360 
1361  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1362 
1363  // Handle X && Y in a condition.
1364  if (CondBOp->getOpcode() == BO_LAnd) {
1365  // If we have "1 && X", simplify the code. "0 && X" would have constant
1366  // folded if the case was simple enough.
1367  bool ConstantBool = false;
1368  if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1369  ConstantBool) {
1370  // br(1 && X) -> br(X).
1371  incrementProfileCounter(CondBOp);
1372  return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1373  TrueCount);
1374  }
1375 
1376  // If we have "X && 1", simplify the code to use an uncond branch.
1377  // "X && 0" would have been constant folded to 0.
1378  if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1379  ConstantBool) {
1380  // br(X && 1) -> br(X).
1381  return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1382  TrueCount);
1383  }
1384 
1385  // Emit the LHS as a conditional. If the LHS conditional is false, we
1386  // want to jump to the FalseBlock.
1387  llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1388  // The counter tells us how often we evaluate RHS, and all of TrueCount
1389  // can be propagated to that branch.
1390  uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1391 
1392  ConditionalEvaluation eval(*this);
1393  {
1394  ApplyDebugLocation DL(*this, Cond);
1395  EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
1396  EmitBlock(LHSTrue);
1397  }
1398 
1399  incrementProfileCounter(CondBOp);
1400  setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1401 
1402  // Any temporaries created here are conditional.
1403  eval.begin(*this);
1404  EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
1405  eval.end(*this);
1406 
1407  return;
1408  }
1409 
1410  if (CondBOp->getOpcode() == BO_LOr) {
1411  // If we have "0 || X", simplify the code. "1 || X" would have constant
1412  // folded if the case was simple enough.
1413  bool ConstantBool = false;
1414  if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1415  !ConstantBool) {
1416  // br(0 || X) -> br(X).
1417  incrementProfileCounter(CondBOp);
1418  return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1419  TrueCount);
1420  }
1421 
1422  // If we have "X || 0", simplify the code to use an uncond branch.
1423  // "X || 1" would have been constant folded to 1.
1424  if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1425  !ConstantBool) {
1426  // br(X || 0) -> br(X).
1427  return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1428  TrueCount);
1429  }
1430 
1431  // Emit the LHS as a conditional. If the LHS conditional is true, we
1432  // want to jump to the TrueBlock.
1433  llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1434  // We have the count for entry to the RHS and for the whole expression
1435  // being true, so we can divvy up True count between the short circuit and
1436  // the RHS.
1437  uint64_t LHSCount =
1438  getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1439  uint64_t RHSCount = TrueCount - LHSCount;
1440 
1441  ConditionalEvaluation eval(*this);
1442  {
1443  ApplyDebugLocation DL(*this, Cond);
1444  EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
1445  EmitBlock(LHSFalse);
1446  }
1447 
1448  incrementProfileCounter(CondBOp);
1449  setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1450 
1451  // Any temporaries created here are conditional.
1452  eval.begin(*this);
1453  EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);
1454 
1455  eval.end(*this);
1456 
1457  return;
1458  }
1459  }
1460 
1461  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1462  // br(!x, t, f) -> br(x, f, t)
1463  if (CondUOp->getOpcode() == UO_LNot) {
1464  // Negate the count.
1465  uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1466  // Negate the condition and swap the destination blocks.
1467  return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1468  FalseCount);
1469  }
1470  }
1471 
1472  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1473  // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1474  llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1475  llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1476 
1477  ConditionalEvaluation cond(*this);
1478  EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1479  getProfileCount(CondOp));
1480 
1481  // When computing PGO branch weights, we only know the overall count for
1482  // the true block. This code is essentially doing tail duplication of the
1483  // naive code-gen, introducing new edges for which counts are not
1484  // available. Divide the counts proportionally between the LHS and RHS of
1485  // the conditional operator.
1486  uint64_t LHSScaledTrueCount = 0;
1487  if (TrueCount) {
1488  double LHSRatio =
1489  getProfileCount(CondOp) / (double)getCurrentProfileCount();
1490  LHSScaledTrueCount = TrueCount * LHSRatio;
1491  }
1492 
1493  cond.begin(*this);
1494  EmitBlock(LHSBlock);
1495  incrementProfileCounter(CondOp);
1496  {
1497  ApplyDebugLocation DL(*this, Cond);
1498  EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1499  LHSScaledTrueCount);
1500  }
1501  cond.end(*this);
1502 
1503  cond.begin(*this);
1504  EmitBlock(RHSBlock);
1505  EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1506  TrueCount - LHSScaledTrueCount);
1507  cond.end(*this);
1508 
1509  return;
1510  }
1511 
1512  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1513  // Conditional operator handling can give us a throw expression as a
1514  // condition for a case like:
1515  // br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f)
1516  // Fold this to:
1517  // br(c, throw x, br(y, t, f))
1518  EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1519  return;
1520  }
1521 
1522  // If the branch has a condition wrapped by __builtin_unpredictable,
1523  // create metadata that specifies that the branch is unpredictable.
1524  // Don't bother if not optimizing because that metadata would not be used.
1525  llvm::MDNode *Unpredictable = nullptr;
1526  auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1527  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1528  auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1529  if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1530  llvm::MDBuilder MDHelper(getLLVMContext());
1531  Unpredictable = MDHelper.createUnpredictable();
1532  }
1533  }
1534 
1535  // Create branch weights based on the number of times we get here and the
1536  // number of times the condition should be true.
1537  uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
1538  llvm::MDNode *Weights =
1539  createProfileWeights(TrueCount, CurrentCount - TrueCount);
1540 
1541  // Emit the code with the fully general case.
1542  llvm::Value *CondV;
1543  {
1544  ApplyDebugLocation DL(*this, Cond);
1545  CondV = EvaluateExprAsBool(Cond);
1546  }
1547  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
1548 }
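
// For illustration, a condition 'a && b' with no constant operands is lowered
// by the code above into control flow shaped like
//
//   entry:          br a, label %land.lhs.true, label %false.block
//   land.lhs.true:  br b, label %true.block,    label %false.block
//
// with the PGO counter attached to the BinaryOperator counting how many times
// the right-hand side is evaluated.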
1549 
1550 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1551 /// specified stmt yet.
1552 void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
1553  CGM.ErrorUnsupported(S, Type);
1554 }
1555 
1556 /// emitNonZeroVLAInit - Emit the "zero" initialization of a
1557 /// variable-length array whose elements have a non-zero bit-pattern.
1558 ///
1559 /// \param baseType the inner-most element type of the array
1560 /// \param src - a char* pointing to the bit-pattern for a single
1561 /// base element of the array
1562 /// \param sizeInChars - the total size of the VLA, in chars
1563 static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
1564  Address dest, Address src,
1565  llvm::Value *sizeInChars) {
1566  CGBuilderTy &Builder = CGF.Builder;
1567 
1568  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
1569  llvm::Value *baseSizeInChars
1570  = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
1571 
1572  Address begin =
1573  Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
1574  llvm::Value *end =
1575  Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");
1576 
1577  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
1578  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
1579  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
1580 
1581  // Make a loop over the VLA. C99 guarantees that the VLA element
1582  // count must be nonzero.
1583  CGF.EmitBlock(loopBB);
1584 
1585  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
1586  cur->addIncoming(begin.getPointer(), originBB);
1587 
1588  CharUnits curAlign =
1589  dest.getAlignment().alignmentOfArrayElement(baseSize);
1590 
1591  // memcpy the individual element bit-pattern.
1592  Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
1593  /*volatile*/ false);
1594 
1595  // Go to the next element.
1596  llvm::Value *next =
1597  Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
1598 
1599  // Leave if that's the end of the VLA.
1600  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
1601  Builder.CreateCondBr(done, contBB, loopBB);
1602  cur->addIncoming(next, loopBB);
1603 
1604  CGF.EmitBlock(contBB);
1605 }
1606 
1607 void
1608 CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
1609  // Ignore empty classes in C++.
1610  if (getLangOpts().CPlusPlus) {
1611  if (const RecordType *RT = Ty->getAs<RecordType>()) {
1612  if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
1613  return;
1614  }
1615  }
1616 
1617  // Cast the dest ptr to the appropriate i8 pointer type.
1618  if (DestPtr.getElementType() != Int8Ty)
1619  DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1620 
1621  // Get size and alignment info for this aggregate.
1622  CharUnits size = getContext().getTypeSizeInChars(Ty);
1623 
1624  llvm::Value *SizeVal;
1625  const VariableArrayType *vla;
1626 
1627  // Don't bother emitting a zero-byte memset.
1628  if (size.isZero()) {
1629  // But note that getTypeInfo returns 0 for a VLA.
1630  if (const VariableArrayType *vlaType =
1631  dyn_cast_or_null<VariableArrayType>(
1632  getContext().getAsArrayType(Ty))) {
1633  auto VlaSize = getVLASize(vlaType);
1634  SizeVal = VlaSize.NumElts;
1635  CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1636  if (!eltSize.isOne())
1637  SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
1638  vla = vlaType;
1639  } else {
1640  return;
1641  }
1642  } else {
1643  SizeVal = CGM.getSize(size);
1644  vla = nullptr;
1645  }
1646 
1647  // If the type contains a pointer to data member we can't memset it to zero.
1648  // Instead, create a null constant and copy it to the destination.
1649  // TODO: there are other patterns besides zero that we can usefully memset,
1650  // like -1, which happens to be the pattern used by member-pointers.
1651  if (!CGM.getTypes().isZeroInitializable(Ty)) {
1652  // For a VLA, emit a single element, then splat that over the VLA.
1653  if (vla) Ty = getContext().getBaseElementType(vla);
1654 
1655  llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
1656 
1657  llvm::GlobalVariable *NullVariable =
1658  new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
1659  /*isConstant=*/true,
1660  llvm::GlobalVariable::PrivateLinkage,
1661  NullConstant, Twine());
1662  CharUnits NullAlign = DestPtr.getAlignment();
1663  NullVariable->setAlignment(NullAlign.getQuantity());
1664  Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
1665  NullAlign);
1666 
1667  if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
1668 
1669  // Get and call the appropriate llvm.memcpy overload.
1670  Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
1671  return;
1672  }
1673 
1674  // Otherwise, just memset the whole thing to zero. This is legal
1675  // because in LLVM, all default initializers (other than the ones we just
1676  // handled above) are guaranteed to have a bit pattern of all zeros.
1677  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
1678 }
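 // Illustrative case for the non-zero path above: for a type such as
 //   struct S { int S::*Member; };
 // the Itanium C++ ABI represents a null pointer-to-data-member as -1, so a
 // plain zero memset would not produce a correctly null-initialized object.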
1679 
1680 llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
1681  // Make sure that there is a block for the indirect goto.
1682  if (!IndirectBranch)
1683  GetIndirectGotoBlock();
1684 
1685  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
1686 
1687  // Make sure the indirect branch includes all of the address-taken blocks.
1688  IndirectBranch->addDestination(BB);
1689  return llvm::BlockAddress::get(CurFn, BB);
1690 }
1691 
1692 llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
1693  // If we already made the indirect branch for indirect goto, return its block.
1694  if (IndirectBranch) return IndirectBranch->getParent();
1695 
1696  CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
1697 
1698  // Create the PHI node that indirect gotos will add entries to.
1699  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
1700  "indirect.goto.dest");
1701 
1702  // Create the indirect branch instruction.
1703  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
1704  return IndirectBranch->getParent();
1705 }
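 // For illustration: these two functions back GNU computed gotos. Taking a
 // label address with '&&label' goes through GetAddrOfLabel, which registers
 // the label's block as a destination of the shared indirect branch, while
 // 'goto *expr;' ultimately branches through the 'indirectgoto' block created
 // here.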
1706 
1707 /// Computes the length of an array in elements, as well as the base
1708 /// element type and a properly-typed first element pointer.
1709 llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
1710  QualType &baseType,
1711  Address &addr) {
1712  const ArrayType *arrayType = origArrayType;
1713 
1714  // If it's a VLA, we have to load the stored size. Note that
1715  // this is the size of the VLA in bytes, not its size in elements.
1716  llvm::Value *numVLAElements = nullptr;
1717  if (isa<VariableArrayType>(arrayType)) {
1718  numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
1719 
1720  // Walk into all VLAs. This doesn't require changes to addr,
1721  // which has type T* where T is the first non-VLA element type.
1722  do {
1723  QualType elementType = arrayType->getElementType();
1724  arrayType = getContext().getAsArrayType(elementType);
1725 
1726  // If we only have VLA components, 'addr' requires no adjustment.
1727  if (!arrayType) {
1728  baseType = elementType;
1729  return numVLAElements;
1730  }
1731  } while (isa<VariableArrayType>(arrayType));
1732 
1733  // We get out here only if we find a constant array type
1734  // inside the VLA.
1735  }
1736 
1737  // We have some number of constant-length arrays, so addr should
1738  // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
1739  // down to the first element of addr.
1740  SmallVector<llvm::Value*, 8> gepIndices;
1741 
1742  // GEP down to the array type.
1743  llvm::ConstantInt *zero = Builder.getInt32(0);
1744  gepIndices.push_back(zero);
1745 
1746  uint64_t countFromCLAs = 1;
1747  QualType eltType;
1748 
1749  llvm::ArrayType *llvmArrayType =
1750  dyn_cast<llvm::ArrayType>(addr.getElementType());
1751  while (llvmArrayType) {
1752  assert(isa<ConstantArrayType>(arrayType));
1753  assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
1754  == llvmArrayType->getNumElements());
1755 
1756  gepIndices.push_back(zero);
1757  countFromCLAs *= llvmArrayType->getNumElements();
1758  eltType = arrayType->getElementType();
1759 
1760  llvmArrayType =
1761  dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
1762  arrayType = getContext().getAsArrayType(arrayType->getElementType());
1763  assert((!llvmArrayType || arrayType) &&
1764  "LLVM and Clang types are out-of-synch");
1765  }
1766 
1767  if (arrayType) {
1768  // From this point onwards, the Clang array type has been emitted
1769  // as some other type (probably a packed struct). Compute the array
1770  // size, and just emit the 'begin' expression as a bitcast.
1771  while (arrayType) {
1772  countFromCLAs *=
1773  cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
1774  eltType = arrayType->getElementType();
1775  arrayType = getContext().getAsArrayType(eltType);
1776  }
1777 
1778  llvm::Type *baseType = ConvertType(eltType);
1779  addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
1780  } else {
1781  // Create the actual GEP.
1782  addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
1783  gepIndices, "array.begin"),
1784  addr.getAlignment());
1785  }
1786 
1787  baseType = eltType;
1788 
1789  llvm::Value *numElements
1790  = llvm::ConstantInt::get(SizeTy, countFromCLAs);
1791 
1792  // If we had any VLA dimensions, factor them in.
1793  if (numVLAElements)
1794  numElements = Builder.CreateNUWMul(numVLAElements, numElements);
1795 
1796  return numElements;
1797 }
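 // Worked example (hypothetical declarations): for 'int a[2][3]' the constant
 // array walk yields countFromCLAs == 6 with eltType 'int'; for a VLA such as
 // 'int v[n][4]' the VLA walk contributes numVLAElements == n and the constant
 // part contributes 4, so the returned length is 4 * n.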
1798 
1799 CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
1800  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1801  assert(vla && "type was not a variable array type!");
1802  return getVLASize(vla);
1803 }
1804 
1805 CodeGenFunction::VlaSizePair
1806 CodeGenFunction::getVLASize(const VariableArrayType *type) {
1807  // The number of elements so far; always size_t.
1808  llvm::Value *numElements = nullptr;
1809 
1810  QualType elementType;
1811  do {
1812  elementType = type->getElementType();
1813  llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
1814  assert(vlaSize && "no size for VLA!");
1815  assert(vlaSize->getType() == SizeTy);
1816 
1817  if (!numElements) {
1818  numElements = vlaSize;
1819  } else {
1820  // It's undefined behavior if this wraps around, so mark it that way.
1821  // FIXME: Teach -fsanitize=undefined to trap this.
1822  numElements = Builder.CreateNUWMul(numElements, vlaSize);
1823  }
1824  } while ((type = getContext().getAsVariableArrayType(elementType)));
1825 
1826  return { numElements, elementType };
1827 }
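 // Worked example (hypothetical VLA): for 'int a[n][m]' the loop multiplies
 // the cached sizes, so the returned pair is { n * m, int }; the element type
 // is always the innermost non-variably-sized type.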
1828 
1829 CodeGenFunction::VlaSizePair
1830 CodeGenFunction::getVLAElements1D(QualType type) {
1831  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1832  assert(vla && "type was not a variable array type!");
1833  return getVLAElements1D(vla);
1834 }
1835 
1836 CodeGenFunction::VlaSizePair
1837 CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
1838  llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
1839  assert(VlaSize && "no size for VLA!");
1840  assert(VlaSize->getType() == SizeTy);
1841  return { VlaSize, Vla->getElementType() };
1842 }
1843 
1844 void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
1845  assert(type->isVariablyModifiedType() &&
1846  "Must pass variably modified type to EmitVLASizes!");
1847 
1848  EnsureInsertPoint();
1849 
1850  // We're going to walk down into the type and look for VLA
1851  // expressions.
1852  do {
1853  assert(type->isVariablyModifiedType());
1854 
1855  const Type *ty = type.getTypePtr();
1856  switch (ty->getTypeClass()) {
1857 
1858 #define TYPE(Class, Base)
1859 #define ABSTRACT_TYPE(Class, Base)
1860 #define NON_CANONICAL_TYPE(Class, Base)
1861 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
1862 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
1863 #include "clang/AST/TypeNodes.def"
1864  llvm_unreachable("unexpected dependent type!");
1865 
1866  // These types are never variably-modified.
1867  case Type::Builtin:
1868  case Type::Complex:
1869  case Type::Vector:
1870  case Type::ExtVector:
1871  case Type::Record:
1872  case Type::Enum:
1873  case Type::Elaborated:
1874  case Type::TemplateSpecialization:
1875  case Type::ObjCTypeParam:
1876  case Type::ObjCObject:
1877  case Type::ObjCInterface:
1878  case Type::ObjCObjectPointer:
1879  llvm_unreachable("type class is never variably-modified!");
1880 
1881  case Type::Adjusted:
1882  type = cast<AdjustedType>(ty)->getAdjustedType();
1883  break;
1884 
1885  case Type::Decayed:
1886  type = cast<DecayedType>(ty)->getPointeeType();
1887  break;
1888 
1889  case Type::Pointer:
1890  type = cast<PointerType>(ty)->getPointeeType();
1891  break;
1892 
1893  case Type::BlockPointer:
1894  type = cast<BlockPointerType>(ty)->getPointeeType();
1895  break;
1896 
1897  case Type::LValueReference:
1898  case Type::RValueReference:
1899  type = cast<ReferenceType>(ty)->getPointeeType();
1900  break;
1901 
1902  case Type::MemberPointer:
1903  type = cast<MemberPointerType>(ty)->getPointeeType();
1904  break;
1905 
1906  case Type::ConstantArray:
1907  case Type::IncompleteArray:
1908  // Losing element qualification here is fine.
1909  type = cast<ArrayType>(ty)->getElementType();
1910  break;
1911 
1912  case Type::VariableArray: {
1913  // Losing element qualification here is fine.
1914  const VariableArrayType *vat = cast<VariableArrayType>(ty);
1915 
1916  // Unknown size indication requires no size computation.
1917  // Otherwise, evaluate and record it.
1918  if (const Expr *size = vat->getSizeExpr()) {
1919  // It's possible that we might have emitted this already,
1920  // e.g. with a typedef and a pointer to it.
1921  llvm::Value *&entry = VLASizeMap[size];
1922  if (!entry) {
1923  llvm::Value *Size = EmitScalarExpr(size);
1924 
1925  // C11 6.7.6.2p5:
1926  // If the size is an expression that is not an integer constant
1927  // expression [...] each time it is evaluated it shall have a value
1928  // greater than zero.
1929  if (SanOpts.has(SanitizerKind::VLABound) &&
1930  size->getType()->isSignedIntegerType()) {
1931  SanitizerScope SanScope(this);
1932  llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
1933  llvm::Constant *StaticArgs[] = {
1934  EmitCheckSourceLocation(size->getBeginLoc()),
1935  EmitCheckTypeDescriptor(size->getType())};
1936  EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
1937  SanitizerKind::VLABound),
1938  SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
1939  }
1940 
1941  // Always zexting here would be wrong if it weren't
1942  // undefined behavior to have a negative bound.
1943  entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
1944  }
1945  }
1946  type = vat->getElementType();
1947  break;
1948  }
1949 
1950  case Type::FunctionProto:
1951  case Type::FunctionNoProto:
1952  type = cast<FunctionType>(ty)->getReturnType();
1953  break;
1954 
1955  case Type::Paren:
1956  case Type::TypeOf:
1957  case Type::UnaryTransform:
1958  case Type::Attributed:
1959  case Type::SubstTemplateTypeParm:
1960  case Type::PackExpansion:
1961  case Type::MacroQualified:
1962  // Keep walking after single level desugaring.
1963  type = type.getSingleStepDesugaredType(getContext());
1964  break;
1965 
1966  case Type::Typedef:
1967  case Type::Decltype:
1968  case Type::Auto:
1969  case Type::DeducedTemplateSpecialization:
1970  // Stop walking: nothing to do.
1971  return;
1972 
1973  case Type::TypeOfExpr:
1974  // Stop walking: emit typeof expression.
1975  EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
1976  return;
1977 
1978  case Type::Atomic:
1979  type = cast<AtomicType>(ty)->getValueType();
1980  break;
1981 
1982  case Type::Pipe:
1983  type = cast<PipeType>(ty)->getElementType();
1984  break;
1985  }
1986  } while (type->isVariablyModifiedType());
1987 }
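 // For illustration (hypothetical code): in
 //   void f(int n, int m) { int (*p)[n][m]; }
 // the walk descends through the pointer and both VLA levels, evaluating 'n'
 // and 'm' once and caching the zero-extended sizes in VLASizeMap for later
 // sizeof and indexing code to reuse.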
1988 
1989 Address CodeGenFunction::EmitVAListRef(const Expr* E) {
1990  if (getContext().getBuiltinVaListType()->isArrayType())
1991  return EmitPointerWithAlignment(E);
1992  return EmitLValue(E).getAddress();
1993 }
1994 
1995 Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
1996  return EmitLValue(E).getAddress();
1997 }
1998 
1999 void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2000  const APValue &Init) {
2001  assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2002  if (CGDebugInfo *Dbg = getDebugInfo())
2003  if (CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo)
2004  Dbg->EmitGlobalVariable(E->getDecl(), Init);
2005 }
2006 
2007 CodeGenFunction::PeepholeProtection
2008 CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2009  // At the moment, the only aggressive peephole we do in IR gen
2010  // is trunc(zext) folding, but if we add more, we can easily
2011  // extend this protection.
2012 
2013  if (!rvalue.isScalar()) return PeepholeProtection();
2014  llvm::Value *value = rvalue.getScalarVal();
2015  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2016 
2017  // Just make an extra bitcast.
2018  assert(HaveInsertPoint());
2019  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2020  Builder.GetInsertBlock());
2021 
2022  PeepholeProtection protection;
2023  protection.Inst = inst;
2024  return protection;
2025 }
2026 
2027 void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2028  if (!protection.Inst) return;
2029 
2030  // In theory, we could try to duplicate the peepholes now, but whatever.
2031  protection.Inst->eraseFromParent();
2032 }
2033 
2034 void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
2035  QualType Ty, SourceLocation Loc,
2036  SourceLocation AssumptionLoc,
2037  llvm::Value *Alignment,
2038  llvm::Value *OffsetValue) {
2039  llvm::Value *TheCheck;
2040  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2041  CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
2042  if (SanOpts.has(SanitizerKind::Alignment)) {
2043  EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2044  OffsetValue, TheCheck, Assumption);
2045  }
2046 }
2047 
2048 void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
2049  QualType Ty, SourceLocation Loc,
2050  SourceLocation AssumptionLoc,
2051  unsigned Alignment,
2052  llvm::Value *OffsetValue) {
2053  llvm::Value *TheCheck;
2054  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2055  CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
2056  if (SanOpts.has(SanitizerKind::Alignment)) {
2057  llvm::Value *AlignmentVal = llvm::ConstantInt::get(IntPtrTy, Alignment);
2058  EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, AlignmentVal,
2059  OffsetValue, TheCheck, Assumption);
2060  }
2061 }
2062 
2063 void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
2064  const Expr *E,
2065  SourceLocation AssumptionLoc,
2066  unsigned Alignment,
2067  llvm::Value *OffsetValue) {
2068  if (auto *CE = dyn_cast<CastExpr>(E))
2069  E = CE->getSubExprAsWritten();
2070  QualType Ty = E->getType();
2071  SourceLocation Loc = E->getExprLoc();
2072 
2073  EmitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2074  OffsetValue);
2075 }
2076 
2077 llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2078  llvm::Value *AnnotatedVal,
2079  StringRef AnnotationStr,
2080  SourceLocation Location) {
2081  llvm::Value *Args[4] = {
2082  AnnotatedVal,
2083  CGM.EmitAnnotationString(AnnotationStr),
2084  CGM.EmitAnnotationUnit(Location),
2085  CGM.EmitAnnotationLineNo(Location)
2086  };
2087  return Builder.CreateCall(AnnotationFn, Args);
2088 }
2089 
2090 void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2091  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2092  // FIXME We create a new bitcast for every annotation because that's what
2093  // llvm-gcc was doing.
2094  for (const auto *I : D->specific_attrs<AnnotateAttr>())
2095  EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
2096  Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
2097  I->getAnnotation(), D->getLocation());
2098 }
2099 
2100 Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2101  Address Addr) {
2102  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2103  llvm::Value *V = Addr.getPointer();
2104  llvm::Type *VTy = V->getType();
2105  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2106  CGM.Int8PtrTy);
2107 
2108  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2109  // FIXME Always emit the cast inst so we can differentiate between
2110  // annotation on the first field of a struct and annotation on the struct
2111  // itself.
2112  if (VTy != CGM.Int8PtrTy)
2113  V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
2114  V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
2115  V = Builder.CreateBitCast(V, VTy);
2116  }
2117 
2118  return Address(V, Addr.getAlignment());
2119 }
2120 
2121 CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2122 
2123 CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2124  : CGF(CGF) {
2125  assert(!CGF->IsSanitizerScope);
2126  CGF->IsSanitizerScope = true;
2127 }
2128 
2129 CodeGenFunction::SanitizerScope::~SanitizerScope() {
2130  CGF->IsSanitizerScope = false;
2131 }
2132 
2133 void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2134  const llvm::Twine &Name,
2135  llvm::BasicBlock *BB,
2136  llvm::BasicBlock::iterator InsertPt) const {
2137  LoopStack.InsertHelper(I);
2138  if (IsSanitizerScope)
2139  CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
2140 }
2141 
2142 void CGBuilderInserter::InsertHelper(
2143  llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2144  llvm::BasicBlock::iterator InsertPt) const {
2145  llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2146  if (CGF)
2147  CGF->InsertHelper(I, Name, BB, InsertPt);
2148 }
2149 
2150 static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
2151  CodeGenModule &CGM, const FunctionDecl *FD,
2152  std::string &FirstMissing) {
2153  // If there aren't any required features listed then go ahead and return.
2154  if (ReqFeatures.empty())
2155  return false;
2156 
2157  // Now build up the set of caller features and verify that all the required
2158  // features are there.
2159  llvm::StringMap<bool> CallerFeatureMap;
2160  CGM.getFunctionFeatureMap(CallerFeatureMap, GlobalDecl().getWithDecl(FD));
2161 
2162  // Return true only if all of the required features are present in the
2163  // caller; a feature spelled "a|b" is satisfied by either a or b.
2164  return std::all_of(
2165  ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
2166  SmallVector<StringRef, 1> OrFeatures;
2167  Feature.split(OrFeatures, '|');
2168  return llvm::any_of(OrFeatures, [&](StringRef Feature) {
2169  if (!CallerFeatureMap.lookup(Feature)) {
2170  FirstMissing = Feature.str();
2171  return false;
2172  }
2173  return true;
2174  });
2175  });
2176 }
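 // For illustration: with ReqFeatures == {"sse4.2", "aes|pclmul"} the caller
 // must have sse4.2 and at least one of aes or pclmul; a feature found to be
 // missing is reported back through FirstMissing.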
2177 
2178 // Emits an error if we don't have a valid set of target features for the
2179 // called function.
2180 void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2181  const FunctionDecl *TargetDecl) {
2182  return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2183 }
2184 
2185 // Emits an error if we don't have a valid set of target features for the
2186 // called function.
2187 void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2188  const FunctionDecl *TargetDecl) {
2189  // Early exit if this is an indirect call.
2190  if (!TargetDecl)
2191  return;
2192 
2193  // Get the current enclosing function if it exists. If it doesn't
2194  // we can't check the target features anyhow.
2195  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl);
2196  if (!FD)
2197  return;
2198 
2199  // Grab the required features for the call. For a builtin this is listed in
2200  // the td file with the default cpu, for an always_inline function this is any
2201  // listed cpu and any listed features.
2202  unsigned BuiltinID = TargetDecl->getBuiltinID();
2203  std::string MissingFeature;
2204  if (BuiltinID) {
2205  SmallVector<StringRef, 1> ReqFeatures;
2206  const char *FeatureList =
2207  CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2208  // Return if the builtin doesn't have any required features.
2209  if (!FeatureList || StringRef(FeatureList) == "")
2210  return;
2211  StringRef(FeatureList).split(ReqFeatures, ',');
2212  if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2213  CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2214  << TargetDecl->getDeclName()
2215  << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2216 
2217  } else if (TargetDecl->hasAttr<TargetAttr>() ||
2218  TargetDecl->hasAttr<CPUSpecificAttr>()) {
2219  // Get the required features for the callee.
2220 
2221  const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2222  TargetAttr::ParsedTargetAttr ParsedAttr = CGM.filterFunctionTargetAttrs(TD);
2223 
2224  SmallVector<StringRef, 1> ReqFeatures;
2225  llvm::StringMap<bool> CalleeFeatureMap;
2226  CGM.getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2227 
2228  for (const auto &F : ParsedAttr.Features) {
2229  if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2230  ReqFeatures.push_back(StringRef(F).substr(1));
2231  }
2232 
2233  for (const auto &F : CalleeFeatureMap) {
2234  // Only positive features are "required".
2235  if (F.getValue())
2236  ReqFeatures.push_back(F.getKey());
2237  }
2238  if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2239  CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2240  << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2241  }
2242 }
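 // For illustration (hypothetical code): calling an always_inline function
 // declared with __attribute__((target("avx2"))) from a caller compiled
 // without AVX2 fails this check, because the callee's '+avx2' requirement is
 // absent from the caller's feature map, and err_function_needs_feature is
 // reported.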
2243 
2244 void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2245  if (!CGM.getCodeGenOpts().SanitizeStats)
2246  return;
2247 
2248  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2249  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2250  CGM.getSanStats().create(IRB, SSK);
2251 }
2252 
2253 llvm::Value *
2254 CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
2255  llvm::Value *Condition = nullptr;
2256 
2257  if (!RO.Conditions.Architecture.empty())
2258  Condition = EmitX86CpuIs(RO.Conditions.Architecture);
2259 
2260  if (!RO.Conditions.Features.empty()) {
2261  llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
2262  Condition =
2263  Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2264  }
2265  return Condition;
2266 }
2267 
2268 static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2269  llvm::Function *Resolver,
2270  CGBuilderTy &Builder,
2271  llvm::Function *FuncToReturn,
2272  bool SupportsIFunc) {
2273  if (SupportsIFunc) {
2274  Builder.CreateRet(FuncToReturn);
2275  return;
2276  }
2277 
2278  llvm::SmallVector<llvm::Value *, 10> Args;
2279  llvm::for_each(Resolver->args(),
2280  [&](llvm::Argument &Arg) { Args.push_back(&Arg); });
2281 
2282  llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2283  Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2284 
2285  if (Resolver->getReturnType()->isVoidTy())
2286  Builder.CreateRetVoid();
2287  else
2288  Builder.CreateRet(Result);
2289 }
2290 
2291 void CodeGenFunction::EmitMultiVersionResolver(
2292  llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2293  assert((getContext().getTargetInfo().getTriple().getArch() ==
2294  llvm::Triple::x86 ||
2295  getContext().getTargetInfo().getTriple().getArch() ==
2296  llvm::Triple::x86_64) &&
2297  "Only implemented for x86 targets");
2298 
2299  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2300 
2301  // Main function's basic block.
2302  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2303  Builder.SetInsertPoint(CurBlock);
2304  EmitX86CpuInit();
2305 
2306  for (const MultiVersionResolverOption &RO : Options) {
2307  Builder.SetInsertPoint(CurBlock);
2308  llvm::Value *Condition = FormResolverCondition(RO);
2309 
2310  // The 'default' or 'generic' case.
2311  if (!Condition) {
2312  assert(&RO == Options.end() - 1 &&
2313  "Default or Generic case must be last");
2314  CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2315  SupportsIFunc);
2316  return;
2317  }
2318 
2319  llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2320  CGBuilderTy RetBuilder(*this, RetBlock);
2321  CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2322  SupportsIFunc);
2323  CurBlock = createBasicBlock("resolver_else", Resolver);
2324  Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2325  }
2326 
2327  // If no generic/default, emit an unreachable.
2328  Builder.SetInsertPoint(CurBlock);
2329  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2330  TrapCall->setDoesNotReturn();
2331  TrapCall->setDoesNotThrow();
2332  Builder.CreateUnreachable();
2333  Builder.ClearInsertionPoint();
2334 }
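 // For illustration: the emitted resolver is a chain of blocks starting at
 // 'resolver_entry', each testing one option's condition and either returning
 // that version's address (ifunc targets) or musttail-calling it, with the
 // final, condition-less option acting as the default and an unreachable trap
 // emitted when no default exists.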
2335 
2336 // Loc - where the diagnostic will point, where in the source code this
2337 // alignment has failed.
2338 // SecondaryLoc - if present (will be present if sufficiently different from
2339 // Loc), the diagnostic will additionally point a "Note:" to this location.
2340 // It should be the location where the __attribute__((assume_aligned))
2341 // was written, e.g. on the callee's declaration.
2342 void CodeGenFunction::EmitAlignmentAssumptionCheck(
2343  llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2344  SourceLocation SecondaryLoc, llvm::Value *Alignment,
2345  llvm::Value *OffsetValue, llvm::Value *TheCheck,
2346  llvm::Instruction *Assumption) {
2347  assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2348  cast<llvm::CallInst>(Assumption)->getCalledValue() ==
2349  llvm::Intrinsic::getDeclaration(
2350  Builder.GetInsertBlock()->getParent()->getParent(),
2351  llvm::Intrinsic::assume) &&
2352  "Assumption should be a call to llvm.assume().");
2353  assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2354  "Assumption should be the last instruction of the basic block, "
2355  "since the basic block is still being generated.");
2356 
2357  if (!SanOpts.has(SanitizerKind::Alignment))
2358  return;
2359 
2360  // Don't check pointers to volatile data. The behavior here is implementation-
2361  // defined.
2362  if (Ty->getPointeeType().isVolatileQualified())
2363  return;
2364 
2365  // We need to temporarily remove the assumption so we can insert the
2366  // sanitizer check before it, else the check will be dropped by optimizations.
2367  Assumption->removeFromParent();
2368 
2369  {
2370  SanitizerScope SanScope(this);
2371 
2372  if (!OffsetValue)
2373  OffsetValue = Builder.getInt1(0); // no offset.
2374 
2375  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2376  EmitCheckSourceLocation(SecondaryLoc),
2377  EmitCheckTypeDescriptor(Ty)};
2378  llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2379  EmitCheckValue(Alignment),
2380  EmitCheckValue(OffsetValue)};
2381  EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2382  SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2383  }
2384 
2385  // We are now in the (new, empty) "cont" basic block.
2386  // Reintroduce the assumption.
2387  Builder.Insert(Assumption);
2388  // FIXME: Assumption still has its original basic block as its Parent.
2389 }
2390 
2391 llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
2392  if (CGDebugInfo *DI = getDebugInfo())
2393  return DI->SourceLocToDebugLoc(Location);
2394 
2395  return llvm::DebugLoc();
2396 }
const llvm::DataLayout & getDataLayout() const
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:178
Defines the clang::ASTContext interface.
Represents a function declaration or definition.
Definition: Decl.h:1748
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural ...
Other implicit parameter.
Definition: Decl.h:1524
no exception specification
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2554
CanQualType VoidPtrTy
Definition: ASTContext.h:1042
A (possibly-)qualified type.
Definition: Type.h:643
llvm::Type * ConvertTypeForMem(QualType T)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
const CodeGenOptions & getCodeGenOpts() const
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler...
Definition: CGExpr.cpp:2784
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EncodeAddrForUseInPrologue(llvm::Function *F, llvm::Constant *Addr)
Encode an address into a form suitable for use in a function prologue.
XRayInstrMask Mask
Definition: XRayInstr.h:64
DominatorTree GraphTraits specialization so the DominatorTree can be iterable by generic graph iterat...
Definition: Dominators.h:29
virtual void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy, FunctionArgList &Params)=0
Insert any ABI-specific implicit parameters into the parameter list for a function.
CharUnits getClassPointerAlignment(const CXXRecordDecl *CD)
Returns the assumed alignment of an opaque pointer to the given class.
Definition: CGClass.cpp:36
Stmt - This represents one statement.
Definition: Stmt.h:66
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition: Type.h:3372
SanitizerSet Sanitize
Set of enabled sanitizers.
Definition: LangOptions.h:184
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee...
Definition: Type.cpp:505
bool isMain() const
Determines whether this function is "main", which is the entry point into an executable program...
Definition: Decl.cpp:2837
static bool endsWithReturn(const Decl *F)
Determine whether the function F ends with a return stmt.
QualType getThisType() const
Returns the type of the this pointer.
Definition: DeclCXX.cpp:2265
Checking the &#39;this&#39; pointer for a constructor call.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition: CGExpr.cpp:1036
constexpr XRayInstrMask Typed
Definition: XRayInstr.h:40
__v8hi zero
Definition: emmintrin.h:1397
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:88
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD)
Definition: CGClass.cpp:2910
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask >> Checked, SanitizerHandler Check, ArrayRef< llvm::Constant *> StaticArgs, ArrayRef< llvm::Value *> DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition: CGExpr.cpp:2998
The base class of the type hierarchy.
Definition: Type.h:1418
bool ShouldInstrumentFunction()
ShouldInstrumentFunction - Return true if the current function should be instrumented with __cyg_prof...
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1296
bool hasValue() const
Definition: APValue.h:315
bool usesSEHTry() const
Indicates the function uses __try.
Definition: Decl.h:2141
void GenerateCode(GlobalDecl GD, llvm::Function *Fn, const CGFunctionInfo &FnInfo)
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:2829
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:115
static bool hasRequiredFeatures(const SmallVectorImpl< StringRef > &ReqFeatures, CodeGenModule &CGM, const FunctionDecl *FD, std::string &FirstMissing)
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
Definition: EHScopeStack.h:378
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:693
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const
CGBuilder insert helper.
bool imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc, StringRef Category=StringRef()) const
Imbue XRay attributes to a function, applying the always/never attribute lists in the process...
constexpr XRayInstrMask Function
Definition: XRayInstr.h:38
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2566
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant, or if it does but contains a label, return false.
QualType getElementType() const
Definition: Type.h:2864
bool empty() const
Determines whether the exception-scopes stack is empty.
Definition: EHScopeStack.h:344
This file provides some common utility functions for processing Lambda related AST Constructs...
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
Represents a variable declaration or definition.
Definition: Decl.h:812
QualType getReturnType() const
Definition: Decl.h:2329
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
const T * getAs() const
Member-template getAs<specific type>&#39;.
Definition: Type.h:6818
Extra information about a function prototype.
Definition: Type.h:3784
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts, const LangOptions &LangOpts)
shouldEmitLifetimeMarkers - Decide whether we need emit the life-time markers.
uint64_t getProfileCount(const Stmt *S)
Get the profiler&#39;s count for the given statement.
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:54
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope, by being a (possibly-labelled) DeclStmt.
DiagnosticsEngine & getDiags() const
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified...
Definition: CGExpr.cpp:3295
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
void setCurrentProfileCount(uint64_t Count)
Set the profiler&#39;s current count.
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
llvm::Value * getPointer() const
Definition: Address.h:37
virtual llvm::Constant * getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const
Return a constant used by UBSan as a signature to identify functions possessing type information...
Definition: TargetInfo.h:168
void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn)
Annotate the function with an attribute that disables TSan checking at runtime.
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
Defines the Objective-C statement AST node classes.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1044
bool supportsIFunc() const
Identify whether this target supports IFuncs.
Definition: TargetInfo.h:1127
Represents a parameter to a function.
Definition: Decl.h:1564
static void destroyBlockInfos(CGBlockInfo *info)
Destroy a chain of block layouts.
Definition: CGBlocks.cpp:890
void emitImplicitAssignmentOperatorBody(FunctionArgList &Args)
Definition: CGClass.cpp:1524
long i
Definition: xmmintrin.h:1456
EHScopeStack::stable_iterator PrologueCleanupDepth
PrologueCleanupDepth - The cleanup depth enclosing all the cleanups associated with the parameters...
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
Definition: CGStmt.cpp:505
llvm::DenseMap< const VarDecl *, FieldDecl * > LambdaCaptureFields
const TargetInfo & getTarget() const
An object to manage conditionally-evaluated expressions.
PeepholeProtection protectFromPeepholes(RValue rvalue)
protectFromPeepholes - Protect a value that we&#39;re intending to store to the side, but which will prob...
DeclarationName getDeclName() const
Get the actual, stored name of the declaration, which may be a special name.
Definition: Decl.h:297
void EmitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue, llvm::Value *TheCheck, llvm::Instruction *Assumption)
One of these records is kept for each identifier that is lexed.
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
CGBlockInfo * FirstBlockInfo
FirstBlockInfo - The head of a singly-linked-list of block layouts.
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Definition: CGCall.cpp:2790
static void CreateMultiVersionResolverReturn(CodeGenModule &CGM, llvm::Function *Resolver, CGBuilderTy &Builder, llvm::Function *FuncToReturn, bool SupportsIFunc)
Address getAddress() const
Definition: CGValue.h:326
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:154
LValue EmitLValueForLambdaField(const FieldDecl *Field)
Given that we are currently emitting a lambda, emit an l-value for one of its members.
Definition: CGExpr.cpp:3873
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition: Sanitizers.h:158
field_range fields() const
Definition: Decl.h:3815
Represents a member of a struct/union/class.
Definition: Decl.h:2605
SanitizerMask Mask
Bitmask of enabled sanitizers.
Definition: Sanitizers.h:173
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:49
void InitTempAlloca(Address Alloca, llvm::Value *Value)
InitTempAlloca - Provide an initial value for the given alloca which will be observable at all locati...
Definition: CGExpr.cpp:125
void disableSanitizerForInstruction(llvm::Instruction *I)
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
ArrayRef< ParmVarDecl * > parameters() const
Definition: Decl.h:2289
Address CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
Definition: CGExpr.cpp:133
virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF)=0
Emit the ABI-specific prolog for the function.
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
static bool hasScalarEvaluationKind(QualType T)
Stmt * getBody(const FunctionDecl *&Definition) const
Retrieve the body (definition) of the function.
Definition: Decl.cpp:2808
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
Definition: CGBuilder.h:156
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:37
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:582
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition: Type.cpp:2236
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
bool isOne() const
isOne - Test whether the quantity equals one.
Definition: CharUnits.h:118
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type...
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:66
child_range children()
Definition: Stmt.cpp:212
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
Definition: CGCXXABI.h:108
unsigned getInAllocaFieldIndex() const
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3405
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition: Type.h:6197
The l-value was considered opaque, so the alignment was determined from a type, but that type was an ...
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:78
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:274
bool containsOnlyLifetimeMarkers(stable_iterator Old) const
Definition: CGCleanup.cpp:141
bool isLambda() const
Determine whether this class describes a lambda function object.
Definition: DeclCXX.h:1198
Values of this type can never be null.
Expr * getSizeExpr() const
Definition: Type.h:3008
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition: Type.h:6127
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition: CGExpr.cpp:181
void assignRegionCounters(GlobalDecl GD, llvm::Function *Fn)
Assign counters to regions and configure them for PGO of a given function.
Definition: CodeGenPGO.cpp:759
__v16qu mask
Definition: emmintrin.h:2133
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
bool isInstance() const
Definition: DeclCXX.h:2140
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
bool isAlignmentRequired(const Type *T) const
Determine if the alignment the type has was required using an alignment attribute.
llvm::SanitizerStatReport & getSanStats()
llvm::DebugLoc EmitReturnBlock()
Emit the unified return block, trying to avoid its emission when possible.
bool isLambdaCallOperator(const CXXMethodDecl *MD)
Definition: ASTLambda.h:27
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler&#39;s counter for the given statement by StepV.
uint64_t getCurrentProfileCount()
Get the profiler&#39;s current count.
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition: Decl.cpp:3077
Address NormalCleanupDest
i32s containing the indexes of the cleanup destinations.
virtual void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args)=0
Emits a kernel launch stub.
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
Definition: CGExpr.cpp:2827
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point...
Definition: Expr.cpp:2925
Checking the &#39;this&#39; pointer for a call to a non-static member function.
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition: Decl.cpp:4557
void InsertHelper(llvm::Instruction *I) const
Function called by the CodeGenFunction when an instruction is created.
Definition: CGLoopInfo.cpp:728
bool hasAttr() const
Definition: DeclBase.h:542
ConditionalOperator - The ?: ternary operator.
Definition: Expr.h:3703
CanQualType getReturnType() const
bool isValid() const
Definition: Address.h:35
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:57
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition: Stmt.h:1310
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1636
const TargetCodeGenInfo & getTargetCodeGenInfo()
OverloadedOperatorKind getOverloadedOperator() const
getOverloadedOperator - Which C++ overloaded operator this function represents, if any...
Definition: Decl.cpp:3383
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:38
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB)
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:178
llvm::Value * DecodeAddrUsedInPrologue(llvm::Value *F, llvm::Value *EncodedAddr)
Decode an address used in a function prologue, encoded by EncodeAddrForUseInPrologue.
Address CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition: CGExpr.cpp:118
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns &#39;th...
Definition: CGCXXABI.h:106
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
llvm::Constant * EmitAnnotationUnit(SourceLocation Loc)
Emit the annotation&#39;s translation unit.
llvm::BasicBlock * EHResumeBlock
EHResumeBlock - Unified block containing a call to llvm.eh.resume.
This represents one expression.
Definition: Expr.h:108
Emit only debug info necessary for generating line number tables (-gline-tables-only).
bool isDefaulted() const
Whether this function is defaulted per C++0x.
Definition: Decl.h:2048
static Address invalid()
Definition: Address.h:34
bool isObjCRetainableType() const
Definition: Type.cpp:4028
#define V(N, I)
Definition: ASTContext.h:2907
static void TryMarkNoThrow(llvm::Function *F)
Tries to mark the given function nounwind based on the non-existence of any throwing calls within it...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements, of a variable length array type, plus that largest non-variably-sized element type.
const char * getRequiredFeatures(unsigned ID) const
Definition: Builtins.h:210
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:62
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
llvm::LLVMContext & getLLVMContext()
llvm::BasicBlock * GetIndirectGotoBlock()
void GenOpenCLArgMetadata(llvm::Function *FN, const FunctionDecl *FD=nullptr, CodeGenFunction *CGF=nullptr)
OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument information in the program executab...
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char, signed char, short, int, long..], or an enum decl which has a signed representation.
Definition: Type.cpp:1875
QualType getType() const
Definition: Expr.h:137
void EmitConstructorBody(FunctionArgList &Args)
EmitConstructorBody - Emits the body of the current constructor.
Definition: CGClass.cpp:815
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V, QualType Type, CharUnits Alignment=CharUnits::Zero(), SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Emit a check that V is the address of storage of the appropriate size and alignment for an object of ...
Definition: CGExpr.cpp:652
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:196
llvm::Constant * EmitAnnotationString(StringRef Str)
Emit an annotation string.
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:296
SourceLocation getEnd() const
QualType getFunctionTypeWithExceptionSpec(QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI)
Get a function type and produce the equivalent function type with the specified exception specificati...
An object which temporarily prevents a value from being destroyed by aggressive peephole optimization...
UnaryOperator - This represents the unary-expression&#39;s (except sizeof and alignof), the postinc/postdec operators from postfix-expression, and various extensions.
Definition: Expr.h:2016
QualType getFunctionType(QualType ResultTy, ArrayRef< QualType > Args, const FunctionProtoType::ExtProtoInfo &EPI) const
Return a normal function type with a typed argument list.
Definition: ASTContext.h:1382
ValueDecl * getDecl()
Definition: Expr.h:1217
const LangOptions & getLangOpts() const
ASTContext & getContext() const
virtual void startNewFunction()
Definition: Mangle.h:75
CallingConv
CallingConv - Specifies the calling convention that a function uses.
Definition: Specifiers.h:264
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:40
The l-value was considered opaque, so the alignment was determined from a type.
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, std::initializer_list< llvm::Value **> ValuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
Definition: CGCleanup.cpp:417
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition: Sanitizers.h:161
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:141
QualType getCanonicalType() const
Definition: Type.h:6166
Encodes a location in the source.
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go...
QualType getSingleStepDesugaredType(const ASTContext &Context) const
Return the specified type with one level of "sugar" removed from the type.
Definition: Type.h:956
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition: CGExpr.cpp:163
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2109
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition: CGCall.cpp:295
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
QualType getElementType() const
Definition: Type.h:3220
const Decl * getDecl() const
Definition: GlobalDecl.h:76
Represents the declaration of a label.
Definition: Decl.h:468
ParsedAttr - Represents a syntactic attribute.
Definition: ParsedAttr.h:116
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:699
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const
This forwards to CodeGenFunction::InsertHelper.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
TargetAttr::ParsedTargetAttr filterFunctionTargetAttrs(const TargetAttr *TD)
Parses the target attributes passed in, and returns only the ones that are valid feature names...
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:2114
void EmitStmt(const Stmt *S, ArrayRef< const Attr *> Attrs=None)
EmitStmt - Emit the code for the statement.
Definition: CGStmt.cpp:44
SanitizerSet SanOpts
Sanitizers enabled for this function.
constexpr XRayInstrMask Custom
Definition: XRayInstr.h:39
llvm::Constant * GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH=false)
Get the address of the RTTI descriptor for the given type.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
An aligned address.
Definition: Address.h:24
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
Address ReturnValuePointer
ReturnValuePointer - The temporary alloca to hold a pointer to sret.
XRayInstrSet XRayInstrumentationBundle
Set of XRay instrumentation kinds to emit.
TypeClass getTypeClass() const
Definition: Type.h:1824
MangleContext & getMangleContext()
Gets the mangle context.
Definition: CGCXXABI.h:96
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
llvm::Constant * EmitAnnotationLineNo(SourceLocation L)
Emit the annotation line number.
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
bool hasImplicitReturnZero() const
Whether falling off this function implicitly returns null/zero.
Definition: Decl.h:2066
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression, because a __builtin_ms_va_list is a pointer to a char.
void getFunctionFeatureMap(llvm::StringMap< bool > &FeatureMap, GlobalDecl GD)
const CGFunctionInfo * CurFnInfo
void EmitStartEHSpec(const Decl *D)
EmitStartEHSpec - Emit the start of the exception spec.
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:215
This is an IRBuilder insertion helper that forwards to CodeGenFunction::InsertHelper, which adds necessary metadata to instructions.
Definition: CGBuilder.h:25
Address EmitVAListRef(const Expr *E)
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type, returning the result.
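A minimal illustrative use, assuming a CodeGenFunction is in scope; it pairs EmitScalarExpr with ConvertType (listed further down) to show how a QualType and its lowered llvm::Type relate. All names are placeholders.
// Sketch: lower a scalar expression and look up its LLVM result type.
llvm::Value *emitScalarWithType(clang::CodeGen::CodeGenFunction &CGF,
                                const clang::Expr *E, llvm::Type *&OutTy) {
  OutTy = CGF.ConvertType(E->getType());   // QualType -> llvm::Type
  return CGF.EmitScalarExpr(E);            // IR for the value itself
}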
FunctionArgList - Type for representing both the decl and type of parameters to a function...
Definition: CGCall.h:358
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:58
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.

CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType, Address dest, Address src, llvm::Value *sizeInChars)
emitNonZeroVLAInit - Emit the "zero" initialization of a variable-length array whose elements have a ...
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location)
Emit an annotation call (intrinsic).
Dataflow Directional Tag Classes.
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:580
static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx)
void Init(const Stmt *Body)
Clear the object and pre-process for the given statement, usually function body statement.
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:90
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:69
FunctionDecl * getTemplateInstantiationPattern() const
Retrieve the function declaration from which this function could be instantiated, if it is an instant...
Definition: Decl.cpp:3494
void EmitFunctionBody(const Stmt *Body)
const CXXRecordDecl * getParent() const
Returns the parent of this method declaration, which is the class in which this method is defined...
Definition: DeclCXX.h:2237
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks, lambdas, etc.
Definition: DeclBase.cpp:990
bool isMSVCRTEntryPoint() const
Determines whether this function is a MSVCRT user defined entry point.
Definition: Decl.cpp:2845
void buildThisParam(CodeGenFunction &CGF, FunctionArgList &Params)
Build a parameter variable suitable for 'this'.
Definition: CGCXXABI.cpp:121
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type *> Tys=None)
llvm::SmallVector< char, 256 > LifetimeExtendedCleanupStack
bool has(XRayInstrMask K) const
Definition: XRayInstr.h:46
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:107
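Taken together with CreateLoad and CreateAlignedLoad above, the CGBuilder helpers let the Address type carry alignment for both sides of a memory access. An illustrative, non-authoritative copy of one scalar slot:
// Sketch: copy one scalar slot from Src to Dst; Address supplies alignment.
void copySlot(clang::CodeGen::CodeGenFunction &CGF,
              clang::CodeGen::Address Src, clang::CodeGen::Address Dst) {
  llvm::Value *V = CGF.Builder.CreateLoad(Src, "copy.val");
  CGF.Builder.CreateStore(V, Dst);
}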
llvm::Module & getModule() const
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
JumpDest ReturnBlock
ReturnBlock - Unified return block.
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:4423
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
When instrumenting to collect profile data, the counts for some blocks such as switch cases need to n...
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
CodeGenTypes & getTypes() const
CharUnits getIndirectAlign() const
T * getAttr() const
Definition: DeclBase.h:538
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:51
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
ExtVectorType - Extended vector type.
Definition: Type.h:3304
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:450
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
Optional< NullabilityKind > getNullability(const ASTContext &context) const
Determine the nullability of the given type.
Definition: Type.cpp:3799
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat]...
Definition: APValue.h:76
void getCaptureFields(llvm::DenseMap< const VarDecl *, FieldDecl *> &Captures, FieldDecl *&ThisCapture) const
For a closure type, retrieve the mapping from captured variables and this to the non-static data memb...
Definition: DeclCXX.cpp:1416
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types...
Definition: Type.cpp:2062
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition: DeclBase.h:524
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate.h) and friends (in DeclFriend.h).
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer...
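An illustrative constant-folding check, assuming an Expr and an ASTContext are in scope; the APValue accessor getInt() is the one listed further down. Sketch only.
// Sketch: try to fold E to an integer constant before emitting IR for it.
bool tryFoldToInt(const clang::Expr *E, const clang::ASTContext &Ctx,
                  llvm::APSInt &Out) {
  clang::Expr::EvalResult Result;
  if (!E->EvaluateAsInt(Result, Ctx))
    return false;              // not a foldable integer constant
  Out = Result.Val.getInt();   // EvalResult::Val holds the folded APValue
  return true;
}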
void unprotectFromPeepholes(PeepholeProtection protection)
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet...
bool hasUnaligned() const
Definition: Type.h:293
Represents a C++ struct/union/class.
Definition: DeclCXX.h:300
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block, taking care to avoid creation of branches from dummy blocks.
Definition: CGStmt.cpp:470
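A hedged sketch of the usual pairing of EmitBranch with EmitBlock (both listed in this index) when stitching in a continuation block; createBasicBlock is assumed to be the CodeGenFunction helper that makes a fresh, not-yet-emitted block, and the label is a placeholder.
// Sketch: branch to a new continuation block, then start inserting there.
void branchToCont(clang::CodeGen::CodeGenFunction &CGF) {
  llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
  CGF.EmitBranch(Cont);   // avoids creating a branch out of a dummy block
  CGF.EmitBlock(Cont);    // append Cont and make it the insertion point
}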
bool isVoidType() const
Definition: Type.h:6610
llvm::Type * ConvertType(QualType T)
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function...
Definition: CGCall.cpp:2219
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:6154
static llvm::Constant * getPrologueSignature(CodeGenModule &CGM, const FunctionDecl *FD)
Return the UBSan prologue signature for FD if one is available.
LambdaCaptureDefault getLambdaCaptureDefault() const
Definition: DeclCXX.h:1230
LValue EmitLValue(const Expr *E)
EmitLValue - Emit code to compute a designator that specifies the location of the expression...
Definition: CGExpr.cpp:1243
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
Builtin::Context & BuiltinInfo
Definition: ASTContext.h:571
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition: CGExpr.cpp:1772
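Putting EmitLValue, EmitLoadOfLValue and RValue::getScalarVal (all listed in this index) together, an rvalue load of a scalar expression could be sketched as follows; this is illustrative, not code from this file.
// Sketch: compute the location of E, then load it as a scalar rvalue.
llvm::Value *loadScalarLValue(clang::CodeGen::CodeGenFunction &CGF,
                              const clang::Expr *E) {
  clang::CodeGen::LValue LV = CGF.EmitLValue(E);   // where the value lives
  clang::CodeGen::RValue RV = CGF.EmitLoadOfLValue(LV, E->getExprLoc());
  return RV.getScalarVal();                        // assumes a scalar result
}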
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:152
Defines the clang::TargetInfo interface.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2516
void EmitMultiVersionResolver(llvm::Function *Resolver, ArrayRef< MultiVersionResolverOption > Options)
CGCXXABI & getCXXABI() const
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2444
__DEVICE__ int max(int __a, int __b)
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1141
static bool shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD, const ASTContext &Context)
void EmitDestructorBody(FunctionArgList &Args)
EmitDestructorBody - Emits the body of the current destructor.
Definition: CGClass.cpp:1414
bool isPointerType() const
Definition: Type.h:6351
This structure provides a set of types that are commonly used during IR emission. ...
void EmitEndEHSpec(const Decl *D)
EmitEndEHSpec - Emit the end of the exception spec.
struct clang::CodeGen::CodeGenFunction::MultiVersionResolverOption::Conds Conditions
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
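Combining getTypeSizeInChars with getSize (listed earlier, and assumed here to be the CodeGenModule helper) gives the common pattern of materializing a type's size as a size_t constant. A minimal sketch, with placeholder names:
// Sketch: emit sizeof(T) as an IR constant of type size_t.
llvm::ConstantInt *emitSizeOf(clang::CodeGen::CodeGenModule &CGM,
                              const clang::ASTContext &Ctx,
                              clang::QualType T) {
  clang::CharUnits Size = Ctx.getTypeSizeInChars(T);  // size in characters (bytes)
  return CGM.getSize(Size);                           // size_t-typed constant
}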
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition: CGExpr.cpp:2865
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
Definition: CGStmt.cpp:380
void EmitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
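An illustrative call for the case where the alignment is a known constant; SizeTy is taken to be the integer type cached by the CodeGenTypeCache mentioned in this index, and every other name is a placeholder.
// Sketch: tell the optimizer that Ptr is 16-byte aligned at this point.
void assumeAligned16(clang::CodeGen::CodeGenFunction &CGF,
                     llvm::Value *Ptr, clang::QualType PtrTy,
                     clang::SourceLocation Loc) {
  llvm::Value *Align = llvm::ConstantInt::get(CGF.SizeTy, 16);
  CGF.EmitAlignmentAssumption(Ptr, PtrTy, Loc, /*AssumptionLoc=*/Loc, Align);
}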
QualType getType() const
Definition: Decl.h:647
A trivial tuple used to represent a source range.
LValue - This represents an lvalue reference.
Definition: CGValue.h:166
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.cpp:1522
void EmitVarAnnotations(const VarDecl *D, llvm::Value *V)
Emit local annotations for the local variable V, declared by D.
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:2988
SanitizerMetadata * getSanitizerMetadata()
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
APSInt & getInt()
Definition: APValue.h:336
const LangOptions & getLangOpts() const
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:163
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
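A hedged sketch of the typical call pattern: the helper returns the total element count, fills in baseType with the innermost element type, and rewrites the address to point at that element type. The wrapper below is a placeholder.
// Sketch: get the element count of a possibly variable-length array.
llvm::Value *arrayElementCount(clang::CodeGen::CodeGenFunction &CGF,
                               const clang::ArrayType *ArrTy,
                               clang::CodeGen::Address &Addr) {
  clang::QualType BaseTy;                                   // filled in by the call
  llvm::Value *NumElts = CGF.emitArrayLength(ArrTy, BaseTy, Addr);
  return NumElts;                                           // Addr now points at BaseTy
}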
static LValue MakeAddr(Address address, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:366
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
SourceLocation getBegin() const
bool isZeroInitializable(QualType T)
IsZeroInitializable - Return whether a type can be zero-initialized (in the C++ sense) with an LLVM z...
Defines enum values for all the target-independent builtin functions.
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
bool isScalar() const
Definition: CGValue.h:51
Attr - This represents one attribute.
Definition: Attr.h:43
SourceLocation getLocation() const
Definition: DeclBase.h:429
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null. If the type contains...
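Illustrative only: paired with a temporary allocation (CreateMemTemp is assumed here to be the usual CodeGenFunction helper for that), null-initializing a local of type Ty might be sketched as:
// Sketch: make a temporary of type Ty and set it to its null value.
clang::CodeGen::Address makeNullTemp(clang::CodeGen::CodeGenFunction &CGF,
                                     clang::QualType Ty) {
  clang::CodeGen::Address Tmp = CGF.CreateMemTemp(Ty, "null.tmp");
  CGF.EmitNullInitialization(Tmp, Ty);   // zero/null-initialize the object at Tmp
  return Tmp;
}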
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point...
Definition: Expr.cpp:2937
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.