clang  10.0.0svn
CodeGenModule.cpp
1 //===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This coordinates the per-module state used while generating code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CodeGenModule.h"
14 #include "CGBlocks.h"
15 #include "CGCUDARuntime.h"
16 #include "CGCXXABI.h"
17 #include "CGCall.h"
18 #include "CGDebugInfo.h"
19 #include "CGObjCRuntime.h"
20 #include "CGOpenCLRuntime.h"
21 #include "CGOpenMPRuntime.h"
22 #include "CGOpenMPRuntimeNVPTX.h"
23 #include "CodeGenFunction.h"
24 #include "CodeGenPGO.h"
25 #include "ConstantEmitter.h"
26 #include "CoverageMappingGen.h"
27 #include "TargetInfo.h"
28 #include "clang/AST/ASTContext.h"
29 #include "clang/AST/CharUnits.h"
30 #include "clang/AST/DeclCXX.h"
31 #include "clang/AST/DeclObjC.h"
32 #include "clang/AST/DeclTemplate.h"
33 #include "clang/AST/Mangle.h"
34 #include "clang/AST/RecordLayout.h"
35 #include "clang/AST/RecursiveASTVisitor.h"
36 #include "clang/AST/StmtVisitor.h"
37 #include "clang/Basic/Builtins.h"
38 #include "clang/Basic/CharInfo.h"
39 #include "clang/Basic/CodeGenOptions.h"
40 #include "clang/Basic/Diagnostic.h"
41 #include "clang/Basic/Module.h"
42 #include "clang/Basic/SourceManager.h"
43 #include "clang/Basic/TargetInfo.h"
44 #include "clang/Basic/Version.h"
45 #include "clang/CodeGen/ConstantInitBuilder.h"
46 #include "clang/Frontend/FrontendDiagnostic.h"
47 #include "llvm/ADT/StringSwitch.h"
48 #include "llvm/ADT/Triple.h"
49 #include "llvm/Analysis/TargetLibraryInfo.h"
50 #include "llvm/IR/CallingConv.h"
51 #include "llvm/IR/DataLayout.h"
52 #include "llvm/IR/Intrinsics.h"
53 #include "llvm/IR/LLVMContext.h"
54 #include "llvm/IR/Module.h"
55 #include "llvm/IR/ProfileSummary.h"
56 #include "llvm/ProfileData/InstrProfReader.h"
57 #include "llvm/Support/CodeGen.h"
58 #include "llvm/Support/ConvertUTF.h"
59 #include "llvm/Support/ErrorHandling.h"
60 #include "llvm/Support/MD5.h"
61 #include "llvm/Support/TimeProfiler.h"
62 
63 using namespace clang;
64 using namespace CodeGen;
65 
66 static llvm::cl::opt<bool> LimitedCoverage(
67  "limited-coverage-experimental", llvm::cl::ZeroOrMore, llvm::cl::Hidden,
68  llvm::cl::desc("Emit limited coverage mapping information (experimental)"),
69  llvm::cl::init(false));
70 
71 static const char AnnotationSection[] = "llvm.metadata";
72 
73 static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
74  switch (CGM.getTarget().getCXXABI().getKind()) {
77  case TargetCXXABI::iOS:
83  return CreateItaniumCXXABI(CGM);
84  case TargetCXXABI::Microsoft:
85  return CreateMicrosoftCXXABI(CGM);
86  }
87 
88  llvm_unreachable("invalid C++ ABI kind");
89 }
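// Illustrative mapping (assumed, typical triples): x86_64-unknown-linux-gnu
// selects the Itanium C++ ABI above, while x86_64-pc-windows-msvc selects the
// Microsoft C++ ABI.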
90 
91 CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
92  const PreprocessorOptions &PPO,
93  const CodeGenOptions &CGO, llvm::Module &M,
94  DiagnosticsEngine &diags,
95  CoverageSourceInfo *CoverageInfo)
96  : Context(C), LangOpts(C.getLangOpts()), HeaderSearchOpts(HSO),
97  PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
98  Target(C.getTargetInfo()), ABI(createCXXABI(*this)),
99  VMContext(M.getContext()), Types(*this), VTables(*this),
100  SanitizerMD(new SanitizerMetadata(*this)) {
101 
102  // Initialize the type cache.
103  llvm::LLVMContext &LLVMContext = M.getContext();
104  VoidTy = llvm::Type::getVoidTy(LLVMContext);
105  Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
106  Int16Ty = llvm::Type::getInt16Ty(LLVMContext);
107  Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
108  Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
109  HalfTy = llvm::Type::getHalfTy(LLVMContext);
110  FloatTy = llvm::Type::getFloatTy(LLVMContext);
111  DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
112  PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
113  PointerAlignInBytes =
114  C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
115  SizeSizeInBytes =
116  C.toCharUnitsFromBits(C.getTargetInfo().getMaxPointerWidth()).getQuantity();
117  IntAlignInBytes =
118  C.toCharUnitsFromBits(C.getTargetInfo().getIntAlign()).getQuantity();
119  IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
120  IntPtrTy = llvm::IntegerType::get(LLVMContext,
121  C.getTargetInfo().getMaxPointerWidth());
122  Int8PtrTy = Int8Ty->getPointerTo(0);
123  Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
124  AllocaInt8PtrTy = Int8Ty->getPointerTo(
125  M.getDataLayout().getAllocaAddrSpace());
126  ASTAllocaAddressSpace = getTargetCodeGenInfo().getASTAllocaAddressSpace();
127 
128  RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();
129 
130  if (LangOpts.ObjC)
131  createObjCRuntime();
132  if (LangOpts.OpenCL)
133  createOpenCLRuntime();
134  if (LangOpts.OpenMP)
135  createOpenMPRuntime();
136  if (LangOpts.CUDA)
137  createCUDARuntime();
138 
139  // Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0.
140  if (LangOpts.Sanitize.has(SanitizerKind::Thread) ||
141  (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
142  TBAA.reset(new CodeGenTBAA(Context, TheModule, CodeGenOpts, getLangOpts(),
143  getCXXABI().getMangleContext()));
144 
145  // If debug info or coverage generation is enabled, create the CGDebugInfo
146  // object.
147  if (CodeGenOpts.getDebugInfo() != codegenoptions::NoDebugInfo ||
148  CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes)
149  DebugInfo.reset(new CGDebugInfo(*this));
150 
151  Block.GlobalUniqueCount = 0;
152 
153  if (C.getLangOpts().ObjC)
154  ObjCData.reset(new ObjCEntrypoints());
155 
156  if (CodeGenOpts.hasProfileClangUse()) {
157  auto ReaderOrErr = llvm::IndexedInstrProfReader::create(
158  CodeGenOpts.ProfileInstrumentUsePath, CodeGenOpts.ProfileRemappingFile);
159  if (auto E = ReaderOrErr.takeError()) {
160  unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
161  "Could not read profile %0: %1");
162  llvm::handleAllErrors(std::move(E), [&](const llvm::ErrorInfoBase &EI) {
163  getDiags().Report(DiagID) << CodeGenOpts.ProfileInstrumentUsePath
164  << EI.message();
165  });
166  } else
167  PGOReader = std::move(ReaderOrErr.get());
168  }
169 
170  // If coverage mapping generation is enabled, create the
171  // CoverageMappingModuleGen object.
172  if (CodeGenOpts.CoverageMapping)
173  CoverageMapping.reset(new CoverageMappingModuleGen(*this, *CoverageInfo));
174 }
175 
176 CodeGenModule::~CodeGenModule() {}
177 
178 void CodeGenModule::createObjCRuntime() {
179  // This is just isGNUFamily(), but we want to force implementors of
180  // new ABIs to decide how best to do this.
181  switch (LangOpts.ObjCRuntime.getKind()) {
182  case ObjCRuntime::GNUstep:
183  case ObjCRuntime::GCC:
184  case ObjCRuntime::ObjFW:
185  ObjCRuntime.reset(CreateGNUObjCRuntime(*this));
186  return;
187 
188  case ObjCRuntime::FragileMacOSX:
189  case ObjCRuntime::MacOSX:
190  case ObjCRuntime::iOS:
191  case ObjCRuntime::WatchOS:
192  ObjCRuntime.reset(CreateMacObjCRuntime(*this));
193  return;
194  }
195  llvm_unreachable("bad runtime kind");
196 }
197 
198 void CodeGenModule::createOpenCLRuntime() {
199  OpenCLRuntime.reset(new CGOpenCLRuntime(*this));
200 }
201 
202 void CodeGenModule::createOpenMPRuntime() {
203  // Select a specialized code generation class based on the target, if any.
204  // If it does not exist use the default implementation.
205  switch (getTriple().getArch()) {
206  case llvm::Triple::nvptx:
207  case llvm::Triple::nvptx64:
208  assert(getLangOpts().OpenMPIsDevice &&
209  "OpenMP NVPTX is only prepared to deal with device code.");
210  OpenMPRuntime.reset(new CGOpenMPRuntimeNVPTX(*this));
211  break;
212  default:
213  if (LangOpts.OpenMPSimd)
214  OpenMPRuntime.reset(new CGOpenMPSIMDRuntime(*this));
215  else
216  OpenMPRuntime.reset(new CGOpenMPRuntime(*this));
217  break;
218  }
219 }
220 
221 void CodeGenModule::createCUDARuntime() {
222  CUDARuntime.reset(CreateNVCUDARuntime(*this));
223 }
224 
225 void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) {
226  Replacements[Name] = C;
227 }
228 
229 void CodeGenModule::applyReplacements() {
230  for (auto &I : Replacements) {
231  StringRef MangledName = I.first();
232  llvm::Constant *Replacement = I.second;
233  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
234  if (!Entry)
235  continue;
236  auto *OldF = cast<llvm::Function>(Entry);
237  auto *NewF = dyn_cast<llvm::Function>(Replacement);
238  if (!NewF) {
239  if (auto *Alias = dyn_cast<llvm::GlobalAlias>(Replacement)) {
240  NewF = dyn_cast<llvm::Function>(Alias->getAliasee());
241  } else {
242  auto *CE = cast<llvm::ConstantExpr>(Replacement);
243  assert(CE->getOpcode() == llvm::Instruction::BitCast ||
244  CE->getOpcode() == llvm::Instruction::GetElementPtr);
245  NewF = dyn_cast<llvm::Function>(CE->getOperand(0));
246  }
247  }
248 
249  // Replace old with new, but keep the old order.
250  OldF->replaceAllUsesWith(Replacement);
251  if (NewF) {
252  NewF->removeFromParent();
253  OldF->getParent()->getFunctionList().insertAfter(OldF->getIterator(),
254  NewF);
255  }
256  OldF->eraseFromParent();
257  }
258 }
259 
260 void CodeGenModule::addGlobalValReplacement(llvm::GlobalValue *GV, llvm::Constant *C) {
261  GlobalValReplacements.push_back(std::make_pair(GV, C));
262 }
263 
264 void CodeGenModule::applyGlobalValReplacements() {
265  for (auto &I : GlobalValReplacements) {
266  llvm::GlobalValue *GV = I.first;
267  llvm::Constant *C = I.second;
268 
269  GV->replaceAllUsesWith(C);
270  GV->eraseFromParent();
271  }
272 }
273 
274 // This is only used in aliases that we created and we know they have a
275 // linear structure.
276 static const llvm::GlobalObject *getAliasedGlobal(
277  const llvm::GlobalIndirectSymbol &GIS) {
278  llvm::SmallPtrSet<const llvm::GlobalIndirectSymbol*, 4> Visited;
279  const llvm::Constant *C = &GIS;
280  for (;;) {
281  C = C->stripPointerCasts();
282  if (auto *GO = dyn_cast<llvm::GlobalObject>(C))
283  return GO;
284  // stripPointerCasts will not walk over weak aliases.
285  auto *GIS2 = dyn_cast<llvm::GlobalIndirectSymbol>(C);
286  if (!GIS2)
287  return nullptr;
288  if (!Visited.insert(GIS2).second)
289  return nullptr;
290  C = GIS2->getIndirectSymbol();
291  }
292 }
293 
294 void CodeGenModule::checkAliases() {
295  // Check if the constructed aliases are well formed. It is really unfortunate
296  // that we have to do this in CodeGen, but we only construct mangled names
297  // and aliases during codegen.
298  bool Error = false;
299  DiagnosticsEngine &Diags = getDiags();
300  for (const GlobalDecl &GD : Aliases) {
301  const auto *D = cast<ValueDecl>(GD.getDecl());
302  SourceLocation Location;
303  bool IsIFunc = D->hasAttr<IFuncAttr>();
304  if (const Attr *A = D->getDefiningAttr())
305  Location = A->getLocation();
306  else
307  llvm_unreachable("Not an alias or ifunc?");
308  StringRef MangledName = getMangledName(GD);
309  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
310  auto *Alias = cast<llvm::GlobalIndirectSymbol>(Entry);
311  const llvm::GlobalValue *GV = getAliasedGlobal(*Alias);
312  if (!GV) {
313  Error = true;
314  Diags.Report(Location, diag::err_cyclic_alias) << IsIFunc;
315  } else if (GV->isDeclaration()) {
316  Error = true;
317  Diags.Report(Location, diag::err_alias_to_undefined)
318  << IsIFunc << IsIFunc;
319  } else if (IsIFunc) {
320  // Check resolver function type.
321  llvm::FunctionType *FTy = dyn_cast<llvm::FunctionType>(
322  GV->getType()->getPointerElementType());
323  assert(FTy);
324  if (!FTy->getReturnType()->isPointerTy())
325  Diags.Report(Location, diag::err_ifunc_resolver_return);
326  }
327 
328  llvm::Constant *Aliasee = Alias->getIndirectSymbol();
329  llvm::GlobalValue *AliaseeGV;
330  if (auto CE = dyn_cast<llvm::ConstantExpr>(Aliasee))
331  AliaseeGV = cast<llvm::GlobalValue>(CE->getOperand(0));
332  else
333  AliaseeGV = cast<llvm::GlobalValue>(Aliasee);
334 
335  if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
336  StringRef AliasSection = SA->getName();
337  if (AliasSection != AliaseeGV->getSection())
338  Diags.Report(SA->getLocation(), diag::warn_alias_with_section)
339  << AliasSection << IsIFunc << IsIFunc;
340  }
341 
342  // We have to handle alias to weak aliases in here. LLVM itself disallows
343  // this since the object semantics would not match the IL one. For
344  // compatibility with gcc we implement it by just pointing the alias
345  // to its aliasee's aliasee. We also warn, since the user is probably
346  // expecting the link to be weak.
347  if (auto GA = dyn_cast<llvm::GlobalIndirectSymbol>(AliaseeGV)) {
348  if (GA->isInterposable()) {
349  Diags.Report(Location, diag::warn_alias_to_weak_alias)
350  << GV->getName() << GA->getName() << IsIFunc;
351  Aliasee = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
352  GA->getIndirectSymbol(), Alias->getType());
353  Alias->setIndirectSymbol(Aliasee);
354  }
355  }
356  }
357  if (!Error)
358  return;
359 
360  for (const GlobalDecl &GD : Aliases) {
361  StringRef MangledName = getMangledName(GD);
362  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
363  auto *Alias = dyn_cast<llvm::GlobalIndirectSymbol>(Entry);
364  Alias->replaceAllUsesWith(llvm::UndefValue::get(Alias->getType()));
365  Alias->eraseFromParent();
366  }
367 }
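// Illustrative input (assumed) for the checks above, written as source:
//   void impl(void) {}
//   void foo(void) __attribute__((alias("impl")));         // ordinary alias
//   static void (*resolve_bar(void))(void) { return impl; }
//   void bar(void) __attribute__((ifunc("resolve_bar")));  // ifunc; the resolver
//                                                           // must return a pointer
// A cyclic alias, or an alias whose target is only a declaration, is rejected
// here with err_cyclic_alias / err_alias_to_undefined.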
368 
369 void CodeGenModule::clear() {
370  DeferredDeclsToEmit.clear();
371  if (OpenMPRuntime)
372  OpenMPRuntime->clear();
373 }
374 
375 void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
376  StringRef MainFile) {
377  if (!hasDiagnostics())
378  return;
379  if (VisitedInMainFile > 0 && VisitedInMainFile == MissingInMainFile) {
380  if (MainFile.empty())
381  MainFile = "<stdin>";
382  Diags.Report(diag::warn_profile_data_unprofiled) << MainFile;
383  } else {
384  if (Mismatched > 0)
385  Diags.Report(diag::warn_profile_data_out_of_date) << Visited << Mismatched;
386 
387  if (Missing > 0)
388  Diags.Report(diag::warn_profile_data_missing) << Visited << Missing;
389  }
390 }
391 
392 void CodeGenModule::Release() {
393  EmitDeferred();
394  EmitVTablesOpportunistically();
395  applyGlobalValReplacements();
396  applyReplacements();
397  checkAliases();
398  emitMultiVersionFunctions();
399  EmitCXXGlobalInitFunc();
400  EmitCXXGlobalDtorFunc();
401  registerGlobalDtorsWithAtExit();
402  EmitCXXThreadLocalInitFunc();
403  if (ObjCRuntime)
404  if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
405  AddGlobalCtor(ObjCInitFunction);
406  if (Context.getLangOpts().CUDA && !Context.getLangOpts().CUDAIsDevice &&
407  CUDARuntime) {
408  if (llvm::Function *CudaCtorFunction =
409  CUDARuntime->makeModuleCtorFunction())
410  AddGlobalCtor(CudaCtorFunction);
411  }
412  if (OpenMPRuntime) {
413  if (llvm::Function *OpenMPRequiresDirectiveRegFun =
414  OpenMPRuntime->emitRequiresDirectiveRegFun()) {
415  AddGlobalCtor(OpenMPRequiresDirectiveRegFun, 0);
416  }
417  if (llvm::Function *OpenMPRegistrationFunction =
418  OpenMPRuntime->emitRegistrationFunction()) {
419  auto ComdatKey = OpenMPRegistrationFunction->hasComdat() ?
420  OpenMPRegistrationFunction : nullptr;
421  AddGlobalCtor(OpenMPRegistrationFunction, 0, ComdatKey);
422  }
423  OpenMPRuntime->clear();
424  }
425  if (PGOReader) {
426  getModule().setProfileSummary(
427  PGOReader->getSummary(/* UseCS */ false).getMD(VMContext),
428  llvm::ProfileSummary::PSK_Instr);
429  if (PGOStats.hasDiagnostics())
430  PGOStats.reportDiagnostics(getDiags(), getCodeGenOpts().MainFileName);
431  }
432  EmitCtorList(GlobalCtors, "llvm.global_ctors");
433  EmitCtorList(GlobalDtors, "llvm.global_dtors");
434  EmitGlobalAnnotations();
435  EmitStaticExternCAliases();
436  EmitDeferredUnusedCoverageMappings();
437  if (CoverageMapping)
438  CoverageMapping->emit();
439  if (CodeGenOpts.SanitizeCfiCrossDso) {
440  CodeGenFunction(*this).EmitCfiCheckFail();
441  CodeGenFunction(*this).EmitCfiCheckStub();
442  }
443  emitAtAvailableLinkGuard();
444  emitLLVMUsed();
445  if (SanStats)
446  SanStats->finish();
447 
448  if (CodeGenOpts.Autolink &&
449  (Context.getLangOpts().Modules || !LinkerOptionsMetadata.empty())) {
450  EmitModuleLinkOptions();
451  }
452 
453  // On ELF we pass the dependent library specifiers directly to the linker
454  // without manipulating them. This is in contrast to other platforms where
455  // they are mapped to a specific linker option by the compiler. This
456  // difference is a result of the greater variety of ELF linkers and the fact
457  // that ELF linkers tend to handle libraries in a more complicated fashion
458  // than on other platforms. This forces us to defer handling the dependent
459  // libs to the linker.
460  //
461  // CUDA/HIP device and host libraries are different. Currently there is no
462  // way to differentiate dependent libraries for host or device. Existing
463  // usage of #pragma comment(lib, *) is intended for host libraries on
464  // Windows. Therefore emit llvm.dependent-libraries only for host.
465  if (!ELFDependentLibraries.empty() && !Context.getLangOpts().CUDAIsDevice) {
466  auto *NMD = getModule().getOrInsertNamedMetadata("llvm.dependent-libraries");
467  for (auto *MD : ELFDependentLibraries)
468  NMD->addOperand(MD);
469  }
470 
471  // Record mregparm value now so it is visible through rest of codegen.
472  if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
473  getModule().addModuleFlag(llvm::Module::Error, "NumRegisterParameters",
474  CodeGenOpts.NumRegisterParameters);
475 
476  if (CodeGenOpts.DwarfVersion) {
477  // We actually want the latest version when there are conflicts.
478  // We can change from Warning to Latest if such mode is supported.
479  getModule().addModuleFlag(llvm::Module::Warning, "Dwarf Version",
480  CodeGenOpts.DwarfVersion);
481  }
482  if (CodeGenOpts.EmitCodeView) {
483  // Indicate that we want CodeView in the metadata.
484  getModule().addModuleFlag(llvm::Module::Warning, "CodeView", 1);
485  }
486  if (CodeGenOpts.CodeViewGHash) {
487  getModule().addModuleFlag(llvm::Module::Warning, "CodeViewGHash", 1);
488  }
489  if (CodeGenOpts.ControlFlowGuard) {
490  // We want function ID tables for Control Flow Guard.
491  getModule().addModuleFlag(llvm::Module::Warning, "cfguardtable", 1);
492  }
493  if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) {
494  // We don't support LTO of two modules built with different StrictVTablePointers
495  // FIXME: we could support it by stripping all the information introduced
496  // by StrictVTablePointers.
497 
498  getModule().addModuleFlag(llvm::Module::Error, "StrictVTablePointers",1);
499 
500  llvm::Metadata *Ops[2] = {
501  llvm::MDString::get(VMContext, "StrictVTablePointers"),
502  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
503  llvm::Type::getInt32Ty(VMContext), 1))};
504 
505  getModule().addModuleFlag(llvm::Module::Require,
506  "StrictVTablePointersRequirement",
507  llvm::MDNode::get(VMContext, Ops));
508  }
509  if (DebugInfo)
510  // We support a single version in the linked module. The LLVM
511  // parser will drop debug info with a different version number
512  // (and warn about it, too).
513  getModule().addModuleFlag(llvm::Module::Warning, "Debug Info Version",
514  llvm::DEBUG_METADATA_VERSION);
515 
516  // We need to record the widths of enums and wchar_t, so that we can generate
517  // the correct build attributes in the ARM backend. wchar_size is also used by
518  // TargetLibraryInfo.
519  uint64_t WCharWidth =
520  Context.getTypeSizeInChars(Context.getWideCharType()).getQuantity();
521  getModule().addModuleFlag(llvm::Module::Error, "wchar_size", WCharWidth);
522 
523  llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
524  if ( Arch == llvm::Triple::arm
525  || Arch == llvm::Triple::armeb
526  || Arch == llvm::Triple::thumb
527  || Arch == llvm::Triple::thumbeb) {
528  // The minimum width of an enum in bytes
529  uint64_t EnumWidth = Context.getLangOpts().ShortEnums ? 1 : 4;
530  getModule().addModuleFlag(llvm::Module::Error, "min_enum_size", EnumWidth);
531  }
532 
533  if (CodeGenOpts.SanitizeCfiCrossDso) {
534  // Indicate that we want cross-DSO control flow integrity checks.
535  getModule().addModuleFlag(llvm::Module::Override, "Cross-DSO CFI", 1);
536  }
537 
538  if (LangOpts.Sanitize.has(SanitizerKind::CFIICall)) {
539  getModule().addModuleFlag(llvm::Module::Override,
540  "CFI Canonical Jump Tables",
541  CodeGenOpts.SanitizeCfiCanonicalJumpTables);
542  }
543 
544  if (CodeGenOpts.CFProtectionReturn &&
545  Target.checkCFProtectionReturnSupported(getDiags())) {
546  // Indicate that we want to instrument return control flow protection.
547  getModule().addModuleFlag(llvm::Module::Override, "cf-protection-return",
548  1);
549  }
550 
551  if (CodeGenOpts.CFProtectionBranch &&
552  Target.checkCFProtectionBranchSupported(getDiags())) {
553  // Indicate that we want to instrument branch control flow protection.
554  getModule().addModuleFlag(llvm::Module::Override, "cf-protection-branch",
555  1);
556  }
557 
558  if (LangOpts.CUDAIsDevice && getTriple().isNVPTX()) {
559  // Indicate whether __nvvm_reflect should be configured to flush denormal
560  // floating point values to 0. (This corresponds to its "__CUDA_FTZ"
561  // property.)
562  getModule().addModuleFlag(llvm::Module::Override, "nvvm-reflect-ftz",
563  CodeGenOpts.FlushDenorm ? 1 : 0);
564  }
565 
566  // Emit OpenCL specific module metadata: OpenCL/SPIR version.
567  if (LangOpts.OpenCL) {
568  EmitOpenCLMetadata();
569  // Emit SPIR version.
570  if (getTriple().isSPIR()) {
571  // SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the
572  // opencl.spir.version named metadata.
573  // C++ is backwards compatible with OpenCL v2.0.
574  auto Version = LangOpts.OpenCLCPlusPlus ? 200 : LangOpts.OpenCLVersion;
575  llvm::Metadata *SPIRVerElts[] = {
576  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
577  Int32Ty, Version / 100)),
578  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
579  Int32Ty, (Version / 100 > 1) ? 0 : 2))};
580  llvm::NamedMDNode *SPIRVerMD =
581  TheModule.getOrInsertNamedMetadata("opencl.spir.version");
582  llvm::LLVMContext &Ctx = TheModule.getContext();
583  SPIRVerMD->addOperand(llvm::MDNode::get(Ctx, SPIRVerElts));
584  }
585  }
586 
587  if (uint32_t PLevel = Context.getLangOpts().PICLevel) {
588  assert(PLevel < 3 && "Invalid PIC Level");
589  getModule().setPICLevel(static_cast<llvm::PICLevel::Level>(PLevel));
590  if (Context.getLangOpts().PIE)
591  getModule().setPIELevel(static_cast<llvm::PIELevel::Level>(PLevel));
592  }
593 
594  if (getCodeGenOpts().CodeModel.size() > 0) {
595  unsigned CM = llvm::StringSwitch<unsigned>(getCodeGenOpts().CodeModel)
596  .Case("tiny", llvm::CodeModel::Tiny)
597  .Case("small", llvm::CodeModel::Small)
598  .Case("kernel", llvm::CodeModel::Kernel)
599  .Case("medium", llvm::CodeModel::Medium)
600  .Case("large", llvm::CodeModel::Large)
601  .Default(~0u);
602  if (CM != ~0u) {
603  llvm::CodeModel::Model codeModel = static_cast<llvm::CodeModel::Model>(CM);
604  getModule().setCodeModel(codeModel);
605  }
606  }
607 
608  if (CodeGenOpts.NoPLT)
609  getModule().setRtLibUseGOT();
610 
611  SimplifyPersonality();
612 
613  if (getCodeGenOpts().EmitDeclMetadata)
614  EmitDeclMetadata();
615 
616  if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes)
617  EmitCoverageFile();
618 
619  if (DebugInfo)
620  DebugInfo->finalize();
621 
622  if (getCodeGenOpts().EmitVersionIdentMetadata)
623  EmitVersionIdentMetadata();
624 
625  if (!getCodeGenOpts().RecordCommandLine.empty())
626  EmitCommandLineMetadata();
627 
628  EmitTargetMetadata();
629 }
630 
631 void CodeGenModule::EmitOpenCLMetadata() {
632  // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
633  // opencl.ocl.version named metadata node.
634  // C++ is backwards compatible with OpenCL v2.0.
635  // FIXME: We might need to add CXX version at some point too?
636  auto Version = LangOpts.OpenCLCPlusPlus ? 200 : LangOpts.OpenCLVersion;
637  llvm::Metadata *OCLVerElts[] = {
638  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
639  Int32Ty, Version / 100)),
640  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
641  Int32Ty, (Version % 100) / 10))};
642  llvm::NamedMDNode *OCLVerMD =
643  TheModule.getOrInsertNamedMetadata("opencl.ocl.version");
644  llvm::LLVMContext &Ctx = TheModule.getContext();
645  OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
646 }
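// Illustrative output (assumed): for -cl-std=CL2.0 (Version == 200) the code
// above emits roughly
//   !opencl.ocl.version = !{!0}
//   !0 = !{i32 2, i32 0}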
647 
648 void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
649  // Make sure that this type is translated.
650  Types.UpdateCompletedType(TD);
651 }
652 
653 void CodeGenModule::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
654  // Make sure that this type is translated.
655  Types.RefreshTypeCacheForClass(RD);
656 }
657 
658 llvm::MDNode *CodeGenModule::getTBAATypeInfo(QualType QTy) {
659  if (!TBAA)
660  return nullptr;
661  return TBAA->getTypeInfo(QTy);
662 }
663 
664 TBAAAccessInfo CodeGenModule::getTBAAAccessInfo(QualType AccessType) {
665  if (!TBAA)
666  return TBAAAccessInfo();
667  return TBAA->getAccessInfo(AccessType);
668 }
669 
670 TBAAAccessInfo
671 CodeGenModule::getTBAAVTablePtrAccessInfo(llvm::Type *VTablePtrType) {
672  if (!TBAA)
673  return TBAAAccessInfo();
674  return TBAA->getVTablePtrAccessInfo(VTablePtrType);
675 }
676 
677 llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) {
678  if (!TBAA)
679  return nullptr;
680  return TBAA->getTBAAStructInfo(QTy);
681 }
682 
683 llvm::MDNode *CodeGenModule::getTBAABaseTypeInfo(QualType QTy) {
684  if (!TBAA)
685  return nullptr;
686  return TBAA->getBaseTypeInfo(QTy);
687 }
688 
689 llvm::MDNode *CodeGenModule::getTBAAAccessTagInfo(TBAAAccessInfo Info) {
690  if (!TBAA)
691  return nullptr;
692  return TBAA->getAccessTagInfo(Info);
693 }
694 
695 TBAAAccessInfo CodeGenModule::mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo,
696  TBAAAccessInfo TargetInfo) {
697  if (!TBAA)
698  return TBAAAccessInfo();
699  return TBAA->mergeTBAAInfoForCast(SourceInfo, TargetInfo);
700 }
701 
702 TBAAAccessInfo
703 CodeGenModule::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA,
704  TBAAAccessInfo InfoB) {
705  if (!TBAA)
706  return TBAAAccessInfo();
707  return TBAA->mergeTBAAInfoForConditionalOperator(InfoA, InfoB);
708 }
709 
710 TBAAAccessInfo
711 CodeGenModule::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo,
712  TBAAAccessInfo SrcInfo) {
713  if (!TBAA)
714  return TBAAAccessInfo();
715  return TBAA->mergeTBAAInfoForConditionalOperator(DestInfo, SrcInfo);
716 }
717 
718 void CodeGenModule::DecorateInstructionWithTBAA(llvm::Instruction *Inst,
719  TBAAAccessInfo TBAAInfo) {
720  if (llvm::MDNode *Tag = getTBAAAccessTagInfo(TBAAInfo))
721  Inst->setMetadata(llvm::LLVMContext::MD_tbaa, Tag);
722 }
723 
724 void CodeGenModule::DecorateInstructionWithInvariantGroup(
725  llvm::Instruction *I, const CXXRecordDecl *RD) {
726  I->setMetadata(llvm::LLVMContext::MD_invariant_group,
727  llvm::MDNode::get(getLLVMContext(), {}));
728 }
729 
730 void CodeGenModule::Error(SourceLocation loc, StringRef message) {
731  unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "%0");
732  getDiags().Report(Context.getFullLoc(loc), diagID) << message;
733 }
734 
735 /// ErrorUnsupported - Print out an error that codegen doesn't support the
736 /// specified stmt yet.
737 void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type) {
738  unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
739  "cannot compile this %0 yet");
740  std::string Msg = Type;
741  getDiags().Report(Context.getFullLoc(S->getBeginLoc()), DiagID)
742  << Msg << S->getSourceRange();
743 }
744 
745 /// ErrorUnsupported - Print out an error that codegen doesn't support the
746 /// specified decl yet.
747 void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type) {
748  unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
749  "cannot compile this %0 yet");
750  std::string Msg = Type;
751  getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
752 }
753 
754 llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
755  return llvm::ConstantInt::get(SizeTy, size.getQuantity());
756 }
757 
758 void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
759  const NamedDecl *D) const {
760  if (GV->hasDLLImportStorageClass())
761  return;
762  // Internal definitions always have default visibility.
763  if (GV->hasLocalLinkage()) {
764  GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
765  return;
766  }
767  if (!D)
768  return;
769  // Set visibility for definitions, and for declarations if requested globally
770  // or set explicitly.
771  LinkageInfo LV = D->getLinkageAndVisibility();
772  if (LV.isVisibilityExplicit() || getLangOpts().SetVisibilityForExternDecls ||
773  !GV->isDeclarationForLinker())
774  GV->setVisibility(GetLLVMVisibility(LV.getVisibility()));
775 }
776 
777 static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
778  llvm::GlobalValue *GV) {
779  if (GV->hasLocalLinkage())
780  return true;
781 
782  if (!GV->hasDefaultVisibility() && !GV->hasExternalWeakLinkage())
783  return true;
784 
785  // DLLImport explicitly marks the GV as external.
786  if (GV->hasDLLImportStorageClass())
787  return false;
788 
789  const llvm::Triple &TT = CGM.getTriple();
790  if (TT.isWindowsGNUEnvironment()) {
791  // In MinGW, variables without DLLImport can still be automatically
792  // imported from a DLL by the linker; don't mark variables that
793  // potentially could come from another DLL as DSO local.
794  if (GV->isDeclarationForLinker() && isa<llvm::GlobalVariable>(GV) &&
795  !GV->isThreadLocal())
796  return false;
797  }
798 
799  // On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols
800  // remain unresolved in the link, they can be resolved to zero, which is
801  // outside the current DSO.
802  if (TT.isOSBinFormatCOFF() && GV->hasExternalWeakLinkage())
803  return false;
804 
805  // Every other GV is local on COFF.
806  // Make an exception for windows OS in the triple: Some firmware builds use
807  // *-win32-macho triples. This (accidentally?) produced windows relocations
808  // without GOT tables in older clang versions; Keep this behaviour.
809  // FIXME: even thread local variables?
810  if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO()))
811  return true;
812 
813  // Only handle COFF and ELF for now.
814  if (!TT.isOSBinFormatELF())
815  return false;
816 
817  // If this is not an executable, don't assume anything is local.
818  const auto &CGOpts = CGM.getCodeGenOpts();
819  llvm::Reloc::Model RM = CGOpts.RelocationModel;
820  const auto &LOpts = CGM.getLangOpts();
821  if (RM != llvm::Reloc::Static && !LOpts.PIE && !LOpts.OpenMPIsDevice)
822  return false;
823 
824  // A definition cannot be preempted from an executable.
825  if (!GV->isDeclarationForLinker())
826  return true;
827 
828  // Most PIC code sequences that assume that a symbol is local cannot produce a
829  // 0 if it turns out the symbol is undefined. While this is ABI and relocation
830 // dependent, it seems worth it to handle it here.
831  if (RM == llvm::Reloc::PIC_ && GV->hasExternalWeakLinkage())
832  return false;
833 
834  // PPC has no copy relocations and cannot use a plt entry as a symbol address.
835  llvm::Triple::ArchType Arch = TT.getArch();
836  if (Arch == llvm::Triple::ppc || Arch == llvm::Triple::ppc64 ||
837  Arch == llvm::Triple::ppc64le)
838  return false;
839 
840  // If we can use copy relocations we can assume it is local.
841  if (auto *Var = dyn_cast<llvm::GlobalVariable>(GV))
842  if (!Var->isThreadLocal() &&
843  (RM == llvm::Reloc::Static || CGOpts.PIECopyRelocations))
844  return true;
845 
846  // If we can use a plt entry as the symbol address we can assume it
847  // is local.
848  // FIXME: This should work for PIE, but the gold linker doesn't support it.
849  if (isa<llvm::Function>(GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static)
850  return true;
851 
852  // Otherwise don't assume it is local.
853  return false;
854 }
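// Illustrative behaviour (assumed, ELF x86_64): with -fno-pic (Reloc::Static)
// both a definition "int defined_here;" and a declaration "extern int other;"
// end up dso_local (the latter via the copy-relocation case above); with -fPIC
// and no -fpie, the early "not an executable" check returns false and nothing
// extra is assumed local.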
855 
856 void CodeGenModule::setDSOLocal(llvm::GlobalValue *GV) const {
857  GV->setDSOLocal(shouldAssumeDSOLocal(*this, GV));
858 }
859 
860 void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
861  GlobalDecl GD) const {
862  const auto *D = dyn_cast<NamedDecl>(GD.getDecl());
863  // C++ destructors have a few C++ ABI specific special cases.
864  if (const auto *Dtor = dyn_cast_or_null<CXXDestructorDecl>(D)) {
865  getCXXABI().setCXXDestructorDLLStorage(GV, Dtor, GD.getDtorType());
866  return;
867  }
868  setDLLImportDLLExport(GV, D);
869 }
870 
871 void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
872  const NamedDecl *D) const {
873  if (D && D->isExternallyVisible()) {
874  if (D->hasAttr<DLLImportAttr>())
875  GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
876  else if (D->hasAttr<DLLExportAttr>() && !GV->isDeclarationForLinker())
877  GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
878  }
879 }
880 
881 void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
882  GlobalDecl GD) const {
883  setDLLImportDLLExport(GV, GD);
884  setGVPropertiesAux(GV, dyn_cast<NamedDecl>(GD.getDecl()));
885 }
886 
887 void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
888  const NamedDecl *D) const {
889  setDLLImportDLLExport(GV, D);
890  setGVPropertiesAux(GV, D);
891 }
892 
893 void CodeGenModule::setGVPropertiesAux(llvm::GlobalValue *GV,
894  const NamedDecl *D) const {
895  setGlobalVisibility(GV, D);
896  setDSOLocal(GV);
897  GV->setPartition(CodeGenOpts.SymbolPartition);
898 }
899 
900 static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
901  return llvm::StringSwitch<llvm::GlobalVariable::ThreadLocalMode>(S)
902  .Case("global-dynamic", llvm::GlobalVariable::GeneralDynamicTLSModel)
903  .Case("local-dynamic", llvm::GlobalVariable::LocalDynamicTLSModel)
904  .Case("initial-exec", llvm::GlobalVariable::InitialExecTLSModel)
905  .Case("local-exec", llvm::GlobalVariable::LocalExecTLSModel);
906 }
907 
908 static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(
909  CodeGenOptions::TLSModel M) {
910  switch (M) {
911  case CodeGenOptions::GeneralDynamicTLSModel:
912  return llvm::GlobalVariable::GeneralDynamicTLSModel;
913  case CodeGenOptions::LocalDynamicTLSModel:
914  return llvm::GlobalVariable::LocalDynamicTLSModel;
915  case CodeGenOptions::InitialExecTLSModel:
916  return llvm::GlobalVariable::InitialExecTLSModel;
917  case CodeGenOptions::LocalExecTLSModel:
918  return llvm::GlobalVariable::LocalExecTLSModel;
919  }
920  llvm_unreachable("Invalid TLS model!");
921 }
922 
923 void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const {
924  assert(D.getTLSKind() && "setting TLS mode on non-TLS var!");
925 
926  llvm::GlobalValue::ThreadLocalMode TLM;
927  TLM = GetLLVMTLSModel(CodeGenOpts.getDefaultTLSModel());
928 
929  // Override the TLS model if it is explicitly specified.
930  if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) {
931  TLM = GetLLVMTLSModel(Attr->getModel());
932  }
933 
934  GV->setThreadLocalMode(TLM);
935 }
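// Illustrative example (assumed): a declaration such as
//   static __thread int counter __attribute__((tls_model("initial-exec")));
// carries a TLSModelAttr, so the string form of GetLLVMTLSModel() above maps
// it to llvm::GlobalVariable::InitialExecTLSModel, overriding the
// -ftls-model= default.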
936 
937 static std::string getCPUSpecificMangling(const CodeGenModule &CGM,
938  StringRef Name) {
939  const TargetInfo &Target = CGM.getTarget();
940  return (Twine('.') + Twine(Target.CPUSpecificManglingCharacter(Name))).str();
941 }
942 
943 static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM,
944  const CPUSpecificAttr *Attr,
945  unsigned CPUIndex,
946  raw_ostream &Out) {
947  // cpu_specific gets the current name, dispatch gets the resolver if IFunc is
948  // supported.
949  if (Attr)
950  Out << getCPUSpecificMangling(CGM, Attr->getCPUName(CPUIndex)->getName());
951  else if (CGM.getTarget().supportsIFunc())
952  Out << ".resolver";
953 }
954 
955 static void AppendTargetMangling(const CodeGenModule &CGM,
956  const TargetAttr *Attr, raw_ostream &Out) {
957  if (Attr->isDefaultVersion())
958  return;
959 
960  Out << '.';
961  const TargetInfo &Target = CGM.getTarget();
962  TargetAttr::ParsedTargetAttr Info =
963  Attr->parse([&Target](StringRef LHS, StringRef RHS) {
964  // Multiversioning doesn't allow "no-${feature}", so we can
965  // only have "+" prefixes here.
966  assert(LHS.startswith("+") && RHS.startswith("+") &&
967  "Features should always have a prefix.");
968  return Target.multiVersionSortPriority(LHS.substr(1)) >
969  Target.multiVersionSortPriority(RHS.substr(1));
970  });
971 
972  bool IsFirst = true;
973 
974  if (!Info.Architecture.empty()) {
975  IsFirst = false;
976  Out << "arch_" << Info.Architecture;
977  }
978 
979  for (StringRef Feat : Info.Features) {
980  if (!IsFirst)
981  Out << '_';
982  IsFirst = false;
983  Out << Feat.substr(1);
984  }
985 }
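// Illustrative example (assumed): for a multiversioned C function
//   __attribute__((target("arch=atom"))) void f(void) {}
//   __attribute__((target("default")))   void f(void) {}
// the non-default version receives the suffix built above, roughly
// "f.arch_atom", while the default version keeps the unsuffixed mangled name.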
986 
987 static std::string getMangledNameImpl(const CodeGenModule &CGM, GlobalDecl GD,
988  const NamedDecl *ND,
989  bool OmitMultiVersionMangling = false) {
990  SmallString<256> Buffer;
991  llvm::raw_svector_ostream Out(Buffer);
992  MangleContext &MC = CGM.getCXXABI().getMangleContext();
993  if (MC.shouldMangleDeclName(ND)) {
994  llvm::raw_svector_ostream Out(Buffer);
995  if (const auto *D = dyn_cast<CXXConstructorDecl>(ND))
996  MC.mangleCXXCtor(D, GD.getCtorType(), Out);
997  else if (const auto *D = dyn_cast<CXXDestructorDecl>(ND))
998  MC.mangleCXXDtor(D, GD.getDtorType(), Out);
999  else
1000  MC.mangleName(ND, Out);
1001  } else {
1002  IdentifierInfo *II = ND->getIdentifier();
1003  assert(II && "Attempt to mangle unnamed decl.");
1004  const auto *FD = dyn_cast<FunctionDecl>(ND);
1005 
1006  if (FD &&
1007  FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) {
1008  llvm::raw_svector_ostream Out(Buffer);
1009  Out << "__regcall3__" << II->getName();
1010  } else {
1011  Out << II->getName();
1012  }
1013  }
1014 
1015  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
1016  if (FD->isMultiVersion() && !OmitMultiVersionMangling) {
1017  switch (FD->getMultiVersionKind()) {
1018  case MultiVersionKind::CPUDispatch:
1019  case MultiVersionKind::CPUSpecific:
1020  AppendCPUSpecificCPUDispatchMangling(CGM,
1021  FD->getAttr<CPUSpecificAttr>(),
1022  GD.getMultiVersionIndex(), Out);
1023  break;
1024  case MultiVersionKind::Target:
1025  AppendTargetMangling(CGM, FD->getAttr<TargetAttr>(), Out);
1026  break;
1027  case MultiVersionKind::None:
1028  llvm_unreachable("None multiversion type isn't valid here");
1029  }
1030  }
1031 
1032  return Out.str();
1033 }
1034 
1035 void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
1036  const FunctionDecl *FD) {
1037  if (!FD->isMultiVersion())
1038  return;
1039 
1040  // Get the name of what this would be without the 'target' attribute. This
1041  // allows us to lookup the version that was emitted when this wasn't a
1042  // multiversion function.
1043  std::string NonTargetName =
1044  getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
1045  GlobalDecl OtherGD;
1046  if (lookupRepresentativeDecl(NonTargetName, OtherGD)) {
1047  assert(OtherGD.getCanonicalDecl()
1048  .getDecl()
1049  ->getAsFunction()
1050  ->isMultiVersion() &&
1051  "Other GD should now be a multiversioned function");
1052  // OtherFD is the version of this function that was mangled BEFORE
1053  // becoming a MultiVersion function. It potentially needs to be updated.
1054  const FunctionDecl *OtherFD = OtherGD.getCanonicalDecl()
1055  .getDecl()
1056  ->getAsFunction()
1057  ->getMostRecentDecl();
1058  std::string OtherName = getMangledNameImpl(*this, OtherGD, OtherFD);
1059  // This is so that if the initial version was already the 'default'
1060  // version, we don't try to update it.
1061  if (OtherName != NonTargetName) {
1062  // Remove instead of erase, since others may have stored the StringRef
1063  // to this.
1064  const auto ExistingRecord = Manglings.find(NonTargetName);
1065  if (ExistingRecord != std::end(Manglings))
1066  Manglings.remove(&(*ExistingRecord));
1067  auto Result = Manglings.insert(std::make_pair(OtherName, OtherGD));
1068  MangledDeclNames[OtherGD.getCanonicalDecl()] = Result.first->first();
1069  if (llvm::GlobalValue *Entry = GetGlobalValue(NonTargetName))
1070  Entry->setName(OtherName);
1071  }
1072  }
1073 }
1074 
1075 StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
1076  GlobalDecl CanonicalGD = GD.getCanonicalDecl();
1077 
1078  // Some ABIs don't have constructor variants. Make sure that base and
1079  // complete constructors get mangled the same.
1080  if (const auto *CD = dyn_cast<CXXConstructorDecl>(CanonicalGD.getDecl())) {
1081  if (!getTarget().getCXXABI().hasConstructorVariants()) {
1082  CXXCtorType OrigCtorType = GD.getCtorType();
1083  assert(OrigCtorType == Ctor_Base || OrigCtorType == Ctor_Complete);
1084  if (OrigCtorType == Ctor_Base)
1085  CanonicalGD = GlobalDecl(CD, Ctor_Complete);
1086  }
1087  }
1088 
1089  auto FoundName = MangledDeclNames.find(CanonicalGD);
1090  if (FoundName != MangledDeclNames.end())
1091  return FoundName->second;
1092 
1093  // Keep the first result in the case of a mangling collision.
1094  const auto *ND = cast<NamedDecl>(GD.getDecl());
1095  std::string MangledName = getMangledNameImpl(*this, GD, ND);
1096 
1097  // Adjust kernel stub mangling as we may need to be able to differentiate
1098  // them from the kernel itself (e.g., for HIP).
1099  if (auto *FD = dyn_cast<FunctionDecl>(GD.getDecl()))
1100  if (!getLangOpts().CUDAIsDevice && FD->hasAttr<CUDAGlobalAttr>())
1101  MangledName = getCUDARuntime().getDeviceStubName(MangledName);
1102 
1103  auto Result = Manglings.insert(std::make_pair(MangledName, GD));
1104  return MangledDeclNames[CanonicalGD] = Result.first->first();
1105 }
1106 
1107 StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD,
1108  const BlockDecl *BD) {
1109  MangleContext &MangleCtx = getCXXABI().getMangleContext();
1110  const Decl *D = GD.getDecl();
1111 
1112  SmallString<256> Buffer;
1113  llvm::raw_svector_ostream Out(Buffer);
1114  if (!D)
1115  MangleCtx.mangleGlobalBlock(BD,
1116  dyn_cast_or_null<VarDecl>(initializedGlobalDecl.getDecl()), Out);
1117  else if (const auto *CD = dyn_cast<CXXConstructorDecl>(D))
1118  MangleCtx.mangleCtorBlock(CD, GD.getCtorType(), BD, Out);
1119  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(D))
1120  MangleCtx.mangleDtorBlock(DD, GD.getDtorType(), BD, Out);
1121  else
1122  MangleCtx.mangleBlock(cast<DeclContext>(D), BD, Out);
1123 
1124  auto Result = Manglings.insert(std::make_pair(Out.str(), BD));
1125  return Result.first->first();
1126 }
1127 
1128 llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
1129  return getModule().getNamedValue(Name);
1130 }
1131 
1132 /// AddGlobalCtor - Add a function to the list that will be called before
1133 /// main() runs.
1134 void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority,
1135  llvm::Constant *AssociatedData) {
1136  // FIXME: Type coercion of void()* types.
1137  GlobalCtors.push_back(Structor(Priority, Ctor, AssociatedData));
1138 }
1139 
1140 /// AddGlobalDtor - Add a function to the list that will be called
1141 /// when the module is unloaded.
1142 void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority) {
1143  if (CodeGenOpts.RegisterGlobalDtorsWithAtExit) {
1144  DtorsUsingAtExit[Priority].push_back(Dtor);
1145  return;
1146  }
1147 
1148  // FIXME: Type coercion of void()* types.
1149  GlobalDtors.push_back(Structor(Priority, Dtor, nullptr));
1150 }
1151 
1152 void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) {
1153  if (Fns.empty()) return;
1154 
1155  // Ctor function type is void()*.
1156  llvm::FunctionType* CtorFTy = llvm::FunctionType::get(VoidTy, false);
1157  llvm::Type *CtorPFTy = llvm::PointerType::get(CtorFTy,
1158  TheModule.getDataLayout().getProgramAddressSpace());
1159 
1160  // Get the type of a ctor entry, { i32, void ()*, i8* }.
1161  llvm::StructType *CtorStructTy = llvm::StructType::get(
1162  Int32Ty, CtorPFTy, VoidPtrTy);
1163 
1164  // Construct the constructor and destructor arrays.
1165  ConstantInitBuilder builder(*this);
1166  auto ctors = builder.beginArray(CtorStructTy);
1167  for (const auto &I : Fns) {
1168  auto ctor = ctors.beginStruct(CtorStructTy);
1169  ctor.addInt(Int32Ty, I.Priority);
1170  ctor.add(llvm::ConstantExpr::getBitCast(I.Initializer, CtorPFTy));
1171  if (I.AssociatedData)
1172  ctor.add(llvm::ConstantExpr::getBitCast(I.AssociatedData, VoidPtrTy));
1173  else
1174  ctor.addNullPointer(VoidPtrTy);
1175  ctor.finishAndAddTo(ctors);
1176  }
1177 
1178  auto list =
1179  ctors.finishAndCreateGlobal(GlobalName, getPointerAlign(),
1180  /*constant*/ false,
1181  llvm::GlobalValue::AppendingLinkage);
1182 
1183  // The LTO linker doesn't seem to like it when we set an alignment
1184  // on appending variables. Take it off as a workaround.
1185  list->setAlignment(0);
1186 
1187  Fns.clear();
1188 }
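// Illustrative result (assumed): a single priority-65535 constructor with no
// associated data produces IR along the lines of
//   @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }]
//       [{ i32, void ()*, i8* } { i32 65535, void ()* @ctor, i8* null }]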
1189 
1190 llvm::GlobalValue::LinkageTypes
1191 CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
1192  const auto *D = cast<FunctionDecl>(GD.getDecl());
1193 
1194  GVALinkage Linkage = getContext().GetGVALinkageForFunction(D);
1195 
1196  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(D))
1197  return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor, GD.getDtorType());
1198 
1199  if (isa<CXXConstructorDecl>(D) &&
1200  cast<CXXConstructorDecl>(D)->isInheritingConstructor() &&
1201  Context.getTargetInfo().getCXXABI().isMicrosoft()) {
1202  // Our approach to inheriting constructors is fundamentally different from
1203  // that used by the MS ABI, so keep our inheriting constructor thunks
1204  // internal rather than trying to pick an unambiguous mangling for them.
1205  return llvm::GlobalValue::InternalLinkage;
1206  }
1207 
1208  return getLLVMLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false);
1209 }
1210 
1211 llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
1212  llvm::MDString *MDS = dyn_cast<llvm::MDString>(MD);
1213  if (!MDS) return nullptr;
1214 
1215  return llvm::ConstantInt::get(Int64Ty, llvm::MD5Hash(MDS->getString()));
1216 }
1217 
1218 void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD,
1219  const CGFunctionInfo &Info,
1220  llvm::Function *F) {
1221  unsigned CallingConv;
1222  llvm::AttributeList PAL;
1223  ConstructAttributeList(F->getName(), Info, GD, PAL, CallingConv, false);
1224  F->setAttributes(PAL);
1225  F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
1226 }
1227 
1228 static void removeImageAccessQualifier(std::string& TyName) {
1229  std::string ReadOnlyQual("__read_only");
1230  std::string::size_type ReadOnlyPos = TyName.find(ReadOnlyQual);
1231  if (ReadOnlyPos != std::string::npos)
1232  // "+ 1" for the space after access qualifier.
1233  TyName.erase(ReadOnlyPos, ReadOnlyQual.size() + 1);
1234  else {
1235  std::string WriteOnlyQual("__write_only");
1236  std::string::size_type WriteOnlyPos = TyName.find(WriteOnlyQual);
1237  if (WriteOnlyPos != std::string::npos)
1238  TyName.erase(WriteOnlyPos, WriteOnlyQual.size() + 1);
1239  else {
1240  std::string ReadWriteQual("__read_write");
1241  std::string::size_type ReadWritePos = TyName.find(ReadWriteQual);
1242  if (ReadWritePos != std::string::npos)
1243  TyName.erase(ReadWritePos, ReadWriteQual.size() + 1);
1244  }
1245  }
1246 }
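// Illustrative example (assumed): removeImageAccessQualifier turns
// "__read_only image2d_t" into "image2d_t"; the access qualifier itself is
// reported separately via the kernel_arg_access_qual metadata below.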
1247 
1248 // Returns the address space id that should be produced to the
1249 // kernel_arg_addr_space metadata. This is always fixed to the ids
1250 // as specified in the SPIR 2.0 specification in order to differentiate
1251 // for example in clGetKernelArgInfo() implementation between the address
1252 // spaces with targets without unique mapping to the OpenCL address spaces
1253 // (basically all single AS CPUs).
1254 static unsigned ArgInfoAddressSpace(LangAS AS) {
1255  switch (AS) {
1256  case LangAS::opencl_global: return 1;
1257  case LangAS::opencl_constant: return 2;
1258  case LangAS::opencl_local: return 3;
1259  case LangAS::opencl_generic: return 4; // Not in SPIR 2.0 specs.
1260  default:
1261  return 0; // Assume private.
1262  }
1263 }
1264 
1265 void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
1266  const FunctionDecl *FD,
1267  CodeGenFunction *CGF) {
1268  assert(((FD && CGF) || (!FD && !CGF)) &&
1269  "Incorrect use - FD and CGF should either be both null or not!");
1270  // Create MDNodes that represent the kernel arg metadata.
1271  // Each MDNode is a list in the form of "key", N number of values which is
1272 // the same number of values as there are kernel arguments.
1273 
1274  const PrintingPolicy &Policy = Context.getPrintingPolicy();
1275 
1276  // MDNode for the kernel argument address space qualifiers.
1277  SmallVector<llvm::Metadata *, 8> addressQuals;
1278 
1279  // MDNode for the kernel argument access qualifiers (images only).
1280  SmallVector<llvm::Metadata *, 8> accessQuals;
1281 
1282  // MDNode for the kernel argument type names.
1283  SmallVector<llvm::Metadata *, 8> argTypeNames;
1284 
1285  // MDNode for the kernel argument base type names.
1286  SmallVector<llvm::Metadata *, 8> argBaseTypeNames;
1287 
1288  // MDNode for the kernel argument type qualifiers.
1289  SmallVector<llvm::Metadata *, 8> argTypeQuals;
1290 
1291  // MDNode for the kernel argument names.
1292  SmallVector<llvm::Metadata *, 8> argNames;
1293 
1294  if (FD && CGF)
1295  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
1296  const ParmVarDecl *parm = FD->getParamDecl(i);
1297  QualType ty = parm->getType();
1298  std::string typeQuals;
1299 
1300  if (ty->isPointerType()) {
1301  QualType pointeeTy = ty->getPointeeType();
1302 
1303  // Get address qualifier.
1304  addressQuals.push_back(
1305  llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(
1306  ArgInfoAddressSpace(pointeeTy.getAddressSpace()))));
1307 
1308  // Get argument type name.
1309  std::string typeName =
1310  pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";
1311 
1312  // Turn "unsigned type" to "utype"
1313  std::string::size_type pos = typeName.find("unsigned");
1314  if (pointeeTy.isCanonical() && pos != std::string::npos)
1315  typeName.erase(pos + 1, 8);
1316 
1317  argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
1318 
1319  std::string baseTypeName =
1320  pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
1321  Policy) +
1322  "*";
1323 
1324  // Turn "unsigned type" to "utype"
1325  pos = baseTypeName.find("unsigned");
1326  if (pos != std::string::npos)
1327  baseTypeName.erase(pos + 1, 8);
1328 
1329  argBaseTypeNames.push_back(
1330  llvm::MDString::get(VMContext, baseTypeName));
1331 
1332  // Get argument type qualifiers:
1333  if (ty.isRestrictQualified())
1334  typeQuals = "restrict";
1335  if (pointeeTy.isConstQualified() ||
1336  (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
1337  typeQuals += typeQuals.empty() ? "const" : " const";
1338  if (pointeeTy.isVolatileQualified())
1339  typeQuals += typeQuals.empty() ? "volatile" : " volatile";
1340  } else {
1341  uint32_t AddrSpc = 0;
1342  bool isPipe = ty->isPipeType();
1343  if (ty->isImageType() || isPipe)
1344  AddrSpc = ArgInfoAddressSpace(LangAS::opencl_global);
1345 
1346  addressQuals.push_back(
1347  llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(AddrSpc)));
1348 
1349  // Get argument type name.
1350  std::string typeName;
1351  if (isPipe)
1352  typeName = ty.getCanonicalType()
1353  ->getAs<PipeType>()
1354  ->getElementType()
1355  .getAsString(Policy);
1356  else
1357  typeName = ty.getUnqualifiedType().getAsString(Policy);
1358 
1359  // Turn "unsigned type" to "utype"
1360  std::string::size_type pos = typeName.find("unsigned");
1361  if (ty.isCanonical() && pos != std::string::npos)
1362  typeName.erase(pos + 1, 8);
1363 
1364  std::string baseTypeName;
1365  if (isPipe)
1366  baseTypeName = ty.getCanonicalType()
1367  ->getAs<PipeType>()
1368  ->getElementType()
1369  .getCanonicalType()
1370  .getAsString(Policy);
1371  else
1372  baseTypeName =
1373  ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);
1374 
1375  // Remove access qualifiers on images
1376  // (as they are inseparable from type in clang implementation,
1377  // but OpenCL spec provides a special query to get access qualifier
1378  // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
1379  if (ty->isImageType()) {
1380  removeImageAccessQualifier(typeName);
1381  removeImageAccessQualifier(baseTypeName);
1382  }
1383 
1384  argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
1385 
1386  // Turn "unsigned type" to "utype"
1387  pos = baseTypeName.find("unsigned");
1388  if (pos != std::string::npos)
1389  baseTypeName.erase(pos + 1, 8);
1390 
1391  argBaseTypeNames.push_back(
1392  llvm::MDString::get(VMContext, baseTypeName));
1393 
1394  if (isPipe)
1395  typeQuals = "pipe";
1396  }
1397 
1398  argTypeQuals.push_back(llvm::MDString::get(VMContext, typeQuals));
1399 
1400  // Get image and pipe access qualifier:
1401  if (ty->isImageType() || ty->isPipeType()) {
1402  const Decl *PDecl = parm;
1403  if (auto *TD = dyn_cast<TypedefType>(ty))
1404  PDecl = TD->getDecl();
1405  const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
1406  if (A && A->isWriteOnly())
1407  accessQuals.push_back(llvm::MDString::get(VMContext, "write_only"));
1408  else if (A && A->isReadWrite())
1409  accessQuals.push_back(llvm::MDString::get(VMContext, "read_write"));
1410  else
1411  accessQuals.push_back(llvm::MDString::get(VMContext, "read_only"));
1412  } else
1413  accessQuals.push_back(llvm::MDString::get(VMContext, "none"));
1414 
1415  // Get argument name.
1416  argNames.push_back(llvm::MDString::get(VMContext, parm->getName()));
1417  }
1418 
1419  Fn->setMetadata("kernel_arg_addr_space",
1420  llvm::MDNode::get(VMContext, addressQuals));
1421  Fn->setMetadata("kernel_arg_access_qual",
1422  llvm::MDNode::get(VMContext, accessQuals));
1423  Fn->setMetadata("kernel_arg_type",
1424  llvm::MDNode::get(VMContext, argTypeNames));
1425  Fn->setMetadata("kernel_arg_base_type",
1426  llvm::MDNode::get(VMContext, argBaseTypeNames));
1427  Fn->setMetadata("kernel_arg_type_qual",
1428  llvm::MDNode::get(VMContext, argTypeQuals));
1429  if (getCodeGenOpts().EmitOpenCLArgMetadata)
1430  Fn->setMetadata("kernel_arg_name",
1431  llvm::MDNode::get(VMContext, argNames));
1432 }
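// Illustrative output (assumed) for a kernel such as
//   kernel void k(global float *in, read_only image2d_t img) { }
// the metadata emitted above looks roughly like:
//   !kernel_arg_addr_space !{i32 1, i32 1}
//   !kernel_arg_access_qual !{!"none", !"read_only"}
//   !kernel_arg_type !{!"float*", !"image2d_t"}
//   !kernel_arg_base_type !{!"float*", !"image2d_t"}
//   !kernel_arg_type_qual !{!"", !""}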
1433 
1434 /// Determines whether the language options require us to model
1435 /// unwind exceptions. We treat -fexceptions as mandating this
1436 /// except under the fragile ObjC ABI with only ObjC exceptions
1437 /// enabled. This means, for example, that C with -fexceptions
1438 /// enables this.
1439 static bool hasUnwindExceptions(const LangOptions &LangOpts) {
1440  // If exceptions are completely disabled, obviously this is false.
1441  if (!LangOpts.Exceptions) return false;
1442 
1443  // If C++ exceptions are enabled, this is true.
1444  if (LangOpts.CXXExceptions) return true;
1445 
1446  // If ObjC exceptions are enabled, this depends on the ABI.
1447  if (LangOpts.ObjCExceptions) {
1448  return LangOpts.ObjCRuntime.hasUnwindExceptions();
1449  }
1450 
1451  return true;
1452 }
1453 
1454 static bool requiresMemberFunctionPointerTypeMetadata(CodeGenModule &CGM,
1455  const CXXMethodDecl *MD) {
1456  // Check that the type metadata can ever actually be used by a call.
1457  if (!CGM.getCodeGenOpts().LTOUnit ||
1458  !CGM.HasHiddenLTOVisibility(MD->getParent()))
1459  return false;
1460 
1461  // Only functions whose address can be taken with a member function pointer
1462  // need this sort of type metadata.
1463  return !MD->isStatic() && !MD->isVirtual() && !isa<CXXConstructorDecl>(MD) &&
1464  !isa<CXXDestructorDecl>(MD);
1465 }
1466 
1467 std::vector<const CXXRecordDecl *>
1468 CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) {
1469  llvm::SetVector<const CXXRecordDecl *> MostBases;
1470 
1471  std::function<void (const CXXRecordDecl *)> CollectMostBases;
1472  CollectMostBases = [&](const CXXRecordDecl *RD) {
1473  if (RD->getNumBases() == 0)
1474  MostBases.insert(RD);
1475  for (const CXXBaseSpecifier &B : RD->bases())
1476  CollectMostBases(B.getType()->getAsCXXRecordDecl());
1477  };
1478  CollectMostBases(RD);
1479  return MostBases.takeVector();
1480 }
1481 
1482 void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
1483  llvm::Function *F) {
1484  llvm::AttrBuilder B;
1485 
1486  if (CodeGenOpts.UnwindTables)
1487  B.addAttribute(llvm::Attribute::UWTable);
1488 
1489  if (!hasUnwindExceptions(LangOpts))
1490  B.addAttribute(llvm::Attribute::NoUnwind);
1491 
1492  if (!D || !D->hasAttr<NoStackProtectorAttr>()) {
1493  if (LangOpts.getStackProtector() == LangOptions::SSPOn)
1494  B.addAttribute(llvm::Attribute::StackProtect);
1495  else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
1496  B.addAttribute(llvm::Attribute::StackProtectStrong);
1497  else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
1498  B.addAttribute(llvm::Attribute::StackProtectReq);
1499  }
1500 
1501  if (!D) {
1502  // If we don't have a declaration to control inlining, the function isn't
1503  // explicitly marked as alwaysinline for semantic reasons, and inlining is
1504  // disabled, mark the function as noinline.
1505  if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline) &&
1506  CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining)
1507  B.addAttribute(llvm::Attribute::NoInline);
1508 
1509  F->addAttributes(llvm::AttributeList::FunctionIndex, B);
1510  return;
1511  }
1512 
1513  // Track whether we need to add the optnone LLVM attribute,
1514  // starting with the default for this optimization level.
1515  bool ShouldAddOptNone =
1516  !CodeGenOpts.DisableO0ImplyOptNone && CodeGenOpts.OptimizationLevel == 0;
1517  // We can't add optnone in the following cases, it won't pass the verifier.
1518  ShouldAddOptNone &= !D->hasAttr<MinSizeAttr>();
1519  ShouldAddOptNone &= !F->hasFnAttribute(llvm::Attribute::AlwaysInline);
1520  ShouldAddOptNone &= !D->hasAttr<AlwaysInlineAttr>();
1521 
1522  if (ShouldAddOptNone || D->hasAttr<OptimizeNoneAttr>()) {
1523  B.addAttribute(llvm::Attribute::OptimizeNone);
1524 
1525  // OptimizeNone implies noinline; we should not be inlining such functions.
1526  B.addAttribute(llvm::Attribute::NoInline);
1527  assert(!F->hasFnAttribute(llvm::Attribute::AlwaysInline) &&
1528  "OptimizeNone and AlwaysInline on same function!");
1529 
1530  // We still need to handle naked functions even though optnone subsumes
1531  // much of their semantics.
1532  if (D->hasAttr<NakedAttr>())
1533  B.addAttribute(llvm::Attribute::Naked);
1534 
1535  // OptimizeNone wins over OptimizeForSize and MinSize.
1536  F->removeFnAttr(llvm::Attribute::OptimizeForSize);
1537  F->removeFnAttr(llvm::Attribute::MinSize);
1538  } else if (D->hasAttr<NakedAttr>()) {
1539  // Naked implies noinline: we should not be inlining such functions.
1540  B.addAttribute(llvm::Attribute::Naked);
1541  B.addAttribute(llvm::Attribute::NoInline);
1542  } else if (D->hasAttr<NoDuplicateAttr>()) {
1543  B.addAttribute(llvm::Attribute::NoDuplicate);
1544  } else if (D->hasAttr<NoInlineAttr>()) {
1545  B.addAttribute(llvm::Attribute::NoInline);
1546  } else if (D->hasAttr<AlwaysInlineAttr>() &&
1547  !F->hasFnAttribute(llvm::Attribute::NoInline)) {
1548  // (noinline wins over always_inline, and we can't specify both in IR)
1549  B.addAttribute(llvm::Attribute::AlwaysInline);
1550  } else if (CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
1551  // If we're not inlining, then force everything that isn't always_inline to
1552  // carry an explicit noinline attribute.
1553  if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline))
1554  B.addAttribute(llvm::Attribute::NoInline);
1555  } else {
1556  // Otherwise, propagate the inline hint attribute and potentially use its
1557  // absence to mark things as noinline.
1558  if (auto *FD = dyn_cast<FunctionDecl>(D)) {
1559  // Search function and template pattern redeclarations for inline.
1560  auto CheckForInline = [](const FunctionDecl *FD) {
1561  auto CheckRedeclForInline = [](const FunctionDecl *Redecl) {
1562  return Redecl->isInlineSpecified();
1563  };
1564  if (any_of(FD->redecls(), CheckRedeclForInline))
1565  return true;
1566  const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern();
1567  if (!Pattern)
1568  return false;
1569  return any_of(Pattern->redecls(), CheckRedeclForInline);
1570  };
1571  if (CheckForInline(FD)) {
1572  B.addAttribute(llvm::Attribute::InlineHint);
1573  } else if (CodeGenOpts.getInlining() ==
1574  CodeGenOptions::OnlyHintInlining &&
1575  !FD->isInlined() &&
1576  !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
1577  B.addAttribute(llvm::Attribute::NoInline);
1578  }
1579  }
1580  }
1581 
1582  // Add other optimization related attributes if we are optimizing this
1583  // function.
1584  if (!D->hasAttr<OptimizeNoneAttr>()) {
1585  if (D->hasAttr<ColdAttr>()) {
1586  if (!ShouldAddOptNone)
1587  B.addAttribute(llvm::Attribute::OptimizeForSize);
1588  B.addAttribute(llvm::Attribute::Cold);
1589  }
1590 
1591  if (D->hasAttr<MinSizeAttr>())
1592  B.addAttribute(llvm::Attribute::MinSize);
1593  }
1594 
1595  F->addAttributes(llvm::AttributeList::FunctionIndex, B);
1596 
1597  unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
1598  if (alignment)
1599  F->setAlignment(alignment);
1600 
1601  if (!D->hasAttr<AlignedAttr>())
1602  if (LangOpts.FunctionAlignment)
1603  F->setAlignment(1 << LangOpts.FunctionAlignment);
1604 
1605  // Some C++ ABIs require 2-byte alignment for member functions, in order to
1606  // reserve a bit for differentiating between virtual and non-virtual member
1607  // functions. If the current target's C++ ABI requires this and this is a
1608  // member function, set its alignment accordingly.
1609  if (getTarget().getCXXABI().areMemberFunctionsAligned()) {
1610  if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
1611  F->setAlignment(2);
1612  }
1613 
1614  // In the cross-dso CFI mode with canonical jump tables, we want !type
1615  // attributes on definitions only.
1616  if (CodeGenOpts.SanitizeCfiCrossDso &&
1617  CodeGenOpts.SanitizeCfiCanonicalJumpTables) {
1618  if (auto *FD = dyn_cast<FunctionDecl>(D)) {
1619  // Skip available_externally functions. They won't be codegen'ed in the
1620  // current module anyway.
1621  if (getContext().GetGVALinkageForFunction(FD) != GVA_AvailableExternally)
1622  CreateFunctionTypeMetadataForIcall(FD, F);
1623  }
1624  }
1625 
1626  // Emit type metadata on member functions for member function pointer checks.
1627  // These are only ever necessary on definitions; we're guaranteed that the
1628  // definition will be present in the LTO unit as a result of LTO visibility.
1629  auto *MD = dyn_cast<CXXMethodDecl>(D);
1630  if (MD && requiresMemberFunctionPointerTypeMetadata(*this, MD)) {
1631  for (const CXXRecordDecl *Base : getMostBaseClasses(MD->getParent())) {
1632  llvm::Metadata *Id =
1633  CreateMetadataIdentifierForVirtualMemPtrType(Context.getMemberPointerType(
1634  MD->getType(), Context.getRecordType(Base).getTypePtr()));
1635  F->addTypeMetadata(0, Id);
1636  }
1637  }
1638 }
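 // Illustrative sketch (assumed typical -O2 output): a declaration such as
 //   __attribute__((cold)) void report_failure(void);
 // is expected to pick up roughly `cold optsize` from the logic above, while
 // __attribute__((optnone)) yields `optnone noinline` and drops
 // optsize/minsize, matching the precedence handling in this function.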
1639 
1640 void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
1641  const Decl *D = GD.getDecl();
1642  if (dyn_cast_or_null<NamedDecl>(D))
1643  setGVProperties(GV, GD);
1644  else
1645  GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
1646 
1647  if (D && D->hasAttr<UsedAttr>())
1648  addUsedGlobal(GV);
1649 
1650  if (CodeGenOpts.KeepStaticConsts && D && isa<VarDecl>(D)) {
1651  const auto *VD = cast<VarDecl>(D);
1652  if (VD->getType().isConstQualified() &&
1653  VD->getStorageDuration() == SD_Static)
1654  addUsedGlobal(GV);
1655  }
1656 }
1657 
1658 bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
1659  llvm::AttrBuilder &Attrs) {
1660  // Add target-cpu and target-features attributes to functions. If
1661  // we have a decl for the function and it has a target attribute then
1662  // parse that and add it to the feature set.
1663  StringRef TargetCPU = getTarget().getTargetOpts().CPU;
1664  std::vector<std::string> Features;
1665  const auto *FD = dyn_cast_or_null<FunctionDecl>(GD.getDecl());
1666  FD = FD ? FD->getMostRecentDecl() : FD;
1667  const auto *TD = FD ? FD->getAttr<TargetAttr>() : nullptr;
1668  const auto *SD = FD ? FD->getAttr<CPUSpecificAttr>() : nullptr;
1669  bool AddedAttr = false;
1670  if (TD || SD) {
1671  llvm::StringMap<bool> FeatureMap;
1672  getFunctionFeatureMap(FeatureMap, GD);
1673 
1674  // Produce the canonical string for this set of features.
1675  for (const llvm::StringMap<bool>::value_type &Entry : FeatureMap)
1676  Features.push_back((Entry.getValue() ? "+" : "-") + Entry.getKey().str());
1677 
1678  // Now add the target-cpu and target-features to the function.
1679  // While we populated the feature map above, we still need to
1680  // get and parse the target attribute so we can get the cpu for
1681  // the function.
1682  if (TD) {
1683  TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
1684  if (ParsedAttr.Architecture != "" &&
1685  getTarget().isValidCPUName(ParsedAttr.Architecture))
1686  TargetCPU = ParsedAttr.Architecture;
1687  }
1688  } else {
1689  // Otherwise just add the existing target cpu and target features to the
1690  // function.
1691  Features = getTarget().getTargetOpts().Features;
1692  }
1693 
1694  if (TargetCPU != "") {
1695  Attrs.addAttribute("target-cpu", TargetCPU);
1696  AddedAttr = true;
1697  }
1698  if (!Features.empty()) {
1699  llvm::sort(Features);
1700  Attrs.addAttribute("target-features", llvm::join(Features, ","));
1701  AddedAttr = true;
1702  }
1703 
1704  return AddedAttr;
1705 }
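 // Illustrative sketch (assumed x86-64 output; the exact feature list varies):
 //   __attribute__((target("avx2"))) void kernel(void);
 // is expected to end up with string attributes along the lines of
 //   "target-cpu"="x86-64" "target-features"="+avx,+avx2,+sse4.2,..."
 // built from the sorted, canonicalized feature map computed above.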
1706 
1707 void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
1708  llvm::GlobalObject *GO) {
1709  const Decl *D = GD.getDecl();
1710  SetCommonAttributes(GD, GO);
1711 
1712  if (D) {
1713  if (auto *GV = dyn_cast<llvm::GlobalVariable>(GO)) {
1714  if (auto *SA = D->getAttr<PragmaClangBSSSectionAttr>())
1715  GV->addAttribute("bss-section", SA->getName());
1716  if (auto *SA = D->getAttr<PragmaClangDataSectionAttr>())
1717  GV->addAttribute("data-section", SA->getName());
1718  if (auto *SA = D->getAttr<PragmaClangRodataSectionAttr>())
1719  GV->addAttribute("rodata-section", SA->getName());
1720  }
1721 
1722  if (auto *F = dyn_cast<llvm::Function>(GO)) {
1723  if (auto *SA = D->getAttr<PragmaClangTextSectionAttr>())
1724  if (!D->getAttr<SectionAttr>())
1725  F->addFnAttr("implicit-section-name", SA->getName());
1726 
1727  llvm::AttrBuilder Attrs;
1728  if (GetCPUAndFeaturesAttributes(GD, Attrs)) {
1729  // We know that GetCPUAndFeaturesAttributes will always have the
1730  // newest set, since it has the newest possible FunctionDecl, so the
1731  // new ones should replace the old.
1732  F->removeFnAttr("target-cpu");
1733  F->removeFnAttr("target-features");
1734  F->addAttributes(llvm::AttributeList::FunctionIndex, Attrs);
1735  }
1736  }
1737 
1738  if (const auto *CSA = D->getAttr<CodeSegAttr>())
1739  GO->setSection(CSA->getName());
1740  else if (const auto *SA = D->getAttr<SectionAttr>())
1741  GO->setSection(SA->getName());
1742  }
1743 
1744  getTargetCodeGenInfo().setTargetAttributes(D, GO, *this);
1745 }
1746 
1747 void CodeGenModule::SetInternalFunctionAttributes(GlobalDecl GD,
1748  llvm::Function *F,
1749  const CGFunctionInfo &FI) {
1750  const Decl *D = GD.getDecl();
1751  SetLLVMFunctionAttributes(GD, FI, F);
1752  SetLLVMFunctionAttributesForDefinition(D, F);
1753 
1754  F->setLinkage(llvm::Function::InternalLinkage);
1755 
1756  setNonAliasAttributes(GD, F);
1757 }
1758 
1759 static void setLinkageForGV(llvm::GlobalValue *GV, const NamedDecl *ND) {
1760  // Set linkage and visibility in case we never see a definition.
1761  LinkageInfo LV = ND->getLinkageAndVisibility();
1762  // Don't set internal linkage on declarations.
1763  // "extern_weak" is overloaded in LLVM; we probably should have
1764  // separate linkage types for this.
1765  if (isExternallyVisible(LV.getLinkage()) &&
1766  (ND->hasAttr<WeakAttr>() || ND->isWeakImported()))
1767  GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
1768 }
1769 
1770 void CodeGenModule::CreateFunctionTypeMetadataForIcall(const FunctionDecl *FD,
1771  llvm::Function *F) {
1772  // Only if we are checking indirect calls.
1773  if (!LangOpts.Sanitize.has(SanitizerKind::CFIICall))
1774  return;
1775 
1776  // Non-static class methods are handled via vtable or member function pointer
1777  // checks elsewhere.
1778  if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic())
1779  return;
1780 
1781  llvm::Metadata *MD = CreateMetadataIdentifierForType(FD->getType());
1782  F->addTypeMetadata(0, MD);
1783  F->addTypeMetadata(0, CreateMetadataIdentifierGeneralized(FD->getType()));
1784 
1785  // Emit a hash-based bit set entry for cross-DSO calls.
1786  if (CodeGenOpts.SanitizeCfiCrossDso)
1787  if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD))
1788  F->addTypeMetadata(0, llvm::ConstantAsMetadata::get(CrossDsoTypeId));
1789 }
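 // Illustrative sketch (assumed IR; mangled type ids follow the Itanium ABI):
 // with -fsanitize=cfi-icall, an address-taken function such as
 //   void handler(int);
 // is expected to carry both the exact and the generalized type id, e.g.
 //   define void @handler(i32 %0) !type !0 !type !1 { ... }
 //   !0 = !{i64 0, !"_ZTSFviE"}
 //   !1 = !{i64 0, !"_ZTSFviE.generalized"}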
1790 
1791 void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
1792  bool IsIncompleteFunction,
1793  bool IsThunk) {
1794 
1795  if (llvm::Intrinsic::ID IID = F->getIntrinsicID()) {
1796  // If this is an intrinsic function, set the function's attributes
1797  // to the intrinsic's attributes.
1798  F->setAttributes(llvm::Intrinsic::getAttributes(getLLVMContext(), IID));
1799  return;
1800  }
1801 
1802  const auto *FD = cast<FunctionDecl>(GD.getDecl());
1803 
1804  if (!IsIncompleteFunction)
1805  SetLLVMFunctionAttributes(GD, getTypes().arrangeGlobalDeclaration(GD), F);
1806 
1807  // Add the Returned attribute for "this", except for iOS 5 and earlier
1808  // where substantial code, including the libstdc++ dylib, was compiled with
1809  // GCC and does not actually return "this".
1810  if (!IsThunk && getCXXABI().HasThisReturn(GD) &&
1811  !(getTriple().isiOS() && getTriple().isOSVersionLT(6))) {
1812  assert(!F->arg_empty() &&
1813  F->arg_begin()->getType()
1814  ->canLosslesslyBitCastTo(F->getReturnType()) &&
1815  "unexpected this return");
1816  F->addAttribute(1, llvm::Attribute::Returned);
1817  }
1818 
1819  // Only a few attributes are set on declarations; these may later be
1820  // overridden by a definition.
1821 
1822  setLinkageForGV(F, FD);
1823  setGVProperties(F, FD);
1824 
1825  // Setup target-specific attributes.
1826  if (!IsIncompleteFunction && F->isDeclaration())
1827  getTargetCodeGenInfo().setTargetAttributes(FD, F, *this);
1828 
1829  if (const auto *CSA = FD->getAttr<CodeSegAttr>())
1830  F->setSection(CSA->getName());
1831  else if (const auto *SA = FD->getAttr<SectionAttr>())
1832  F->setSection(SA->getName());
1833 
1834  if (FD->isReplaceableGlobalAllocationFunction()) {
1835  // A replaceable global allocation function does not act like a builtin by
1836  // default, only if it is invoked by a new-expression or delete-expression.
1837  F->addAttribute(llvm::AttributeList::FunctionIndex,
1838  llvm::Attribute::NoBuiltin);
1839 
1840  // A sane operator new returns a non-aliasing pointer.
1841  // FIXME: Also add NonNull attribute to the return value
1842  // for the non-nothrow forms?
1843  auto Kind = FD->getDeclName().getCXXOverloadedOperator();
1844  if (getCodeGenOpts().AssumeSaneOperatorNew &&
1845  (Kind == OO_New || Kind == OO_Array_New))
1846  F->addAttribute(llvm::AttributeList::ReturnIndex,
1847  llvm::Attribute::NoAlias);
1848  }
1849 
1850  if (isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD))
1851  F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1852  else if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
1853  if (MD->isVirtual())
1854  F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1855 
1856  // Don't emit entries for function declarations in the cross-DSO mode. This
1857  // is handled with better precision by the receiving DSO. But if jump tables
1858  // are non-canonical then we need type metadata in order to produce the local
1859  // jump table.
1860  if (!CodeGenOpts.SanitizeCfiCrossDso ||
1861  !CodeGenOpts.SanitizeCfiCanonicalJumpTables)
1862  CreateFunctionTypeMetadataForIcall(FD, F);
1863 
1864  if (getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>())
1865  getOpenMPRuntime().emitDeclareSimdFunction(FD, F);
1866 
1867  if (const auto *CB = FD->getAttr<CallbackAttr>()) {
1868  // Annotate the callback behavior as metadata:
1869  // - The callback callee (as argument number).
1870  // - The callback payloads (as argument numbers).
1871  llvm::LLVMContext &Ctx = F->getContext();
1872  llvm::MDBuilder MDB(Ctx);
1873 
1874  // The payload indices are all but the first one in the encoding. The first
1875  // identifies the callback callee.
1876  int CalleeIdx = *CB->encoding_begin();
1877  ArrayRef<int> PayloadIndices(CB->encoding_begin() + 1, CB->encoding_end());
1878  F->addMetadata(llvm::LLVMContext::MD_callback,
1879  *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
1880  CalleeIdx, PayloadIndices,
1881  /* VarArgsArePassed */ false)}));
1882  }
1883 }
1884 
1885 void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) {
1886  assert(!GV->isDeclaration() &&
1887  "Only globals with definition can force usage.");
1888  LLVMUsed.emplace_back(GV);
1889 }
1890 
1891 void CodeGenModule::addCompilerUsedGlobal(llvm::GlobalValue *GV) {
1892  assert(!GV->isDeclaration() &&
1893  "Only globals with definition can force usage.");
1894  LLVMCompilerUsed.emplace_back(GV);
1895 }
1896 
1897 static void emitUsed(CodeGenModule &CGM, StringRef Name,
1898  std::vector<llvm::WeakTrackingVH> &List) {
1899  // Don't create llvm.used if there is no need.
1900  if (List.empty())
1901  return;
1902 
1903  // Convert List to what ConstantArray needs.
1904  SmallVector<llvm::Constant*, 8> UsedArray;
1905  UsedArray.resize(List.size());
1906  for (unsigned i = 0, e = List.size(); i != e; ++i) {
1907  UsedArray[i] =
1908  llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
1909  cast<llvm::Constant>(&*List[i]), CGM.Int8PtrTy);
1910  }
1911 
1912  if (UsedArray.empty())
1913  return;
1914  llvm::ArrayType *ATy = llvm::ArrayType::get(CGM.Int8PtrTy, UsedArray.size());
1915 
1916  auto *GV = new llvm::GlobalVariable(
1917  CGM.getModule(), ATy, false, llvm::GlobalValue::AppendingLinkage,
1918  llvm::ConstantArray::get(ATy, UsedArray), Name);
1919 
1920  GV->setSection("llvm.metadata");
1921 }
1922 
1923 void CodeGenModule::emitLLVMUsed() {
1924  emitUsed(*this, "llvm.used", LLVMUsed);
1925  emitUsed(*this, "llvm.compiler.used", LLVMCompilerUsed);
1926 }
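 // Illustrative sketch (assumed IR for a C translation unit): given
 //   static int build_id __attribute__((used));
 // addUsedGlobal() records the variable and emitUsed() later produces roughly
 //   @llvm.used = appending global [1 x i8*]
 //       [i8* bitcast (i32* @build_id to i8*)], section "llvm.metadata"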
1927 
1928 void CodeGenModule::AppendLinkerOptions(StringRef Opts) {
1929  auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opts);
1930  LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
1931 }
1932 
1933 void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) {
1934  llvm::SmallString<32> Opt;
1935  getTargetCodeGenInfo().getDetectMismatchOption(Name, Value, Opt);
1936  auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
1937  LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
1938 }
1939 
1940 void CodeGenModule::AddDependentLib(StringRef Lib) {
1941  auto &C = getLLVMContext();
1942  if (getTarget().getTriple().isOSBinFormatELF()) {
1943  ELFDependentLibraries.push_back(
1944  llvm::MDNode::get(C, llvm::MDString::get(C, Lib)));
1945  return;
1946  }
1947 
1948  llvm::SmallString<24> Opt;
1949  getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt);
1950  auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
1951  LinkerOptionsMetadata.push_back(llvm::MDNode::get(C, MDOpts));
1952 }
1953 
1954 /// Add link options implied by the given module, including modules
1955 /// it depends on, using a postorder walk.
1956 static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
1957  SmallVectorImpl<llvm::MDNode *> &Metadata,
1958  llvm::SmallPtrSet<Module *, 16> &Visited) {
1959  // Import this module's parent.
1960  if (Mod->Parent && Visited.insert(Mod->Parent).second) {
1961  addLinkOptionsPostorder(CGM, Mod->Parent, Metadata, Visited);
1962  }
1963 
1964  // Import this module's dependencies.
1965  for (unsigned I = Mod->Imports.size(); I > 0; --I) {
1966  if (Visited.insert(Mod->Imports[I - 1]).second)
1967  addLinkOptionsPostorder(CGM, Mod->Imports[I-1], Metadata, Visited);
1968  }
1969 
1970  // Add linker options to link against the libraries/frameworks
1971  // described by this module.
1972  llvm::LLVMContext &Context = CGM.getLLVMContext();
1973  bool IsELF = CGM.getTarget().getTriple().isOSBinFormatELF();
1974 
1975  // For modules that use export_as for linking, use that module
1976  // name instead.
1977  if (Mod->UseExportAsModuleLinkName)
1978  return;
1979 
1980  for (unsigned I = Mod->LinkLibraries.size(); I > 0; --I) {
1981  // Link against a framework. Frameworks are currently Darwin only, so we
1982  // don't need to ask TargetCodeGenInfo for the spelling of the linker option.
1983  if (Mod->LinkLibraries[I-1].IsFramework) {
1984  llvm::Metadata *Args[2] = {
1985  llvm::MDString::get(Context, "-framework"),
1986  llvm::MDString::get(Context, Mod->LinkLibraries[I - 1].Library)};
1987 
1988  Metadata.push_back(llvm::MDNode::get(Context, Args));
1989  continue;
1990  }
1991 
1992  // Link against a library.
1993  if (IsELF) {
1994  llvm::Metadata *Args[2] = {
1995  llvm::MDString::get(Context, "lib"),
1996  llvm::MDString::get(Context, Mod->LinkLibraries[I - 1].Library),
1997  };
1998  Metadata.push_back(llvm::MDNode::get(Context, Args));
1999  } else {
2000  llvm::SmallString<24> Opt;
2001  CGM.getTargetCodeGenInfo().getDependentLibraryOption(
2002  Mod->LinkLibraries[I - 1].Library, Opt);
2003  auto *OptString = llvm::MDString::get(Context, Opt);
2004  Metadata.push_back(llvm::MDNode::get(Context, OptString));
2005  }
2006  }
2007 }
2008 
2009 void CodeGenModule::EmitModuleLinkOptions() {
2010  // Collect the set of all of the modules we want to visit to emit link
2011  // options, which is essentially the imported modules and all of their
2012  // non-explicit child modules.
2013  llvm::SetVector<clang::Module *> LinkModules;
2014  llvm::SmallPtrSet<clang::Module *, 16> Visited;
2015  SmallVector<clang::Module *, 16> Stack;
2016 
2017  // Seed the stack with imported modules.
2018  for (Module *M : ImportedModules) {
2019  // Do not add any link flags when an implementation TU of a module imports
2020  // a header of that same module.
2021  if (M->getTopLevelModuleName() == getLangOpts().CurrentModule &&
2022  !getLangOpts().isCompilingModule())
2023  continue;
2024  if (Visited.insert(M).second)
2025  Stack.push_back(M);
2026  }
2027 
2028  // Find all of the modules to import, making a little effort to prune
2029  // non-leaf modules.
2030  while (!Stack.empty()) {
2031  clang::Module *Mod = Stack.pop_back_val();
2032 
2033  bool AnyChildren = false;
2034 
2035  // Visit the submodules of this module.
2036  for (const auto &SM : Mod->submodules()) {
2037  // Skip explicit children; they need to be explicitly imported to be
2038  // linked against.
2039  if (SM->IsExplicit)
2040  continue;
2041 
2042  if (Visited.insert(SM).second) {
2043  Stack.push_back(SM);
2044  AnyChildren = true;
2045  }
2046  }
2047 
2048  // We didn't find any children, so add this module to the list of
2049  // modules to link against.
2050  if (!AnyChildren) {
2051  LinkModules.insert(Mod);
2052  }
2053  }
2054 
2055  // Add link options for all of the imported modules in reverse topological
2056  // order. We don't do anything to try to order import link flags with respect
2057  // to linker options inserted by things like #pragma comment().
2058  SmallVector<llvm::MDNode *, 16> MetadataArgs;
2059  Visited.clear();
2060  for (Module *M : LinkModules)
2061  if (Visited.insert(M).second)
2062  addLinkOptionsPostorder(*this, M, MetadataArgs, Visited);
2063  std::reverse(MetadataArgs.begin(), MetadataArgs.end());
2064  LinkerOptionsMetadata.append(MetadataArgs.begin(), MetadataArgs.end());
2065 
2066  // Add the linker options metadata flag.
2067  auto *NMD = getModule().getOrInsertNamedMetadata("llvm.linker.options");
2068  for (auto *MD : LinkerOptionsMetadata)
2069  NMD->addOperand(MD);
2070 }
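 // Illustrative sketch (assumed module map and IR): importing a module like
 //   module Compression { header "zwrap.h" link "z" }
 // is expected to add an operand to the named metadata created above, e.g.
 //   !llvm.linker.options = !{!0}
 //   !0 = !{!"lib", !"z"}   ; ELF spelling; COFF would use "/DEFAULTLIB:z.lib"
 // while framework links produce !{!"-framework", !"<name>"} pairs.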
2071 
2072 void CodeGenModule::EmitDeferred() {
2073  // Emit deferred declare target declarations.
2074  if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
2075  getOpenMPRuntime().emitDeferredTargetDecls();
2076 
2077  // Emit code for any potentially referenced deferred decls. Since a
2078  // previously unused static decl may become used during the generation of code
2079  // for a static function, iterate until no changes are made.
2080 
2081  if (!DeferredVTables.empty()) {
2082  EmitDeferredVTables();
2083 
2084  // Emitting a vtable doesn't directly cause more vtables to
2085  // become deferred, although it can cause functions to be
2086  // emitted that then need those vtables.
2087  assert(DeferredVTables.empty());
2088  }
2089 
2090  // Stop if we're out of both deferred vtables and deferred declarations.
2091  if (DeferredDeclsToEmit.empty())
2092  return;
2093 
2094  // Grab the list of decls to emit. If EmitGlobalDefinition schedules more
2095  // work, it will not interfere with this.
2096  std::vector<GlobalDecl> CurDeclsToEmit;
2097  CurDeclsToEmit.swap(DeferredDeclsToEmit);
2098 
2099  for (GlobalDecl &D : CurDeclsToEmit) {
2100  // We should call GetAddrOfGlobal with IsForDefinition set to true in order
2101  // to get GlobalValue with exactly the type we need, not something that
2102  // might had been created for another decl with the same mangled name but
2103  // different type.
2104  llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>(
2105  GetAddrOfGlobal(D, ForDefinition));
2106 
2107  // In case of different address spaces, we may still get a cast, even with
2108  // IsForDefinition equal to true. Query mangled names table to get
2109  // GlobalValue.
2110  if (!GV)
2111  GV = GetGlobalValue(getMangledName(D));
2112 
2113  // Make sure GetGlobalValue returned non-null.
2114  assert(GV);
2115 
2116  // Check to see if we've already emitted this. This is necessary
2117  // for a couple of reasons: first, decls can end up in the
2118  // deferred-decls queue multiple times, and second, decls can end
2119  // up with definitions in unusual ways (e.g. by an extern inline
2120  // function acquiring a strong function redefinition). Just
2121  // ignore these cases.
2122  if (!GV->isDeclaration())
2123  continue;
2124 
2125  // Otherwise, emit the definition and move on to the next one.
2126  EmitGlobalDefinition(D, GV);
2127 
2128  // If we found out that we need to emit more decls, do that recursively.
2129  // This has the advantage that the decls are emitted in a DFS and related
2130  // ones are close together, which is convenient for testing.
2131  if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) {
2132  EmitDeferred();
2133  assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty());
2134  }
2135  }
2136 }
2137 
2138 void CodeGenModule::EmitVTablesOpportunistically() {
2139  // Try to emit external vtables as available_externally if they have emitted
2140  // all inlined virtual functions. It runs after EmitDeferred() and therefore
2141  // is not allowed to create new references to things that need to be emitted
2142  // lazily. Note that it also relies on the fact that we emit RTTI eagerly.
2143 
2144  assert((OpportunisticVTables.empty() || shouldOpportunisticallyEmitVTables())
2145  && "Only emit opportunistic vtables with optimizations");
2146 
2147  for (const CXXRecordDecl *RD : OpportunisticVTables) {
2148  assert(getVTables().isVTableExternal(RD) &&
2149  "This queue should only contain external vtables");
2150  if (getCXXABI().canSpeculativelyEmitVTable(RD))
2151  VTables.GenerateClassData(RD);
2152  }
2153  OpportunisticVTables.clear();
2154 }
2155 
2156 void CodeGenModule::EmitGlobalAnnotations() {
2157  if (Annotations.empty())
2158  return;
2159 
2160  // Create a new global variable for the ConstantStruct in the Module.
2161  llvm::Constant *Array = llvm::ConstantArray::get(llvm::ArrayType::get(
2162  Annotations[0]->getType(), Annotations.size()), Annotations);
2163  auto *gv = new llvm::GlobalVariable(getModule(), Array->getType(), false,
2164  llvm::GlobalValue::AppendingLinkage,
2165  Array, "llvm.global.annotations");
2166  gv->setSection(AnnotationSection);
2167 }
2168 
2169 llvm::Constant *CodeGenModule::EmitAnnotationString(StringRef Str) {
2170  llvm::Constant *&AStr = AnnotationStrings[Str];
2171  if (AStr)
2172  return AStr;
2173 
2174  // Not found yet, create a new global.
2175  llvm::Constant *s = llvm::ConstantDataArray::getString(getLLVMContext(), Str);
2176  auto *gv =
2177  new llvm::GlobalVariable(getModule(), s->getType(), true,
2178  llvm::GlobalValue::PrivateLinkage, s, ".str");
2179  gv->setSection(AnnotationSection);
2180  gv->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2181  AStr = gv;
2182  return gv;
2183 }
2184 
2185 llvm::Constant *CodeGenModule::EmitAnnotationUnit(SourceLocation Loc) {
2186  SourceManager &SM = getContext().getSourceManager();
2187  PresumedLoc PLoc = SM.getPresumedLoc(Loc);
2188  if (PLoc.isValid())
2189  return EmitAnnotationString(PLoc.getFilename());
2190  return EmitAnnotationString(SM.getBufferName(Loc));
2191 }
2192 
2193 llvm::ConstantInt *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) {
2194  SourceManager &SM = getContext().getSourceManager();
2195  PresumedLoc PLoc = SM.getPresumedLoc(L);
2196  unsigned LineNo = PLoc.isValid() ? PLoc.getLine() :
2197  SM.getExpansionLineNumber(L);
2198  return llvm::ConstantInt::get(Int32Ty, LineNo);
2199 }
2200 
2201 llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
2202  const AnnotateAttr *AA,
2203  SourceLocation L) {
2204  // Get the globals for file name, annotation, and the line number.
2205  llvm::Constant *AnnoGV = EmitAnnotationString(AA->getAnnotation()),
2206  *UnitGV = EmitAnnotationUnit(L),
2207  *LineNoCst = EmitAnnotationLineNo(L);
2208 
2209  // Create the ConstantStruct for the global annotation.
2210  llvm::Constant *Fields[4] = {
2211  llvm::ConstantExpr::getBitCast(GV, Int8PtrTy),
2212  llvm::ConstantExpr::getBitCast(AnnoGV, Int8PtrTy),
2213  llvm::ConstantExpr::getBitCast(UnitGV, Int8PtrTy),
2214  LineNoCst
2215  };
2216  return llvm::ConstantStruct::getAnon(Fields);
2217 }
2218 
2219 void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D,
2220  llvm::GlobalValue *GV) {
2221  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2222  // Get the struct elements for these annotations.
2223  for (const auto *I : D->specific_attrs<AnnotateAttr>())
2224  Annotations.push_back(EmitAnnotateAttr(GV, I, D->getLocation()));
2225 }
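 // Illustrative sketch (assumed C source and IR): given
 //   __attribute__((annotate("perf_counter"))) int hits;
 // each AnnotateAttr contributes one { global, annotation string, file name,
 // line number } tuple, collected into
 //   @llvm.global.annotations = appending global [1 x { i8*, i8*, i8*, i32 }] ...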
2226 
2227 bool CodeGenModule::isInSanitizerBlacklist(SanitizerMask Kind,
2228  llvm::Function *Fn,
2229  SourceLocation Loc) const {
2230  const auto &SanitizerBL = getContext().getSanitizerBlacklist();
2231  // Blacklist by function name.
2232  if (SanitizerBL.isBlacklistedFunction(Kind, Fn->getName()))
2233  return true;
2234  // Blacklist by location.
2235  if (Loc.isValid())
2236  return SanitizerBL.isBlacklistedLocation(Kind, Loc);
2237  // If location is unknown, this may be a compiler-generated function. Assume
2238  // it's located in the main file.
2239  auto &SM = Context.getSourceManager();
2240  if (const auto *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
2241  return SanitizerBL.isBlacklistedFile(Kind, MainFile->getName());
2242  }
2243  return false;
2244 }
2245 
2246 bool CodeGenModule::isInSanitizerBlacklist(llvm::GlobalVariable *GV,
2247  SourceLocation Loc, QualType Ty,
2248  StringRef Category) const {
2249  // For now globals can be blacklisted only in ASan and KASan.
2250  const SanitizerMask EnabledAsanMask =
2251  LangOpts.Sanitize.Mask &
2252  (SanitizerKind::Address | SanitizerKind::KernelAddress |
2253  SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress |
2254  SanitizerKind::MemTag);
2255  if (!EnabledAsanMask)
2256  return false;
2257  const auto &SanitizerBL = getContext().getSanitizerBlacklist();
2258  if (SanitizerBL.isBlacklistedGlobal(EnabledAsanMask, GV->getName(), Category))
2259  return true;
2260  if (SanitizerBL.isBlacklistedLocation(EnabledAsanMask, Loc, Category))
2261  return true;
2262  // Check global type.
2263  if (!Ty.isNull()) {
2264  // Drill down through array types: if a global variable of a fixed type is
2265  // blacklisted, we also don't instrument arrays of it.
2266  while (auto AT = dyn_cast<ArrayType>(Ty.getTypePtr()))
2267  Ty = AT->getElementType();
2269  // Only record types (classes, structs, etc.) can be blacklisted.
2270  if (Ty->isRecordType()) {
2271  std::string TypeStr = Ty.getAsString(getContext().getPrintingPolicy());
2272  if (SanitizerBL.isBlacklistedType(EnabledAsanMask, TypeStr, Category))
2273  return true;
2274  }
2275  }
2276  return false;
2277 }
2278 
2279 bool CodeGenModule::imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc,
2280  StringRef Category) const {
2281  const auto &XRayFilter = getContext().getXRayFilter();
2282  using ImbueAttr = XRayFunctionFilter::ImbueAttribute;
2283  auto Attr = ImbueAttr::NONE;
2284  if (Loc.isValid())
2285  Attr = XRayFilter.shouldImbueLocation(Loc, Category);
2286  if (Attr == ImbueAttr::NONE)
2287  Attr = XRayFilter.shouldImbueFunction(Fn->getName());
2288  switch (Attr) {
2289  case ImbueAttr::NONE:
2290  return false;
2291  case ImbueAttr::ALWAYS:
2292  Fn->addFnAttr("function-instrument", "xray-always");
2293  break;
2294  case ImbueAttr::ALWAYS_ARG1:
2295  Fn->addFnAttr("function-instrument", "xray-always");
2296  Fn->addFnAttr("xray-log-args", "1");
2297  break;
2298  case ImbueAttr::NEVER:
2299  Fn->addFnAttr("function-instrument", "xray-never");
2300  break;
2301  }
2302  return true;
2303 }
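 // Illustrative sketch (assumed -fxray-attr-list file format): an entry such as
 //   [always]
 //   fun:main
 // makes this routine mark main with "function-instrument"="xray-always";
 // a matching [never] entry would yield "xray-never" instead.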
2304 
2305 bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
2306  // Never defer when EmitAllDecls is specified.
2307  if (LangOpts.EmitAllDecls)
2308  return true;
2309 
2310  if (CodeGenOpts.KeepStaticConsts) {
2311  const auto *VD = dyn_cast<VarDecl>(Global);
2312  if (VD && VD->getType().isConstQualified() &&
2313  VD->getStorageDuration() == SD_Static)
2314  return true;
2315  }
2316 
2317  return getContext().DeclMustBeEmitted(Global);
2318 }
2319 
2320 bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
2321  if (const auto *FD = dyn_cast<FunctionDecl>(Global))
2322  if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
2323  // Implicit template instantiations may change linkage if they are later
2324  // explicitly instantiated, so they should not be emitted eagerly.
2325  return false;
2326  if (const auto *VD = dyn_cast<VarDecl>(Global))
2327  if (Context.getInlineVariableDefinitionKind(VD) ==
2328  ASTContext::InlineVariableDefinitionKind::WeakUnknown)
2329  // A definition of an inline constexpr static data member may change
2330  // linkage later if it's redeclared outside the class.
2331  return false;
2332  // If OpenMP is enabled and threadprivates must be generated like TLS, delay
2333  // codegen for global variables, because they may be marked as threadprivate.
2334  if (LangOpts.OpenMP && LangOpts.OpenMPUseTLS &&
2335  getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Global) &&
2336  !isTypeConstant(Global->getType(), false) &&
2337  !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(Global))
2338  return false;
2339 
2340  return true;
2341 }
2342 
2343 ConstantAddress CodeGenModule::GetAddrOfUuidDescriptor(
2344  const CXXUuidofExpr* E) {
2345  // Sema has verified that IIDSource has a __declspec(uuid()), and that it's
2346  // well-formed.
2347  StringRef Uuid = E->getUuidStr();
2348  std::string Name = "_GUID_" + Uuid.lower();
2349  std::replace(Name.begin(), Name.end(), '-', '_');
2350 
2351  // The UUID descriptor should be pointer aligned.
2352  CharUnits Alignment = getContext().getPointerAlign();
2353 
2354  // Look for an existing global.
2355  if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
2356  return ConstantAddress(GV, Alignment);
2357 
2358  llvm::Constant *Init = EmitUuidofInitializer(Uuid);
2359  assert(Init && "failed to initialize as constant");
2360 
2361  auto *GV = new llvm::GlobalVariable(
2362  getModule(), Init->getType(),
2363  /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
2364  if (supportsCOMDAT())
2365  GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
2366  setDSOLocal(GV);
2367  return ConstantAddress(GV, Alignment);
2368 }
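 // Illustrative sketch (assumed MS-extensions source): for
 //   struct __declspec(uuid("01234567-89ab-cdef-0123-456789abcdef")) Widget;
 //   ... __uuidof(Widget) ...
 // this returns a pointer-aligned, linkonce_odr 16-byte GUID constant named
 //   @_GUID_01234567_89ab_cdef_0123_456789abcdef
 // per the name mangling performed above.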
2369 
2370 ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
2371  const AliasAttr *AA = VD->getAttr<AliasAttr>();
2372  assert(AA && "No alias?");
2373 
2374  CharUnits Alignment = getContext().getDeclAlign(VD);
2375  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());
2376 
2377  // See if there is already something with the target's name in the module.
2378  llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
2379  if (Entry) {
2380  unsigned AS = getContext().getTargetAddressSpace(VD->getType());
2381  auto Ptr = llvm::ConstantExpr::getBitCast(Entry, DeclTy->getPointerTo(AS));
2382  return ConstantAddress(Ptr, Alignment);
2383  }
2384 
2385  llvm::Constant *Aliasee;
2386  if (isa<llvm::FunctionType>(DeclTy))
2387  Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy,
2388  GlobalDecl(cast<FunctionDecl>(VD)),
2389  /*ForVTable=*/false);
2390  else
2391  Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
2392  llvm::PointerType::getUnqual(DeclTy),
2393  nullptr);
2394 
2395  auto *F = cast<llvm::GlobalValue>(Aliasee);
2396  F->setLinkage(llvm::Function::ExternalWeakLinkage);
2397  WeakRefReferences.insert(F);
2398 
2399  return ConstantAddress(Aliasee, Alignment);
2400 }
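 // Illustrative sketch (assumed C source): a weak reference such as
 //   static void fallback(void) __attribute__((weakref("real_impl")));
 // resolves here to an extern_weak reference to real_impl, so the program can
 // test the address for null before calling through it.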
2401 
2402 void CodeGenModule::EmitGlobal(GlobalDecl GD) {
2403  const auto *Global = cast<ValueDecl>(GD.getDecl());
2404 
2405  // Weak references don't produce any output by themselves.
2406  if (Global->hasAttr<WeakRefAttr>())
2407  return;
2408 
2409  // If this is an alias definition (which otherwise looks like a declaration)
2410  // emit it now.
2411  if (Global->hasAttr<AliasAttr>())
2412  return EmitAliasDefinition(GD);
2413 
2414  // An ifunc is like an alias whose value is resolved at run time by calling a resolver.
2415  if (Global->hasAttr<IFuncAttr>())
2416  return emitIFuncDefinition(GD);
2417 
2418  // If this is a cpu_dispatch multiversion function, emit the resolver.
2419  if (Global->hasAttr<CPUDispatchAttr>())
2420  return emitCPUDispatchDefinition(GD);
2421 
2422  // If this is CUDA, be selective about which declarations we emit.
2423  if (LangOpts.CUDA) {
2424  if (LangOpts.CUDAIsDevice) {
2425  if (!Global->hasAttr<CUDADeviceAttr>() &&
2426  !Global->hasAttr<CUDAGlobalAttr>() &&
2427  !Global->hasAttr<CUDAConstantAttr>() &&
2428  !Global->hasAttr<CUDASharedAttr>() &&
2429  !(LangOpts.HIP && Global->hasAttr<HIPPinnedShadowAttr>()))
2430  return;
2431  } else {
2432  // We need to emit host-side 'shadows' for all global
2433  // device-side variables because the CUDA runtime needs their
2434  // size and host-side address in order to provide access to
2435  // their device-side incarnations.
2436 
2437  // So device-only functions are the only things we skip.
2438  if (isa<FunctionDecl>(Global) && !Global->hasAttr<CUDAHostAttr>() &&
2439  Global->hasAttr<CUDADeviceAttr>())
2440  return;
2441 
2442  assert((isa<FunctionDecl>(Global) || isa<VarDecl>(Global)) &&
2443  "Expected Variable or Function");
2444  }
2445  }
2446 
2447  if (LangOpts.OpenMP) {
2448  // If this is OpenMP device, check if it is legal to emit this global
2449  // normally.
2450  if (OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(GD))
2451  return;
2452  if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(Global)) {
2453  if (MustBeEmitted(Global))
2454  EmitOMPDeclareReduction(DRD);
2455  return;
2456  } else if (auto *DMD = dyn_cast<OMPDeclareMapperDecl>(Global)) {
2457  if (MustBeEmitted(Global))
2458  EmitOMPDeclareMapper(DMD);
2459  return;
2460  }
2461  }
2462 
2463  // Ignore declarations, they will be emitted on their first use.
2464  if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
2465  // Forward declarations are emitted lazily on first use.
2466  if (!FD->doesThisDeclarationHaveABody()) {
2467  if (!FD->doesDeclarationForceExternallyVisibleDefinition())
2468  return;
2469 
2470  StringRef MangledName = getMangledName(GD);
2471 
2472  // Compute the function info and LLVM type.
2473  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
2474  llvm::Type *Ty = getTypes().GetFunctionType(FI);
2475 
2476  GetOrCreateLLVMFunction(MangledName, Ty, GD, /*ForVTable=*/false,
2477  /*DontDefer=*/false);
2478  return;
2479  }
2480  } else {
2481  const auto *VD = cast<VarDecl>(Global);
2482  assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
2483  if (VD->isThisDeclarationADefinition() != VarDecl::Definition &&
2484  !Context.isMSStaticDataMemberInlineDefinition(VD)) {
2485  if (LangOpts.OpenMP) {
2486  // Emit declaration of the must-be-emitted declare target variable.
2487  if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2488  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
2489  bool UnifiedMemoryEnabled =
2490  getOpenMPRuntime().hasRequiresUnifiedSharedMemory();
2491  if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
2492  !UnifiedMemoryEnabled) {
2493  (void)GetAddrOfGlobalVar(VD);
2494  } else {
2495  assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
2496  (*Res == OMPDeclareTargetDeclAttr::MT_To &&
2497  UnifiedMemoryEnabled)) &&
2498  "Link clause or to clause with unified memory expected.");
2499  (void)getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
2500  }
2501 
2502  return;
2503  }
2504  }
2505  // If this declaration may have caused an inline variable definition to
2506  // change linkage, make sure that it's emitted.
2507  if (Context.getInlineVariableDefinitionKind(VD) ==
2508  ASTContext::InlineVariableDefinitionKind::Strong)
2509  GetAddrOfGlobalVar(VD);
2510  return;
2511  }
2512  }
2513 
2514  // Defer code generation to first use when possible, e.g. if this is an inline
2515  // function. If the global must always be emitted, do it eagerly if possible
2516  // to benefit from cache locality.
2517  if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) {
2518  // Emit the definition if it can't be deferred.
2519  EmitGlobalDefinition(GD);
2520  return;
2521  }
2522 
2523  // If we're deferring emission of a C++ variable with an
2524  // initializer, remember the order in which it appeared in the file.
2525  if (getLangOpts().CPlusPlus && isa<VarDecl>(Global) &&
2526  cast<VarDecl>(Global)->hasInit()) {
2527  DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
2528  CXXGlobalInits.push_back(nullptr);
2529  }
2530 
2531  StringRef MangledName = getMangledName(GD);
2532  if (GetGlobalValue(MangledName) != nullptr) {
2533  // The value has already been used and should therefore be emitted.
2534  addDeferredDeclToEmit(GD);
2535  } else if (MustBeEmitted(Global)) {
2536  // The value must be emitted, but cannot be emitted eagerly.
2537  assert(!MayBeEmittedEagerly(Global));
2538  addDeferredDeclToEmit(GD);
2539  } else {
2540  // Otherwise, remember that we saw a deferred decl with this name. The
2541  // first use of the mangled name will cause it to move into
2542  // DeferredDeclsToEmit.
2543  DeferredDecls[MangledName] = GD;
2544  }
2545 }
2546 
2547 // Check if T is a class type with a destructor that's not dllimport.
2548 static bool HasNonDllImportDtor(QualType T) {
2549  if (const auto *RT = T->getBaseElementTypeUnsafe()->getAs<RecordType>())
2550  if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
2551  if (RD->getDestructor() && !RD->getDestructor()->hasAttr<DLLImportAttr>())
2552  return true;
2553 
2554  return false;
2555 }
2556 
2557 namespace {
2558  struct FunctionIsDirectlyRecursive
2559  : public ConstStmtVisitor<FunctionIsDirectlyRecursive, bool> {
2560  const StringRef Name;
2561  const Builtin::Context &BI;
2562  FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C)
2563  : Name(N), BI(C) {}
2564 
2565  bool VisitCallExpr(const CallExpr *E) {
2566  const FunctionDecl *FD = E->getDirectCallee();
2567  if (!FD)
2568  return false;
2569  AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
2570  if (Attr && Name == Attr->getLabel())
2571  return true;
2572  unsigned BuiltinID = FD->getBuiltinID();
2573  if (!BuiltinID || !BI.isLibFunction(BuiltinID))
2574  return false;
2575  StringRef BuiltinName = BI.getName(BuiltinID);
2576  if (BuiltinName.startswith("__builtin_") &&
2577  Name == BuiltinName.slice(strlen("__builtin_"), StringRef::npos)) {
2578  return true;
2579  }
2580  return false;
2581  }
2582 
2583  bool VisitStmt(const Stmt *S) {
2584  for (const Stmt *Child : S->children())
2585  if (Child && this->Visit(Child))
2586  return true;
2587  return false;
2588  }
2589  };
2590 
2591  // Make sure we're not referencing non-imported vars or functions.
2592  struct DLLImportFunctionVisitor
2593  : public RecursiveASTVisitor<DLLImportFunctionVisitor> {
2594  bool SafeToInline = true;
2595 
2596  bool shouldVisitImplicitCode() const { return true; }
2597 
2598  bool VisitVarDecl(VarDecl *VD) {
2599  if (VD->getTLSKind()) {
2600  // A thread-local variable cannot be imported.
2601  SafeToInline = false;
2602  return SafeToInline;
2603  }
2604 
2605  // A variable definition might imply a destructor call.
2606  if (VD->isThisDeclarationADefinition())
2607  SafeToInline = !HasNonDllImportDtor(VD->getType());
2608 
2609  return SafeToInline;
2610  }
2611 
2612  bool VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
2613  if (const auto *D = E->getTemporary()->getDestructor())
2614  SafeToInline = D->hasAttr<DLLImportAttr>();
2615  return SafeToInline;
2616  }
2617 
2618  bool VisitDeclRefExpr(DeclRefExpr *E) {
2619  ValueDecl *VD = E->getDecl();
2620  if (isa<FunctionDecl>(VD))
2621  SafeToInline = VD->hasAttr<DLLImportAttr>();
2622  else if (VarDecl *V = dyn_cast<VarDecl>(VD))
2623  SafeToInline = !V->hasGlobalStorage() || V->hasAttr<DLLImportAttr>();
2624  return SafeToInline;
2625  }
2626 
2627  bool VisitCXXConstructExpr(CXXConstructExpr *E) {
2628  SafeToInline = E->getConstructor()->hasAttr<DLLImportAttr>();
2629  return SafeToInline;
2630  }
2631 
2632  bool VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
2633  CXXMethodDecl *M = E->getMethodDecl();
2634  if (!M) {
2635  // Call through a pointer to member function. This is safe to inline.
2636  SafeToInline = true;
2637  } else {
2638  SafeToInline = M->hasAttr<DLLImportAttr>();
2639  }
2640  return SafeToInline;
2641  }
2642 
2643  bool VisitCXXDeleteExpr(CXXDeleteExpr *E) {
2644  SafeToInline = E->getOperatorDelete()->hasAttr<DLLImportAttr>();
2645  return SafeToInline;
2646  }
2647 
2648  bool VisitCXXNewExpr(CXXNewExpr *E) {
2649  SafeToInline = E->getOperatorNew()->hasAttr<DLLImportAttr>();
2650  return SafeToInline;
2651  }
2652  };
2653 }
2654 
2655 // isTriviallyRecursive - Check if this function calls another
2656 // decl that, because of the asm attribute or the other decl being a builtin,
2657 // ends up pointing to itself.
2658 bool
2659 CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) {
2660  StringRef Name;
2661  if (getCXXABI().getMangleContext().shouldMangleDeclName(FD)) {
2662  // asm labels are a special kind of mangling we have to support.
2663  AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
2664  if (!Attr)
2665  return false;
2666  Name = Attr->getLabel();
2667  } else {
2668  Name = FD->getName();
2669  }
2670 
2671  FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo);
2672  const Stmt *Body = FD->getBody();
2673  return Body ? Walker.Visit(Body) : false;
2674 }
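 // Illustrative sketch (assumed gnu89-style C source): an "implementation" like
 //   extern inline void *memcpy(void *d, const void *s, size_t n) {
 //     return __builtin_memcpy(d, s, n);  // lowers back to a call to memcpy
 //   }
 // is trivially recursive, so shouldEmitFunction() below declines to emit it
 // with available_externally linkage.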
2675 
2676 bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
2677  if (getFunctionLinkage(GD) != llvm::Function::AvailableExternallyLinkage)
2678  return true;
2679  const auto *F = cast<FunctionDecl>(GD.getDecl());
2680  if (CodeGenOpts.OptimizationLevel == 0 && !F->hasAttr<AlwaysInlineAttr>())
2681  return false;
2682 
2683  if (F->hasAttr<DLLImportAttr>()) {
2684  // Check whether it would be safe to inline this dllimport function.
2685  DLLImportFunctionVisitor Visitor;
2686  Visitor.TraverseFunctionDecl(const_cast<FunctionDecl*>(F));
2687  if (!Visitor.SafeToInline)
2688  return false;
2689 
2690  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(F)) {
2691  // Implicit destructor invocations aren't captured in the AST, so the
2692  // check above can't see them. Check for them manually here.
2693  for (const Decl *Member : Dtor->getParent()->decls())
2694  if (isa<FieldDecl>(Member))
2695  if (HasNonDllImportDtor(cast<FieldDecl>(Member)->getType()))
2696  return false;
2697  for (const CXXBaseSpecifier &B : Dtor->getParent()->bases())
2698  if (HasNonDllImportDtor(B.getType()))
2699  return false;
2700  }
2701  }
2702 
2703  // PR9614. Avoid cases where the source code is lying to us. An available
2704  // externally function should have an equivalent function somewhere else,
2705  // but a function that calls itself is clearly not equivalent to the real
2706  // implementation.
2707  // This happens in glibc's btowc and in some configure checks.
2708  return !isTriviallyRecursive(F);
2709 }
2710 
2711 bool CodeGenModule::shouldOpportunisticallyEmitVTables() {
2712  return CodeGenOpts.OptimizationLevel > 0;
2713 }
2714 
2715 void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD,
2716  llvm::GlobalValue *GV) {
2717  const auto *FD = cast<FunctionDecl>(GD.getDecl());
2718 
2719  if (FD->isCPUSpecificMultiVersion()) {
2720  auto *Spec = FD->getAttr<CPUSpecificAttr>();
2721  for (unsigned I = 0; I < Spec->cpus_size(); ++I)
2722  EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
2723  // Requires multiple emits.
2724  } else
2725  EmitGlobalFunctionDefinition(GD, GV);
2726 }
2727 
2728 void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
2729  const auto *D = cast<ValueDecl>(GD.getDecl());
2730 
2731  PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(),
2732  Context.getSourceManager(),
2733  "Generating code for declaration");
2734 
2735  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
2736  // At -O0, don't generate IR for functions with available_externally
2737  // linkage.
2738  if (!shouldEmitFunction(GD))
2739  return;
2740 
2741  llvm::TimeTraceScope TimeScope("CodeGen Function", [&]() {
2742  std::string Name;
2743  llvm::raw_string_ostream OS(Name);
2744  FD->getNameForDiagnostic(OS, getContext().getPrintingPolicy(),
2745  /*Qualified=*/true);
2746  return Name;
2747  });
2748 
2749  if (const auto *Method = dyn_cast<CXXMethodDecl>(D)) {
2750  // Make sure to emit the definition(s) before we emit the thunks.
2751  // This is necessary for the generation of certain thunks.
2752  if (isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method))
2753  ABI->emitCXXStructor(GD);
2754  else if (FD->isMultiVersion())
2755  EmitMultiVersionFunctionDefinition(GD, GV);
2756  else
2757  EmitGlobalFunctionDefinition(GD, GV);
2758 
2759  if (Method->isVirtual())
2760  getVTables().EmitThunks(GD);
2761 
2762  return;
2763  }
2764 
2765  if (FD->isMultiVersion())
2766  return EmitMultiVersionFunctionDefinition(GD, GV);
2767  return EmitGlobalFunctionDefinition(GD, GV);
2768  }
2769 
2770  if (const auto *VD = dyn_cast<VarDecl>(D))
2771  return EmitGlobalVarDefinition(VD, !VD->hasDefinition());
2772 
2773  llvm_unreachable("Invalid argument to EmitGlobalDefinition()");
2774 }
2775 
2776 static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
2777  llvm::Function *NewFn);
2778 
2779 static unsigned
2780 TargetMVPriority(const TargetInfo &TI,
2781  const CodeGenFunction::MultiVersionResolverOption &RO) {
2782  unsigned Priority = 0;
2783  for (StringRef Feat : RO.Conditions.Features)
2784  Priority = std::max(Priority, TI.multiVersionSortPriority(Feat));
2785 
2786  if (!RO.Conditions.Architecture.empty())
2787  Priority = std::max(
2788  Priority, TI.multiVersionSortPriority(RO.Conditions.Architecture));
2789  return Priority;
2790 }
2791 
2792 void CodeGenModule::emitMultiVersionFunctions() {
2793  for (GlobalDecl GD : MultiVersionFuncs) {
2794  SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
2795  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2796  getContext().forEachMultiversionedFunctionVersion(
2797  FD, [this, &GD, &Options](const FunctionDecl *CurFD) {
2798  GlobalDecl CurGD{
2799  (CurFD->isDefined() ? CurFD->getDefinition() : CurFD)};
2800  StringRef MangledName = getMangledName(CurGD);
2801  llvm::Constant *Func = GetGlobalValue(MangledName);
2802  if (!Func) {
2803  if (CurFD->isDefined()) {
2804  EmitGlobalFunctionDefinition(CurGD, nullptr);
2805  Func = GetGlobalValue(MangledName);
2806  } else {
2807  const CGFunctionInfo &FI =
2808  getTypes().arrangeGlobalDeclaration(GD);
2809  llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
2810  Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
2811  /*DontDefer=*/false, ForDefinition);
2812  }
2813  assert(Func && "This should have just been created");
2814  }
2815 
2816  const auto *TA = CurFD->getAttr<TargetAttr>();
2817  llvm::SmallVector<StringRef, 8> Feats;
2818  TA->getAddedFeatures(Feats);
2819 
2820  Options.emplace_back(cast<llvm::Function>(Func),
2821  TA->getArchitecture(), Feats);
2822  });
2823 
2824  llvm::Function *ResolverFunc;
2825  const TargetInfo &TI = getTarget();
2826 
2827  if (TI.supportsIFunc() || FD->isTargetMultiVersion())
2828  ResolverFunc = cast<llvm::Function>(
2829  GetGlobalValue((getMangledName(GD) + ".resolver").str()));
2830  else
2831  ResolverFunc = cast<llvm::Function>(GetGlobalValue(getMangledName(GD)));
2832 
2833  if (supportsCOMDAT())
2834  ResolverFunc->setComdat(
2835  getModule().getOrInsertComdat(ResolverFunc->getName()));
2836 
2837  llvm::stable_sort(
2838  Options, [&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
2839  const CodeGenFunction::MultiVersionResolverOption &RHS) {
2840  return TargetMVPriority(TI, LHS) > TargetMVPriority(TI, RHS);
2841  });
2842  CodeGenFunction CGF(*this);
2843  CGF.EmitMultiVersionResolver(ResolverFunc, Options);
2844  }
2845 }
2846 
2847 void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
2848  const auto *FD = cast<FunctionDecl>(GD.getDecl());
2849  assert(FD && "Not a FunctionDecl?");
2850  const auto *DD = FD->getAttr<CPUDispatchAttr>();
2851  assert(DD && "Not a cpu_dispatch Function?");
2852  llvm::Type *DeclTy = getTypes().ConvertType(FD->getType());
2853 
2854  if (const auto *CXXFD = dyn_cast<CXXMethodDecl>(FD)) {
2855  const CGFunctionInfo &FInfo = getTypes().arrangeCXXMethodDeclaration(CXXFD);
2856  DeclTy = getTypes().GetFunctionType(FInfo);
2857  }
2858 
2859  StringRef ResolverName = getMangledName(GD);
2860 
2861  llvm::Type *ResolverType;
2862  GlobalDecl ResolverGD;
2863  if (getTarget().supportsIFunc())
2864  ResolverType = llvm::FunctionType::get(
2865  llvm::PointerType::get(DeclTy,
2866  Context.getTargetAddressSpace(FD->getType())),
2867  false);
2868  else {
2869  ResolverType = DeclTy;
2870  ResolverGD = GD;
2871  }
2872 
2873  auto *ResolverFunc = cast<llvm::Function>(GetOrCreateLLVMFunction(
2874  ResolverName, ResolverType, ResolverGD, /*ForVTable=*/false));
2875 
2876  SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
2877  const TargetInfo &Target = getTarget();
2878  unsigned Index = 0;
2879  for (const IdentifierInfo *II : DD->cpus()) {
2880  // Get the name of the target function so we can look it up/create it.
2881  std::string MangledName = getMangledNameImpl(*this, GD, FD, true) +
2882  getCPUSpecificMangling(*this, II->getName());
2883 
2884  llvm::Constant *Func = GetGlobalValue(MangledName);
2885 
2886  if (!Func) {
2887  GlobalDecl ExistingDecl = Manglings.lookup(MangledName);
2888  if (ExistingDecl.getDecl() &&
2889  ExistingDecl.getDecl()->getAsFunction()->isDefined()) {
2890  EmitGlobalFunctionDefinition(ExistingDecl, nullptr);
2891  Func = GetGlobalValue(MangledName);
2892  } else {
2893  if (!ExistingDecl.getDecl())
2894  ExistingDecl = GD.getWithMultiVersionIndex(Index);
2895 
2896  Func = GetOrCreateLLVMFunction(
2897  MangledName, DeclTy, ExistingDecl,
2898  /*ForVTable=*/false, /*DontDefer=*/true,
2899  /*IsThunk=*/false, llvm::AttributeList(), ForDefinition);
2900  }
2901  }
2902 
2903  llvm::SmallVector<StringRef, 32> Features;
2904  Target.getCPUSpecificCPUDispatchFeatures(II->getName(), Features);
2905  llvm::transform(Features, Features.begin(),
2906  [](StringRef Str) { return Str.substr(1); });
2907  Features.erase(std::remove_if(
2908  Features.begin(), Features.end(), [&Target](StringRef Feat) {
2909  return !Target.validateCpuSupports(Feat);
2910  }), Features.end());
2911  Options.emplace_back(cast<llvm::Function>(Func), StringRef{}, Features);
2912  ++Index;
2913  }
2914 
2915  llvm::sort(
2916  Options, [](const CodeGenFunction::MultiVersionResolverOption &LHS,
2917  const CodeGenFunction::MultiVersionResolverOption &RHS) {
2918  return CodeGenFunction::GetX86CpuSupportsMask(LHS.Conditions.Features) >
2919  CodeGenFunction::GetX86CpuSupportsMask(RHS.Conditions.Features);
2920  });
2921 
2922  // If the list contains multiple 'default' versions, such as when it contains
2923  // 'pentium' and 'generic', don't emit the call to the generic one (since we
2924  // always run on at least a 'pentium'). We do this by deleting the 'least
2925  // advanced' (read, lowest mangling letter).
2926  while (Options.size() > 1 &&
2927  CodeGenFunction::GetX86CpuSupportsMask(
2928  (Options.end() - 2)->Conditions.Features) == 0) {
2929  StringRef LHSName = (Options.end() - 2)->Function->getName();
2930  StringRef RHSName = (Options.end() - 1)->Function->getName();
2931  if (LHSName.compare(RHSName) < 0)
2932  Options.erase(Options.end() - 2);
2933  else
2934  Options.erase(Options.end() - 1);
2935  }
2936 
2937  CodeGenFunction CGF(*this);
2938  CGF.EmitMultiVersionResolver(ResolverFunc, Options);
2939 }
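 // Illustrative sketch (assumed x86-only C source): a dispatcher such as
 //   __attribute__((cpu_specific(ivybridge))) void scale(void) { /* AVX  */ }
 //   __attribute__((cpu_specific(generic)))   void scale(void) { /* base */ }
 //   __attribute__((cpu_dispatch(ivybridge, generic))) void scale(void);
 // produces one definition per CPU plus the resolver (or ifunc) built above,
 // with the most specific CPU tested first and duplicate defaults pruned.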
2940 
2941 /// If a dispatcher for the specified mangled name is not in the module, create
2942 /// and return an llvm Function with the specified type.
2943 llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(
2944  GlobalDecl GD, llvm::Type *DeclTy, const FunctionDecl *FD) {
2945  std::string MangledName =
2946  getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
2947 
2948  // Holds the name of the resolver; in ifunc mode this is the ifunc (which has
2949  // a separate resolver).
2950  std::string ResolverName = MangledName;
2951  if (getTarget().supportsIFunc())
2952  ResolverName += ".ifunc";
2953  else if (FD->isTargetMultiVersion())
2954  ResolverName += ".resolver";
2955 
2956  // If this already exists, just return that one.
2957  if (llvm::GlobalValue *ResolverGV = GetGlobalValue(ResolverName))
2958  return ResolverGV;
2959 
2960  // Since this is the first time we've created this IFunc, make sure
2961  // that we put this multiversioned function into the list to be
2962  // replaced later if necessary (target multiversioning only).
2963  if (FD->isTargetMultiVersion())
2964  MultiVersionFuncs.push_back(GD);
2965 
2966  if (getTarget().supportsIFunc()) {
2967  llvm::Type *ResolverType = llvm::FunctionType::get(
2968  llvm::PointerType::get(
2969  DeclTy, getContext().getTargetAddressSpace(FD->getType())),
2970  false);
2971  llvm::Constant *Resolver = GetOrCreateLLVMFunction(
2972  MangledName + ".resolver", ResolverType, GlobalDecl{},
2973  /*ForVTable=*/false);
2974  llvm::GlobalIFunc *GIF = llvm::GlobalIFunc::create(
2975  DeclTy, 0, llvm::Function::ExternalLinkage, "", Resolver, &getModule());
2976  GIF->setName(ResolverName);
2977  SetCommonAttributes(FD, GIF);
2978 
2979  return GIF;
2980  }
2981 
2982  llvm::Constant *Resolver = GetOrCreateLLVMFunction(
2983  ResolverName, DeclTy, GlobalDecl{}, /*ForVTable=*/false);
2984  assert(isa<llvm::GlobalValue>(Resolver) &&
2985  "Resolver should be created for the first time");
2986  SetCommonAttributes(FD, cast<llvm::GlobalValue>(Resolver));
2987  return Resolver;
2988 }
2989 
2990 /// GetOrCreateLLVMFunction - If the specified mangled name is not in the
2991 /// module, create and return an llvm Function with the specified type. If there
2992 /// is something in the module with the specified name, return it potentially
2993 /// bitcasted to the right type.
2994 ///
2995 /// If D is non-null, it specifies a decl that corresponds to this. This is used
2996 /// to set the attributes on the function when it is first created.
2997 llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
2998  StringRef MangledName, llvm::Type *Ty, GlobalDecl GD, bool ForVTable,
2999  bool DontDefer, bool IsThunk, llvm::AttributeList ExtraAttrs,
3000  ForDefinition_t IsForDefinition) {
3001  const Decl *D = GD.getDecl();
3002 
3003  // Any attempts to use a MultiVersion function should result in retrieving
3004  // the ifunc instead. Name mangling will handle the rest of the changes.
3005  if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(D)) {
3006  // For the device mark the function as one that should be emitted.
3007  if (getLangOpts().OpenMPIsDevice && OpenMPRuntime &&
3008  !OpenMPRuntime->markAsGlobalTarget(GD) && FD->isDefined() &&
3009  !DontDefer && !IsForDefinition) {
3010  if (const FunctionDecl *FDDef = FD->getDefinition()) {
3011  GlobalDecl GDDef;
3012  if (const auto *CD = dyn_cast<CXXConstructorDecl>(FDDef))
3013  GDDef = GlobalDecl(CD, GD.getCtorType());
3014  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(FDDef))
3015  GDDef = GlobalDecl(DD, GD.getDtorType());
3016  else
3017  GDDef = GlobalDecl(FDDef);
3018  EmitGlobal(GDDef);
3019  }
3020  }
3021 
3022  if (FD->isMultiVersion()) {
3023  const auto *TA = FD->getAttr<TargetAttr>();
3024  if (TA && TA->isDefaultVersion())
3025  UpdateMultiVersionNames(GD, FD);
3026  if (!IsForDefinition)
3027  return GetOrCreateMultiVersionResolver(GD, Ty, FD);
3028  }
3029  }
3030 
3031  // Lookup the entry, lazily creating it if necessary.
3032  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
3033  if (Entry) {
3034  if (WeakRefReferences.erase(Entry)) {
3035  const FunctionDecl *FD = cast_or_null<FunctionDecl>(D);
3036  if (FD && !FD->hasAttr<WeakAttr>())
3037  Entry->setLinkage(llvm::Function::ExternalLinkage);
3038  }
3039 
3040  // Handle dropped DLL attributes.
3041  if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>()) {
3042  Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
3043  setDSOLocal(Entry);
3044  }
3045 
3046  // If there are two attempts to define the same mangled name, issue an
3047  // error.
3048  if (IsForDefinition && !Entry->isDeclaration()) {
3049  GlobalDecl OtherGD;
3050  // Checking that GD is not yet in DiagnosedConflictingDefinitions is required
3051  // to make sure that we issue an error only once.
3052  if (lookupRepresentativeDecl(MangledName, OtherGD) &&
3053  (GD.getCanonicalDecl().getDecl() !=
3054  OtherGD.getCanonicalDecl().getDecl()) &&
3055  DiagnosedConflictingDefinitions.insert(GD).second) {
3056  getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name)
3057  << MangledName;
3058  getDiags().Report(OtherGD.getDecl()->getLocation(),
3059  diag::note_previous_definition);
3060  }
3061  }
3062 
3063  if ((isa<llvm::Function>(Entry) || isa<llvm::GlobalAlias>(Entry)) &&
3064  (Entry->getType()->getElementType() == Ty)) {
3065  return Entry;
3066  }
3067 
3068  // Make sure the result is of the correct type.
3069  // (If the function is requested for a definition, we always need to create a new
3070  // function, not just return a bitcast.)
3071  if (!IsForDefinition)
3072  return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo());
3073  }
3074 
3075  // This function doesn't have a complete type (for example, the return
3076  // type is an incomplete struct). Use a fake type instead, and make
3077  // sure not to try to set attributes.
3078  bool IsIncompleteFunction = false;
3079 
3080  llvm::FunctionType *FTy;
3081  if (isa<llvm::FunctionType>(Ty)) {
3082  FTy = cast<llvm::FunctionType>(Ty);
3083  } else {
3084  FTy = llvm::FunctionType::get(VoidTy, false);
3085  IsIncompleteFunction = true;
3086  }
3087 
3088  llvm::Function *F =
3089  llvm::Function::Create(FTy, llvm::Function::ExternalLinkage,
3090  Entry ? StringRef() : MangledName, &getModule());
3091 
3092  // If we already created a function with the same mangled name (but different
3093  // type) before, take its name and add it to the list of functions to be
3094  // replaced with F at the end of CodeGen.
3095  //
3096  // This happens if there is a prototype for a function (e.g. "int f()") and
3097  // then a definition of a different type (e.g. "int f(int x)").
3098  if (Entry) {
3099  F->takeName(Entry);
3100 
3101  // This might be an implementation of a function without a prototype, in
3102  // which case, try to do special replacement of calls which match the new
3103  // prototype. The really key thing here is that we also potentially drop
3104  // arguments from the call site so as to make a direct call, which makes the
3105  // inliner happier and suppresses a number of optimizer warnings (!) about
3106  // dropping arguments.
3107  if (!Entry->use_empty()) {
3108  ReplaceUsesOfNonProtoTypeWithRealFunction(Entry, F);
3109  Entry->removeDeadConstantUsers();
3110  }
3111 
3112  llvm::Constant *BC = llvm::ConstantExpr::getBitCast(
3113  F, Entry->getType()->getElementType()->getPointerTo());
3114  addGlobalValReplacement(Entry, BC);
3115  }
3116 
3117  assert(F->getName() == MangledName && "name was uniqued!");
3118  if (D)
3119  SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk);
3120  if (ExtraAttrs.hasAttributes(llvm::AttributeList::FunctionIndex)) {
3121  llvm::AttrBuilder B(ExtraAttrs, llvm::AttributeList::FunctionIndex);
3122  F->addAttributes(llvm::AttributeList::FunctionIndex, B);
3123  }
3124 
3125  if (!DontDefer) {
3126  // All MSVC dtors other than the base dtor are linkonce_odr and delegate to
3127  // each other bottoming out with the base dtor. Therefore we emit non-base
3128  // dtors on usage, even if there is no dtor definition in the TU.
3129  if (D && isa<CXXDestructorDecl>(D) &&
3130  getCXXABI().useThunkForDtorVariant(cast<CXXDestructorDecl>(D),
3131  GD.getDtorType()))
3132  addDeferredDeclToEmit(GD);
3133 
3134  // This is the first use or definition of a mangled name. If there is a
3135  // deferred decl with this name, remember that we need to emit it at the end
3136  // of the file.
3137  auto DDI = DeferredDecls.find(MangledName);
3138  if (DDI != DeferredDecls.end()) {
3139  // Move the potentially referenced deferred decl to the
3140  // DeferredDeclsToEmit list, and remove it from DeferredDecls (since we
3141  // don't need it anymore).
3142  addDeferredDeclToEmit(DDI->second);
3143  DeferredDecls.erase(DDI);
3144 
3145  // Otherwise, there are cases we have to worry about where we're
3146  // using a declaration for which we must emit a definition but where
3147  // we might not find a top-level definition:
3148  // - member functions defined inline in their classes
3149  // - friend functions defined inline in some class
3150  // - special member functions with implicit definitions
3151  // If we ever change our AST traversal to walk into class methods,
3152  // this will be unnecessary.
3153  //
3154  // We also don't emit a definition for a function if it's going to be an
3155  // entry in a vtable, unless it's already marked as used.
3156  } else if (getLangOpts().CPlusPlus && D) {
3157  // Look for a declaration that's lexically in a record.
3158  for (const auto *FD = cast<FunctionDecl>(D)->getMostRecentDecl(); FD;
3159  FD = FD->getPreviousDecl()) {
3160  if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
3161  if (FD->doesThisDeclarationHaveABody()) {
3162  addDeferredDeclToEmit(GD.getWithDecl(FD));
3163  break;
3164  }
3165  }
3166  }
3167  }
3168  }
3169 
3170  // Make sure the result is of the requested type.
3171  if (!IsIncompleteFunction) {
3172  assert(F->getType()->getElementType() == Ty);
3173  return F;
3174  }
3175 
3176  llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
3177  return llvm::ConstantExpr::getBitCast(F, PTy);
3178 }
3179 
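For illustration (this snippet is not part of the file): the "prototype then different definition" replacement path above is exercised by C input along these lines, where the later definition takes the name of the earlier declaration and the old function is queued for replacement.

    int f();                      /* unprototyped declaration                      */
    int g(void) { return f(); }   /* call emitted against the first llvm::Function */
    int f(int x) { return x; }    /* definition with a different type              */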
3180 /// GetAddrOfFunction - Return the address of the given function. If Ty is
3181 /// non-null, then this function will use the specified type if it has to
3182 /// create it (this occurs when we see a definition of the function).
3183 llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
3184  llvm::Type *Ty,
3185  bool ForVTable,
3186  bool DontDefer,
3187  ForDefinition_t IsForDefinition) {
3188  // If there was no specific requested type, just convert it now.
3189  if (!Ty) {
3190  const auto *FD = cast<FunctionDecl>(GD.getDecl());
3191  Ty = getTypes().ConvertType(FD->getType());
3192  }
3193 
3194  // Devirtualized destructor calls may come through here instead of via
3195  // getAddrOfCXXStructor. Make sure we use the MS ABI base destructor instead
3196  // of the complete destructor when necessary.
3197  if (const auto *DD = dyn_cast<CXXDestructorDecl>(GD.getDecl())) {
3198  if (getTarget().getCXXABI().isMicrosoft() &&
3199  GD.getDtorType() == Dtor_Complete &&
3200  DD->getParent()->getNumVBases() == 0)
3201  GD = GlobalDecl(DD, Dtor_Base);
3202  }
3203 
3204  StringRef MangledName = getMangledName(GD);
3205  return GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer,
3206  /*IsThunk=*/false, llvm::AttributeList(),
3207  IsForDefinition);
3208 }
3209 
3210 static const FunctionDecl *
3211 GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) {
3212  TranslationUnitDecl *TUDecl = C.getTranslationUnitDecl();
3213  DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
3214 
3215  IdentifierInfo &CII = C.Idents.get(Name);
3216  for (const auto &Result : DC->lookup(&CII))
3217  if (const auto FD = dyn_cast<FunctionDecl>(Result))
3218  return FD;
3219 
3220  if (!C.getLangOpts().CPlusPlus)
3221  return nullptr;
3222 
3223  // Demangle the premangled name from getTerminateFn()
3224  IdentifierInfo &CXXII =
3225  (Name == "_ZSt9terminatev" || Name == "?terminate@@YAXXZ")
3226  ? C.Idents.get("terminate")
3227  : C.Idents.get(Name);
3228 
3229  for (const auto &N : {"__cxxabiv1", "std"}) {
3230  IdentifierInfo &NS = C.Idents.get(N);
3231  for (const auto &Result : DC->lookup(&NS)) {
3232  NamespaceDecl *ND = dyn_cast<NamespaceDecl>(Result);
3233  if (auto LSD = dyn_cast<LinkageSpecDecl>(Result))
3234  for (const auto &Result : LSD->lookup(&NS))
3235  if ((ND = dyn_cast<NamespaceDecl>(Result)))
3236  break;
3237 
3238  if (ND)
3239  for (const auto &Result : ND->lookup(&CXXII))
3240  if (const auto *FD = dyn_cast<FunctionDecl>(Result))
3241  return FD;
3242  }
3243  }
3244 
3245  return nullptr;
3246 }
3247 
3248 /// CreateRuntimeFunction - Create a new runtime function with the specified
3249 /// type and name.
3250 llvm::FunctionCallee
3251 CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
3252  llvm::AttributeList ExtraAttrs,
3253  bool Local) {
3254  llvm::Constant *C =
3255  GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
3256  /*DontDefer=*/false, /*IsThunk=*/false,
3257  ExtraAttrs);
3258 
3259  if (auto *F = dyn_cast<llvm::Function>(C)) {
3260  if (F->empty()) {
3261  F->setCallingConv(getRuntimeCC());
3262 
3263  // In Windows Itanium environments, try to mark runtime functions
3264  // dllimport. For Mingw and MSVC, don't. We don't really know if the user
3265  // will link their standard library statically or dynamically. Marking
3266  // functions imported when they are not imported can cause linker errors
3267  // and warnings.
3268  if (!Local && getTriple().isWindowsItaniumEnvironment() &&
3269  !getCodeGenOpts().LTOVisibilityPublicStd) {
3270  const FunctionDecl *FD = GetRuntimeFunctionDecl(Context, Name);
3271  if (!FD || FD->hasAttr<DLLImportAttr>()) {
3272  F->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
3273  F->setLinkage(llvm::GlobalValue::ExternalLinkage);
3274  }
3275  }
3276  setDSOLocal(F);
3277  }
3278  }
3279 
3280  return {FTy, C};
3281 }
3282 
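As a rough usage sketch (the helper name is invented purely for illustration, and CGM stands for a CodeGenModule reference), a CodeGen component would typically request a runtime helper like this:

    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, {CGM.Int8PtrTy}, /*isVarArg=*/false);
    llvm::FunctionCallee Helper =
        CGM.CreateRuntimeFunction(FTy, "__example_runtime_helper");
    // Helper.getFunctionType() / Helper.getCallee() are then handed to the
    // IRBuilder when the call is emitted.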
3283 /// isTypeConstant - Determine whether an object of this type can be emitted
3284 /// as a constant.
3285 ///
3286 /// If ExcludeCtor is true, the duration when the object's constructor runs
3287 /// will not be considered. The caller will need to verify that the object is
3288 /// not written to during its construction.
3289 bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) {
3290  if (!Ty.isConstant(Context) && !Ty->isReferenceType())
3291  return false;
3292 
3293  if (Context.getLangOpts().CPlusPlus) {
3294  if (const CXXRecordDecl *Record
3295  = Context.getBaseElementType(Ty)->getAsCXXRecordDecl())
3296  return ExcludeCtor && !Record->hasMutableFields() &&
3297  Record->hasTrivialDestructor();
3298  }
3299 
3300  return true;
3301 }
3302 
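For example (illustrative C++ input): with ExcludeCtor set, a const object whose type has no mutable fields and a trivial destructor is treated as constant, while a mutable field disqualifies it.

    struct Plain   { int x; };
    struct Counted { mutable int hits; };
    extern const Plain   p;   // type of p with ExcludeCtor=true -> true
    extern const Counted c;   // mutable member -> false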
3303 /// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
3304 /// create and return an llvm GlobalVariable with the specified type. If there
3305 /// is something in the module with the specified name, return it potentially
3306 /// bitcasted to the right type.
3307 ///
3308 /// If D is non-null, it specifies a decl that corresponds to this. This is used
3309 /// to set the attributes on the global when it is first created.
3310 ///
3311 /// If IsForDefinition is true, it is guaranteed that an actual global with
3312 /// type Ty will be returned, not conversion of a variable with the same
3313 /// mangled name but some other type.
3314 llvm::Constant *
3315 CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
3316  llvm::PointerType *Ty,
3317  const VarDecl *D,
3318  ForDefinition_t IsForDefinition) {
3319  // Lookup the entry, lazily creating it if necessary.
3320  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
3321  if (Entry) {
3322  if (WeakRefReferences.erase(Entry)) {
3323  if (D && !D->hasAttr<WeakAttr>())
3324  Entry->setLinkage(llvm::Function::ExternalLinkage);
3325  }
3326 
3327  // Handle dropped DLL attributes.
3328  if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>())
3329  Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
3330 
3331  if (LangOpts.OpenMP && !LangOpts.OpenMPSimd && D)
3332  getOpenMPRuntime().registerTargetGlobalVariable(D, Entry);
3333 
3334  if (Entry->getType() == Ty)
3335  return Entry;
3336 
3337  // If there are two attempts to define the same mangled name, issue an
3338  // error.
3339  if (IsForDefinition && !Entry->isDeclaration()) {
3340  GlobalDecl OtherGD;
3341  const VarDecl *OtherD;
3342 
3343  // Checking that D is not yet in DiagnosedConflictingDefinitions is
3344  // required to make sure that we issue the error only once.
3345  if (D && lookupRepresentativeDecl(MangledName, OtherGD) &&
3346  (D->getCanonicalDecl() != OtherGD.getCanonicalDecl().getDecl()) &&
3347  (OtherD = dyn_cast<VarDecl>(OtherGD.getDecl())) &&
3348  OtherD->hasInit() &&
3349  DiagnosedConflictingDefinitions.insert(D).second) {
3350  getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name)
3351  << MangledName;
3352  getDiags().Report(OtherGD.getDecl()->getLocation(),
3353  diag::note_previous_definition);
3354  }
3355  }
3356 
3357  // Make sure the result is of the correct type.
3358  if (Entry->getType()->getAddressSpace() != Ty->getAddressSpace())
3359  return llvm::ConstantExpr::getAddrSpaceCast(Entry, Ty);
3360 
3361  // (If global is requested for a definition, we always need to create a new
3362  // global, not just return a bitcast.)
3363  if (!IsForDefinition)
3364  return llvm::ConstantExpr::getBitCast(Entry, Ty);
3365  }
3366 
3367  auto AddrSpace = GetGlobalVarAddressSpace(D);
3368  auto TargetAddrSpace = getContext().getTargetAddressSpace(AddrSpace);
3369 
3370  auto *GV = new llvm::GlobalVariable(
3371  getModule(), Ty->getElementType(), false,
3372  llvm::GlobalValue::ExternalLinkage, nullptr, MangledName, nullptr,
3373  llvm::GlobalVariable::NotThreadLocal, TargetAddrSpace);
3374 
3375  // If we already created a global with the same mangled name (but different
3376  // type) before, take its name and remove it from its parent.
3377  if (Entry) {
3378  GV->takeName(Entry);
3379 
3380  if (!Entry->use_empty()) {
3381  llvm::Constant *NewPtrForOldDecl =
3382  llvm::ConstantExpr::getBitCast(GV, Entry->getType());
3383  Entry->replaceAllUsesWith(NewPtrForOldDecl);
3384  }
3385 
3386  Entry->eraseFromParent();
3387  }
3388 
3389  // This is the first use or definition of a mangled name. If there is a
3390  // deferred decl with this name, remember that we need to emit it at the end
3391  // of the file.
3392  auto DDI = DeferredDecls.find(MangledName);
3393  if (DDI != DeferredDecls.end()) {
3394  // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
3395  // list, and remove it from DeferredDecls (since we don't need it anymore).
3396  addDeferredDeclToEmit(DDI->second);
3397  DeferredDecls.erase(DDI);
3398  }
3399 
3400  // Handle things which are present even on external declarations.
3401  if (D) {
3402  if (LangOpts.OpenMP && !LangOpts.OpenMPSimd)
3403  getOpenMPRuntime().registerTargetGlobalVariable(D, GV);
3404 
3405  // FIXME: This code is overly simple and should be merged with other global
3406  // handling.
3407  GV->setConstant(isTypeConstant(D->getType(), false));
3408 
3409  GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
3410 
3411  setLinkageForGV(GV, D);
3412 
3413  if (D->getTLSKind()) {
3414  if (D->getTLSKind() == VarDecl::TLS_Dynamic)
3415  CXXThreadLocals.push_back(D);
3416  setTLSMode(GV, *D);
3417  }
3418 
3419  setGVProperties(GV, D);
3420 
3421  // If required by the ABI, treat declarations of static data members with
3422  // inline initializers as definitions.
3423  if (getContext().isMSStaticDataMemberInlineDefinition(D)) {
3424  EmitGlobalVarDefinition(D);
3425  }
3426 
3427  // Emit section information for extern variables.
3428  if (D->hasExternalStorage()) {
3429  if (const SectionAttr *SA = D->getAttr<SectionAttr>())
3430  GV->setSection(SA->getName());
3431  }
3432 
3433  // Handle XCore specific ABI requirements.
3434  if (getTriple().getArch() == llvm::Triple::xcore &&
3436  D->getType().isConstant(Context) &&
3438  GV->setSection(".cp.rodata");
3439 
3440  // Check if we have a const declaration with an initializer; we may be
3441  // able to emit it as available_externally to expose its value to the
3442  // optimizer.
3443  if (Context.getLangOpts().CPlusPlus && GV->hasExternalLinkage() &&
3444  D->getType().isConstQualified() && !GV->hasInitializer() &&
3445  !D->hasDefinition() && D->hasInit() && !D->hasAttr<DLLImportAttr>()) {
3446  const auto *Record =
3447  Context.getBaseElementType(D->getType())->getAsCXXRecordDecl();
3448  bool HasMutableFields = Record && Record->hasMutableFields();
3449  if (!HasMutableFields) {
3450  const VarDecl *InitDecl;
3451  const Expr *InitExpr = D->getAnyInitializer(InitDecl);
3452  if (InitExpr) {
3453  ConstantEmitter emitter(*this);
3454  llvm::Constant *Init = emitter.tryEmitForInitializer(*InitDecl);
3455  if (Init) {
3456  auto *InitType = Init->getType();
3457  if (GV->getType()->getElementType() != InitType) {
3458  // The type of the initializer does not match the definition.
3459  // This happens when an initializer has a different type from
3460  // the type of the global (because of padding at the end of a
3461  // structure for instance).
3462  GV->setName(StringRef());
3463  // Make a new global with the correct type, this is now guaranteed
3464  // to work.
3465  auto *NewGV = cast<llvm::GlobalVariable>(
3466  GetAddrOfGlobalVar(D, InitType, IsForDefinition));
3467 
3468  // Erase the old global, since it is no longer used.
3469  GV->eraseFromParent();
3470  GV = NewGV;
3471  } else {
3472  GV->setInitializer(Init);
3473  GV->setConstant(true);
3474  GV->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
3475  }
3476  emitter.finalize(GV);
3477  }
3478  }
3479  }
3480  }
3481  }
3482 
3483  LangAS ExpectedAS =
3484  D ? D->getType().getAddressSpace()
3485  : (LangOpts.OpenCL ? LangAS::opencl_global : LangAS::Default);
3486  assert(getContext().getTargetAddressSpace(ExpectedAS) ==
3487  Ty->getPointerAddressSpace());
3488  if (AddrSpace != ExpectedAS)
3489  return getTargetCodeGenInfo().performAddrSpaceCast(*this, GV, AddrSpace,
3490  ExpectedAS, Ty);
3491 
3492  if (GV->isDeclaration())
3493  getTargetCodeGenInfo().setTargetAttributes(D, GV, *this);
3494 
3495  return GV;
3496 }
3497 
3498 llvm::Constant *
3499 CodeGenModule::GetAddrOfGlobal(GlobalDecl GD,
3500  ForDefinition_t IsForDefinition) {
3501  const Decl *D = GD.getDecl();
3502  if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
3503  return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr,
3504  /*DontDefer=*/false, IsForDefinition);
3505  else if (isa<CXXMethodDecl>(D)) {
3506  auto FInfo = &getTypes().arrangeCXXMethodDeclaration(
3507  cast<CXXMethodDecl>(D));
3508  auto Ty = getTypes().GetFunctionType(*FInfo);
3509  return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
3510  IsForDefinition);
3511  } else if (isa<FunctionDecl>(D)) {
3512  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
3513  llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
3514  return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
3515  IsForDefinition);
3516  } else
3517  return GetAddrOfGlobalVar(cast<VarDecl>(D), /*Ty=*/nullptr,
3518  IsForDefinition);
3519 }
3520 
3521 llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable(
3522  StringRef Name, llvm::Type *Ty, llvm::GlobalValue::LinkageTypes Linkage,
3523  unsigned Alignment) {
3524  llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name);
3525  llvm::GlobalVariable *OldGV = nullptr;
3526 
3527  if (GV) {
3528  // Check if the variable has the right type.
3529  if (GV->getType()->getElementType() == Ty)
3530  return GV;
3531 
3532  // Because of C++ name mangling, the only way we can end up with an already
3533  // existing global with the same name is if it has been declared extern "C".
3534  assert(GV->isDeclaration() && "Declaration has wrong type!");
3535  OldGV = GV;
3536  }
3537 
3538  // Create a new variable.
3539  GV = new llvm::GlobalVariable(getModule(), Ty, /*isConstant=*/true,
3540  Linkage, nullptr, Name);
3541 
3542  if (OldGV) {
3543  // Replace occurrences of the old variable if needed.
3544  GV->takeName(OldGV);
3545 
3546  if (!OldGV->use_empty()) {
3547  llvm::Constant *NewPtrForOldDecl =
3548  llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3549  OldGV->replaceAllUsesWith(NewPtrForOldDecl);
3550  }
3551 
3552  OldGV->eraseFromParent();
3553  }
3554 
3555  if (supportsCOMDAT() && GV->isWeakForLinker() &&
3556  !GV->hasAvailableExternallyLinkage())
3557  GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
3558 
3559  GV->setAlignment(Alignment);
3560 
3561  return GV;
3562 }
3563 
3564 /// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
3565 /// given global variable. If Ty is non-null and if the global doesn't exist,
3566 /// then it will be created with the specified type instead of whatever the
3567 /// normal requested type would be. If IsForDefinition is true, it is guaranteed
3568 /// that an actual global with type Ty will be returned, not conversion of a
3569 /// variable with the same mangled name but some other type.
3570 llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
3571  llvm::Type *Ty,
3572  ForDefinition_t IsForDefinition) {
3573  assert(D->hasGlobalStorage() && "Not a global variable");
3574  QualType ASTTy = D->getType();
3575  if (!Ty)
3576  Ty = getTypes().ConvertTypeForMem(ASTTy);
3577 
3578  llvm::PointerType *PTy =
3579  llvm::PointerType::get(Ty, getContext().getTargetAddressSpace(ASTTy));
3580 
3581  StringRef MangledName = getMangledName(D);
3582  return GetOrCreateLLVMGlobal(MangledName, PTy, D, IsForDefinition);
3583 }
3584 
3585 /// CreateRuntimeVariable - Create a new runtime global variable with the
3586 /// specified type and name.
3587 llvm::Constant *
3588 CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
3589  StringRef Name) {
3590  auto PtrTy =
3591  getContext().getLangOpts().OpenCL
3592  ? llvm::PointerType::get(
3593  Ty, getContext().getTargetAddressSpace(LangAS::opencl_global))
3594  : llvm::PointerType::getUnqual(Ty);
3595  auto *Ret = GetOrCreateLLVMGlobal(Name, PtrTy, nullptr);
3596  setDSOLocal(cast<llvm::GlobalValue>(Ret->stripPointerCasts()));
3597  return Ret;
3598 }
3599 
3600 void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
3601  assert(!D->getInit() && "Cannot emit definite definitions here!");
3602 
3603  StringRef MangledName = getMangledName(D);
3604  llvm::GlobalValue *GV = GetGlobalValue(MangledName);
3605 
3606  // We already have a definition, not declaration, with the same mangled name.
3607  // Emitting the declaration is not required (and would actually overwrite the
3608  // emitted definition).
3609  if (GV && !GV->isDeclaration())
3610  return;
3611 
3612  // If we have not seen a reference to this variable yet, place it into the
3613  // deferred declarations table to be emitted if needed later.
3614  if (!MustBeEmitted(D) && !GV) {
3615  DeferredDecls[MangledName] = D;
3616  return;
3617  }
3618 
3619  // The tentative definition is the only definition.
3620  EmitGlobalVarDefinition(D);
3621 }
3622 
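Illustrative C input: both lines below are tentative definitions, and a single zero-initialized global is emitted for them at the end of the translation unit.

    int counter;   /* tentative definition: no initializer              */
    int counter;   /* still tentative; one { 0 }-initialized global results */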
3623 CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
3624  return Context.toCharUnitsFromBits(
3625  getDataLayout().getTypeStoreSizeInBits(Ty));
3626 }
3627 
3628 LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) {
3629  LangAS AddrSpace = LangAS::Default;
3630  if (LangOpts.OpenCL) {
3631  AddrSpace = D ? D->getType().getAddressSpace() : LangAS::opencl_global;
3632  assert(AddrSpace == LangAS::opencl_global ||
3633  AddrSpace == LangAS::opencl_constant ||
3634  AddrSpace == LangAS::opencl_local ||
3635  AddrSpace >= LangAS::FirstTargetAddressSpace);
3636  return AddrSpace;
3637  }
3638 
3639  if (LangOpts.CUDA && LangOpts.CUDAIsDevice) {
3640  if (D && D->hasAttr<CUDAConstantAttr>())
3641  return LangAS::cuda_constant;
3642  else if (D && D->hasAttr<CUDASharedAttr>())
3643  return LangAS::cuda_shared;
3644  else if (D && D->hasAttr<CUDADeviceAttr>())
3645  return LangAS::cuda_device;
3646  else if (D && D->getType().isConstQualified())
3647  return LangAS::cuda_constant;
3648  else
3649  return LangAS::cuda_device;
3650  }
3651 
3652  if (LangOpts.OpenMP) {
3653  LangAS AS;
3654  if (OpenMPRuntime->hasAllocateAttributeForGlobalVar(D, AS))
3655  return AS;
3656  }
3657  return getTargetCodeGenInfo().getGlobalVarAddressSpace(*this, D);
3658 }
3659 
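A rough mapping for CUDA device-side compilation (illustrative CUDA source, not part of this file):

    __device__   int d;     // -> LangAS::cuda_device
    __constant__ int c;     // -> LangAS::cuda_constant
    const int    k = 3;     // no attribute, but const-qualified -> cuda_constant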
3660 LangAS CodeGenModule::getStringLiteralAddressSpace() const {
3661  // OpenCL v1.2 s6.5.3: a string literal is in the constant address space.
3662  if (LangOpts.OpenCL)
3663  return LangAS::opencl_constant;
3664  if (auto AS = getTarget().getConstantAddressSpace())
3665  return AS.getValue();
3666  return LangAS::Default;
3667 }
3668 
3669 // In address space agnostic languages, string literals are in default address
3670 // space in AST. However, certain targets (e.g. amdgcn) request them to be
3671 // emitted in constant address space in LLVM IR. To be consistent with other
3672 // parts of AST, string literal global variables in constant address space
3673 // need to be cast to the default address space before being put into the
3674 // address map and referenced by other parts of CodeGen.
3675 // In OpenCL, string literals are in constant address space in AST, therefore
3676 // they should not be cast to the default address space.
3677 static llvm::Constant *
3678 castStringLiteralToDefaultAddressSpace(CodeGenModule &CGM,
3679  llvm::GlobalVariable *GV) {
3680  llvm::Constant *Cast = GV;
3681  if (!CGM.getLangOpts().OpenCL) {
3682  if (auto AS = CGM.getTarget().getConstantAddressSpace()) {
3683  if (AS != LangAS::Default)
3684  Cast = CGM.getTargetCodeGenInfo().performAddrSpaceCast(
3685  CGM, GV, AS.getValue(), LangAS::Default,
3686  GV->getValueType()->getPointerTo(
3687  CGM.getContext().getTargetAddressSpace(LangAS::Default)));
3688  }
3689  }
3690  return Cast;
3691 }
3692 
3693 template<typename SomeDecl>
3694 void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D,
3695  llvm::GlobalValue *GV) {
3696  if (!getLangOpts().CPlusPlus)
3697  return;
3698 
3699  // Must have 'used' attribute, or else inline assembly can't rely on
3700  // the name existing.
3701  if (!D->template hasAttr<UsedAttr>())
3702  return;
3703 
3704  // Must have internal linkage and an ordinary name.
3705  if (!D->getIdentifier() || D->getFormalLinkage() != InternalLinkage)
3706  return;
3707 
3708  // Must be in an extern "C" context. Entities declared directly within
3709  // a record are not extern "C" even if the record is in such a context.
3710  const SomeDecl *First = D->getFirstDecl();
3711  if (First->getDeclContext()->isRecord() || !First->isInExternCContext())
3712  return;
3713 
3714  // OK, this is an internal linkage entity inside an extern "C" linkage
3715  // specification. Make a note of that so we can give it the "expected"
3716  // mangled name if nothing else is using that name.
3717  std::pair<StaticExternCMap::iterator, bool> R =
3718  StaticExternCValues.insert(std::make_pair(D->getIdentifier(), GV));
3719 
3720  // If we have multiple internal linkage entities with the same name
3721  // in extern "C" regions, none of them gets that name.
3722  if (!R.second)
3723  R.first->second = nullptr;
3724 }
3725 
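For example (illustrative C++ input): an internal-linkage variable defined inside an extern "C" block and marked used keeps its unmangled name, provided no other global claims that name.

    extern "C" {
      __attribute__((used)) static int counter_impl = 0;  // e.g. referenced from inline asm
    }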
3726 static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) {
3727  if (!CGM.supportsCOMDAT())
3728  return false;
3729 
3730  // Do not set COMDAT attribute for CUDA/HIP stub functions to prevent
3731  // them being "merged" by the COMDAT Folding linker optimization.
3732  if (D.hasAttr<CUDAGlobalAttr>())
3733  return false;
3734 
3735  if (D.hasAttr<SelectAnyAttr>())
3736  return true;
3737 
3738  GVALinkage Linkage;
3739  if (auto *VD = dyn_cast<VarDecl>(&D))
3740  Linkage = CGM.getContext().GetGVALinkageForVariable(VD);
3741  else
3742  Linkage = CGM.getContext().GetGVALinkageForFunction(cast<FunctionDecl>(&D));
3743 
3744  switch (Linkage) {
3745  case GVA_Internal:
3746  case GVA_AvailableExternally:
3747  case GVA_StrongExternal:
3748  return false;
3749  case GVA_DiscardableODR:
3750  case GVA_StrongODR:
3751  return true;
3752  }
3753  llvm_unreachable("No such linkage");
3754 }
3755 
3756 void CodeGenModule::maybeSetTrivialComdat(const Decl &D,
3757  llvm::GlobalObject &GO) {
3758  if (!shouldBeInCOMDAT(*this, D))
3759  return;
3760  GO.setComdat(TheModule.getOrInsertComdat(GO.getName()));
3761 }
3762 
3763 /// Pass IsTentative as true if you want to create a tentative definition.
3764 void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
3765  bool IsTentative) {
3766  // OpenCL global variables of sampler type are translated to function calls,
3767  // therefore they do not need to be emitted here.
3768  QualType ASTTy = D->getType();
3769  if (getLangOpts().OpenCL && ASTTy->isSamplerT())
3770  return;
3771 
3772  // If this is an OpenMP device, check if it is legal to emit this global
3773  // normally.
3774  if (LangOpts.OpenMPIsDevice && OpenMPRuntime &&
3775  OpenMPRuntime->emitTargetGlobalVariable(D))
3776  return;
3777 
3778  llvm::Constant *Init = nullptr;
3779  CXXRecordDecl *RD = ASTTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
3780  bool NeedsGlobalCtor = false;
3781  bool NeedsGlobalDtor = RD && !RD->hasTrivialDestructor();
3782 
3783  const VarDecl *InitDecl;
3784  const Expr *InitExpr = D->getAnyInitializer(InitDecl);
3785 
3786  Optional<ConstantEmitter> emitter;
3787 
3788  // CUDA E.2.4.1 "__shared__ variables cannot have an initialization
3789  // as part of their declaration." Sema has already checked for
3790  // error cases, so we just need to set Init to UndefValue.
3791  bool IsCUDASharedVar =
3792  getLangOpts().CUDAIsDevice && D->hasAttr<CUDASharedAttr>();
3793  // Shadows of initialized device-side global variables are also left
3794  // undefined.
3795  bool IsCUDAShadowVar =
3796  !getLangOpts().CUDAIsDevice &&
3797  (D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
3798  D->hasAttr<CUDASharedAttr>());
3799  // HIP pinned shadows of initialized host-side global variables are also
3800  // left undefined.
3801  bool IsHIPPinnedShadowVar =
3802  getLangOpts().CUDAIsDevice && D->hasAttr<HIPPinnedShadowAttr>();
3803  if (getLangOpts().CUDA &&
3804  (IsCUDASharedVar || IsCUDAShadowVar || IsHIPPinnedShadowVar))
3805  Init = llvm::UndefValue::get(getTypes().ConvertType(ASTTy));
3806  else if (!InitExpr) {
3807  // This is a tentative definition; tentative definitions are
3808  // implicitly initialized with { 0 }.
3809  //
3810  // Note that tentative definitions are only emitted at the end of
3811  // a translation unit, so they should never have incomplete
3812  // type. In addition, EmitTentativeDefinition makes sure that we
3813  // never attempt to emit a tentative definition if a real one
3814  // exists. A use may still exist, however, so we still may need
3815  // to do a RAUW.
3816  assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
3817  Init = EmitNullConstant(D->getType());
3818  } else {
3819  initializedGlobalDecl = GlobalDecl(D);
3820  emitter.emplace(*this);
3821  Init = emitter->tryEmitForInitializer(*InitDecl);
3822 
3823  if (!Init) {
3824  QualType T = InitExpr->getType();
3825  if (D->getType()->isReferenceType())
3826  T = D->getType();
3827 
3828  if (getLangOpts().CPlusPlus) {
3829  Init = EmitNullConstant(T);
3830  NeedsGlobalCtor = true;
3831  } else {
3832  ErrorUnsupported(D, "static initializer");
3833  Init = llvm::UndefValue::get(getTypes().ConvertType(T));
3834  }
3835  } else {
3836  // We don't need an initializer, so remove the entry for the delayed
3837  // initializer position (just in case this entry was delayed) if we
3838  // also don't need to register a destructor.
3839  if (getLangOpts().CPlusPlus && !NeedsGlobalDtor)
3840  DelayedCXXInitPosition.erase(D);
3841  }
3842  }
3843 
3844  llvm::Type* InitType = Init->getType();
3845  llvm::Constant *Entry =
3846  GetAddrOfGlobalVar(D, InitType, ForDefinition_t(!IsTentative));
3847 
3848  // Strip off a bitcast if we got one back.
3849  if (auto *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
3850  assert(CE->getOpcode() == llvm::Instruction::BitCast ||
3851  CE->getOpcode() == llvm::Instruction::AddrSpaceCast ||
3852  // All zero index gep.
3853  CE->getOpcode() == llvm::Instruction::GetElementPtr);
3854  Entry = CE->getOperand(0);
3855  }
3856 
3857  // Entry is now either a Function or GlobalVariable.
3858  auto *GV = dyn_cast<llvm::GlobalVariable>(Entry);
3859 
3860  // We have a definition after a declaration with the wrong type.
3861  // We must make a new GlobalVariable* and update everything that used OldGV
3862  // (a declaration or tentative definition) with the new GlobalVariable*
3863  // (which will be a definition).
3864  //
3865  // This happens if there is a prototype for a global (e.g.
3866  // "extern int x[];") and then a definition of a different type (e.g.
3867  // "int x[10];"). This also happens when an initializer has a different type
3868  // from the type of the global (this happens with unions).
3869  if (!GV || GV->getType()->getElementType() != InitType ||
3870  GV->getType()->getAddressSpace() !=
3871  getContext().getTargetAddressSpace(GetGlobalVarAddressSpace(D))) {
3872 
3873  // Move the old entry aside so that we'll create a new one.
3874  Entry->setName(StringRef());
3875 
3876  // Make a new global with the correct type, this is now guaranteed to work.
3877  GV = cast<llvm::GlobalVariable>(
3878  GetAddrOfGlobalVar(D, InitType, ForDefinition_t(!IsTentative)));
3879 
3880  // Replace all uses of the old global with the new global
3881  llvm::Constant *NewPtrForOldDecl =
3882  llvm::ConstantExpr::getBitCast(GV, Entry->getType());
3883  Entry->replaceAllUsesWith(NewPtrForOldDecl);
3884 
3885  // Erase the old global, since it is no longer used.
3886  cast<llvm::GlobalValue>(Entry)->eraseFromParent();
3887  }
3888 
3889  MaybeHandleStaticInExternC(D, GV);
3890 
3891  if (D->hasAttr<AnnotateAttr>())
3892  AddGlobalAnnotations(D, GV);
3893 
3894  // Set the llvm linkage type as appropriate.
3895  llvm::GlobalValue::LinkageTypes Linkage =
3896  getLLVMLinkageVarDefinition(D, GV->isConstant());
3897 
3898  // CUDA B.2.1 "The __device__ qualifier declares a variable that resides on
3899  // the device. [...]"
3900  // CUDA B.2.2 "The __constant__ qualifier, optionally used together with
3901  // __device__, declares a variable that: [...]
3902  // Is accessible from all the threads within the grid and from the host
3903  // through the runtime library (cudaGetSymbolAddress() / cudaGetSymbolSize()
3904  // / cudaMemcpyToSymbol() / cudaMemcpyFromSymbol())."
3905  if (GV && LangOpts.CUDA) {
3906  if (LangOpts.CUDAIsDevice) {
3907  if (Linkage != llvm::GlobalValue::InternalLinkage &&
3908  (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>()))
3909  GV->setExternallyInitialized(true);
3910  } else {
3911  // Host-side shadows of external declarations of device-side
3912  // global variables become internal definitions. These have to
3913  // be internal in order to prevent name conflicts with global
3914  // host variables with the same name in different TUs.
3915  if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
3916  D->hasAttr<HIPPinnedShadowAttr>()) {
3917  Linkage = llvm::GlobalValue::InternalLinkage;
3918 
3919  // Shadow variables and their properties must be registered
3920  // with CUDA runtime.
3921  unsigned Flags = 0;
3922  if (!D->hasDefinition())
3923  Flags |= CGCUDARuntime::ExternDeviceVar;
3924  if (D->hasAttr<CUDAConstantAttr>())
3925  Flags |= CGCUDARuntime::ConstantDeviceVar;
3926  // Extern global variables will be registered in the TU where they are
3927  // defined.
3928  if (!D->hasExternalStorage())
3929  getCUDARuntime().registerDeviceVar(D, *GV, Flags);
3930  } else if (D->hasAttr<CUDASharedAttr>())
3931  // __shared__ variables are odd. Shadows do get created, but
3932  // they are not registered with the CUDA runtime, so they
3933  // can't really be used to access their device-side
3934  // counterparts. It's not clear yet whether it's nvcc's bug or
3935  // a feature, but we've got to do the same for compatibility.
3936  Linkage = llvm::GlobalValue::InternalLinkage;
3937  }
3938  }
3939 
3940  if (!IsHIPPinnedShadowVar)
3941  GV->setInitializer(Init);
3942  if (emitter) emitter->finalize(GV);
3943 
3944  // If it is safe to mark the global 'constant', do so now.
3945  GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
3946  isTypeConstant(D->getType(), true));
3947 
3948  // If it is in a read-only section, mark it 'constant'.
3949  if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
3950  const ASTContext::SectionInfo &SI = Context.SectionInfos[SA->getName()];
3951  if ((SI.SectionFlags & ASTContext::PSF_Write) == 0)
3952  GV->setConstant(true);
3953  }
3954 
3955  GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
3956 
3957 
3958  // On Darwin, if the normal linkage of a C++ thread_local variable is
3959  // LinkOnce or Weak, we keep the normal linkage to prevent multiple
3960  // copies within a linkage unit; otherwise, the backing variable has
3961  // internal linkage and all accesses should just be calls to the
3962  // Itanium-specified entry point, which has the normal linkage of the
3963  // variable. This is to preserve the ability to change the implementation
3964  // behind the scenes.
3965  if (!D->isStaticLocal() && D->getTLSKind() == VarDecl::TLS_Dynamic &&
3966  Context.getTargetInfo().getTriple().isOSDarwin() &&
3967  !llvm::GlobalVariable::isLinkOnceLinkage(Linkage) &&
3968  !llvm::GlobalVariable::isWeakLinkage(Linkage))
3969  Linkage = llvm::GlobalValue::InternalLinkage;
3970 
3971  GV->setLinkage(Linkage);
3972  if (D->hasAttr<DLLImportAttr>())
3973  GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
3974  else if (D->hasAttr<DLLExportAttr>())
3975  GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
3976  else
3977  GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
3978 
3979  if (Linkage == llvm::GlobalVariable::CommonLinkage) {
3980  // common vars aren't constant even if declared const.
3981  GV->setConstant(false);
3982  // Tentative definitions of global variables may be initialized with
3983  // non-zero null pointers. In this case they should have weak linkage,
3984  // since common linkage requires a zero initializer and no explicit
3985  // section, and therefore cannot have a non-zero initial value.
3986  if (!GV->getInitializer()->isNullValue())
3987  GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
3988  }
3989 
3990  setNonAliasAttributes(D, GV);
3991 
3992  if (D->getTLSKind() && !GV->isThreadLocal()) {
3993  if (D->getTLSKind() == VarDecl::TLS_Dynamic)
3994  CXXThreadLocals.push_back(D);
3995  setTLSMode(GV, *D);
3996  }
3997 
3998  maybeSetTrivialComdat(*D, *GV);
3999 
4000  // Emit the initializer function if necessary.
4001  if (NeedsGlobalCtor || NeedsGlobalDtor)
4002  EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
4003 
4004  SanitizerMD->reportGlobalToASan(GV, *D, NeedsGlobalCtor);
4005 
4006  // Emit global variable debug information.
4007  if (CGDebugInfo *DI = getModuleDebugInfo())
4008  if (getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo)
4009  DI->EmitGlobalVariable(GV, D);
4010 }
4011 
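The declaration/definition type mismatch described above corresponds to input such as (illustrative, valid in both C and C++):

    extern int x[];   // declaration with incomplete array type
    int *p = x;       // use of the old global
    int x[10];        // definition: a new GlobalVariable of the right type replaces it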
4012 static bool isVarDeclStrongDefinition(const ASTContext &Context,
4013  CodeGenModule &CGM, const VarDecl *D,
4014  bool NoCommon) {
4015  // Don't give variables common linkage if -fno-common was specified unless it
4016  // was overridden by a NoCommon attribute.
4017  if ((NoCommon || D->hasAttr<NoCommonAttr>()) && !D->hasAttr<CommonAttr>())
4018  return true;
4019 
4020  // C11 6.9.2/2:
4021  // A declaration of an identifier for an object that has file scope without
4022  // an initializer, and without a storage-class specifier or with the
4023  // storage-class specifier static, constitutes a tentative definition.
4024  if (D->getInit() || D->hasExternalStorage())
4025  return true;
4026 
4027  // A variable cannot be both common and exist in a section.
4028  if (D->hasAttr<SectionAttr>())
4029  return true;
4030 
4031  // A variable cannot be both common and exist in a section.
4032  // We don't try to determine which is the right section in the front-end.
4033  // If no specialized section name is applicable, it will resort to default.
4034  if (D->hasAttr<PragmaClangBSSSectionAttr>() ||
4035  D->hasAttr<PragmaClangDataSectionAttr>() ||
4036  D->hasAttr<PragmaClangRodataSectionAttr>())
4037  return true;
4038 
4039  // Thread local vars aren't considered common linkage.
4040  if (D->getTLSKind())
4041  return true;
4042 
4043  // Tentative definitions marked with WeakImportAttr are true definitions.
4044  if (D->hasAttr<WeakImportAttr>())
4045  return true;
4046 
4047  // A variable cannot be both common and exist in a comdat.
4048  if (shouldBeInCOMDAT(CGM, *D))
4049  return true;
4050 
4051  // Declarations with a required alignment do not have common linkage in MSVC
4052  // mode.
4053  if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
4054  if (D->hasAttr<AlignedAttr>())
4055  return true;
4056  QualType VarType = D->getType();
4057  if (Context.isAlignmentRequired(VarType))
4058  return true;
4059 
4060  if (const auto *RT = VarType->getAs<RecordType>()) {
4061  const RecordDecl *RD = RT->getDecl();
4062  for (const FieldDecl *FD : RD->fields()) {
4063  if (FD->isBitField())
4064  continue;
4065  if (FD->hasAttr<AlignedAttr>())
4066  return true;
4067  if (Context.isAlignmentRequired(FD->getType()))
4068  return true;
4069  }
4070  }
4071  }
4072 
4073  // Microsoft's link.exe doesn't support alignments greater than 32 bytes for
4074  // common symbols, so symbols with greater alignment requirements cannot be
4075  // common.
4076  // Other COFF linkers (ld.bfd and LLD) support arbitrary power-of-two
4077  // alignments for common symbols via the aligncomm directive, so this
4078  // restriction only applies to MSVC environments.
4079  if (Context.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() &&
4080  Context.getTypeAlignIfKnown(D->getType()) >
4081  Context.toBits(CharUnits::fromQuantity(32)))
4082  return true;
4083 
4084  return false;
4085 }
4086 
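Illustrative C input for the checks above:

    int maybe_common;   /* tentative: eligible for common linkage (unless -fno-common) */
    int strong = 1;     /* has an initializer -> strong definition                     */
    __thread int tls;   /* thread-local -> never common                                */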
4087 llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
4088  const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable) {
4089  if (Linkage == GVA_Internal)
4090  return llvm::Function::InternalLinkage;
4091 
4092  if (D->hasAttr<WeakAttr>()) {
4093  if (IsConstantVariable)
4094  return llvm::GlobalVariable::WeakODRLinkage;
4095  else
4096  return llvm::GlobalVariable::WeakAnyLinkage;
4097  }
4098 
4099  if (const auto *FD = D->getAsFunction())
4100  if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally)
4101  return llvm::GlobalVariable::LinkOnceAnyLinkage;
4102 
4103  // We are guaranteed to have a strong definition somewhere else,
4104  // so we can use available_externally linkage.
4105  if (Linkage == GVA_AvailableExternally)
4106  return llvm::GlobalValue::AvailableExternallyLinkage;
4107 
4108  // Note that Apple's kernel linker doesn't support symbol
4109  // coalescing, so we need to avoid linkonce and weak linkages there.
4110  // Normally, this means we just map to internal, but for explicit
4111  // instantiations we'll map to external.
4112 
4113  // In C++, the compiler has to emit a definition in every translation unit
4114  // that references the function. We should use linkonce_odr because
4115  // a) if all references in this translation unit are optimized away, we
4116  // don't need to codegen it. b) if the function persists, it needs to be
4117  // merged with other definitions. c) C++ has the ODR, so we know the
4118  // definition is dependable.
4119  if (Linkage == GVA_DiscardableODR)
4120  return !Context.getLangOpts().AppleKext ? llvm::Function::LinkOnceODRLinkage
4121  : llvm::Function::InternalLinkage;
4122 
4123  // An explicit instantiation of a template has weak linkage, since
4124  // explicit instantiations can occur in multiple translation units
4125  // and must all be equivalent. However, we are not allowed to
4126  // throw away these explicit instantiations.
4127  //
4128  // We don't currently support CUDA device code spread out across multiple TUs,
4129  // so say that CUDA templates are either external (for kernels) or internal.
4130  // This lets llvm perform aggressive inter-procedural optimizations.
4131  if (Linkage == GVA_StrongODR) {
4132  if (Context.getLangOpts().AppleKext)
4133  return llvm::Function::ExternalLinkage;
4134  if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice)
4135  return D->hasAttr<CUDAGlobalAttr>() ? llvm::Function::ExternalLinkage
4136  : llvm::Function::InternalLinkage;
4137  return llvm::Function::WeakODRLinkage;
4138  }
4139 
4140  // C++ doesn't have tentative definitions and thus cannot have common
4141  // linkage.
4142  if (!getLangOpts().CPlusPlus && isa<VarDecl>(D) &&
4143  !isVarDeclStrongDefinition(Context, *this, cast<VarDecl>(D),
4144  CodeGenOpts.NoCommon))
4145  return llvm::GlobalVariable::CommonLinkage;
4146 
4147  // selectany symbols are externally visible, so use weak instead of
4148  // linkonce. MSVC optimizes away references to const selectany globals, so
4149  // all definitions should be the same and ODR linkage should be used.
4150  // http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx
4151  if (D->hasAttr<SelectAnyAttr>())
4152  return llvm::GlobalVariable::WeakODRLinkage;
4153 
4154  // Otherwise, we have strong external linkage.
4155  assert(Linkage == GVA_StrongExternal);
4156  return llvm::GlobalVariable::ExternalLinkage;
4157 }
4158 
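For example (illustrative C++ input, with the linkage this mapping would pick on a typical ELF target):

    inline int twice(int x) { return 2 * x; }   // GVA_DiscardableODR -> linkonce_odr
    template <class T> T ident(T v) { return v; }
    template int ident<int>(int);               // explicit instantiation definition:
                                                // GVA_StrongODR -> weak_odr
    static int hidden() { return 0; }           // GVA_Internal -> internal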
4159 llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageVarDefinition(
4160  const VarDecl *VD, bool IsConstant) {
4161  GVALinkage Linkage = getContext().GetGVALinkageForVariable(VD);
4162  return getLLVMLinkageForDeclarator(VD, Linkage, IsConstant);
4163 }
4164 
4165 /// Replace the uses of a function that was declared with a non-proto type.
4166 /// We want to silently drop extra arguments from call sites
4167 static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
4168  llvm::Function *newFn) {
4169  // Fast path.
4170  if (old->use_empty()) return;
4171 
4172  llvm::Type *newRetTy = newFn->getReturnType();
4173  SmallVector<llvm::Value *, 4> newArgs;
4174  SmallVector<llvm::OperandBundleDef, 1> newBundles;
4175 
4176  for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end();
4177  ui != ue; ) {
4178  llvm::Value::use_iterator use = ui++; // Increment before the use is erased.
4179  llvm::User *user = use->getUser();
4180 
4181  // Recognize and replace uses of bitcasts. Most calls to
4182  // unprototyped functions will use bitcasts.
4183  if (auto *bitcast = dyn_cast<llvm::ConstantExpr>(user)) {
4184  if (bitcast->getOpcode() == llvm::Instruction::BitCast)
4185  replaceUsesOfNonProtoConstant(bitcast, newFn);
4186  continue;
4187  }
4188 
4189  // Recognize calls to the function.
4190  llvm::CallBase *callSite = dyn_cast<llvm::CallBase>(user);
4191  if (!callSite) continue;
4192  if (!callSite->isCallee(&*use))
4193  continue;
4194 
4195  // If the return types don't match exactly, then we can't
4196  // transform this call unless it's dead.
4197  if (callSite->getType() != newRetTy && !callSite->use_empty())
4198  continue;
4199 
4200  // Get the call site's attribute list.
4201  SmallVector<llvm::AttributeSet, 8> newArgAttrs;
4202  llvm::AttributeList oldAttrs = callSite->getAttributes();
4203 
4204  // If the function was passed too few arguments, don't transform.
4205  unsigned newNumArgs = newFn->arg_size();
4206  if (callSite->arg_size() < newNumArgs)
4207  continue;
4208 
4209  // If extra arguments were passed, we silently drop them.
4210  // If any of the types mismatch, we don't transform.
4211  unsigned argNo = 0;
4212  bool dontTransform = false;
4213  for (llvm::Argument &A : newFn->args()) {
4214  if (callSite->getArgOperand(argNo)->getType() != A.getType()) {
4215  dontTransform = true;
4216  break;
4217  }
4218 
4219  // Add any parameter attributes.
4220  newArgAttrs.push_back(oldAttrs.getParamAttributes(argNo));
4221  argNo++;
4222  }
4223  if (dontTransform)
4224  continue;
4225 
4226  // Okay, we can transform this. Create the new call instruction and copy
4227  // over the required information.
4228  newArgs.append(callSite->arg_begin(), callSite->arg_begin() + argNo);
4229 
4230  // Copy over any operand bundles.
4231  callSite->getOperandBundlesAsDefs(newBundles);
4232 
4233  llvm::CallBase *newCall;
4234  if (dyn_cast<llvm::CallInst>(callSite)) {
4235  newCall =
4236  llvm::CallInst::Create(newFn, newArgs, newBundles, "", callSite);
4237  } else {
4238  auto *oldInvoke = cast<llvm::InvokeInst>(callSite);
4239  newCall = llvm::InvokeInst::Create(newFn, oldInvoke->getNormalDest(),
4240  oldInvoke->getUnwindDest(), newArgs,
4241  newBundles, "", callSite);
4242  }
4243  newArgs.clear(); // for the next iteration
4244 
4245  if (!newCall->getType()->isVoidTy())
4246  newCall->takeName(callSite);
4247  newCall->setAttributes(llvm::AttributeList::get(
4248  newFn->getContext(), oldAttrs.getFnAttributes(),
4249  oldAttrs.getRetAttributes(), newArgAttrs));
4250  newCall->setCallingConv(callSite->getCallingConv());
4251 
4252  // Finally, remove the old call, replacing any uses with the new one.
4253  if (!callSite->use_empty())
4254  callSite->replaceAllUsesWith(newCall);
4255 
4256  // Copy debug location attached to CI.
4257  if (callSite->getDebugLoc())
4258  newCall->setDebugLoc(callSite->getDebugLoc());
4259 
4260  callSite->eraseFromParent();
4261  }
4262 }
4263 
4264 /// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
4265 /// implement a function with no prototype, e.g. "int foo() {}". If there are
4266 /// existing call uses of the old function in the module, this adjusts them to
4267 /// call the new function directly.
4268 ///
4269 /// This is not just a cleanup: the always_inline pass requires direct calls to
4270 /// functions to be able to inline them. If there is a bitcast in the way, it
4271 /// won't inline them. Instcombine normally deletes these calls, but it isn't
4272 /// run at -O0.
4273 static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
4274  llvm::Function *NewFn) {
4275  // If we're redefining a global as a function, don't transform it.
4276  if (!isa<llvm::Function>(Old)) return;
4277 
4278  replaceUsesOfNonProtoConstant(Old, NewFn);
4279 }
4280 
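Roughly, this rewrite is triggered by C input like the following (illustrative): the call that passes an argument to the unprototyped function is redirected to the real definition and the surplus argument is dropped.

    int log_event();                    /* no prototype              */
    void use(void) { log_event(42); }   /* call through the old decl */
    int log_event() { return 0; }       /* unprototyped definition   */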
4281 void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
4282  auto DK = VD->isThisDeclarationADefinition();
4283  if (DK == VarDecl::Definition && VD->hasAttr<DLLImportAttr>())
4284  return;
4285 
4286  TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind();
4287  // If we have a definition, this might be a deferred decl. If the
4288  // instantiation is explicit, make sure we emit it at the end.
4289  if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition)
4290  GetAddrOfGlobalVar(VD);
4291 
4292  EmitTopLevelDecl(VD);
4293 }
4294 
4295 void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
4296  llvm::GlobalValue *GV) {
4297  const auto *D = cast<FunctionDecl>(GD.getDecl());
4298 
4299  // Compute the function info and LLVM type.
4300  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
4301  llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
4302 
4303  // Get or create the prototype for the function.
4304  if (!GV || (GV->getType()->getElementType() != Ty))
4305  GV = cast<llvm::GlobalValue>(GetAddrOfFunction(GD, Ty, /*ForVTable=*/false,
4306  /*DontDefer=*/true,
4307  ForDefinition));
4308 
4309  // Already emitted.
4310  if (!GV->isDeclaration())
4311  return;
4312 
4313  // We need to set linkage and visibility on the function before
4314  // generating code for it because various parts of IR generation
4315  // want to propagate this information down (e.g. to local static
4316  // declarations).
4317  auto *Fn = cast<llvm::Function>(GV);
4318  setFunctionLinkage(GD, Fn);
4319 
4320  // FIXME: this is redundant with part of setFunctionDefinitionAttributes
4321  setGVProperties(Fn, GD);
4322 
4324 
4325 
4326  maybeSetTrivialComdat(*D, *Fn);
4327 
4328  CodeGenFunction(*this).GenerateCode(D, Fn, FI);
4329 
4330  setNonAliasAttributes(GD, Fn);
4331  SetLLVMFunctionAttributesForDefinition(D, Fn);
4332 
4333  if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
4334  AddGlobalCtor(Fn, CA->getPriority());
4335  if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
4336  AddGlobalDtor(Fn, DA->getPriority());
4337  if (D->hasAttr<AnnotateAttr>())
4338  AddGlobalAnnotations(D, Fn);
4339 }
4340 
4341 void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
4342  const auto *D = cast<ValueDecl>(GD.getDecl());
4343  const AliasAttr *AA = D->getAttr<AliasAttr>();
4344  assert(AA && "Not an alias?");
4345 
4346  StringRef MangledName = getMangledName(GD);
4347 
4348  if (AA->getAliasee() == MangledName) {
4349  Diags.Report(AA->getLocation(), diag::err_cyclic_alias) << 0;
4350  return;
4351  }
4352 
4353  // If there is a definition in the module, then it wins over the alias.
4354  // This is dubious, but allow it to be safe. Just ignore the alias.
4355  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
4356  if (Entry && !Entry->isDeclaration())
4357  return;
4358 
4359  Aliases.push_back(GD);
4360 
4361  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
4362 
4363  // Create a reference to the named value. This ensures that it is emitted
4364  // if a deferred decl.
4365  llvm::Constant *Aliasee;
4366  if (isa<llvm::FunctionType>(DeclTy))
4367  Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GD,
4368  /*ForVTable=*/false);
4369  else
4370  Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
4371  llvm::PointerType::getUnqual(DeclTy),
4372  /*D=*/nullptr);
4373 
4374  // Create the new alias itself, but don't set a name yet.
4375  auto *GA = llvm::GlobalAlias::create(
4376  DeclTy, 0, llvm::Function::ExternalLinkage, "", Aliasee, &getModule());
4377 
4378  if (Entry) {
4379  if (GA->getAliasee() == Entry) {
4380  Diags.Report(AA->getLocation(), diag::err_cyclic_alias) << 0;
4381  return;
4382  }
4383 
4384  assert(Entry->isDeclaration());
4385 
4386  // If there is a declaration in the module, then we had an extern followed
4387  // by the alias, as in:
4388  // extern int test6();
4389  // ...
4390  // int test6() __attribute__((alias("test7")));
4391  //
4392  // Remove it and replace uses of it with the alias.
4393  GA->takeName(Entry);
4394 
4395  Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
4396  Entry->getType()));
4397  Entry->eraseFromParent();
4398  } else {
4399  GA->setName(MangledName);
4400  }
4401 
4402  // Set attributes which are particular to an alias; this is a
4403  // specialization of the attributes which may be set on a global
4404  // variable/function.
4405  if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakRefAttr>() ||
4406  D->isWeakImported()) {
4407  GA->setLinkage(llvm::Function::WeakAnyLinkage);
4408  }
4409 
4410  if (const auto *VD = dyn_cast<VarDecl>(D))
4411  if (VD->getTLSKind())
4412  setTLSMode(GA, *VD);
4413 
4414  SetCommonAttributes(GD, GA);
4415 }
4416 
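A complete version of the pattern from the comment above (illustrative GNU C++ for ELF targets):

    extern "C" int impl(void) { return 1; }
    extern "C" int api(void) __attribute__((alias("impl")));  // emitted as a GlobalAlias to impl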
4417 void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) {
4418  const auto *D = cast<ValueDecl>(GD.getDecl());
4419  const IFuncAttr *IFA = D->getAttr<IFuncAttr>();
4420  assert(IFA && "Not an ifunc?");
4421 
4422  StringRef MangledName = getMangledName(GD);
4423 
4424  if (IFA->getResolver() == MangledName) {
4425  Diags.Report(IFA->getLocation(), diag::err_cyclic_alias) << 1;
4426  return;
4427  }
4428 
4429  // Report an error if some definition overrides ifunc.
4430  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
4431  if (Entry && !Entry->isDeclaration()) {
4432  GlobalDecl OtherGD;
4433  if (lookupRepresentativeDecl(MangledName, OtherGD) &&
4434  DiagnosedConflictingDefinitions.insert(GD).second) {
4435  Diags.Report(D->getLocation(), diag::err_duplicate_mangled_name)
4436  << MangledName;
4437  Diags.Report(OtherGD.getDecl()->getLocation(),
4438  diag::note_previous_definition);
4439  }
4440  return;
4441  }
4442 
4443  Aliases.push_back(GD);
4444 
4445  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
4446  llvm::Constant *Resolver =
4447  GetOrCreateLLVMFunction(IFA->getResolver(), DeclTy, GD,
4448  /*ForVTable=*/false);
4449  llvm::GlobalIFunc *GIF =
4450  llvm::GlobalIFunc::create(DeclTy, 0, llvm::Function::ExternalLinkage,
4451  "", Resolver, &getModule());
4452  if (Entry) {
4453  if (GIF->getResolver() == Entry) {
4454  Diags.Report(IFA->getLocation(), diag::err_cyclic_alias) << 1;
4455  return;
4456  }
4457  assert(Entry->isDeclaration());
4458 
4459  // If there is a declaration in the module, then we had an extern followed
4460  // by the ifunc, as in:
4461  // extern int test();
4462  // ...
4463  // int test() __attribute__((ifunc("resolver")));
4464  //
4465  // Remove it and replace uses of it with the ifunc.
4466  GIF->takeName(Entry);
4467 
4468  Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GIF,
4469  Entry->getType()));
4470  Entry->eraseFromParent();
4471  } else
4472  GIF->setName(MangledName);
4473 
4474  SetCommonAttributes(GD, GIF);
4475 }
4476 
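An illustrative source-level ifunc (GNU/ELF extension; all names invented):

    extern "C" int answer_fast(void) { return 1; }
    extern "C" int answer_slow(void) { return 2; }
    extern "C" void *resolve_answer(void) {
      return (void *)&answer_fast;   // chosen once by the loader
    }
    extern "C" int answer(void) __attribute__((ifunc("resolve_answer")));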
4477 llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
4478  ArrayRef<llvm::Type*> Tys) {
4479  return llvm::Intrinsic::getDeclaration(&getModule(), (llvm::Intrinsic::ID)IID,
4480  Tys);
4481 }
4482 
4483 static llvm::StringMapEntry<llvm::GlobalVariable *> &
4484 GetConstantCFStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
4485  const StringLiteral *Literal, bool TargetIsLSB,
4486  bool &IsUTF16, unsigned &StringLength) {
4487  StringRef String = Literal->getString();
4488  unsigned NumBytes = String.size();
4489 
4490  // Check for simple case.
4491  if (!Literal->containsNonAsciiOrNull()) {
4492  StringLength = NumBytes;
4493  return *Map.insert(std::make_pair(String, nullptr)).first;
4494  }
4495 
4496  // Otherwise, convert the UTF8 literals into a string of shorts.
4497  IsUTF16 = true;
4498 
4499  SmallVector<llvm::UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls.
4500  const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
4501  llvm::UTF16 *ToPtr = &ToBuf[0];
4502 
4503  (void)llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
4504  ToPtr + NumBytes, llvm::strictConversion);
4505 
4506  // ConvertUTF8toUTF16 returns the length in ToPtr.
4507  StringLength = ToPtr - &ToBuf[0];
4508 
4509  // Add an explicit null.
4510  *ToPtr = 0;
4511  return *Map.insert(std::make_pair(
4512  StringRef(reinterpret_cast<const char *>(ToBuf.data()),
4513  (StringLength + 1) * 2),
4514  nullptr)).first;
4515 }
4516 
4517 ConstantAddress
4518 CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
4519  unsigned StringLength = 0;
4520  bool isUTF16 = false;
4521  llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
4522  GetConstantCFStringEntry(CFConstantStringMap, Literal,
4523  getDataLayout().isLittleEndian(), isUTF16,
4524  StringLength);
4525 
4526  if (auto *C = Entry.second)
4527  return ConstantAddress(C, CharUnits::fromQuantity(C->getAlignment()));
4528 
4529  llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
4530  llvm::Constant *Zeros[] = { Zero, Zero };
4531 
4532  const ASTContext &Context = getContext();
4533  const llvm::Triple &Triple = getTriple();
4534 
4535  const auto CFRuntime = getLangOpts().CFRuntime;
4536  const bool IsSwiftABI =
4537  static_cast<unsigned>(CFRuntime) >=
4538  static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift);
4539  const bool IsSwift4_1 = CFRuntime == LangOptions::CoreFoundationABI::Swift4_1;
4540 
4541  // If we don't already have it, get __CFConstantStringClassReference.
4542  if (!CFConstantStringClassRef) {
4543  const char *CFConstantStringClassName = "__CFConstantStringClassReference";
4544  llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
4545  Ty = llvm::ArrayType::get(Ty, 0);
4546 
4547  switch (CFRuntime) {
4548  default: break;
4549  case LangOptions::CoreFoundationABI::Swift: LLVM_FALLTHROUGH;
4550  case LangOptions::CoreFoundationABI::Swift5_0:
4551  CFConstantStringClassName =
4552  Triple.isOSDarwin() ? "$s15SwiftFoundation19_NSCFConstantStringCN"
4553  : "$s10Foundation19_NSCFConstantStringCN";
4554  Ty = IntPtrTy;
4555  break;
4556  case LangOptions::CoreFoundationABI::Swift4_2:
4557  CFConstantStringClassName =
4558  Triple.isOSDarwin() ? "$S15SwiftFoundation19_NSCFConstantStringCN"
4559  : "$S10Foundation19_NSCFConstantStringCN";
4560  Ty = IntPtrTy;
4561  break;
4562  case LangOptions::CoreFoundationABI::Swift4_1:
4563  CFConstantStringClassName =
4564  Triple.isOSDarwin() ? "__T015SwiftFoundation19_NSCFConstantStringCN"
4565  : "__T010Foundation19_NSCFConstantStringCN";
4566  Ty = IntPtrTy;
4567  break;
4568  }
4569 
4570  llvm::Constant *C = CreateRuntimeVariable(Ty, CFConstantStringClassName);
4571 
4572  if (Triple.isOSBinFormatELF() || Triple.isOSBinFormatCOFF()) {
4573  llvm::GlobalValue *GV = nullptr;
4574 
4575  if ((GV = dyn_cast<llvm::GlobalValue>(C))) {
4576  IdentifierInfo &II = Context.Idents.get(GV->getName());
4577  TranslationUnitDecl *TUDecl = Context.getTranslationUnitDecl();
4578  DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
4579 
4580  const VarDecl *VD = nullptr;
4581  for (const auto &Result : DC->lookup(&II))
4582  if ((VD = dyn_cast<VarDecl>(Result)))
4583  break;
4584 
4585  if (Triple.isOSBinFormatELF()) {
4586  if (!VD)
4587  GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
4588  } else {
4589  GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
4590  if (!VD || !VD->hasAttr<DLLExportAttr>())
4591  GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
4592  else
4593  GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
4594  }
4595 
4596  setDSOLocal(GV);
4597  }
4598  }
4599 
4600  // Decay array -> ptr
4601  CFConstantStringClassRef =
4602  IsSwiftABI ? llvm::ConstantExpr::getPtrToInt(C, Ty)
4603  : llvm::ConstantExpr::getGetElementPtr(Ty, C, Zeros);
4604  }
4605 
4606  QualType CFTy = Context.getCFConstantStringType();
4607 
4608  auto *STy = cast<llvm::StructType>(getTypes().ConvertType(CFTy));
4609 
4610  ConstantInitBuilder Builder(*this);
4611  auto Fields = Builder.beginStruct(STy);
4612 
4613  // Class pointer.
4614  Fields.add(cast<llvm::ConstantExpr>(CFConstantStringClassRef));
4615 
4616  // Flags.
4617  if (IsSwiftABI) {
4618  Fields.addInt(IntPtrTy, IsSwift4_1 ? 0x05 : 0x01);
4619  Fields.addInt(Int64Ty, isUTF16 ? 0x07d0 : 0x07c8);
4620  } else {
4621  Fields.addInt(IntTy, isUTF16 ? 0x07d0 : 0x07C8);
4622  }
4623 
4624  // String pointer.
4625  llvm::Constant *C = nullptr;
4626  if (isUTF16) {
4627  auto Arr = llvm::makeArrayRef(
4628  reinterpret_cast<uint16_t *>(const_cast<char *>(Entry.first().data())),
4629  Entry.first().size() / 2);
4630  C = llvm::ConstantDataArray::get(VMContext, Arr);
4631  } else {
4632  C = llvm::ConstantDataArray::getString(VMContext, Entry.first());
4633  }
4634 
4635  // Note: -fwritable-strings doesn't make the backing store strings of
4636  // CFStrings writable. (See <rdar://problem/10657500>)
4637  auto *GV =
4638  new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true,
4639  llvm::GlobalValue::PrivateLinkage, C, ".str");
4640  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4641  // Don't enforce the target's minimum global alignment, since the only use
4642  // of the string is via this class initializer.
4643  CharUnits Align = isUTF16 ? Context.getTypeAlignInChars(Context.ShortTy)
4644  : Context.getTypeAlignInChars(Context.CharTy);
4645  GV->setAlignment(Align.getQuantity());
4646 
4647  // FIXME: We set the section explicitly to avoid a bug in ld64 224.1.
4648  // Without it LLVM can merge the string with a non unnamed_addr one during
4649  // LTO. Doing that changes the section it ends in, which surprises ld64.
4650  if (Triple.isOSBinFormatMachO())
4651  GV->setSection(isUTF16 ? "__TEXT,__ustring"
4652  : "__TEXT,__cstring,cstring_literals");
4653  // Make sure the literal ends up in .rodata to allow for safe ICF and for
4654  // the static linker to adjust permissions to read-only later on.
4655  else if (Triple.isOSBinFormatELF())
4656  GV->setSection(".rodata");
4657 
4658  // String.
4659  llvm::Constant *Str =
4660  llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);
4661 
4662  if (isUTF16)
4663  // Cast the UTF16 string to the correct type.
4664  Str = llvm::ConstantExpr::getBitCast(Str, Int8PtrTy);
4665  Fields.add(Str);
4666 
4667  // String length.
4668  llvm::IntegerType *LengthTy =
4669  llvm::IntegerType::get(getModule().getContext(),
4670  Context.getTargetInfo().getLongWidth());
4671  if (IsSwiftABI) {
4672  if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
4673  CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
4674  LengthTy = Int32Ty;
4675  else
4676  LengthTy = IntPtrTy;
4677  }
4678  Fields.addInt(LengthTy, StringLength);
4679 
4680  // Swift ABI requires 8-byte alignment to ensure that the _Atomic(uint64_t) is
4681  // properly aligned on 32-bit platforms.
4682  CharUnits Alignment =
4683  IsSwiftABI ? Context.toCharUnitsFromBits(64) : getPointerAlign();
4684 
4685  // The struct.
4686  GV = Fields.finishAndCreateGlobal("_unnamed_cfstring_", Alignment,
4687  /*isConstant=*/false,
4688  llvm::GlobalVariable::PrivateLinkage);
4689  GV->addAttribute("objc_arc_inert");
4690  switch (Triple.getObjectFormat()) {
4691  case llvm::Triple::UnknownObjectFormat:
4692  llvm_unreachable("unknown file format");
4693  case llvm::Triple::XCOFF:
4694  llvm_unreachable("XCOFF is not yet implemented");
4695  case llvm::Triple::COFF:
4696  case llvm::Triple::ELF:
4697  case llvm::Triple::Wasm:
4698  GV->setSection("cfstring");
4699  break;
4700  case llvm::Triple::MachO:
4701  GV->setSection("__DATA,__cfstring");
4702  break;
4703  }
4704  Entry.second = GV;
4705 
4706  return ConstantAddress(GV, Alignment);
4707 }
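// Rough shape of the emitted constant (illustrative only; field types vary by
// target and by the selected CF runtime, as the Swift branches above show):
//   struct __NSConstantString {
//     const int *isa;   // __CFConstantStringClassReference
//     int flags;        // 0x07C8 for ASCII data, 0x07D0 for UTF-16
//     const char *str;  // the private ".str" / "__ustring" backing data
//     long length;      // length in characters
//   };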
4708 
4709 bool CodeGenModule::getExpressionLocationsEnabled() const {
4710  return !CodeGenOpts.EmitCodeView || CodeGenOpts.DebugColumnInfo;
4711 }
4712 
4713 QualType CodeGenModule::getObjCFastEnumerationStateType() {
4714  if (ObjCFastEnumerationStateType.isNull()) {
4715  RecordDecl *D = Context.buildImplicitRecord("__objcFastEnumerationState");
4716  D->startDefinition();
4717 
4718  QualType FieldTypes[] = {
4719  Context.UnsignedLongTy,
4720  Context.getPointerType(Context.getObjCIdType()),
4721  Context.getPointerType(Context.UnsignedLongTy),
4722  Context.getConstantArrayType(Context.UnsignedLongTy,
4723  llvm::APInt(32, 5), ArrayType::Normal, 0)
4724  };
4725 
4726  for (size_t i = 0; i < 4; ++i) {
4727  FieldDecl *Field = FieldDecl::Create(Context,
4728  D,
4729  SourceLocation(),
4730  SourceLocation(), nullptr,
4731  FieldTypes[i], /*TInfo=*/nullptr,
4732  /*BitWidth=*/nullptr,
4733  /*Mutable=*/false,
4734  ICIS_NoInit);
4735  Field->setAccess(AS_public);
4736  D->addDecl(Field);
4737  }
4738 
4739  D->completeDefinition();
4740  ObjCFastEnumerationStateType = Context.getTagDeclType(D);
4741  }
4742 
4743  return ObjCFastEnumerationStateType;
4744 }
4745 
4746 llvm::Constant *
4747 CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
4748  assert(!E->getType()->isPointerType() && "Strings are always arrays");
4749 
4750  // Don't emit it as the address of the string, emit the string data itself
4751  // as an inline array.
4752  if (E->getCharByteWidth() == 1) {
4753  SmallString<64> Str(E->getString());
4754 
4755  // Resize the string to the right size, which is indicated by its type.
4756  const ConstantArrayType *CAT = Context.getAsConstantArrayType(E->getType());
4757  Str.resize(CAT->getSize().getZExtValue());
4758  return llvm::ConstantDataArray::getString(VMContext, Str, false);
4759  }
4760 
4761  auto *AType = cast<llvm::ArrayType>(getTypes().ConvertType(E->getType()));
4762  llvm::Type *ElemTy = AType->getElementType();
4763  unsigned NumElements = AType->getNumElements();
4764 
4765  // Wide strings have either 2-byte or 4-byte elements.
4766  if (ElemTy->getPrimitiveSizeInBits() == 16) {
4767  SmallVector<uint16_t, 32> Elements;
4768  Elements.reserve(NumElements);
4769 
4770  for(unsigned i = 0, e = E->getLength(); i != e; ++i)
4771  Elements.push_back(E->getCodeUnit(i));
4772  Elements.resize(NumElements);
4773  return llvm::ConstantDataArray::get(VMContext, Elements);
4774  }
4775 
4776  assert(ElemTy->getPrimitiveSizeInBits() == 32);
4777  SmallVector<uint32_t, 32> Elements;
4778  Elements.reserve(NumElements);
4779 
4780  for(unsigned i = 0, e = E->getLength(); i != e; ++i)
4781  Elements.push_back(E->getCodeUnit(i));
4782  Elements.resize(NumElements);
4783  return llvm::ConstantDataArray::get(VMContext, Elements);
4784 }
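// Example: for `char s[5] = "hi";` the literal has type char[5], so the
// returned constant is zero-padded to the array size (roughly c"hi\00\00\00"
// in IR); wide literals take the 16-bit or 32-bit element paths above instead.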
4785 
4786 static llvm::GlobalVariable *
4787 GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
4788  CodeGenModule &CGM, StringRef GlobalName,
4789  CharUnits Alignment) {
4790  unsigned AddrSpace = CGM.getContext().getTargetAddressSpace(
4791  CGM.getStringLiteralAddressSpace());
4792 
4793  llvm::Module &M = CGM.getModule();
4794  // Create a global variable for this string
4795  auto *GV = new llvm::GlobalVariable(
4796  M, C->getType(), !CGM.getLangOpts().WritableStrings, LT, C, GlobalName,
4797  nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace);
4798  GV->setAlignment(Alignment.getQuantity());
4799  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4800  if (GV->isWeakForLinker()) {
4801  assert(CGM.supportsCOMDAT() && "Only COFF uses weak string literals");
4802  GV->setComdat(M.getOrInsertComdat(GV->getName()));
4803  }
4804  CGM.setDSOLocal(GV);
4805 
4806  return GV;
4807 }
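// Note: marking the global unnamed_addr allows identical string constants to
// be merged, and the COMDAT is only needed when the ABI gives the literal weak
// (linkonce_odr) linkage, e.g. for mangled string literals on COFF targets.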
4808 
4809 /// GetAddrOfConstantStringFromLiteral - Return a pointer to a
4810 /// constant array for the given string literal.
4811 ConstantAddress
4812 CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
4813  StringRef Name) {
4814  CharUnits Alignment = getContext().getAlignOfGlobalVarInChars(S->getType());
4815 
4816  llvm::Constant *C = GetConstantArrayFromStringLiteral(S);
4817  llvm::GlobalVariable **Entry = nullptr;
4818  if (!LangOpts.WritableStrings) {
4819  Entry = &ConstantStringMap[C];
4820  if (auto GV = *Entry) {
4821  if (Alignment.getQuantity() > GV->getAlignment())
4822  GV->setAlignment(Alignment.getQuantity());
4823  return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
4824  Alignment);
4825  }
4826  }
4827 
4828  SmallString<256> MangledNameBuffer;
4829  StringRef GlobalVariableName;
4830  llvm::GlobalValue::LinkageTypes LT;
4831 
4832  // Mangle the string literal if that's how the ABI merges duplicate strings.
4833  // Don't do it if they are writable, since we don't want writes in one TU to
4834  // affect strings in another.
4835  if (getCXXABI().getMangleContext().shouldMangleStringLiteral(S) &&
4836  !LangOpts.WritableStrings) {
4837  llvm::raw_svector_ostream Out(MangledNameBuffer);
4838  getCXXABI().getMangleContext().mangleStringLiteral(S, Out);
4839  LT = llvm::GlobalValue::LinkOnceODRLinkage;
4840  GlobalVariableName = MangledNameBuffer;
4841  } else {
4842  LT = llvm::GlobalValue::PrivateLinkage;
4843  GlobalVariableName = Name;
4844  }
4845 
4846  auto GV = GenerateStringLiteral(C, LT, *this, GlobalVariableName, Alignment);
4847  if (Entry)
4848  *Entry = GV;
4849 
4850  SanitizerMD->reportGlobalToASan(GV, S->getStrTokenLoc(0), "<string literal>",
4851  QualType());
4852 
4853  return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
4854  Alignment);
4855 }
4856 
4857 /// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
4858 /// array for the given ObjCEncodeExpr node.
4859 ConstantAddress
4860 CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
4861  std::string Str;
4862  getContext().getObjCEncodingForType(E->getEncodedType(), Str);
4863 
4864  return GetAddrOfConstantCString(Str);
4865 }
4866 
4867 /// GetAddrOfConstantCString - Returns a pointer to a character array containing
4868 /// the literal and a terminating '\0' character.
4869 /// The result has pointer to array type.
4870 ConstantAddress CodeGenModule::GetAddrOfConstantCString(
4871  const std::string &Str, const char *GlobalName) {
4872  StringRef StrWithNull(Str.c_str(), Str.size() + 1);
4873  CharUnits Alignment =
4874  getContext().getAlignOfGlobalVarInChars(getContext().CharTy);
4875 
4876  llvm::Constant *C =
4877  llvm::ConstantDataArray::getString(getLLVMContext(), StrWithNull, false);
4878 
4879  // Don't share any string literals if strings aren't constant.
4880  llvm::GlobalVariable **Entry = nullptr;
4881  if (!LangOpts.WritableStrings) {
4882  Entry = &ConstantStringMap[C];
4883  if (auto GV = *Entry) {
4884  if (Alignment.getQuantity() > GV->getAlignment())
4885  GV->setAlignment(Alignment.getQuantity());
4886  return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
4887  Alignment);
4888  }
4889  }
4890 
4891  // Get the default prefix if a name wasn't specified.
4892  if (!GlobalName)
4893  GlobalName = ".str";
4894  // Create a global variable for this.
4895  auto GV = GenerateStringLiteral(C, llvm::GlobalValue::PrivateLinkage, *this,
4896  GlobalName, Alignment);
4897  if (Entry)
4898  *Entry = GV;
4899 
4900  return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
4901  Alignment);
4902 }
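// Illustrative use (not from the original source; the encoding string is just
// an example): runtimes that need a plain NUL-terminated C string constant,
// such as an Objective-C type encoding, call
//   ConstantAddress Addr = CGM.GetAddrOfConstantCString("v16@0:8");
// and get back the address of a private ".str" global in the module.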
4903 
4904 ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
4905  const MaterializeTemporaryExpr *E, const Expr *Init) {
4906  assert((E->getStorageDuration() == SD_Static ||
4907  E->getStorageDuration() == SD_Thread) && "not a global temporary");
4908  const auto *VD = cast<VarDecl>(E->getExtendingDecl());
4909 
4910  // If we're not materializing a subobject of the temporary, keep the
4911  // cv-qualifiers from the type of the MaterializeTemporaryExpr.
4912  QualType MaterializedType = Init->getType();
4913  if (Init == E->GetTemporaryExpr())
4914  MaterializedType = E->getType();
4915 
4916  CharUnits Align = getContext().getTypeAlignInChars(MaterializedType);
4917 
4918  if (llvm::Constant *Slot = MaterializedGlobalTemporaryMap[E])
4919  return ConstantAddress(Slot, Align);
4920 
4921  // FIXME: If an externally-visible declaration extends multiple temporaries,
4922  // we need to give each temporary the same name in every translation unit (and
4923  // we also need to make the temporaries externally-visible).
4924  SmallString<256> Name;
4925  llvm::raw_svector_ostream Out(Name);
4926  getCXXABI().getMangleContext().mangleReferenceTemporary(
4927  VD, E->getManglingNumber(), Out);
4928 
4929  APValue *Value = nullptr;
4930  if (E->getStorageDuration() == SD_Static) {
4931  // We might have a cached constant initializer for this temporary. Note
4932  // that this might have a different value from the value computed by
4933  // evaluating the initializer if the surrounding constant expression
4934  // modifies the temporary.
4935  Value = getContext().getMaterializedTemporaryValue(E, false);
4936  if (Value && Value->isAbsent())
4937  Value = nullptr;
4938  }
4939 
4940  // Try evaluating it now, it might have a constant initializer.
4941  Expr::EvalResult EvalResult;
4942  if (!Value && Init->EvaluateAsRValue(EvalResult, getContext()) &&
4943  !EvalResult.hasSideEffects())
4944  Value = &EvalResult.Val;
4945 
4946  LangAS AddrSpace =
4947  VD ? GetGlobalVarAddressSpace(VD) : MaterializedType.getAddressSpace();
4948 
4949  Optional<ConstantEmitter> emitter;
4950  llvm::Constant *InitialValue = nullptr;
4951  bool Constant = false;
4952  llvm::Type *Type;
4953  if (Value) {
4954  // The temporary has a constant initializer, use it.
4955  emitter.emplace(*this);
4956  InitialValue = emitter->emitForInitializer(*Value, AddrSpace,
4957  MaterializedType);
4958  Constant = isTypeConstant(MaterializedType, /*ExcludeCtor*/Value);
4959  Type = InitialValue->getType();
4960  } else {
4961  // No initializer, the initialization will be provided when we
4962  // initialize the declaration which performed lifetime extension.
4963  Type = getTypes().ConvertTypeForMem(MaterializedType);
4964  }
4965 
4966  // Create a global variable for this lifetime-extended temporary.
4967  llvm::GlobalValue::LinkageTypes Linkage =
4968  getLLVMLinkageVarDefinition(VD, Constant);
4969  if (Linkage == llvm::GlobalVariable::ExternalLinkage) {
4970  const VarDecl *InitVD;
4971  if (VD->isStaticDataMember() && VD->getAnyInitializer(InitVD) &&
4972  isa<CXXRecordDecl>(InitVD->getLexicalDeclContext())) {
4973  // Temporaries defined inside a class get linkonce_odr linkage because the
4974  // class can be defined in multiple translation units.
4975  Linkage = llvm::GlobalVariable::LinkOnceODRLinkage;
4976  } else {
4977  // There is no need for this temporary to have external linkage if the
4978  // VarDecl has external linkage.
4979  Linkage = llvm::GlobalVariable::InternalLinkage;
4980  }
4981  }
4982  auto TargetAS = getContext().getTargetAddressSpace(AddrSpace);
4983  auto *GV = new llvm::GlobalVariable(
4984  getModule(), Type, Constant, Linkage, InitialValue, Name.c_str(),
4985  /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
4986  if (emitter) emitter->finalize(GV);
4987  setGVProperties(GV, VD);
4988  GV->setAlignment(Align.getQuantity());
4989  if (supportsCOMDAT() && GV->isWeakForLinker())
4990  GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
4991  if (VD->getTLSKind())
4992  setTLSMode(GV, *VD);
4993  llvm::Constant *CV = GV;
4994  if (AddrSpace != LangAS::Default)
4995  CV = getTargetCodeGenInfo().performAddrSpaceCast(
4996  *this, GV, AddrSpace, LangAS::Default,
4997  Type->getPointerTo(
4998  getContext().getTargetAddressSpace(LangAS::Default)));
4999  MaterializedGlobalTemporaryMap[E] = CV;
5000  return ConstantAddress(CV, Align);
5001 }
5002 
5003 /// EmitObjCPropertyImplementations - Emit information for synthesized
5004 /// properties for an implementation.
5005 void CodeGenModule::EmitObjCPropertyImplementations(const
5006  ObjCImplementationDecl *D) {
5007  for (const auto *PID : D->property_impls()) {
5008  // Dynamic is just for type-checking.
5009  if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
5010  ObjCPropertyDecl *PD = PID->getPropertyDecl();
5011 
5012  // Determine which methods need to be implemented, some may have
5013  // been overridden. Note that ::isPropertyAccessor is not the method
5014  // we want, that just indicates if the decl came from a
5015  // property. What we want to know is if the method is defined in
5016  // this implementation.
5017  if (!D->getInstanceMethod(PD->getGetterName()))
5018  CodeGenFunction(*this).GenerateObjCGetter(
5019  const_cast<ObjCImplementationDecl *>(D), PID);
5020  if (!PD->isReadOnly() &&
5021  !D->getInstanceMethod(PD->getSetterName()))
5022  CodeGenFunction(*this).GenerateObjCSetter(
5023  const_cast<ObjCImplementationDecl *>(D), PID);
5024  }
5025  }
5026 }
5027 
5028 static bool needsDestructMethod(ObjCImplementationDecl *impl) {
5029  const ObjCInterfaceDecl *iface = impl->getClassInterface();
5030  for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
5031  ivar; ivar = ivar->getNextIvar())
5032  if (ivar->getType().isDestructedType())
5033  return true;
5034 
5035  return false;
5036 }
5037 
5038 static bool AllTrivialInitializers(CodeGenModule &CGM,
5039  ObjCImplementationDecl *D) {
5040  CodeGenFunction CGF(CGM);
5041  for (ObjCImplementationDecl::init_iterator B = D->init_begin(),
5042  E = D->init_end(); B != E; ++B) {
5043  CXXCtorInitializer *CtorInitExp = *B;
5044  Expr *Init = CtorInitExp->getInit();
5045  if (!CGF.isTrivialInitializer(Init))
5046  return false;
5047  }
5048  return true;
5049 }
5050 
5051 /// EmitObjCIvarInitializations - Emit information for ivar initialization
5052 /// for an implementation.
5053 void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
5054  // We might need a .cxx_destruct even if we don't have any ivar initializers.
5055  if (needsDestructMethod(D)) {
5056  IdentifierInfo *II = &getContext().Idents.get(".cxx_destruct");
5057  Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
5058  ObjCMethodDecl *DTORMethod =
5059  ObjCMethodDecl::Create(getContext(), D->getLocation(), D->getLocation(),
5060  cxxSelector, getContext().VoidTy, nullptr, D,
5061  /*isInstance=*/true, /*isVariadic=*/false,
5062  /*isPropertyAccessor=*/true, /*isImplicitlyDeclared=*/true,
5063  /*isDefined=*/false, ObjCMethodDecl::Required);
5064  D->addInstanceMethod(DTORMethod);
5065  CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
5066  D->setHasDestructors(true);
5067  }
5068 
5069  // If the implementation doesn't have any ivar initializers, we don't need
5070  // a .cxx_construct.
5071  if (D->getNumIvarInitializers() == 0 ||
5072  AllTrivialInitializers(*this, D))
5073  return;
5074 
5075  IdentifierInfo *II = &getContext().Idents.get(".cxx_construct");
5076  Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
5077  // The constructor returns 'self'.
5078  ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create(getContext(),
5079  D->getLocation(),
5080  D->getLocation(),
5081  cxxSelector,
5082  getContext().getObjCIdType(),
5083  nullptr, D, /*isInstance=*/true,
5084  /*isVariadic=*/false,
5085  /*isPropertyAccessor=*/true,
5086  /*isImplicitlyDeclared=*/true,
5087  /*isDefined=*/false,
5088  ObjCMethodDecl::Required);
5089  D->addInstanceMethod(CTORMethod);
5090  CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
5091  D->setHasNonZeroConstructors(true);
5092 }
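// Note: .cxx_construct and .cxx_destruct are implicit, accessor-style methods
// that the Objective-C runtime invokes around object allocation and
// deallocation so that non-trivial C++ ivars in an @implementation get their
// constructors and destructors run; they are only synthesized when the checks
// above show an ivar actually needs them.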
5093 
5094 // EmitLinkageSpec - Emit all declarations in a linkage spec.
5095 void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
5096  if (LSD->getLanguage() != LinkageSpecDecl::lang_c &&
5097  LSD->getLanguage() != LinkageSpecDecl::lang_cxx) {
5098  ErrorUnsupported(LSD, "linkage spec");
5099  return;
5100  }
5101 
5102  EmitDeclContext(LSD);
5103 }
5104 
5105 void CodeGenModule::EmitDeclContext(const DeclContext *DC) {
5106  for (auto *I : DC->decls()) {
5107  // Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope
5108  // are themselves considered "top-level", so EmitTopLevelDecl on an
5109  // ObjCImplDecl does not recursively visit them. We need to do that in
5110  // case they're nested inside another construct (LinkageSpecDecl /
5111  // ExportDecl) that does stop them from being considered "top-level".
5112  if (auto *OID = dyn_cast<ObjCImplDecl>(I)) {
5113  for (auto *M : OID->methods())
5114  EmitTopLevelDecl(M);
5115  }
5116 
5117  EmitTopLevelDecl(I);
5118  }
5119 }
5120 
5121 /// EmitTopLevelDecl - Emit code for a single top level declaration.
5122 void CodeGenModule::EmitTopLevelDecl(Decl *D) {
5123  // Ignore dependent declarations.
5124  if (D->isTemplated())
5125  return;
5126 
5127  switch (D->getKind()) {
5128  case Decl::CXXConversion:
5129  case Decl::CXXMethod:
5130  case Decl::Function:
5131  EmitGlobal(cast<FunctionDecl>(D));
5132  // Always provide some coverage mapping
5133  // even for the functions that aren't emitted.
5134  AddDeferredUnusedCoverageMapping(D);
5135  break;
5136 
5137  case Decl::CXXDeductionGuide:
5138  // Function-like, but does not result in code emission.
5139  break;
5140 
5141  case Decl::Var:
5142  case Decl::Decomposition:
5143  case Decl::VarTemplateSpecialization:
5144  EmitGlobal(cast<VarDecl>(D));
5145  if (auto *DD = dyn_cast<DecompositionDecl>(D))
5146  for (auto *B : DD->bindings())
5147  if (auto *HD = B->getHoldingVar())
5148  EmitGlobal(HD);
5149  break;
5150 
5151  // Indirect fields from global anonymous structs and unions can be
5152  // ignored; only the actual variable requires IR gen support.
5153  case Decl::IndirectField:
5154  break;
5155 
5156  // C++ Decls
5157  case Decl::Namespace:
5158  EmitDeclContext(cast<NamespaceDecl>(D));
5159  break;
5160  case Decl::ClassTemplateSpecialization: {
5161  const auto *Spec = cast<ClassTemplateSpecializationDecl>(D);
5162  if (DebugInfo &&
5163  Spec->getSpecializationKind() == TSK_ExplicitInstantiationDefinition &&
5164  Spec->hasDefinition())
5165  DebugInfo->completeTemplateDefinition(*Spec);
5166  } LLVM_FALLTHROUGH;
5167  case Decl::CXXRecord:
5168  if (DebugInfo) {
5169  if (auto *ES = D->getASTContext().getExternalSource())
5170  if (ES->hasExternalDefinitions(D) == ExternalASTSource::EK_Never)
5171  DebugInfo->completeUnusedClass(cast<CXXRecordDecl>(*D));
5172  }
5173  // Emit any static data members, they may be definitions.
5174  for (auto *I : cast<CXXRecordDecl>(D)->decls())
5175  if (isa<VarDecl>(I) || isa<CXXRecordDecl>(I))
5176  EmitTopLevelDecl(I);
5177  break;
5178  // No code generation needed.
5179  case Decl::UsingShadow:
5180  case Decl::ClassTemplate:
5181  case Decl::VarTemplate:
5182  case Decl::Concept:
5183  case Decl::VarTemplatePartialSpecialization:
5184  case Decl::FunctionTemplate:
5185  case Decl::TypeAliasTemplate:
5186  case Decl::Block:
5187  case Decl::Empty:
5188  case Decl::Binding:
5189  break;
5190  case Decl::Using: // using X; [C++]
5191  if (CGDebugInfo *DI = getModuleDebugInfo())
5192  DI->EmitUsingDecl(cast<UsingDecl>(*D));
5193  return;
5194  case Decl::NamespaceAlias:
5195  if (CGDebugInfo *DI = getModuleDebugInfo())
5196  DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(*D));
5197  return;
5198  case Decl::UsingDirective: // using namespace X; [C++]
5199  if (CGDebugInfo *DI = getModuleDebugInfo())
5200  DI->EmitUsingDirective(cast<UsingDirectiveDecl>(*D));
5201  return;
5202  case Decl::CXXConstructor:
5203  getCXXABI().EmitCXXConstructors(cast<CXXConstructorDecl>(D));
5204  break;
5205  case Decl::CXXDestructor:
5206  getCXXABI().EmitCXXDestructors(cast<CXXDestructorDecl>(D));
5207  break;
5208 
5209  case Decl::StaticAssert:
5210  // Nothing to do.
5211  break;
5212 
5213  // Objective-C Decls
5214 
5215  // Forward declarations, no (immediate) code generation.
5216  case Decl::ObjCInterface:
5217  case Decl::ObjCCategory:
5218  break;
5219 
5220  case Decl::ObjCProtocol: {
5221  auto *Proto = cast<ObjCProtocolDecl>(D);
5222  if (Proto->isThisDeclarationADefinition())
5223  ObjCRuntime->GenerateProtocol(Proto);
5224  break;
5225  }
5226 
5227  case Decl::ObjCCategoryImpl:
5228  // Categories have properties but don't support synthesize so we
5229  // can ignore them here.
5230  ObjCRuntime->GenerateCategory(cast<ObjCCategoryImplDecl>(D));
5231  break;
5232 
5233  case Decl::ObjCImplementation: {
5234  auto *OMD = cast<ObjCImplementationDecl>(D);
5235  EmitObjCPropertyImplementations(OMD);
5236  EmitObjCIvarInitializations(OMD);
5237  ObjCRuntime->GenerateClass(OMD);
5238  // Emit global variable debug information.
5239  if (CGDebugInfo *DI = getModuleDebugInfo())
5240  if (getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo)
5241  DI->getOrCreateInterfaceType(getContext().getObjCInterfaceType(
5242  OMD->getClassInterface()), OMD->getLocation());
5243  break;
5244  }
5245  case Decl::ObjCMethod: {
5246  auto *OMD = cast<ObjCMethodDecl>(D);
5247  // If this is not a prototype, emit the body.
5248  if (OMD->getBody())
5249  CodeGenFunction(*this).GenerateObjCMethod(OMD);
5250  break;
5251  }
5252  case Decl::ObjCCompatibleAlias:
5253  ObjCRuntime->RegisterAlias(cast<ObjCCompatibleAliasDecl>(D));
5254  break;
5255 
5256  case Decl::PragmaComment: {
5257  const auto *PCD = cast<PragmaCommentDecl>(D);
5258  switch (PCD->getCommentKind()) {
5259  case PCK_Unknown:
5260  llvm_unreachable("unexpected pragma comment kind");
5261  case PCK_Linker:
5262  AppendLinkerOptions(PCD->getArg());
5263  break;
5264  case PCK_Lib:
5265  AddDependentLib(PCD->getArg());
5266  break;
5267  case PCK_Compiler:
5268  case PCK_ExeStr:
5269  case PCK_User:
5270  break; // We ignore all of these.
5271  }
5272  break;
5273  }
5274 
5275  case Decl::PragmaDetectMismatch: {
5276  const auto *PDMD = cast<PragmaDetectMismatchDecl>(D);
5277  AddDetectMismatch(PDMD->getName(), PDMD->getValue());
5278  break;
5279  }
5280 
5281  case Decl::LinkageSpec:
5282  EmitLinkageSpec(cast<LinkageSpecDecl>(D));
5283  break;
5284 
5285  case Decl::FileScopeAsm: {
5286  // File-scope asm is ignored during device-side CUDA compilation.
5287  if (LangOpts.CUDA && LangOpts.CUDAIsDevice)
5288  break;
5289  // File-scope asm is ignored during device-side OpenMP compilation.
5290  if (LangOpts.OpenMPIsDevice)
5291  break;
5292  auto *AD = cast<FileScopeAsmDecl>(D);
5293  getModule().appendModuleInlineAsm(AD->getAsmString()->getString());
5294  break;
5295  }
5296 
5297  case Decl::Import: {
5298  auto *Import = cast<ImportDecl>(D);
5299 
5300  // If we've already imported this module, we're done.
5301  if (!ImportedModules.insert(Import->getImportedModule()))
5302  break;
5303 
5304  // Emit debug information for direct imports.
5305  if (!Import->getImportedOwningModule()) {
5306  if (CGDebugInfo *DI = getModuleDebugInfo())
5307  DI->EmitImportDecl(*Import);
5308  }
5309 
5310  // Find all of the submodules and emit the module initializers.
5311  llvm::SmallPtrSet<clang::Module *, 16> Visited;
5312  SmallVector<clang::Module *, 16> Stack;
5313  Visited.insert(Import->getImportedModule());
5314  Stack.push_back(Import->getImportedModule());
5315 
5316  while (!Stack.empty()) {
5317  clang::Module *Mod = Stack.pop_back_val();
5318  if (!EmittedModuleInitializers.insert(Mod).second)
5319  continue;
5320 
5321  for (auto *D : Context.getModuleInitializers(Mod))
5322  EmitTopLevelDecl(D);
5323 
5324  // Visit the submodules of this module.
5325  for (clang::Module::submodule_iterator Sub = Mod->submodule_begin(),
5326  SubEnd = Mod->submodule_end();
5327  Sub != SubEnd; ++Sub) {
5328  // Skip explicit children; they need to be explicitly imported to emit
5329  // the initializers.
5330  if ((*Sub)->IsExplicit)
5331  continue;
5332 
5333  if (Visited.insert(*Sub).second)
5334  Stack.push_back(*Sub);
5335  }
5336  }
5337  break;
5338  }
5339 
5340  case Decl::Export:
5341  EmitDeclContext(cast<ExportDecl>(D));
5342  break;
5343 
5344  case Decl::OMPThreadPrivate:
5345  EmitOMPThreadPrivateDecl(cast<OMPThreadPrivateDecl>(D));
5346  break;
5347 
5348  case Decl::OMPAllocate:
5349  break;
5350 
5351  case Decl::OMPDeclareReduction:
5352  EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(D));
5353  break;
5354 
5355  case Decl::OMPDeclareMapper:
5356  EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(D));
5357  break;
5358 
5359  case Decl::OMPRequires:
5360  EmitOMPRequiresDecl(cast<OMPRequiresDecl>(D));
5361  break;
5362 
5363  default:
5364  // Make sure we handled everything we should, every other kind is a
5365  // non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
5366  // function. Need to recode Decl::Kind to do that easily.
5367  assert(isa<TypeDecl>(D) && "Unsupported decl kind");
5368  break;
5369  }
5370 }
5371 
5372 void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) {
5373  // Do we need to generate coverage mapping?
5374  if (!CodeGenOpts.CoverageMapping)
5375  return;
5376  switch (D->getKind()) {
5377  case Decl::CXXConversion:
5378  case Decl::CXXMethod:
5379  case Decl::Function:
5380  case Decl::ObjCMethod:
5381  case Decl::CXXConstructor:
5382  case Decl::CXXDestructor: {
5383  if (!cast<FunctionDecl>(D)->doesThisDeclarationHaveABody())
5384  return;
5385  SourceManager &SM = getContext().getSourceManager();
5386  if (LimitedCoverage && SM.getMainFileID() != SM.getFileID(D->getBeginLoc()))
5387  return;
5388  auto I = DeferredEmptyCoverageMappingDecls.find(D);
5389  if (I == DeferredEmptyCoverageMappingDecls.end())
5390  DeferredEmptyCoverageMappingDecls[D] = true;
5391  break;
5392  }
5393  default:
5394  break;
5395  };
5396 }
5397 
5398 void CodeGenModule::ClearUnusedCoverageMapping(const Decl *D) {
5399  // Do we need to generate coverage mapping?
5400  if (!CodeGenOpts.CoverageMapping)
5401  return;
5402  if (const auto *Fn = dyn_cast<FunctionDecl>(D)) {
5403  if (Fn->isTemplateInstantiation())
5404  ClearUnusedCoverageMapping(Fn->getTemplateInstantiationPattern());
5405  }
5406  auto I = DeferredEmptyCoverageMappingDecls.find(D);
5407  if (I == DeferredEmptyCoverageMappingDecls.end())
5408  DeferredEmptyCoverageMappingDecls[D] = false;
5409  else
5410  I->second = false;
5411 }
5412 
5413 void CodeGenModule::EmitDeferredUnusedCoverageMappings() {
5414  // We call takeVector() here to avoid use-after-free.
5415  // FIXME: DeferredEmptyCoverageMappingDecls is getting mutated because
5416  // we deserialize function bodies to emit coverage info for them, and that
5417  // deserializes more declarations. How should we handle that case?
5418  for (const auto &Entry : DeferredEmptyCoverageMappingDecls.takeVector()) {
5419  if (!Entry.second)
5420  continue;
5421  const Decl *D = Entry.first;
5422  switch (D->getKind()) {
5423  case Decl::CXXConversion:
5424  case Decl::CXXMethod:
5425  case Decl::Function:
5426  case Decl::ObjCMethod: {
5427  CodeGenPGO PGO(*this);
5428  GlobalDecl GD(cast<FunctionDecl>(D));
5429  PGO.emitEmptyCounterMapping(D, getMangledName(GD),
5430  getFunctionLinkage(GD));
5431  break;
5432  }
5433  case Decl::CXXConstructor: {
5434  CodeGenPGO PGO(*this);
5435  GlobalDecl GD(cast<CXXConstructorDecl>(D), Ctor_Base);
5436  PGO.emitEmptyCounterMapping(D, getMangledName(GD),
5437  getFunctionLinkage(GD));
5438  break;
5439  }
5440  case Decl::CXXDestructor: {
5441  CodeGenPGO PGO(*this);
5442  GlobalDecl GD(cast<CXXDestructorDecl>(D), Dtor_Base);
5443  PGO.emitEmptyCounterMapping(D, getMangledName(GD),
5444  getFunctionLinkage(GD));
5445  break;
5446  }
5447  default:
5448  break;
5449  };
5450  }
5451 }
5452 
5453 /// Turns the given pointer into a constant.
5454 static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
5455  const void *Ptr) {
5456  uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr);
5457  llvm::Type *i64 = llvm::Type::getInt64Ty(Context);
5458  return llvm::ConstantInt::get(i64, PtrInt);
5459 }
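// Note: the i64 produced here is simply the in-process address of the AST
// node, so the resulting metadata is only meaningful to tools that consume the
// IR within the same compiler invocation (e.g. IR gen used as a library); it
// is not stable across runs.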
5460 
5461 static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
5462  llvm::NamedMDNode *&GlobalMetadata,
5463  GlobalDecl D,
5464  llvm::GlobalValue *Addr) {
5465  if (!GlobalMetadata)
5466  GlobalMetadata =
5467  CGM.getModule().getOrInsertNamedMetadata("clang.global.decl.ptrs");
5468 
5469  // TODO: should we report variant information for ctors/dtors?
5470  llvm::Metadata *Ops[] = {llvm::ConstantAsMetadata::get(Addr),
5471  llvm::ConstantAsMetadata::get(GetPointerConstant(
5472  CGM.getLLVMContext(), D.getDecl()))};
5473  GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
5474 }
5475 
5476 /// For each function which is declared within an extern "C" region and marked
5477 /// as 'used', but has internal linkage, create an alias from the unmangled
5478 /// name to the mangled name if possible. People expect to be able to refer
5479 /// to such functions with an unmangled name from inline assembly within the
5480 /// same translation unit.
5481 void CodeGenModule::EmitStaticExternCAliases() {
5482  if (!getTargetCodeGenInfo().shouldEmitStaticExternCAliases())
5483  return;
5484  for (auto &I : StaticExternCValues) {
5485  IdentifierInfo *Name = I.first;
5486  llvm::GlobalValue *Val = I.second;
5487  if (Val && !getModule().getNamedValue(Name->getName()))
5488  addUsedGlobal(llvm::GlobalAlias::create(Name->getName(), Val));
5489  }
5490 }
5491 
5492 bool CodeGenModule::lookupRepresentativeDecl(StringRef MangledName,
5493  GlobalDecl &Result) const {
5494  auto Res = Manglings.find(MangledName);
5495  if (Res == Manglings.end())
5496  return false;
5497  Result = Res->getValue();
5498  return true;
5499 }
5500 
5501 /// Emits metadata nodes associating all the global values in the
5502 /// current module with the Decls they came from. This is useful for
5503 /// projects using IR gen as a subroutine.
5504 ///
5505 /// Since there's currently no way to associate an MDNode directly
5506 /// with an llvm::GlobalValue, we create a global named metadata
5507 /// with the name 'clang.global.decl.ptrs'.
5508 void CodeGenModule::EmitDeclMetadata() {
5509  llvm::NamedMDNode *GlobalMetadata = nullptr;
5510 
5511  for (auto &I : MangledDeclNames) {
5512  llvm::GlobalValue *Addr = getModule().getNamedValue(I.second);
5513  // Some mangled names don't necessarily have an associated GlobalValue
5514  // in this module, e.g. if we mangled it for DebugInfo.
5515  if (Addr)
5516  EmitGlobalDeclMetadata(*this, GlobalMetadata, I.first, Addr);
5517  }
5518 }
5519 
5520 /// Emits metadata nodes for all the local variables in the current
5521 /// function.
5522 void CodeGenFunction::EmitDeclMetadata() {
5523  if (LocalDeclMap.empty()) return;
5524 
5525  llvm::LLVMContext &Context = getLLVMContext();
5526 
5527  // Find the unique metadata ID for this name.
5528  unsigned DeclPtrKind = Context.getMDKindID("clang.decl.ptr");
5529 
5530  llvm::NamedMDNode *GlobalMetadata = nullptr;
5531 
5532  for (auto &I : LocalDeclMap) {
5533  const Decl *D = I.first;
5534  llvm::Value *Addr = I.second.getPointer();
5535  if (auto *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
5536  llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
5537  Alloca->setMetadata(
5538  DeclPtrKind, llvm::MDNode::get(
5539  Context, llvm::ValueAsMetadata::getConstant(DAddr)));
5540  } else if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr)) {
5541  GlobalDecl GD = GlobalDecl(cast<VarDecl>(D));
5542  EmitGlobalDeclMetadata(CGM, GlobalMetadata, GD, GV);
5543  }
5544  }
5545 }
5546 
5547 void CodeGenModule::EmitVersionIdentMetadata() {
5548  llvm::NamedMDNode *IdentMetadata =
5549  TheModule.getOrInsertNamedMetadata("llvm.ident");
5550  std::string Version = getClangFullVersion();
5551  llvm::LLVMContext &Ctx = TheModule.getContext();
5552 
5553  llvm::Metadata *IdentNode[] = {llvm::MDString::get(Ctx, Version)};
5554  IdentMetadata->addOperand(llvm::MDNode::get(Ctx, IdentNode));
5555 }
5556 
5557 void CodeGenModule::EmitCommandLineMetadata() {
5558  llvm::NamedMDNode *CommandLineMetadata =
5559  TheModule.getOrInsertNamedMetadata("llvm.commandline");
5560  std::string CommandLine = getCodeGenOpts().RecordCommandLine;
5561  llvm::LLVMContext &Ctx = TheModule.getContext();
5562 
5563  llvm::Metadata *CommandLineNode[] = {llvm::MDString::get(Ctx, CommandLine)};
5564  CommandLineMetadata->addOperand(llvm::MDNode::get(Ctx, CommandLineNode));
5565 }
5566 
5567 void CodeGenModule::EmitTargetMetadata() {
5568  // Warning, new MangledDeclNames may be appended within this loop.
5569  // We rely on MapVector insertions adding new elements to the end
5570  // of the container.
5571  // FIXME: Move this loop into the one target that needs it, and only
5572  // loop over those declarations for which we couldn't emit the target
5573  // metadata when we emitted the declaration.
5574  for (unsigned I = 0; I != MangledDeclNames.size(); ++I) {
5575  auto Val = *(MangledDeclNames.begin() + I);
5576  const Decl *D = Val.first.getDecl()->getMostRecentDecl();
5577  llvm::GlobalValue *GV = GetGlobalValue(Val.second);
5578  getTargetCodeGenInfo().emitTargetMD(D, GV, *this);
5579  }
5580 }
5581 
5582 void CodeGenModule::EmitCoverageFile() {
5583  if (getCodeGenOpts().CoverageDataFile.empty() &&
5584  getCodeGenOpts().CoverageNotesFile.empty())
5585  return;
5586 
5587  llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata("llvm.dbg.cu");
5588  if (!CUNode)
5589  return;
5590 
5591  llvm::NamedMDNode *GCov = TheModule.getOrInsertNamedMetadata("llvm.gcov");
5592  llvm::LLVMContext &Ctx = TheModule.getContext();
5593  auto *CoverageDataFile =
5594  llvm::MDString::get(Ctx, getCodeGenOpts().CoverageDataFile);
5595  auto *CoverageNotesFile =
5596  llvm::MDString::get(Ctx, getCodeGenOpts().CoverageNotesFile);
5597  for (int i = 0, e = CUNode->getNumOperands(); i != e; ++i) {
5598  llvm::MDNode *CU = CUNode->getOperand(i);
5599  llvm::Metadata *Elts[] = {CoverageNotesFile, CoverageDataFile, CU};
5600  GCov->addOperand(llvm::MDNode::get(Ctx, Elts));
5601  }
5602 }
5603 
5604 llvm::Constant *CodeGenModule::EmitUuidofInitializer(StringRef Uuid) {
5605  // Sema has checked that all uuid strings are of the form
5606  // "12345678-1234-1234-1234-1234567890ab".
5607  assert(Uuid.size() == 36);
5608  for (unsigned i = 0; i < 36; ++i) {
5609  if (i == 8 || i == 13 || i == 18 || i == 23) assert(Uuid[i] == '-');
5610  else assert(isHexDigit(Uuid[i]));
5611  }
5612 
5613  // The starts of all bytes of Field3 in Uuid. Field 3 is "1234-1234567890ab".
5614  const unsigned Field3ValueOffsets[8] = { 19, 21, 24, 26, 28, 30, 32, 34 };
5615 
5616  llvm::Constant *Field3[8];
5617  for (unsigned Idx = 0; Idx < 8; ++Idx)
5618  Field3[Idx] = llvm::ConstantInt::get(
5619  Int8Ty, Uuid.substr(Field3ValueOffsets[Idx], 2), 16);
5620 
5621  llvm::Constant *Fields[4] = {
5622  llvm::ConstantInt::get(Int32Ty, Uuid.substr(0, 8), 16),
5623  llvm::ConstantInt::get(Int16Ty, Uuid.substr(9, 4), 16),
5624  llvm::ConstantInt::get(Int16Ty, Uuid.substr(14, 4), 16),
5625  llvm::ConstantArray::get(llvm::ArrayType::get(Int8Ty, 8), Field3)
5626  };
5627 
5628  return llvm::ConstantStruct::getAnon(Fields);
5629 }
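// Example: "12345678-1234-5678-9abc-def012345678" becomes roughly
//   { i32 0x12345678, i16 0x1234, i16 0x5678,
//     [8 x i8] c"\9A\BC\DE\F0\12\34\56\78" }
// matching the Windows GUID layout {Data1, Data2, Data3, Data4[8]} that
// __uuidof expressions refer to.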
5630 
5631 llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
5632  bool ForEH) {
5633  // Return a bogus pointer if RTTI is disabled, unless it's for EH.
5634  // FIXME: should we even be calling this method if RTTI is disabled
5635  // and it's not for EH?
5636  if ((!ForEH && !getLangOpts().RTTI) || getLangOpts().CUDAIsDevice)
5637  return llvm::Constant::getNullValue(Int8PtrTy);
5638 
5639  if (ForEH && Ty->isObjCObjectPointerType() &&
5640  LangOpts.ObjCRuntime.isGNUFamily())
5641  return ObjCRuntime->GetEHType(Ty);
5642 
5643  return getCXXABI().getAddrOfRTTIDescriptor(Ty);
5644 }
5645 
5646 void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
5647  // Do not emit threadprivates in simd-only mode.
5648  if (LangOpts.OpenMP && LangOpts.OpenMPSimd)
5649  return;
5650  for (auto RefExpr : D->varlists()) {
5651  auto *VD = cast<VarDecl>(cast<DeclRefExpr>(RefExpr)->getDecl());
5652  bool PerformInit =
5653  VD->getAnyInitializer() &&
5654  !VD->getAnyInitializer()->isConstantInitializer(getContext(),
5655  /*ForRef=*/false);
5656 
5657  Address Addr(GetAddrOfGlobalVar(VD), getContext().getDeclAlign(VD));
5658  if (auto InitFunction = getOpenMPRuntime().emitThreadPrivateVarDefinition(
5659  VD, Addr, RefExpr->getBeginLoc(), PerformInit))
5660  CXXGlobalInits.push_back(InitFunction);
5661  }
5662 }
5663 
5664 llvm::Metadata *
5665 CodeGenModule::CreateMetadataIdentifierImpl(QualType T, MetadataTypeMap &Map,
5666  StringRef Suffix) {
5667  llvm::Metadata *&InternalId = Map[T.getCanonicalType()];
5668  if (InternalId)
5669  return InternalId;
5670 
5671  if (isExternallyVisible(T->getLinkage())) {
5672  std::string OutName;
5673  llvm::raw_string_ostream Out(OutName);
5674  getCXXABI().getMangleContext().mangleTypeName(T, Out);
5675  Out << Suffix;
5676 
5677  InternalId = llvm::MDString::get(getLLVMContext(), Out.str());
5678  } else {
5679  InternalId = llvm::MDNode::getDistinct(getLLVMContext(),
5680  llvm::ArrayRef<llvm::Metadata *>());
5681  }
5682 
5683  return InternalId;
5684 }
5685 
5686 llvm::Metadata *CodeGenModule::CreateMetadataIdentifierForType(QualType T) {
5687  return CreateMetadataIdentifierImpl(T, MetadataIdMap, "");
5688 }
5689 
5690 llvm::Metadata *
5691 CodeGenModule::CreateMetadataIdentifierForVirtualMemPtrType(QualType T) {
5692  return CreateMetadataIdentifierImpl(T, VirtualMetadataIdMap, ".virtual");
5693 }
5694 
5695 // Generalize pointer types to a void pointer with the qualifiers of the
5696 // originally pointed-to type, e.g. 'const char *' and 'char * const *'
5697 // generalize to 'const void *' while 'char *' and 'const char **' generalize to
5698 // 'void *'.
5699 static QualType GeneralizeType(ASTContext &Ctx, QualType Ty) {
5700  if (!Ty->isPointerType())
5701  return Ty;
5702 
5703  return Ctx.getPointerType(
5704  QualType(Ctx.VoidTy).withCVRQualifiers(
5705  Ty->getPointeeType().getCVRQualifiers()));
5706 }
5707 
5708 // Apply type generalization to a FunctionType's return and argument types
5709 static QualType GeneralizeFunctionType(ASTContext &Ctx, QualType Ty) {
5710  if (auto *FnType = Ty->getAs<FunctionProtoType>()) {
5711  SmallVector<QualType, 8> GeneralizedParams;
5712  for (auto &Param : FnType->param_types())
5713  GeneralizedParams.push_back(GeneralizeType(Ctx, Param));
5714 
5715  return Ctx.getFunctionType(
5716  GeneralizeType(Ctx, FnType->getReturnType()),
5717  GeneralizedParams, FnType->getExtProtoInfo());
5718  }
5719 
5720  if (auto *FnType = Ty->getAs<FunctionNoProtoType>())
5721  return Ctx.getFunctionNoProtoType(
5722  GeneralizeType(Ctx, FnType->getReturnType()));
5723 
5724  llvm_unreachable("Encountered unknown FunctionType");
5725 }
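// Example: with pointer generalization enabled for indirect-call CFI (the
// -fsanitize-cfi-icall-generalize-pointers mode), a function type such as
// void(const char *, int *) generalizes to void(const void *, void *), so
// calls through compatible pointer types share one ".generalized" identifier.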
5726 
5727 llvm::Metadata *CodeGenModule::CreateMetadataIdentifierGeneralized(QualType T) {
5728  return CreateMetadataIdentifierImpl(GeneralizeFunctionType(getContext(), T),
5729  GeneralizedMetadataIdMap, ".generalized");
5730 }
5731 
5732 /// Returns whether this module needs the "all-vtables" type identifier.
5733 bool CodeGenModule::NeedAllVtablesTypeId() const {
5734  // Returns true if at least one of vtable-based CFI checkers is enabled and
5735  // is not in the trapping mode.
5736  return ((LangOpts.Sanitize.has(SanitizerKind::CFIVCall) &&
5737  !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFIVCall)) ||
5738  (LangOpts.Sanitize.has(SanitizerKind::CFINVCall) &&
5739  !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFINVCall)) ||
5740  (LangOpts.Sanitize.has(SanitizerKind::CFIDerivedCast) &&
5741  !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFIDerivedCast)) ||
5742  (LangOpts.Sanitize.has(SanitizerKind::CFIUnrelatedCast) &&
5743  !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFIUnrelatedCast)));
5744 }
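// Note: the "all-vtables" identifier lets non-trapping CFI diagnostics tell
// apart "this points at some vtable, just not one valid for the static type"
// from "this does not point at a vtable at all" when a check fails.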
5745 
5746 void CodeGenModule::AddVTableTypeMetadata(llvm::GlobalVariable *VTable,
5747  CharUnits Offset,
5748  const CXXRecordDecl *RD) {
5749  llvm::Metadata *MD =
5750  CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
5751  VTable->addTypeMetadata(Offset.getQuantity(), MD);
5752 
5753  if (CodeGenOpts.SanitizeCfiCrossDso)
5754  if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD))
5755  VTable->addTypeMetadata(Offset.getQuantity(),
5756  llvm::ConstantAsMetadata::get(CrossDsoTypeId));
5757 
5758  if (NeedAllVtablesTypeId()) {
5759  llvm::Metadata *MD = llvm::MDString::get(getLLVMContext(), "all-vtables");
5760  VTable->addTypeMetadata(Offset.getQuantity(), MD);
5761  }
5762 }