CodeGenModule.cpp
1 //===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This coordinates the per-module state used while generating code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CodeGenModule.h"
14 #include "ABIInfo.h"
15 #include "CGBlocks.h"
16 #include "CGCUDARuntime.h"
17 #include "CGCXXABI.h"
18 #include "CGCall.h"
19 #include "CGDebugInfo.h"
20 #include "CGHLSLRuntime.h"
21 #include "CGObjCRuntime.h"
22 #include "CGOpenCLRuntime.h"
23 #include "CGOpenMPRuntime.h"
24 #include "CGOpenMPRuntimeGPU.h"
25 #include "CodeGenFunction.h"
26 #include "CodeGenPGO.h"
27 #include "ConstantEmitter.h"
28 #include "CoverageMappingGen.h"
29 #include "TargetInfo.h"
30 #include "clang/AST/ASTContext.h"
31 #include "clang/AST/CharUnits.h"
32 #include "clang/AST/DeclCXX.h"
33 #include "clang/AST/DeclObjC.h"
34 #include "clang/AST/DeclTemplate.h"
35 #include "clang/AST/Mangle.h"
37 #include "clang/AST/StmtVisitor.h"
38 #include "clang/Basic/Builtins.h"
39 #include "clang/Basic/CharInfo.h"
41 #include "clang/Basic/Diagnostic.h"
43 #include "clang/Basic/Module.h"
45 #include "clang/Basic/TargetInfo.h"
46 #include "clang/Basic/Version.h"
47 #include "clang/CodeGen/BackendUtil.h"
48 #include "clang/CodeGen/ConstantInitBuilder.h"
49 #include "clang/Frontend/FrontendDiagnostic.h"
50 #include "llvm/ADT/STLExtras.h"
51 #include "llvm/ADT/StringExtras.h"
52 #include "llvm/ADT/StringSwitch.h"
53 #include "llvm/ADT/Triple.h"
54 #include "llvm/Analysis/TargetLibraryInfo.h"
55 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
56 #include "llvm/IR/CallingConv.h"
57 #include "llvm/IR/DataLayout.h"
58 #include "llvm/IR/Intrinsics.h"
59 #include "llvm/IR/LLVMContext.h"
60 #include "llvm/IR/Module.h"
61 #include "llvm/IR/ProfileSummary.h"
62 #include "llvm/ProfileData/InstrProfReader.h"
63 #include "llvm/ProfileData/SampleProf.h"
64 #include "llvm/Support/CRC.h"
65 #include "llvm/Support/CodeGen.h"
66 #include "llvm/Support/CommandLine.h"
67 #include "llvm/Support/ConvertUTF.h"
68 #include "llvm/Support/ErrorHandling.h"
69 #include "llvm/Support/TimeProfiler.h"
70 #include "llvm/Support/X86TargetParser.h"
71 #include "llvm/Support/xxhash.h"
72 
73 using namespace clang;
74 using namespace CodeGen;
75 
76 static llvm::cl::opt<bool> LimitedCoverage(
77  "limited-coverage-experimental", llvm::cl::Hidden,
78  llvm::cl::desc("Emit limited coverage mapping information (experimental)"));
79 
80 static const char AnnotationSection[] = "llvm.metadata";
81 
82 static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
83  switch (CGM.getContext().getCXXABIKind()) {
84  case TargetCXXABI::AppleARM64:
85  case TargetCXXABI::Fuchsia:
86  case TargetCXXABI::GenericAArch64:
87  case TargetCXXABI::GenericARM:
88  case TargetCXXABI::iOS:
89  case TargetCXXABI::WatchOS:
90  case TargetCXXABI::GenericMIPS:
91  case TargetCXXABI::GenericItanium:
92  case TargetCXXABI::WebAssembly:
93  case TargetCXXABI::XL:
94  return CreateItaniumCXXABI(CGM);
95  case TargetCXXABI::Microsoft:
96  return CreateMicrosoftCXXABI(CGM);
97  }
98 
99  llvm_unreachable("invalid C++ ABI kind");
100 }
101 
102 CodeGenModule::CodeGenModule(ASTContext &C,
103  IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
104  const HeaderSearchOptions &HSO,
105  const PreprocessorOptions &PPO,
106  const CodeGenOptions &CGO, llvm::Module &M,
107  DiagnosticsEngine &diags,
108  CoverageSourceInfo *CoverageInfo)
109  : Context(C), LangOpts(C.getLangOpts()), FS(std::move(FS)),
110  HeaderSearchOpts(HSO), PreprocessorOpts(PPO), CodeGenOpts(CGO),
111  TheModule(M), Diags(diags), Target(C.getTargetInfo()),
112  ABI(createCXXABI(*this)), VMContext(M.getContext()), Types(*this),
113  VTables(*this), SanitizerMD(new SanitizerMetadata(*this)) {
114 
115  // Initialize the type cache.
116  llvm::LLVMContext &LLVMContext = M.getContext();
117  VoidTy = llvm::Type::getVoidTy(LLVMContext);
118  Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
119  Int16Ty = llvm::Type::getInt16Ty(LLVMContext);
120  Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
121  Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
122  HalfTy = llvm::Type::getHalfTy(LLVMContext);
123  BFloatTy = llvm::Type::getBFloatTy(LLVMContext);
124  FloatTy = llvm::Type::getFloatTy(LLVMContext);
125  DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
126  PointerWidthInBits = C.getTargetInfo().getPointerWidth(LangAS::Default);
127  PointerAlignInBytes =
128  C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(LangAS::Default))
129  .getQuantity();
130  SizeSizeInBytes =
131  C.toCharUnitsFromBits(C.getTargetInfo().getMaxPointerWidth()).getQuantity();
132  IntAlignInBytes =
133  C.toCharUnitsFromBits(C.getTargetInfo().getIntAlign()).getQuantity();
134  CharTy =
135  llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getCharWidth());
136  IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
137  IntPtrTy = llvm::IntegerType::get(LLVMContext,
138  C.getTargetInfo().getMaxPointerWidth());
139  Int8PtrTy = Int8Ty->getPointerTo(0);
140  Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
141  const llvm::DataLayout &DL = M.getDataLayout();
142  AllocaInt8PtrTy = Int8Ty->getPointerTo(DL.getAllocaAddrSpace());
143  GlobalsInt8PtrTy = Int8Ty->getPointerTo(DL.getDefaultGlobalsAddressSpace());
144  ASTAllocaAddressSpace = getTargetCodeGenInfo().getASTAllocaAddressSpace();
145 
146  // Build C++20 Module initializers.
147  // TODO: Add Microsoft here once we know the mangling required for the
148  // initializers.
149  CXX20ModuleInits =
150  LangOpts.CPlusPlusModules && getCXXABI().getMangleContext().getKind() ==
151  ItaniumMangleContext::MK_Itanium;
152 
153  RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();
154 
155  if (LangOpts.ObjC)
156  createObjCRuntime();
157  if (LangOpts.OpenCL)
158  createOpenCLRuntime();
159  if (LangOpts.OpenMP)
160  createOpenMPRuntime();
161  if (LangOpts.CUDA)
162  createCUDARuntime();
163  if (LangOpts.HLSL)
164  createHLSLRuntime();
165 
166  // Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0.
167  if (LangOpts.Sanitize.has(SanitizerKind::Thread) ||
168  (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
169  TBAA.reset(new CodeGenTBAA(Context, TheModule, CodeGenOpts, getLangOpts(),
170  getCXXABI().getMangleContext()));
171 
172  // If debug info or coverage generation is enabled, create the CGDebugInfo
173  // object.
174  if (CodeGenOpts.getDebugInfo() != codegenoptions::NoDebugInfo ||
175  CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes)
176  DebugInfo.reset(new CGDebugInfo(*this));
177 
178  Block.GlobalUniqueCount = 0;
179 
180  if (C.getLangOpts().ObjC)
181  ObjCData.reset(new ObjCEntrypoints());
182 
183  if (CodeGenOpts.hasProfileClangUse()) {
184  auto ReaderOrErr = llvm::IndexedInstrProfReader::create(
185  CodeGenOpts.ProfileInstrumentUsePath, CodeGenOpts.ProfileRemappingFile);
186  // We're checking for profile read errors in CompilerInvocation, so if
187  // there was an error it should've already been caught. If it hasn't been
188  // somehow, trip an assertion.
189  assert(ReaderOrErr);
190  PGOReader = std::move(ReaderOrErr.get());
191  }
192 
193  // If coverage mapping generation is enabled, create the
194  // CoverageMappingModuleGen object.
195  if (CodeGenOpts.CoverageMapping)
196  CoverageMapping.reset(new CoverageMappingModuleGen(*this, *CoverageInfo));
197 
198  // Generate the module name hash here if needed.
199  if (CodeGenOpts.UniqueInternalLinkageNames &&
200  !getModule().getSourceFileName().empty()) {
201  std::string Path = getModule().getSourceFileName();
202  // Check if a path substitution is needed from the MacroPrefixMap.
203  for (const auto &Entry : LangOpts.MacroPrefixMap)
204  if (Path.rfind(Entry.first, 0) != std::string::npos) {
205  Path = Entry.second + Path.substr(Entry.first.size());
206  break;
207  }
208  ModuleNameHash = llvm::getUniqueInternalLinkagePostfix(Path);
209  }
210 }
211 
213 CodeGenModule::~CodeGenModule() {}
214 void CodeGenModule::createObjCRuntime() {
215  // This is just isGNUFamily(), but we want to force implementors of
216  // new ABIs to decide how best to do this.
217  switch (LangOpts.ObjCRuntime.getKind()) {
218  case ObjCRuntime::GNUstep:
219  case ObjCRuntime::GCC:
220  case ObjCRuntime::ObjFW:
221  ObjCRuntime.reset(CreateGNUObjCRuntime(*this));
222  return;
223 
224  case ObjCRuntime::FragileMacOSX:
225  case ObjCRuntime::MacOSX:
226  case ObjCRuntime::iOS:
227  case ObjCRuntime::WatchOS:
228  ObjCRuntime.reset(CreateMacObjCRuntime(*this));
229  return;
230  }
231  llvm_unreachable("bad runtime kind");
232 }
233 
234 void CodeGenModule::createOpenCLRuntime() {
235  OpenCLRuntime.reset(new CGOpenCLRuntime(*this));
236 }
237 
238 void CodeGenModule::createOpenMPRuntime() {
239  // Select a specialized code generation class based on the target, if any.
240  // If it does not exist use the default implementation.
241  switch (getTriple().getArch()) {
242  case llvm::Triple::nvptx:
243  case llvm::Triple::nvptx64:
244  case llvm::Triple::amdgcn:
245  assert(getLangOpts().OpenMPIsDevice &&
246  "OpenMP AMDGPU/NVPTX is only prepared to deal with device code.");
247  OpenMPRuntime.reset(new CGOpenMPRuntimeGPU(*this));
248  break;
249  default:
250  if (LangOpts.OpenMPSimd)
251  OpenMPRuntime.reset(new CGOpenMPSIMDRuntime(*this));
252  else
253  OpenMPRuntime.reset(new CGOpenMPRuntime(*this));
254  break;
255  }
256 }
257 
258 void CodeGenModule::createCUDARuntime() {
259  CUDARuntime.reset(CreateNVCUDARuntime(*this));
260 }
261 
262 void CodeGenModule::createHLSLRuntime() {
263  HLSLRuntime.reset(new CGHLSLRuntime(*this));
264 }
265 
266 void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) {
267  Replacements[Name] = C;
268 }
269 
270 void CodeGenModule::applyReplacements() {
271  for (auto &I : Replacements) {
272  StringRef MangledName = I.first();
273  llvm::Constant *Replacement = I.second;
274  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
275  if (!Entry)
276  continue;
277  auto *OldF = cast<llvm::Function>(Entry);
278  auto *NewF = dyn_cast<llvm::Function>(Replacement);
279  if (!NewF) {
280  if (auto *Alias = dyn_cast<llvm::GlobalAlias>(Replacement)) {
281  NewF = dyn_cast<llvm::Function>(Alias->getAliasee());
282  } else {
283  auto *CE = cast<llvm::ConstantExpr>(Replacement);
284  assert(CE->getOpcode() == llvm::Instruction::BitCast ||
285  CE->getOpcode() == llvm::Instruction::GetElementPtr);
286  NewF = dyn_cast<llvm::Function>(CE->getOperand(0));
287  }
288  }
289 
290  // Replace old with new, but keep the old order.
291  OldF->replaceAllUsesWith(Replacement);
292  if (NewF) {
293  NewF->removeFromParent();
294  OldF->getParent()->getFunctionList().insertAfter(OldF->getIterator(),
295  NewF);
296  }
297  OldF->eraseFromParent();
298  }
299 }
300 
301 void CodeGenModule::addGlobalValReplacement(llvm::GlobalValue *GV, llvm::Constant *C) {
302  GlobalValReplacements.push_back(std::make_pair(GV, C));
303 }
304 
305 void CodeGenModule::applyGlobalValReplacements() {
306  for (auto &I : GlobalValReplacements) {
307  llvm::GlobalValue *GV = I.first;
308  llvm::Constant *C = I.second;
309 
310  GV->replaceAllUsesWith(C);
311  GV->eraseFromParent();
312  }
313 }
314 
315 // This is only used in aliases that we created and we know they have a
316 // linear structure.
317 static const llvm::GlobalValue *getAliasedGlobal(const llvm::GlobalValue *GV) {
318  const llvm::Constant *C;
319  if (auto *GA = dyn_cast<llvm::GlobalAlias>(GV))
320  C = GA->getAliasee();
321  else if (auto *GI = dyn_cast<llvm::GlobalIFunc>(GV))
322  C = GI->getResolver();
323  else
324  return GV;
325 
326  const auto *AliaseeGV = dyn_cast<llvm::GlobalValue>(C->stripPointerCasts());
327  if (!AliaseeGV)
328  return nullptr;
329 
330  const llvm::GlobalValue *FinalGV = AliaseeGV->getAliaseeObject();
331  if (FinalGV == GV)
332  return nullptr;
333 
334  return FinalGV;
335 }
336 
337 static bool checkAliasedGlobal(DiagnosticsEngine &Diags,
338  SourceLocation Location, bool IsIFunc,
339  const llvm::GlobalValue *Alias,
340  const llvm::GlobalValue *&GV) {
341  GV = getAliasedGlobal(Alias);
342  if (!GV) {
343  Diags.Report(Location, diag::err_cyclic_alias) << IsIFunc;
344  return false;
345  }
346 
347  if (GV->isDeclaration()) {
348  Diags.Report(Location, diag::err_alias_to_undefined) << IsIFunc << IsIFunc;
349  return false;
350  }
351 
352  if (IsIFunc) {
353  // Check resolver function type.
354  const auto *F = dyn_cast<llvm::Function>(GV);
355  if (!F) {
356  Diags.Report(Location, diag::err_alias_to_undefined)
357  << IsIFunc << IsIFunc;
358  return false;
359  }
360 
361  llvm::FunctionType *FTy = F->getFunctionType();
362  if (!FTy->getReturnType()->isPointerTy()) {
363  Diags.Report(Location, diag::err_ifunc_resolver_return);
364  return false;
365  }
366  }
367 
368  return true;
369 }
370 
371 void CodeGenModule::checkAliases() {
372  // Check if the constructed aliases are well formed. It is really unfortunate
373  // that we have to do this in CodeGen, but we only construct mangled names
374  // and aliases during codegen.
375  bool Error = false;
376  DiagnosticsEngine &Diags = getDiags();
377  for (const GlobalDecl &GD : Aliases) {
378  const auto *D = cast<ValueDecl>(GD.getDecl());
379  SourceLocation Location;
380  bool IsIFunc = D->hasAttr<IFuncAttr>();
381  if (const Attr *A = D->getDefiningAttr())
382  Location = A->getLocation();
383  else
384  llvm_unreachable("Not an alias or ifunc?");
385 
386  StringRef MangledName = getMangledName(GD);
387  llvm::GlobalValue *Alias = GetGlobalValue(MangledName);
388  const llvm::GlobalValue *GV = nullptr;
389  if (!checkAliasedGlobal(Diags, Location, IsIFunc, Alias, GV)) {
390  Error = true;
391  continue;
392  }
393 
394  llvm::Constant *Aliasee =
395  IsIFunc ? cast<llvm::GlobalIFunc>(Alias)->getResolver()
396  : cast<llvm::GlobalAlias>(Alias)->getAliasee();
397 
398  llvm::GlobalValue *AliaseeGV;
399  if (auto CE = dyn_cast<llvm::ConstantExpr>(Aliasee))
400  AliaseeGV = cast<llvm::GlobalValue>(CE->getOperand(0));
401  else
402  AliaseeGV = cast<llvm::GlobalValue>(Aliasee);
403 
404  if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
405  StringRef AliasSection = SA->getName();
406  if (AliasSection != AliaseeGV->getSection())
407  Diags.Report(SA->getLocation(), diag::warn_alias_with_section)
408  << AliasSection << IsIFunc << IsIFunc;
409  }
410 
411  // We have to handle alias to weak aliases in here. LLVM itself disallows
412  // this since the object semantics would not match the IL one. For
413  // compatibility with gcc we implement it by just pointing the alias
414  // to its aliasee's aliasee. We also warn, since the user is probably
415  // expecting the link to be weak.
416  if (auto *GA = dyn_cast<llvm::GlobalAlias>(AliaseeGV)) {
417  if (GA->isInterposable()) {
418  Diags.Report(Location, diag::warn_alias_to_weak_alias)
419  << GV->getName() << GA->getName() << IsIFunc;
420  Aliasee = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
421  GA->getAliasee(), Alias->getType());
422 
423  if (IsIFunc)
424  cast<llvm::GlobalIFunc>(Alias)->setResolver(Aliasee);
425  else
426  cast<llvm::GlobalAlias>(Alias)->setAliasee(Aliasee);
427  }
428  }
429  }
430  if (!Error)
431  return;
432 
433  for (const GlobalDecl &GD : Aliases) {
434  StringRef MangledName = getMangledName(GD);
435  llvm::GlobalValue *Alias = GetGlobalValue(MangledName);
436  Alias->replaceAllUsesWith(llvm::UndefValue::get(Alias->getType()));
437  Alias->eraseFromParent();
438  }
439 }
440 
441 void CodeGenModule::clear() {
442  DeferredDeclsToEmit.clear();
443  EmittedDeferredDecls.clear();
444  if (OpenMPRuntime)
445  OpenMPRuntime->clear();
446 }
447 
448 void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
449  StringRef MainFile) {
450  if (!hasDiagnostics())
451  return;
452  if (VisitedInMainFile > 0 && VisitedInMainFile == MissingInMainFile) {
453  if (MainFile.empty())
454  MainFile = "<stdin>";
455  Diags.Report(diag::warn_profile_data_unprofiled) << MainFile;
456  } else {
457  if (Mismatched > 0)
458  Diags.Report(diag::warn_profile_data_out_of_date) << Visited << Mismatched;
459 
460  if (Missing > 0)
461  Diags.Report(diag::warn_profile_data_missing) << Visited << Missing;
462  }
463 }
464 
465 static void setVisibilityFromDLLStorageClass(const clang::LangOptions &LO,
466  llvm::Module &M) {
467  if (!LO.VisibilityFromDLLStorageClass)
468  return;
469 
470  llvm::GlobalValue::VisibilityTypes DLLExportVisibility =
471  CodeGenModule::GetLLVMVisibility(LO.getDLLExportVisibility());
472  llvm::GlobalValue::VisibilityTypes NoDLLStorageClassVisibility =
473  CodeGenModule::GetLLVMVisibility(LO.getNoDLLStorageClassVisibility());
474  llvm::GlobalValue::VisibilityTypes ExternDeclDLLImportVisibility =
475  CodeGenModule::GetLLVMVisibility(LO.getExternDeclDLLImportVisibility());
476  llvm::GlobalValue::VisibilityTypes ExternDeclNoDLLStorageClassVisibility =
477  CodeGenModule::GetLLVMVisibility(
478  LO.getExternDeclNoDLLStorageClassVisibility());
479 
480  for (llvm::GlobalValue &GV : M.global_values()) {
481  if (GV.hasAppendingLinkage() || GV.hasLocalLinkage())
482  continue;
483 
484  // Reset DSO locality before setting the visibility. This removes
485  // any effects that visibility options and annotations may have
486  // had on the DSO locality. Setting the visibility will implicitly set
487  // appropriate globals to DSO Local; however, this will be pessimistic
488  // w.r.t. the normal compiler IRGen.
489  GV.setDSOLocal(false);
490 
491  if (GV.isDeclarationForLinker()) {
492  GV.setVisibility(GV.getDLLStorageClass() ==
493  llvm::GlobalValue::DLLImportStorageClass
494  ? ExternDeclDLLImportVisibility
495  : ExternDeclNoDLLStorageClassVisibility);
496  } else {
497  GV.setVisibility(GV.getDLLStorageClass() ==
498  llvm::GlobalValue::DLLExportStorageClass
499  ? DLLExportVisibility
500  : NoDLLStorageClassVisibility);
501  }
502 
503  GV.setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
504  }
505 }
506 
507 void CodeGenModule::Release() {
508  Module *Primary = getContext().getModuleForCodeGen();
509  if (CXX20ModuleInits && Primary && !Primary->isHeaderLikeModule())
510  EmitModuleInitializers(Primary);
511  EmitDeferred();
512  DeferredDecls.insert(EmittedDeferredDecls.begin(),
513  EmittedDeferredDecls.end());
514  EmittedDeferredDecls.clear();
515  EmitVTablesOpportunistically();
516  applyGlobalValReplacements();
517  applyReplacements();
518  emitMultiVersionFunctions();
519  if (CXX20ModuleInits && Primary && Primary->isInterfaceOrPartition())
520  EmitCXXModuleInitFunc(Primary);
521  else
522  EmitCXXGlobalInitFunc();
523  EmitCXXGlobalCleanUpFunc();
524  registerGlobalDtorsWithAtExit();
525  EmitCXXThreadLocalInitFunc();
526  if (ObjCRuntime)
527  if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
528  AddGlobalCtor(ObjCInitFunction);
529  if (Context.getLangOpts().CUDA && CUDARuntime) {
530  if (llvm::Function *CudaCtorFunction = CUDARuntime->finalizeModule())
531  AddGlobalCtor(CudaCtorFunction);
532  }
533  if (OpenMPRuntime) {
534  if (llvm::Function *OpenMPRequiresDirectiveRegFun =
535  OpenMPRuntime->emitRequiresDirectiveRegFun()) {
536  AddGlobalCtor(OpenMPRequiresDirectiveRegFun, 0);
537  }
538  OpenMPRuntime->createOffloadEntriesAndInfoMetadata();
539  OpenMPRuntime->clear();
540  }
541  if (PGOReader) {
542  getModule().setProfileSummary(
543  PGOReader->getSummary(/* UseCS */ false).getMD(VMContext),
544  llvm::ProfileSummary::PSK_Instr);
545  if (PGOStats.hasDiagnostics())
546  PGOStats.reportDiagnostics(getDiags(), getCodeGenOpts().MainFileName);
547  }
548  llvm::stable_sort(GlobalCtors, [](const Structor &L, const Structor &R) {
549  return L.LexOrder < R.LexOrder;
550  });
551  EmitCtorList(GlobalCtors, "llvm.global_ctors");
552  EmitCtorList(GlobalDtors, "llvm.global_dtors");
553  EmitGlobalAnnotations();
554  EmitStaticExternCAliases();
555  checkAliases();
556  EmitDeferredUnusedCoverageMappings();
557  CodeGenPGO(*this).setValueProfilingFlag(getModule());
558  if (CoverageMapping)
559  CoverageMapping->emit();
560  if (CodeGenOpts.SanitizeCfiCrossDso) {
561  CodeGenFunction(*this).EmitCfiCheckStub();
562  CodeGenFunction(*this).EmitCfiCheckFail();
563  }
564  if (LangOpts.Sanitize.has(SanitizerKind::KCFI))
565  finalizeKCFITypes();
566  emitAtAvailableLinkGuard();
567  if (Context.getTargetInfo().getTriple().isWasm())
568  EmitMainVoidAlias();
569 
570  if (getTriple().isAMDGPU()) {
571  // Emit reference of __amdgpu_device_library_preserve_asan_functions to
572  // preserve ASAN functions in bitcode libraries.
573  if (LangOpts.Sanitize.has(SanitizerKind::Address)) {
574  auto *FT = llvm::FunctionType::get(VoidTy, {});
575  auto *F = llvm::Function::Create(
577  "__amdgpu_device_library_preserve_asan_functions", &getModule());
578  auto *Var = new llvm::GlobalVariable(
579  getModule(), FT->getPointerTo(),
580  /*isConstant=*/true, llvm::GlobalValue::WeakAnyLinkage, F,
581  "__amdgpu_device_library_preserve_asan_functions_ptr", nullptr,
582  llvm::GlobalVariable::NotThreadLocal);
583  addCompilerUsedGlobal(Var);
584  }
585  // Emit amdgpu_code_object_version module flag, which is code object version
586  // times 100.
587  if (getTarget().getTargetOpts().CodeObjectVersion !=
588  TargetOptions::COV_None) {
589  getModule().addModuleFlag(llvm::Module::Error,
590  "amdgpu_code_object_version",
591  getTarget().getTargetOpts().CodeObjectVersion);
592  }
593  }
594 
595  // Emit a global array containing all external kernels or device variables
596  // used by host functions and mark it as used for CUDA/HIP. This is necessary
597  // to get kernels or device variables in archives linked in even if these
598  // kernels or device variables are only used in host functions.
599  if (!Context.CUDAExternalDeviceDeclODRUsedByHost.empty()) {
600  SmallVector<llvm::Constant *, 8> UsedArray;
601  for (auto D : Context.CUDAExternalDeviceDeclODRUsedByHost) {
602  GlobalDecl GD;
603  if (auto *FD = dyn_cast<FunctionDecl>(D))
604  GD = GlobalDecl(FD, KernelReferenceKind::Kernel);
605  else
606  GD = GlobalDecl(D);
607  UsedArray.push_back(llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
608  GetAddrOfGlobal(GD), Int8PtrTy));
609  }
610 
611  llvm::ArrayType *ATy = llvm::ArrayType::get(Int8PtrTy, UsedArray.size());
612 
613  auto *GV = new llvm::GlobalVariable(
614  getModule(), ATy, false, llvm::GlobalValue::InternalLinkage,
615  llvm::ConstantArray::get(ATy, UsedArray), "__clang_gpu_used_external");
616  addCompilerUsedGlobal(GV);
617  }
618 
619  emitLLVMUsed();
620  if (SanStats)
621  SanStats->finish();
622 
623  if (CodeGenOpts.Autolink &&
624  (Context.getLangOpts().Modules || !LinkerOptionsMetadata.empty())) {
625  EmitModuleLinkOptions();
626  }
627 
628  // On ELF we pass the dependent library specifiers directly to the linker
629  // without manipulating them. This is in contrast to other platforms where
630  // they are mapped to a specific linker option by the compiler. This
631  // difference is a result of the greater variety of ELF linkers and the fact
632  // that ELF linkers tend to handle libraries in a more complicated fashion
633  // than on other platforms. This forces us to defer handling the dependent
634  // libs to the linker.
635  //
636  // CUDA/HIP device and host libraries are different. Currently there is no
637  // way to differentiate dependent libraries for host or device. Existing
638  // usage of #pragma comment(lib, *) is intended for host libraries on
639  // Windows. Therefore emit llvm.dependent-libraries only for host.
640  if (!ELFDependentLibraries.empty() && !Context.getLangOpts().CUDAIsDevice) {
641  auto *NMD = getModule().getOrInsertNamedMetadata("llvm.dependent-libraries");
642  for (auto *MD : ELFDependentLibraries)
643  NMD->addOperand(MD);
644  }
645 
646  // Record mregparm value now so it is visible through rest of codegen.
647  if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
648  getModule().addModuleFlag(llvm::Module::Error, "NumRegisterParameters",
649  CodeGenOpts.NumRegisterParameters);
650 
651  if (CodeGenOpts.DwarfVersion) {
652  getModule().addModuleFlag(llvm::Module::Max, "Dwarf Version",
653  CodeGenOpts.DwarfVersion);
654  }
655 
656  if (CodeGenOpts.Dwarf64)
657  getModule().addModuleFlag(llvm::Module::Max, "DWARF64", 1);
658 
659  if (Context.getLangOpts().SemanticInterposition)
660  // Require various optimization to respect semantic interposition.
661  getModule().setSemanticInterposition(true);
662 
663  if (CodeGenOpts.EmitCodeView) {
664  // Indicate that we want CodeView in the metadata.
665  getModule().addModuleFlag(llvm::Module::Warning, "CodeView", 1);
666  }
667  if (CodeGenOpts.CodeViewGHash) {
668  getModule().addModuleFlag(llvm::Module::Warning, "CodeViewGHash", 1);
669  }
670  if (CodeGenOpts.ControlFlowGuard) {
671  // Function ID tables and checks for Control Flow Guard (cfguard=2).
672  getModule().addModuleFlag(llvm::Module::Warning, "cfguard", 2);
673  } else if (CodeGenOpts.ControlFlowGuardNoChecks) {
674  // Function ID tables for Control Flow Guard (cfguard=1).
675  getModule().addModuleFlag(llvm::Module::Warning, "cfguard", 1);
676  }
677  if (CodeGenOpts.EHContGuard) {
678  // Function ID tables for EH Continuation Guard.
679  getModule().addModuleFlag(llvm::Module::Warning, "ehcontguard", 1);
680  }
681  if (Context.getLangOpts().Kernel) {
682  // Note if we are compiling with /kernel.
683  getModule().addModuleFlag(llvm::Module::Warning, "ms-kernel", 1);
684  }
685  if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) {
686  // We don't support LTO of two modules with different StrictVTablePointers settings.
687  // FIXME: we could support it by stripping all the information introduced
688  // by StrictVTablePointers.
689 
690  getModule().addModuleFlag(llvm::Module::Error, "StrictVTablePointers",1);
691 
692  llvm::Metadata *Ops[2] = {
693  llvm::MDString::get(VMContext, "StrictVTablePointers"),
694  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
695  llvm::Type::getInt32Ty(VMContext), 1))};
696 
697  getModule().addModuleFlag(llvm::Module::Require,
698  "StrictVTablePointersRequirement",
699  llvm::MDNode::get(VMContext, Ops));
700  }
701  if (getModuleDebugInfo())
702  // We support a single version in the linked module. The LLVM
703  // parser will drop debug info with a different version number
704  // (and warn about it, too).
705  getModule().addModuleFlag(llvm::Module::Warning, "Debug Info Version",
706  llvm::DEBUG_METADATA_VERSION);
707 
708  // We need to record the widths of enums and wchar_t, so that we can generate
709  // the correct build attributes in the ARM backend. wchar_size is also used by
710  // TargetLibraryInfo.
711  uint64_t WCharWidth =
712  Context.getTypeSizeInChars(Context.getWideCharType()).getQuantity();
713  getModule().addModuleFlag(llvm::Module::Error, "wchar_size", WCharWidth);
714 
715  llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
716  if ( Arch == llvm::Triple::arm
717  || Arch == llvm::Triple::armeb
718  || Arch == llvm::Triple::thumb
719  || Arch == llvm::Triple::thumbeb) {
720  // The minimum width of an enum in bytes
721  uint64_t EnumWidth = Context.getLangOpts().ShortEnums ? 1 : 4;
722  getModule().addModuleFlag(llvm::Module::Error, "min_enum_size", EnumWidth);
723  }
724 
725  if (Arch == llvm::Triple::riscv32 || Arch == llvm::Triple::riscv64) {
726  StringRef ABIStr = Target.getABI();
727  llvm::LLVMContext &Ctx = TheModule.getContext();
728  getModule().addModuleFlag(llvm::Module::Error, "target-abi",
729  llvm::MDString::get(Ctx, ABIStr));
730  }
731 
732  if (CodeGenOpts.SanitizeCfiCrossDso) {
733  // Indicate that we want cross-DSO control flow integrity checks.
734  getModule().addModuleFlag(llvm::Module::Override, "Cross-DSO CFI", 1);
735  }
736 
737  if (CodeGenOpts.WholeProgramVTables) {
738  // Indicate whether VFE was enabled for this module, so that the
739  // vcall_visibility metadata added under whole program vtables is handled
740  // appropriately in the optimizer.
741  getModule().addModuleFlag(llvm::Module::Error, "Virtual Function Elim",
742  CodeGenOpts.VirtualFunctionElimination);
743  }
744 
745  if (LangOpts.Sanitize.has(SanitizerKind::CFIICall)) {
746  getModule().addModuleFlag(llvm::Module::Override,
747  "CFI Canonical Jump Tables",
748  CodeGenOpts.SanitizeCfiCanonicalJumpTables);
749  }
750 
751  if (LangOpts.Sanitize.has(SanitizerKind::KCFI))
752  getModule().addModuleFlag(llvm::Module::Override, "kcfi", 1);
753 
754  if (CodeGenOpts.CFProtectionReturn &&
755  Target.checkCFProtectionReturnSupported(getDiags())) {
756  // Indicate that we want to instrument return control flow protection.
757  getModule().addModuleFlag(llvm::Module::Min, "cf-protection-return",
758  1);
759  }
760 
761  if (CodeGenOpts.CFProtectionBranch &&
762  Target.checkCFProtectionBranchSupported(getDiags())) {
763  // Indicate that we want to instrument branch control flow protection.
764  getModule().addModuleFlag(llvm::Module::Min, "cf-protection-branch",
765  1);
766  }
767 
768  if (CodeGenOpts.IBTSeal)
769  getModule().addModuleFlag(llvm::Module::Min, "ibt-seal", 1);
770 
771  if (CodeGenOpts.FunctionReturnThunks)
772  getModule().addModuleFlag(llvm::Module::Override, "function_return_thunk_extern", 1);
773 
774  if (CodeGenOpts.IndirectBranchCSPrefix)
775  getModule().addModuleFlag(llvm::Module::Override, "indirect_branch_cs_prefix", 1);
776 
777  // Add module metadata for return address signing (ignoring
778  // non-leaf/all) and stack tagging. These are actually turned on by function
779  // attributes, but we use module metadata to emit build attributes. This is
780  // needed for LTO, where the function attributes are inside bitcode
781  // serialised into a global variable by the time build attributes are
782  // emitted, so we can't access them. LTO objects could be compiled with
783  // different flags therefore module flags are set to "Min" behavior to achieve
784  // the same end result of the normal build where e.g BTI is off if any object
785  // doesn't support it.
786  if (Context.getTargetInfo().hasFeature("ptrauth") &&
787  LangOpts.getSignReturnAddressScope() !=
788  LangOptions::SignReturnAddressScopeKind::None)
789  getModule().addModuleFlag(llvm::Module::Override,
790  "sign-return-address-buildattr", 1);
791  if (LangOpts.Sanitize.has(SanitizerKind::MemtagStack))
792  getModule().addModuleFlag(llvm::Module::Override,
793  "tag-stack-memory-buildattr", 1);
794 
795  if (Arch == llvm::Triple::thumb || Arch == llvm::Triple::thumbeb ||
796  Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb ||
797  Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 ||
798  Arch == llvm::Triple::aarch64_be) {
799  if (LangOpts.BranchTargetEnforcement)
800  getModule().addModuleFlag(llvm::Module::Min, "branch-target-enforcement",
801  1);
802  if (LangOpts.hasSignReturnAddress())
803  getModule().addModuleFlag(llvm::Module::Min, "sign-return-address", 1);
804  if (LangOpts.isSignReturnAddressScopeAll())
805  getModule().addModuleFlag(llvm::Module::Min, "sign-return-address-all",
806  1);
807  if (!LangOpts.isSignReturnAddressWithAKey())
808  getModule().addModuleFlag(llvm::Module::Min,
809  "sign-return-address-with-bkey", 1);
810  }
811 
812  if (!CodeGenOpts.MemoryProfileOutput.empty()) {
813  llvm::LLVMContext &Ctx = TheModule.getContext();
814  getModule().addModuleFlag(
815  llvm::Module::Error, "MemProfProfileFilename",
816  llvm::MDString::get(Ctx, CodeGenOpts.MemoryProfileOutput));
817  }
818 
819  if (LangOpts.CUDAIsDevice && getTriple().isNVPTX()) {
820  // Indicate whether __nvvm_reflect should be configured to flush denormal
821  // floating point values to 0. (This corresponds to its "__CUDA_FTZ"
822  // property.)
823  getModule().addModuleFlag(llvm::Module::Override, "nvvm-reflect-ftz",
824  CodeGenOpts.FP32DenormalMode.Output !=
825  llvm::DenormalMode::IEEE);
826  }
827 
828  if (LangOpts.EHAsynch)
829  getModule().addModuleFlag(llvm::Module::Warning, "eh-asynch", 1);
830 
831  // Indicate whether this Module was compiled with -fopenmp
832  if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
833  getModule().addModuleFlag(llvm::Module::Max, "openmp", LangOpts.OpenMP);
834  if (getLangOpts().OpenMPIsDevice)
835  getModule().addModuleFlag(llvm::Module::Max, "openmp-device",
836  LangOpts.OpenMP);
837 
838  // Emit OpenCL specific module metadata: OpenCL/SPIR version.
839  if (LangOpts.OpenCL || (LangOpts.CUDAIsDevice && getTriple().isSPIRV())) {
840  EmitOpenCLMetadata();
841  // Emit SPIR version.
842  if (getTriple().isSPIR()) {
843  // SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the
844  // opencl.spir.version named metadata.
845  // C++ for OpenCL has a distinct mapping for version compatibility with
846  // OpenCL.
847  auto Version = LangOpts.getOpenCLCompatibleVersion();
848  llvm::Metadata *SPIRVerElts[] = {
849  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
850  Int32Ty, Version / 100)),
851  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
852  Int32Ty, (Version / 100 > 1) ? 0 : 2))};
853  llvm::NamedMDNode *SPIRVerMD =
854  TheModule.getOrInsertNamedMetadata("opencl.spir.version");
855  llvm::LLVMContext &Ctx = TheModule.getContext();
856  SPIRVerMD->addOperand(llvm::MDNode::get(Ctx, SPIRVerElts));
857  }
858  }
859 
860  // HLSL related end of code gen work items.
861  if (LangOpts.HLSL)
862  getHLSLRuntime().finishCodeGen();
863 
864  if (uint32_t PLevel = Context.getLangOpts().PICLevel) {
865  assert(PLevel < 3 && "Invalid PIC Level");
866  getModule().setPICLevel(static_cast<llvm::PICLevel::Level>(PLevel));
867  if (Context.getLangOpts().PIE)
868  getModule().setPIELevel(static_cast<llvm::PIELevel::Level>(PLevel));
869  }
870 
871  if (getCodeGenOpts().CodeModel.size() > 0) {
872  unsigned CM = llvm::StringSwitch<unsigned>(getCodeGenOpts().CodeModel)
873  .Case("tiny", llvm::CodeModel::Tiny)
874  .Case("small", llvm::CodeModel::Small)
875  .Case("kernel", llvm::CodeModel::Kernel)
876  .Case("medium", llvm::CodeModel::Medium)
877  .Case("large", llvm::CodeModel::Large)
878  .Default(~0u);
879  if (CM != ~0u) {
880  llvm::CodeModel::Model codeModel = static_cast<llvm::CodeModel::Model>(CM);
881  getModule().setCodeModel(codeModel);
882  }
883  }
884 
885  if (CodeGenOpts.NoPLT)
886  getModule().setRtLibUseGOT();
887  if (CodeGenOpts.UnwindTables)
888  getModule().setUwtable(llvm::UWTableKind(CodeGenOpts.UnwindTables));
889 
890  switch (CodeGenOpts.getFramePointer()) {
891  case CodeGenOptions::FramePointerKind::None:
892  // 0 ("none") is the default.
893  break;
894  case CodeGenOptions::FramePointerKind::NonLeaf:
895  getModule().setFramePointer(llvm::FramePointerKind::NonLeaf);
896  break;
897  case CodeGenOptions::FramePointerKind::All:
898  getModule().setFramePointer(llvm::FramePointerKind::All);
899  break;
900  }
901 
902  SimplifyPersonality();
903 
904  if (getCodeGenOpts().EmitDeclMetadata)
905  EmitDeclMetadata();
906 
907  if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes)
908  EmitCoverageFile();
909 
910  if (CGDebugInfo *DI = getModuleDebugInfo())
911  DI->finalize();
912 
913  if (getCodeGenOpts().EmitVersionIdentMetadata)
914  EmitVersionIdentMetadata();
915 
916  if (!getCodeGenOpts().RecordCommandLine.empty())
917  EmitCommandLineMetadata();
918 
919  if (!getCodeGenOpts().StackProtectorGuard.empty())
920  getModule().setStackProtectorGuard(getCodeGenOpts().StackProtectorGuard);
921  if (!getCodeGenOpts().StackProtectorGuardReg.empty())
922  getModule().setStackProtectorGuardReg(
923  getCodeGenOpts().StackProtectorGuardReg);
924  if (!getCodeGenOpts().StackProtectorGuardSymbol.empty())
925  getModule().setStackProtectorGuardSymbol(
926  getCodeGenOpts().StackProtectorGuardSymbol);
927  if (getCodeGenOpts().StackProtectorGuardOffset != INT_MAX)
928  getModule().setStackProtectorGuardOffset(
929  getCodeGenOpts().StackProtectorGuardOffset);
930  if (getCodeGenOpts().StackAlignment)
931  getModule().setOverrideStackAlignment(getCodeGenOpts().StackAlignment);
932  if (getCodeGenOpts().SkipRaxSetup)
933  getModule().addModuleFlag(llvm::Module::Override, "SkipRaxSetup", 1);
934 
935  getTargetCodeGenInfo().emitTargetMetadata(*this, MangledDeclNames);
936 
937  EmitBackendOptionsMetadata(getCodeGenOpts());
938 
939  // If there is device offloading code embed it in the host now.
940  EmbedObject(&getModule(), CodeGenOpts, getDiags());
941 
942  // Set visibility from DLL storage class
943  // We do this at the end of LLVM IR generation; after any operation
944  // that might affect the DLL storage class or the visibility, and
945  // before anything that might act on these.
946  setVisibilityFromDLLStorageClass(LangOpts, getModule());
947 }
948 
949 void CodeGenModule::EmitOpenCLMetadata() {
950  // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
951  // opencl.ocl.version named metadata node.
952  // C++ for OpenCL has a distinct mapping for versions compatible with OpenCL.
953  auto Version = LangOpts.getOpenCLCompatibleVersion();
954  llvm::Metadata *OCLVerElts[] = {
955  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
956  Int32Ty, Version / 100)),
957  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
958  Int32Ty, (Version % 100) / 10))};
959  llvm::NamedMDNode *OCLVerMD =
960  TheModule.getOrInsertNamedMetadata("opencl.ocl.version");
961  llvm::LLVMContext &Ctx = TheModule.getContext();
962  OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
963 }
964 
965 void CodeGenModule::EmitBackendOptionsMetadata(
966  const CodeGenOptions CodeGenOpts) {
967  if (getTriple().isRISCV()) {
968  getModule().addModuleFlag(llvm::Module::Error, "SmallDataLimit",
969  CodeGenOpts.SmallDataLimit);
970  }
971 }
972 
973 void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
974  // Make sure that this type is translated.
975  Types.UpdateCompletedType(TD);
976 }
977 
978 void CodeGenModule::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
979  // Make sure that this type is translated.
980  Types.RefreshTypeCacheForClass(RD);
981 }
982 
983 llvm::MDNode *CodeGenModule::getTBAATypeInfo(QualType QTy) {
984  if (!TBAA)
985  return nullptr;
986  return TBAA->getTypeInfo(QTy);
987 }
988 
989 TBAAAccessInfo CodeGenModule::getTBAAAccessInfo(QualType AccessType) {
990  if (!TBAA)
991  return TBAAAccessInfo();
992  if (getLangOpts().CUDAIsDevice) {
993  // As CUDA builtin surface/texture types are replaced, skip generating TBAA
994  // access info.
995  if (AccessType->isCUDADeviceBuiltinSurfaceType()) {
996  if (getTargetCodeGenInfo().getCUDADeviceBuiltinSurfaceDeviceType() !=
997  nullptr)
998  return TBAAAccessInfo();
999  } else if (AccessType->isCUDADeviceBuiltinTextureType()) {
1000  if (getTargetCodeGenInfo().getCUDADeviceBuiltinTextureDeviceType() !=
1001  nullptr)
1002  return TBAAAccessInfo();
1003  }
1004  }
1005  return TBAA->getAccessInfo(AccessType);
1006 }
1007 
1008 TBAAAccessInfo
1009 CodeGenModule::getTBAAVTablePtrAccessInfo(llvm::Type *VTablePtrType) {
1010  if (!TBAA)
1011  return TBAAAccessInfo();
1012  return TBAA->getVTablePtrAccessInfo(VTablePtrType);
1013 }
1014 
1015 llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) {
1016  if (!TBAA)
1017  return nullptr;
1018  return TBAA->getTBAAStructInfo(QTy);
1019 }
1020 
1021 llvm::MDNode *CodeGenModule::getTBAABaseTypeInfo(QualType QTy) {
1022  if (!TBAA)
1023  return nullptr;
1024  return TBAA->getBaseTypeInfo(QTy);
1025 }
1026 
1027 llvm::MDNode *CodeGenModule::getTBAAAccessTagInfo(TBAAAccessInfo Info) {
1028  if (!TBAA)
1029  return nullptr;
1030  return TBAA->getAccessTagInfo(Info);
1031 }
1032 
1033 TBAAAccessInfo CodeGenModule::mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo,
1034  TBAAAccessInfo TargetInfo) {
1035  if (!TBAA)
1036  return TBAAAccessInfo();
1037  return TBAA->mergeTBAAInfoForCast(SourceInfo, TargetInfo);
1038 }
1039 
1040 TBAAAccessInfo
1041 CodeGenModule::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA,
1042  TBAAAccessInfo InfoB) {
1043  if (!TBAA)
1044  return TBAAAccessInfo();
1045  return TBAA->mergeTBAAInfoForConditionalOperator(InfoA, InfoB);
1046 }
1047 
1048 TBAAAccessInfo
1049 CodeGenModule::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo,
1050  TBAAAccessInfo SrcInfo) {
1051  if (!TBAA)
1052  return TBAAAccessInfo();
1053  return TBAA->mergeTBAAInfoForConditionalOperator(DestInfo, SrcInfo);
1054 }
1055 
1056 void CodeGenModule::DecorateInstructionWithTBAA(llvm::Instruction *Inst,
1057  TBAAAccessInfo TBAAInfo) {
1058  if (llvm::MDNode *Tag = getTBAAAccessTagInfo(TBAAInfo))
1059  Inst->setMetadata(llvm::LLVMContext::MD_tbaa, Tag);
1060 }
1061 
1062 void CodeGenModule::DecorateInstructionWithInvariantGroup(
1063  llvm::Instruction *I, const CXXRecordDecl *RD) {
1064  I->setMetadata(llvm::LLVMContext::MD_invariant_group,
1065  llvm::MDNode::get(getLLVMContext(), {}));
1066 }
1067 
1068 void CodeGenModule::Error(SourceLocation loc, StringRef message) {
1069  unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "%0");
1070  getDiags().Report(Context.getFullLoc(loc), diagID) << message;
1071 }
1072 
1073 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1074 /// specified stmt yet.
1075 void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type) {
1077  "cannot compile this %0 yet");
1078  std::string Msg = Type;
1079  getDiags().Report(Context.getFullLoc(S->getBeginLoc()), DiagID)
1080  << Msg << S->getSourceRange();
1081 }
1082 
1083 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1084 /// specified decl yet.
1085 void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type) {
1087  "cannot compile this %0 yet");
1088  std::string Msg = Type;
1089  getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
1090 }
1091 
1092 llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
1093  return llvm::ConstantInt::get(SizeTy, size.getQuantity());
1094 }
1095 
1096 void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
1097  const NamedDecl *D) const {
1098  // Internal definitions always have default visibility.
1099  if (GV->hasLocalLinkage()) {
1100  GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
1101  return;
1102  }
1103  if (!D)
1104  return;
1105  // Set visibility for definitions, and for declarations if requested globally
1106  // or set explicitly.
1107  LinkageInfo LV = D->getLinkageAndVisibility();
1108  if (GV->hasDLLExportStorageClass() || GV->hasDLLImportStorageClass()) {
1109  // Reject incompatible dllstorage and visibility annotations.
1110  if (!LV.isVisibilityExplicit())
1111  return;
1112  if (GV->hasDLLExportStorageClass()) {
1113  if (LV.getVisibility() == HiddenVisibility)
1114  getDiags().Report(D->getLocation(),
1115  diag::err_hidden_visibility_dllexport);
1116  } else if (LV.getVisibility() != DefaultVisibility) {
1117  getDiags().Report(D->getLocation(),
1118  diag::err_non_default_visibility_dllimport);
1119  }
1120  return;
1121  }
1122 
1123  if (LV.isVisibilityExplicit() || getLangOpts().SetVisibilityForExternDecls ||
1124  !GV->isDeclarationForLinker())
1125  GV->setVisibility(GetLLVMVisibility(LV.getVisibility()));
1126 }
1127 
1128 static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
1129  llvm::GlobalValue *GV) {
1130  if (GV->hasLocalLinkage())
1131  return true;
1132 
1133  if (!GV->hasDefaultVisibility() && !GV->hasExternalWeakLinkage())
1134  return true;
1135 
1136  // DLLImport explicitly marks the GV as external.
1137  if (GV->hasDLLImportStorageClass())
1138  return false;
1139 
1140  const llvm::Triple &TT = CGM.getTriple();
1141  if (TT.isWindowsGNUEnvironment()) {
1142  // In MinGW, variables without DLLImport can still be automatically
1143  // imported from a DLL by the linker; don't mark variables that
1144  // potentially could come from another DLL as DSO local.
1145 
1146  // With EmulatedTLS, TLS variables can be autoimported from other DLLs
1147  // (and this actually happens in the public interface of libstdc++), so
1148  // such variables can't be marked as DSO local. (Native TLS variables
1149  // can't be dllimported at all, though.)
1150  if (GV->isDeclarationForLinker() && isa<llvm::GlobalVariable>(GV) &&
1151  (!GV->isThreadLocal() || CGM.getCodeGenOpts().EmulatedTLS))
1152  return false;
1153  }
1154 
1155  // On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols
1156  // remain unresolved in the link, they can be resolved to zero, which is
1157  // outside the current DSO.
1158  if (TT.isOSBinFormatCOFF() && GV->hasExternalWeakLinkage())
1159  return false;
1160 
1161  // Every other GV is local on COFF.
1162  // Make an exception for windows OS in the triple: Some firmware builds use
1163  // *-win32-macho triples. This (accidentally?) produced windows relocations
1164  // without GOT tables in older clang versions; Keep this behaviour.
1165  // FIXME: even thread local variables?
1166  if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO()))
1167  return true;
1168 
1169  // Only handle COFF and ELF for now.
1170  if (!TT.isOSBinFormatELF())
1171  return false;
1172 
1173  // If this is not an executable, don't assume anything is local.
1174  const auto &CGOpts = CGM.getCodeGenOpts();
1175  llvm::Reloc::Model RM = CGOpts.RelocationModel;
1176  const auto &LOpts = CGM.getLangOpts();
1177  if (RM != llvm::Reloc::Static && !LOpts.PIE) {
1178  // On ELF, if -fno-semantic-interposition is specified and the target
1179  // supports local aliases, there will be neither CC1
1180  // -fsemantic-interposition nor -fhalf-no-semantic-interposition. Set
1181  // dso_local on the function if using a local alias is preferable (can avoid
1182  // PLT indirection).
1183  if (!(isa<llvm::Function>(GV) && GV->canBenefitFromLocalAlias()))
1184  return false;
1185  return !(CGM.getLangOpts().SemanticInterposition ||
1186  CGM.getLangOpts().HalfNoSemanticInterposition);
1187  }
1188 
1189  // A definition cannot be preempted from an executable.
1190  if (!GV->isDeclarationForLinker())
1191  return true;
1192 
1193  // Most PIC code sequences that assume that a symbol is local cannot produce a
1194  // 0 if it turns out the symbol is undefined. While this is ABI- and
1195  // relocation-dependent, it seems worth handling it here.
1196  if (RM == llvm::Reloc::PIC_ && GV->hasExternalWeakLinkage())
1197  return false;
1198 
1199  // PowerPC64 prefers TOC indirection to avoid copy relocations.
1200  if (TT.isPPC64())
1201  return false;
1202 
1203  if (CGOpts.DirectAccessExternalData) {
1204  // If -fdirect-access-external-data (default for -fno-pic), set dso_local
1205  // for non-thread-local variables. If the symbol is not defined in the
1206  // executable, a copy relocation will be needed at link time. dso_local is
1207  // excluded for thread-local variables because they generally don't support
1208  // copy relocations.
1209  if (auto *Var = dyn_cast<llvm::GlobalVariable>(GV))
1210  if (!Var->isThreadLocal())
1211  return true;
1212 
1213  // -fno-pic sets dso_local on a function declaration to allow direct
1214  // accesses when taking its address (similar to a data symbol). If the
1215  // function is not defined in the executable, a canonical PLT entry will be
1216  // needed at link time. -fno-direct-access-external-data can avoid the
1217  // canonical PLT entry. We don't generalize this condition to -fpie/-fpic as
1218  // it could just cause trouble without providing perceptible benefits.
1219  if (isa<llvm::Function>(GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static)
1220  return true;
1221  }
1222 
1223  // If we can use copy relocations we can assume it is local.
1224 
1225  // Otherwise don't assume it is local.
1226  return false;
1227 }
1228 
1229 void CodeGenModule::setDSOLocal(llvm::GlobalValue *GV) const {
1230  GV->setDSOLocal(shouldAssumeDSOLocal(*this, GV));
1231 }
1232 
1233 void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
1234  GlobalDecl GD) const {
1235  const auto *D = dyn_cast<NamedDecl>(GD.getDecl());
1236  // C++ destructors have a few C++ ABI specific special cases.
1237  if (const auto *Dtor = dyn_cast_or_null<CXXDestructorDecl>(D)) {
1238  getCXXABI().setCXXDestructorDLLStorage(GV, Dtor, GD.getDtorType());
1239  return;
1240  }
1241  setDLLImportDLLExport(GV, D);
1242 }
1243 
1244 void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
1245  const NamedDecl *D) const {
1246  if (D && D->isExternallyVisible()) {
1247  if (D->hasAttr<DLLImportAttr>())
1248  GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
1249  else if ((D->hasAttr<DLLExportAttr>() ||
1250  shouldMapVisibilityToDLLExport(D)) &&
1251  !GV->isDeclarationForLinker())
1252  GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
1253  }
1254 }
1255 
1256 void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
1257  GlobalDecl GD) const {
1258  setDLLImportDLLExport(GV, GD);
1259  setGVPropertiesAux(GV, dyn_cast<NamedDecl>(GD.getDecl()));
1260 }
1261 
1262 void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
1263  const NamedDecl *D) const {
1264  setDLLImportDLLExport(GV, D);
1265  setGVPropertiesAux(GV, D);
1266 }
1267 
1268 void CodeGenModule::setGVPropertiesAux(llvm::GlobalValue *GV,
1269  const NamedDecl *D) const {
1270  setGlobalVisibility(GV, D);
1271  setDSOLocal(GV);
1272  GV->setPartition(CodeGenOpts.SymbolPartition);
1273 }
1274 
1275 static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
1276  return llvm::StringSwitch<llvm::GlobalVariable::ThreadLocalMode>(S)
1277  .Case("global-dynamic", llvm::GlobalVariable::GeneralDynamicTLSModel)
1278  .Case("local-dynamic", llvm::GlobalVariable::LocalDynamicTLSModel)
1279  .Case("initial-exec", llvm::GlobalVariable::InitialExecTLSModel)
1280  .Case("local-exec", llvm::GlobalVariable::LocalExecTLSModel);
1281 }
1282 
1283 llvm::GlobalVariable::ThreadLocalMode
1284 CodeGenModule::GetDefaultLLVMTLSModel() const {
1285  switch (CodeGenOpts.getDefaultTLSModel()) {
1286  case CodeGenOptions::GeneralDynamicTLSModel:
1287  return llvm::GlobalVariable::GeneralDynamicTLSModel;
1288  case CodeGenOptions::LocalDynamicTLSModel:
1289  return llvm::GlobalVariable::LocalDynamicTLSModel;
1290  case CodeGenOptions::InitialExecTLSModel:
1291  return llvm::GlobalVariable::InitialExecTLSModel;
1292  case CodeGenOptions::LocalExecTLSModel:
1293  return llvm::GlobalVariable::LocalExecTLSModel;
1294  }
1295  llvm_unreachable("Invalid TLS model!");
1296 }
1297 
1298 void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const {
1299  assert(D.getTLSKind() && "setting TLS mode on non-TLS var!");
1300 
1301  llvm::GlobalValue::ThreadLocalMode TLM;
1302  TLM = GetDefaultLLVMTLSModel();
1303 
1304  // Override the TLS model if it is explicitly specified.
1305  if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) {
1306  TLM = GetLLVMTLSModel(Attr->getModel());
1307  }
1308 
1309  GV->setThreadLocalMode(TLM);
1310 }
1311 
1312 static std::string getCPUSpecificMangling(const CodeGenModule &CGM,
1313  StringRef Name) {
1314  const TargetInfo &Target = CGM.getTarget();
1315  return (Twine('.') + Twine(Target.CPUSpecificManglingCharacter(Name))).str();
1316 }
1317 
1318 static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM,
1319  const CPUSpecificAttr *Attr,
1320  unsigned CPUIndex,
1321  raw_ostream &Out) {
1322  // cpu_specific gets the current name, dispatch gets the resolver if IFunc is
1323  // supported.
1324  if (Attr)
1325  Out << getCPUSpecificMangling(CGM, Attr->getCPUName(CPUIndex)->getName());
1326  else if (CGM.getTarget().supportsIFunc())
1327  Out << ".resolver";
1328 }
1329 
1330 static void AppendTargetMangling(const CodeGenModule &CGM,
1331  const TargetAttr *Attr, raw_ostream &Out) {
1332  if (Attr->isDefaultVersion())
1333  return;
1334 
1335  Out << '.';
1336  const TargetInfo &Target = CGM.getTarget();
1337  ParsedTargetAttr Info = Target.parseTargetAttr(Attr->getFeaturesStr());
1338  llvm::sort(Info.Features, [&Target](StringRef LHS, StringRef RHS) {
1339  // Multiversioning doesn't allow "no-${feature}", so we can
1340  // only have "+" prefixes here.
1341  assert(LHS.startswith("+") && RHS.startswith("+") &&
1342  "Features should always have a prefix.");
1343  return Target.multiVersionSortPriority(LHS.substr(1)) >
1344  Target.multiVersionSortPriority(RHS.substr(1));
1345  });
1346 
1347  bool IsFirst = true;
1348 
1349  if (!Info.CPU.empty()) {
1350  IsFirst = false;
1351  Out << "arch_" << Info.CPU;
1352  }
1353 
1354  for (StringRef Feat : Info.Features) {
1355  if (!IsFirst)
1356  Out << '_';
1357  IsFirst = false;
1358  Out << Feat.substr(1);
1359  }
1360 }
1361 
1362 // Returns true if GD is a function decl with internal linkage and
1363 // needs a unique suffix after the mangled name.
1364 static bool isUniqueInternalLinkageDecl(GlobalDecl GD,
1365  CodeGenModule &CGM) {
1366  const Decl *D = GD.getDecl();
1367  return !CGM.getModuleNameHash().empty() && isa<FunctionDecl>(D) &&
1368  (CGM.getFunctionLinkage(GD) == llvm::GlobalValue::InternalLinkage);
1369 }
1370 
1371 static void AppendTargetClonesMangling(const CodeGenModule &CGM,
1372  const TargetClonesAttr *Attr,
1373  unsigned VersionIndex,
1374  raw_ostream &Out) {
1375  Out << '.';
1376  StringRef FeatureStr = Attr->getFeatureStr(VersionIndex);
1377  if (FeatureStr.startswith("arch="))
1378  Out << "arch_" << FeatureStr.substr(sizeof("arch=") - 1);
1379  else
1380  Out << FeatureStr;
1381 
1382  Out << '.' << Attr->getMangledIndex(VersionIndex);
1383 }
1384 
1385 static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
1386  const NamedDecl *ND,
1387  bool OmitMultiVersionMangling = false) {
1388  SmallString<256> Buffer;
1389  llvm::raw_svector_ostream Out(Buffer);
1390  MangleContext &MC = CGM.getCXXABI().getMangleContext();
1391  if (!CGM.getModuleNameHash().empty())
1392  MC.needsUniqueInternalLinkageNames();
1393  bool ShouldMangle = MC.shouldMangleDeclName(ND);
1394  if (ShouldMangle)
1395  MC.mangleName(GD.getWithDecl(ND), Out);
1396  else {
1397  IdentifierInfo *II = ND->getIdentifier();
1398  assert(II && "Attempt to mangle unnamed decl.");
1399  const auto *FD = dyn_cast<FunctionDecl>(ND);
1400 
1401  if (FD &&
1402  FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) {
1403  Out << "__regcall3__" << II->getName();
1404  } else if (FD && FD->hasAttr<CUDAGlobalAttr>() &&
1406  Out << "__device_stub__" << II->getName();
1407  } else {
1408  Out << II->getName();
1409  }
1410  }
1411 
1412  // Check if the module name hash should be appended for internal linkage
1413  // symbols. This should come before multi-version target suffixes are
1414  // appended. This is to keep the name and module hash suffix of the
1415  // internal linkage function together. The unique suffix should only be
1416  // added when name mangling is done to make sure that the final name can
1417  // be properly demangled. For example, for C functions without prototypes,
1418  // name mangling is not done and the unique suffix should not be appended
1419  // then.
1420  if (ShouldMangle && isUniqueInternalLinkageDecl(GD, CGM)) {
1421  assert(CGM.getCodeGenOpts().UniqueInternalLinkageNames &&
1422  "Hash computed when not explicitly requested");
1423  Out << CGM.getModuleNameHash();
1424  }
1425 
1426  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
1427  if (FD->isMultiVersion() && !OmitMultiVersionMangling) {
1428  switch (FD->getMultiVersionKind()) {
1429  case MultiVersionKind::CPUDispatch:
1430  case MultiVersionKind::CPUSpecific:
1431  AppendCPUSpecificCPUDispatchMangling(CGM,
1432  FD->getAttr<CPUSpecificAttr>(),
1433  GD.getMultiVersionIndex(), Out);
1434  break;
1435  case MultiVersionKind::Target:
1436  AppendTargetMangling(CGM, FD->getAttr<TargetAttr>(), Out);
1437  break;
1438  case MultiVersionKind::TargetClones:
1439  AppendTargetClonesMangling(CGM, FD->getAttr<TargetClonesAttr>(),
1440  GD.getMultiVersionIndex(), Out);
1441  break;
1443  llvm_unreachable("None multiversion type isn't valid here");
1444  }
1445  }
1446 
1447  // Make unique name for device side static file-scope variable for HIP.
1448  if (CGM.getContext().shouldExternalize(ND) &&
1449  CGM.getLangOpts().GPURelocatableDeviceCode &&
1450  CGM.getLangOpts().CUDAIsDevice)
1451  CGM.printPostfixForExternalizedDecl(Out, ND);
1452 
1453  return std::string(Out.str());
1454 }
1455 
1456 void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
1457  const FunctionDecl *FD,
1458  StringRef &CurName) {
1459  if (!FD->isMultiVersion())
1460  return;
1461 
1462  // Get the name of what this would be without the 'target' attribute. This
1463  // allows us to lookup the version that was emitted when this wasn't a
1464  // multiversion function.
1465  std::string NonTargetName =
1466  getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
1467  GlobalDecl OtherGD;
1468  if (lookupRepresentativeDecl(NonTargetName, OtherGD)) {
1469  assert(OtherGD.getCanonicalDecl()
1470  .getDecl()
1471  ->getAsFunction()
1472  ->isMultiVersion() &&
1473  "Other GD should now be a multiversioned function");
1474  // OtherFD is the version of this function that was mangled BEFORE
1475  // becoming a MultiVersion function. It potentially needs to be updated.
1476  const FunctionDecl *OtherFD = OtherGD.getCanonicalDecl()
1477  .getDecl()
1478  ->getAsFunction()
1479  ->getMostRecentDecl();
1480  std::string OtherName = getMangledNameImpl(*this, OtherGD, OtherFD);
1481  // This is so that if the initial version was already the 'default'
1482  // version, we don't try to update it.
1483  if (OtherName != NonTargetName) {
1484  // Remove instead of erase, since others may have stored the StringRef
1485  // to this.
1486  const auto ExistingRecord = Manglings.find(NonTargetName);
1487  if (ExistingRecord != std::end(Manglings))
1488  Manglings.remove(&(*ExistingRecord));
1489  auto Result = Manglings.insert(std::make_pair(OtherName, OtherGD));
1490  StringRef OtherNameRef = MangledDeclNames[OtherGD.getCanonicalDecl()] =
1491  Result.first->first();
1492  // If this is the decl currently being created, make sure we update the name.
1493  if (GD.getCanonicalDecl() == OtherGD.getCanonicalDecl())
1494  CurName = OtherNameRef;
1495  if (llvm::GlobalValue *Entry = GetGlobalValue(NonTargetName))
1496  Entry->setName(OtherName);
1497  }
1498  }
1499 }
1500 
1501 StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
1502  GlobalDecl CanonicalGD = GD.getCanonicalDecl();
1503 
1504  // Some ABIs don't have constructor variants. Make sure that base and
1505  // complete constructors get mangled the same.
1506  if (const auto *CD = dyn_cast<CXXConstructorDecl>(CanonicalGD.getDecl())) {
1507  if (!getTarget().getCXXABI().hasConstructorVariants()) {
1508  CXXCtorType OrigCtorType = GD.getCtorType();
1509  assert(OrigCtorType == Ctor_Base || OrigCtorType == Ctor_Complete);
1510  if (OrigCtorType == Ctor_Base)
1511  CanonicalGD = GlobalDecl(CD, Ctor_Complete);
1512  }
1513  }
1514 
1515  // In CUDA/HIP device compilation with -fgpu-rdc, the mangled name of a
1516  // static device variable depends on whether the variable is referenced by
1517  // a host or a device function. Therefore the mangled name cannot be
1518  // cached.
1519  if (!LangOpts.CUDAIsDevice || !getContext().mayExternalize(GD.getDecl())) {
1520  auto FoundName = MangledDeclNames.find(CanonicalGD);
1521  if (FoundName != MangledDeclNames.end())
1522  return FoundName->second;
1523  }
1524 
1525  // Keep the first result in the case of a mangling collision.
1526  const auto *ND = cast<NamedDecl>(GD.getDecl());
1527  std::string MangledName = getMangledNameImpl(*this, GD, ND);
1528 
1529  // Ensure either that host and device compilations use different ABIs (say,
1530  // the host compilation follows the MSVC ABI while the device compilation
1531  // follows the Itanium C++ ABI), or, if they follow the same ABI, that kernel
1532  // names after mangling are the same after name stubbing. The latter check is
1533  // very important, as the device kernel name mangled in the host compilation
1534  // is used to resolve the device binaries to be executed. Inconsistent naming
1535  // results in undefined behavior. Even though we cannot check naming directly
1536  // between host and device compilations, the host- and device-side manglings
1537  // computed in the host compilation can help catch certain mismatches.
1538  assert(!isa<FunctionDecl>(ND) || !ND->hasAttr<CUDAGlobalAttr>() ||
1539  getContext().shouldExternalize(ND) || getLangOpts().CUDAIsDevice ||
1540  (getContext().getAuxTargetInfo() &&
1541  (getContext().getAuxTargetInfo()->getCXXABI() !=
1542  getContext().getTargetInfo().getCXXABI())) ||
1543  getCUDARuntime().getDeviceSideName(ND) ==
 1544  getMangledNameImpl(
 1545  *this,
 1546  GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel),
 1547  ND));
1548 
1549  auto Result = Manglings.insert(std::make_pair(MangledName, GD));
1550  return MangledDeclNames[CanonicalGD] = Result.first->first();
1551 }
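 // Illustrative note (not part of the original file): for a C++ constructor on
 // an ABI without constructor variants (e.g. the Microsoft C++ ABI), both
 // GlobalDecl(CD, Ctor_Base) and GlobalDecl(CD, Ctor_Complete) resolve to the
 // same cached entry above, whereas under the Itanium ABI they mangle
 // differently, roughly:
 //
 //   struct Foo { Foo(); };
 //   // Itanium: _ZN3FooC1Ev (complete) and _ZN3FooC2Ev (base)
 //   // MSVC:    a single ??0Foo@@QEAA@XZ entry is cached for both variants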
1552 
 1553 StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD,
 1554  const BlockDecl *BD) {
1555  MangleContext &MangleCtx = getCXXABI().getMangleContext();
1556  const Decl *D = GD.getDecl();
1557 
1558  SmallString<256> Buffer;
1559  llvm::raw_svector_ostream Out(Buffer);
1560  if (!D)
1561  MangleCtx.mangleGlobalBlock(BD,
1562  dyn_cast_or_null<VarDecl>(initializedGlobalDecl.getDecl()), Out);
1563  else if (const auto *CD = dyn_cast<CXXConstructorDecl>(D))
1564  MangleCtx.mangleCtorBlock(CD, GD.getCtorType(), BD, Out);
1565  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(D))
1566  MangleCtx.mangleDtorBlock(DD, GD.getDtorType(), BD, Out);
1567  else
1568  MangleCtx.mangleBlock(cast<DeclContext>(D), BD, Out);
1569 
1570  auto Result = Manglings.insert(std::make_pair(Out.str(), BD));
1571  return Result.first->first();
1572 }
1573 
1575  auto it = MangledDeclNames.begin();
1576  while (it != MangledDeclNames.end()) {
1577  if (it->second == Name)
1578  return it->first;
1579  it++;
1580  }
1581  return GlobalDecl();
1582 }
1583 
1584 llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
1585  return getModule().getNamedValue(Name);
1586 }
1587 
1588 /// AddGlobalCtor - Add a function to the list that will be called before
1589 /// main() runs.
1590 void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority,
1591  unsigned LexOrder,
1592  llvm::Constant *AssociatedData) {
1593  // FIXME: Type coercion of void()* types.
1594  GlobalCtors.push_back(Structor(Priority, LexOrder, Ctor, AssociatedData));
1595 }
1596 
1597 /// AddGlobalDtor - Add a function to the list that will be called
1598 /// when the module is unloaded.
1599 void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority,
1600  bool IsDtorAttrFunc) {
1601  if (CodeGenOpts.RegisterGlobalDtorsWithAtExit &&
1602  (!getContext().getTargetInfo().getTriple().isOSAIX() || IsDtorAttrFunc)) {
1603  DtorsUsingAtExit[Priority].push_back(Dtor);
1604  return;
1605  }
1606 
1607  // FIXME: Type coercion of void()* types.
1608  GlobalDtors.push_back(Structor(Priority, ~0U, Dtor, nullptr));
1609 }
1610 
1611 void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) {
1612  if (Fns.empty()) return;
1613 
1614  // Ctor function type is void()*.
1615  llvm::FunctionType* CtorFTy = llvm::FunctionType::get(VoidTy, false);
1616  llvm::Type *CtorPFTy = llvm::PointerType::get(CtorFTy,
1617  TheModule.getDataLayout().getProgramAddressSpace());
1618 
1619  // Get the type of a ctor entry, { i32, void ()*, i8* }.
1620  llvm::StructType *CtorStructTy = llvm::StructType::get(
1621  Int32Ty, CtorPFTy, VoidPtrTy);
1622 
1623  // Construct the constructor and destructor arrays.
1624  ConstantInitBuilder builder(*this);
1625  auto ctors = builder.beginArray(CtorStructTy);
1626  for (const auto &I : Fns) {
1627  auto ctor = ctors.beginStruct(CtorStructTy);
1628  ctor.addInt(Int32Ty, I.Priority);
1629  ctor.add(llvm::ConstantExpr::getBitCast(I.Initializer, CtorPFTy));
1630  if (I.AssociatedData)
1631  ctor.add(llvm::ConstantExpr::getBitCast(I.AssociatedData, VoidPtrTy));
1632  else
1633  ctor.addNullPointer(VoidPtrTy);
1634  ctor.finishAndAddTo(ctors);
1635  }
1636 
1637  auto list =
1638  ctors.finishAndCreateGlobal(GlobalName, getPointerAlign(),
1639  /*constant*/ false,
1640  llvm::GlobalValue::AppendingLinkage);
1641 
1642  // The LTO linker doesn't seem to like it when we set an alignment
1643  // on appending variables. Take it off as a workaround.
1644  list->setAlignment(llvm::None);
1645 
1646  Fns.clear();
1647 }
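 // Illustrative sketch (not from the original source): for a translation unit
 // with a single global initializer, EmitCtorList produces IR roughly like
 //
 //   @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }]
 //     [{ i32, void ()*, i8* } { i32 65535, void ()* @_GLOBAL__sub_I_foo.cpp,
 //                               i8* null }]
 //
 // where 65535 is the default priority and the i8* slot holds the optional
 // associated data (null here). The exact pointer spellings depend on whether
 // opaque pointers are enabled.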
1648 
1649 llvm::GlobalValue::LinkageTypes
 1650 CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
 1651  const auto *D = cast<FunctionDecl>(GD.getDecl());
1652 
 1653  GVALinkage Linkage = getContext().GetGVALinkageForFunction(D);
 1654 
1655  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(D))
1656  return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor, GD.getDtorType());
1657 
1658  if (isa<CXXConstructorDecl>(D) &&
1659  cast<CXXConstructorDecl>(D)->isInheritingConstructor() &&
1660  Context.getTargetInfo().getCXXABI().isMicrosoft()) {
1661  // Our approach to inheriting constructors is fundamentally different from
1662  // that used by the MS ABI, so keep our inheriting constructor thunks
1663  // internal rather than trying to pick an unambiguous mangling for them.
 1664  return llvm::GlobalValue::InternalLinkage;
 1665  }
1666 
1667  return getLLVMLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false);
1668 }
1669 
1670 llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
1671  llvm::MDString *MDS = dyn_cast<llvm::MDString>(MD);
1672  if (!MDS) return nullptr;
1673 
1674  return llvm::ConstantInt::get(Int64Ty, llvm::MD5Hash(MDS->getString()));
1675 }
1676 
 1677 llvm::ConstantInt *CodeGenModule::CreateKCFITypeId(QualType T) {
 1678  if (auto *FnType = T->getAs<FunctionProtoType>())
 1679  T = getContext().getFunctionType(
 1680  FnType->getReturnType(), FnType->getParamTypes(),
 1681  FnType->getExtProtoInfo().withExceptionSpec(EST_None));
1682 
1683  std::string OutName;
1684  llvm::raw_string_ostream Out(OutName);
 1685  getCXXABI().getMangleContext().mangleTypeName(T, Out);
 1686 
1687  return llvm::ConstantInt::get(Int32Ty,
1688  static_cast<uint32_t>(llvm::xxHash64(OutName)));
1689 }
1690 
 1691 void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD,
 1692  const CGFunctionInfo &Info,
1693  llvm::Function *F, bool IsThunk) {
1694  unsigned CallingConv;
1695  llvm::AttributeList PAL;
1696  ConstructAttributeList(F->getName(), Info, GD, PAL, CallingConv,
1697  /*AttrOnCallSite=*/false, IsThunk);
1698  F->setAttributes(PAL);
1699  F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
1700 }
1701 
 1702 static void removeImageAccessQualifier(std::string& TyName) {
 1703  std::string ReadOnlyQual("__read_only");
1704  std::string::size_type ReadOnlyPos = TyName.find(ReadOnlyQual);
1705  if (ReadOnlyPos != std::string::npos)
1706  // "+ 1" for the space after access qualifier.
1707  TyName.erase(ReadOnlyPos, ReadOnlyQual.size() + 1);
1708  else {
1709  std::string WriteOnlyQual("__write_only");
1710  std::string::size_type WriteOnlyPos = TyName.find(WriteOnlyQual);
1711  if (WriteOnlyPos != std::string::npos)
1712  TyName.erase(WriteOnlyPos, WriteOnlyQual.size() + 1);
1713  else {
1714  std::string ReadWriteQual("__read_write");
1715  std::string::size_type ReadWritePos = TyName.find(ReadWriteQual);
1716  if (ReadWritePos != std::string::npos)
1717  TyName.erase(ReadWritePos, ReadWriteQual.size() + 1);
1718  }
1719  }
1720 }
1721 
 1722 // Returns the address space id that should be emitted in the
 1723 // kernel_arg_addr_space metadata. The ids are fixed to those specified in
 1724 // the SPIR 2.0 specification so that, for example, a clGetKernelArgInfo()
 1725 // implementation can still distinguish the address spaces on targets that
 1726 // have no unique mapping to the OpenCL address spaces
 1727 // (basically all single-AS CPUs).
1728 static unsigned ArgInfoAddressSpace(LangAS AS) {
1729  switch (AS) {
1730  case LangAS::opencl_global:
1731  return 1;
 1732  case LangAS::opencl_constant:
 1733  return 2;
1734  case LangAS::opencl_local:
1735  return 3;
 1736  case LangAS::opencl_generic:
 1737  return 4; // Not in SPIR 2.0 specs.
 1738  case LangAS::opencl_global_device:
 1739  return 5;
 1740  case LangAS::opencl_global_host:
 1741  return 6;
1742  default:
1743  return 0; // Assume private.
1744  }
1745 }
1746 
1747 void CodeGenModule::GenKernelArgMetadata(llvm::Function *Fn,
1748  const FunctionDecl *FD,
1749  CodeGenFunction *CGF) {
1750  assert(((FD && CGF) || (!FD && !CGF)) &&
1751  "Incorrect use - FD and CGF should either be both null or not!");
1752  // Create MDNodes that represent the kernel arg metadata.
 1753  // Each MDNode is a list of the form "key" followed by N values, where N is
 1754  // the number of kernel arguments.
1755 
1756  const PrintingPolicy &Policy = Context.getPrintingPolicy();
1757 
1758  // MDNode for the kernel argument address space qualifiers.
1759  SmallVector<llvm::Metadata *, 8> addressQuals;
1760 
1761  // MDNode for the kernel argument access qualifiers (images only).
 1762  SmallVector<llvm::Metadata *, 8> accessQuals;
 1763 
1764  // MDNode for the kernel argument type names.
1765  SmallVector<llvm::Metadata *, 8> argTypeNames;
1766 
1767  // MDNode for the kernel argument base type names.
1768  SmallVector<llvm::Metadata *, 8> argBaseTypeNames;
1769 
1770  // MDNode for the kernel argument type qualifiers.
1771  SmallVector<llvm::Metadata *, 8> argTypeQuals;
1772 
1773  // MDNode for the kernel argument names.
 1774  SmallVector<llvm::Metadata *, 8> argNames;
 1775 
1776  if (FD && CGF)
1777  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
1778  const ParmVarDecl *parm = FD->getParamDecl(i);
1779  // Get argument name.
1780  argNames.push_back(llvm::MDString::get(VMContext, parm->getName()));
1781 
1782  if (!getLangOpts().OpenCL)
1783  continue;
1784  QualType ty = parm->getType();
1785  std::string typeQuals;
1786 
1787  // Get image and pipe access qualifier:
1788  if (ty->isImageType() || ty->isPipeType()) {
1789  const Decl *PDecl = parm;
1790  if (const auto *TD = ty->getAs<TypedefType>())
1791  PDecl = TD->getDecl();
1792  const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
1793  if (A && A->isWriteOnly())
1794  accessQuals.push_back(llvm::MDString::get(VMContext, "write_only"));
1795  else if (A && A->isReadWrite())
1796  accessQuals.push_back(llvm::MDString::get(VMContext, "read_write"));
1797  else
1798  accessQuals.push_back(llvm::MDString::get(VMContext, "read_only"));
1799  } else
1800  accessQuals.push_back(llvm::MDString::get(VMContext, "none"));
1801 
1802  auto getTypeSpelling = [&](QualType Ty) {
1803  auto typeName = Ty.getUnqualifiedType().getAsString(Policy);
1804 
1805  if (Ty.isCanonical()) {
1806  StringRef typeNameRef = typeName;
1807  // Turn "unsigned type" to "utype"
1808  if (typeNameRef.consume_front("unsigned "))
1809  return std::string("u") + typeNameRef.str();
1810  if (typeNameRef.consume_front("signed "))
1811  return typeNameRef.str();
1812  }
1813 
1814  return typeName;
1815  };
1816 
1817  if (ty->isPointerType()) {
1818  QualType pointeeTy = ty->getPointeeType();
1819 
1820  // Get address qualifier.
1821  addressQuals.push_back(
1822  llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(
1823  ArgInfoAddressSpace(pointeeTy.getAddressSpace()))));
1824 
1825  // Get argument type name.
1826  std::string typeName = getTypeSpelling(pointeeTy) + "*";
1827  std::string baseTypeName =
1828  getTypeSpelling(pointeeTy.getCanonicalType()) + "*";
1829  argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
1830  argBaseTypeNames.push_back(
1831  llvm::MDString::get(VMContext, baseTypeName));
1832 
1833  // Get argument type qualifiers:
1834  if (ty.isRestrictQualified())
1835  typeQuals = "restrict";
1836  if (pointeeTy.isConstQualified() ||
1837  (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
1838  typeQuals += typeQuals.empty() ? "const" : " const";
1839  if (pointeeTy.isVolatileQualified())
1840  typeQuals += typeQuals.empty() ? "volatile" : " volatile";
1841  } else {
1842  uint32_t AddrSpc = 0;
1843  bool isPipe = ty->isPipeType();
1844  if (ty->isImageType() || isPipe)
 1845  AddrSpc = ArgInfoAddressSpace(LangAS::opencl_global);
 1846 
1847  addressQuals.push_back(
1848  llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(AddrSpc)));
1849 
1850  // Get argument type name.
1851  ty = isPipe ? ty->castAs<PipeType>()->getElementType() : ty;
1852  std::string typeName = getTypeSpelling(ty);
1853  std::string baseTypeName = getTypeSpelling(ty.getCanonicalType());
1854 
1855  // Remove access qualifiers on images
1856  // (as they are inseparable from type in clang implementation,
1857  // but OpenCL spec provides a special query to get access qualifier
1858  // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
1859  if (ty->isImageType()) {
1860  removeImageAccessQualifier(typeName);
1861  removeImageAccessQualifier(baseTypeName);
1862  }
1863 
1864  argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
1865  argBaseTypeNames.push_back(
1866  llvm::MDString::get(VMContext, baseTypeName));
1867 
1868  if (isPipe)
1869  typeQuals = "pipe";
1870  }
1871  argTypeQuals.push_back(llvm::MDString::get(VMContext, typeQuals));
1872  }
1873 
1874  if (getLangOpts().OpenCL) {
1875  Fn->setMetadata("kernel_arg_addr_space",
1876  llvm::MDNode::get(VMContext, addressQuals));
1877  Fn->setMetadata("kernel_arg_access_qual",
1878  llvm::MDNode::get(VMContext, accessQuals));
1879  Fn->setMetadata("kernel_arg_type",
1880  llvm::MDNode::get(VMContext, argTypeNames));
1881  Fn->setMetadata("kernel_arg_base_type",
1882  llvm::MDNode::get(VMContext, argBaseTypeNames));
1883  Fn->setMetadata("kernel_arg_type_qual",
1884  llvm::MDNode::get(VMContext, argTypeQuals));
1885  }
1886  if (getCodeGenOpts().EmitOpenCLArgMetadata ||
1887  getCodeGenOpts().HIPSaveKernelArgName)
1888  Fn->setMetadata("kernel_arg_name",
1889  llvm::MDNode::get(VMContext, argNames));
1890 }
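 // Illustrative sketch (not from the original source): for an OpenCL kernel
 // such as
 //
 //   kernel void k(global float *in, read_only image2d_t img);
 //
 // this emits metadata roughly of the form
 //
 //   !kernel_arg_addr_space !{i32 1, i32 1}
 //   !kernel_arg_access_qual !{!"none", !"read_only"}
 //   !kernel_arg_type !{!"float*", !"image2d_t"}
 //   !kernel_arg_base_type !{!"float*", !"image2d_t"}
 //   !kernel_arg_type_qual !{!"", !""}
 //
 // with !kernel_arg_name added only when OpenCL argument metadata or HIP
 // kernel argument names are requested.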
1891 
1892 /// Determines whether the language options require us to model
1893 /// unwind exceptions. We treat -fexceptions as mandating this
1894 /// except under the fragile ObjC ABI with only ObjC exceptions
1895 /// enabled. This means, for example, that C with -fexceptions
1896 /// enables this.
1897 static bool hasUnwindExceptions(const LangOptions &LangOpts) {
1898  // If exceptions are completely disabled, obviously this is false.
1899  if (!LangOpts.Exceptions) return false;
1900 
1901  // If C++ exceptions are enabled, this is true.
1902  if (LangOpts.CXXExceptions) return true;
1903 
1904  // If ObjC exceptions are enabled, this depends on the ABI.
1905  if (LangOpts.ObjCExceptions) {
1906  return LangOpts.ObjCRuntime.hasUnwindExceptions();
1907  }
1908 
1909  return true;
1910 }
1911 
 1912 static bool requiresMemberFunctionPointerTypeMetadata(CodeGenModule &CGM,
 1913  const CXXMethodDecl *MD) {
1914  // Check that the type metadata can ever actually be used by a call.
1915  if (!CGM.getCodeGenOpts().LTOUnit ||
1916  !CGM.HasHiddenLTOVisibility(MD->getParent()))
1917  return false;
1918 
1919  // Only functions whose address can be taken with a member function pointer
1920  // need this sort of type metadata.
1921  return !MD->isStatic() && !MD->isVirtual() && !isa<CXXConstructorDecl>(MD) &&
1922  !isa<CXXDestructorDecl>(MD);
1923 }
1924 
1925 std::vector<const CXXRecordDecl *>
 1926 CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) {
 1927  llvm::SetVector<const CXXRecordDecl *> MostBases;
1928 
1929  std::function<void (const CXXRecordDecl *)> CollectMostBases;
1930  CollectMostBases = [&](const CXXRecordDecl *RD) {
1931  if (RD->getNumBases() == 0)
1932  MostBases.insert(RD);
1933  for (const CXXBaseSpecifier &B : RD->bases())
1934  CollectMostBases(B.getType()->getAsCXXRecordDecl());
1935  };
1936  CollectMostBases(RD);
1937  return MostBases.takeVector();
1938 }
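 // Illustrative example (not from the original source): for the hierarchy
 //
 //   struct A {};  struct B {};
 //   struct C : A, B {};
 //   struct D : C {};
 //
 // getMostBaseClasses(D) returns {A, B}: the classes with no bases reachable
 // from D, which the member-function-pointer type metadata is keyed on.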
1939 
1940 llvm::GlobalVariable *
 1941 CodeGenModule::GetOrCreateRTTIProxyGlobalVariable(llvm::Constant *Addr) {
 1942  auto It = RTTIProxyMap.find(Addr);
1943  if (It != RTTIProxyMap.end())
1944  return It->second;
1945 
1946  auto *FTRTTIProxy = new llvm::GlobalVariable(
1947  TheModule, Addr->getType(),
1948  /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, Addr,
1949  "__llvm_rtti_proxy");
1950  FTRTTIProxy->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1951 
1952  RTTIProxyMap[Addr] = FTRTTIProxy;
1953  return FTRTTIProxy;
1954 }
1955 
 1956 void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
 1957  llvm::Function *F) {
1958  llvm::AttrBuilder B(F->getContext());
1959 
1960  if ((!D || !D->hasAttr<NoUwtableAttr>()) && CodeGenOpts.UnwindTables)
1961  B.addUWTableAttr(llvm::UWTableKind(CodeGenOpts.UnwindTables));
1962 
1963  if (CodeGenOpts.StackClashProtector)
1964  B.addAttribute("probe-stack", "inline-asm");
1965 
1966  if (!hasUnwindExceptions(LangOpts))
1967  B.addAttribute(llvm::Attribute::NoUnwind);
1968 
1969  if (D && D->hasAttr<NoStackProtectorAttr>())
1970  ; // Do nothing.
1971  else if (D && D->hasAttr<StrictGuardStackCheckAttr>() &&
1972  LangOpts.getStackProtector() == LangOptions::SSPOn)
1973  B.addAttribute(llvm::Attribute::StackProtectStrong);
1974  else if (LangOpts.getStackProtector() == LangOptions::SSPOn)
1975  B.addAttribute(llvm::Attribute::StackProtect);
1976  else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
1977  B.addAttribute(llvm::Attribute::StackProtectStrong);
1978  else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
1979  B.addAttribute(llvm::Attribute::StackProtectReq);
1980 
1981  if (!D) {
1982  // If we don't have a declaration to control inlining, the function isn't
1983  // explicitly marked as alwaysinline for semantic reasons, and inlining is
1984  // disabled, mark the function as noinline.
1985  if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline) &&
1986  CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining)
1987  B.addAttribute(llvm::Attribute::NoInline);
1988 
1989  F->addFnAttrs(B);
1990  return;
1991  }
1992 
1993  // Track whether we need to add the optnone LLVM attribute,
1994  // starting with the default for this optimization level.
1995  bool ShouldAddOptNone =
1996  !CodeGenOpts.DisableO0ImplyOptNone && CodeGenOpts.OptimizationLevel == 0;
1997  // We can't add optnone in the following cases, it won't pass the verifier.
1998  ShouldAddOptNone &= !D->hasAttr<MinSizeAttr>();
1999  ShouldAddOptNone &= !D->hasAttr<AlwaysInlineAttr>();
2000 
2001  // Add optnone, but do so only if the function isn't always_inline.
2002  if ((ShouldAddOptNone || D->hasAttr<OptimizeNoneAttr>()) &&
2003  !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
2004  B.addAttribute(llvm::Attribute::OptimizeNone);
2005 
2006  // OptimizeNone implies noinline; we should not be inlining such functions.
2007  B.addAttribute(llvm::Attribute::NoInline);
2008 
2009  // We still need to handle naked functions even though optnone subsumes
2010  // much of their semantics.
2011  if (D->hasAttr<NakedAttr>())
2012  B.addAttribute(llvm::Attribute::Naked);
2013 
2014  // OptimizeNone wins over OptimizeForSize and MinSize.
2015  F->removeFnAttr(llvm::Attribute::OptimizeForSize);
2016  F->removeFnAttr(llvm::Attribute::MinSize);
2017  } else if (D->hasAttr<NakedAttr>()) {
2018  // Naked implies noinline: we should not be inlining such functions.
2019  B.addAttribute(llvm::Attribute::Naked);
2020  B.addAttribute(llvm::Attribute::NoInline);
2021  } else if (D->hasAttr<NoDuplicateAttr>()) {
2022  B.addAttribute(llvm::Attribute::NoDuplicate);
2023  } else if (D->hasAttr<NoInlineAttr>() && !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
2024  // Add noinline if the function isn't always_inline.
2025  B.addAttribute(llvm::Attribute::NoInline);
2026  } else if (D->hasAttr<AlwaysInlineAttr>() &&
2027  !F->hasFnAttribute(llvm::Attribute::NoInline)) {
2028  // (noinline wins over always_inline, and we can't specify both in IR)
2029  B.addAttribute(llvm::Attribute::AlwaysInline);
2030  } else if (CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
2031  // If we're not inlining, then force everything that isn't always_inline to
2032  // carry an explicit noinline attribute.
2033  if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline))
2034  B.addAttribute(llvm::Attribute::NoInline);
2035  } else {
2036  // Otherwise, propagate the inline hint attribute and potentially use its
2037  // absence to mark things as noinline.
2038  if (auto *FD = dyn_cast<FunctionDecl>(D)) {
2039  // Search function and template pattern redeclarations for inline.
2040  auto CheckForInline = [](const FunctionDecl *FD) {
2041  auto CheckRedeclForInline = [](const FunctionDecl *Redecl) {
2042  return Redecl->isInlineSpecified();
2043  };
2044  if (any_of(FD->redecls(), CheckRedeclForInline))
2045  return true;
2046  const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern();
2047  if (!Pattern)
2048  return false;
2049  return any_of(Pattern->redecls(), CheckRedeclForInline);
2050  };
2051  if (CheckForInline(FD)) {
2052  B.addAttribute(llvm::Attribute::InlineHint);
 2053  } else if (CodeGenOpts.getInlining() ==
 2054  CodeGenOptions::OnlyHintInlining &&
 2055  !FD->isInlined() &&
2056  !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
2057  B.addAttribute(llvm::Attribute::NoInline);
2058  }
2059  }
2060  }
2061 
2062  // Add other optimization related attributes if we are optimizing this
2063  // function.
2064  if (!D->hasAttr<OptimizeNoneAttr>()) {
2065  if (D->hasAttr<ColdAttr>()) {
2066  if (!ShouldAddOptNone)
2067  B.addAttribute(llvm::Attribute::OptimizeForSize);
2068  B.addAttribute(llvm::Attribute::Cold);
2069  }
2070  if (D->hasAttr<HotAttr>())
2071  B.addAttribute(llvm::Attribute::Hot);
2072  if (D->hasAttr<MinSizeAttr>())
2073  B.addAttribute(llvm::Attribute::MinSize);
2074  }
2075 
2076  F->addFnAttrs(B);
2077 
2078  unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
2079  if (alignment)
2080  F->setAlignment(llvm::Align(alignment));
2081 
2082  if (!D->hasAttr<AlignedAttr>())
2083  if (LangOpts.FunctionAlignment)
2084  F->setAlignment(llvm::Align(1ull << LangOpts.FunctionAlignment));
2085 
2086  // Some C++ ABIs require 2-byte alignment for member functions, in order to
2087  // reserve a bit for differentiating between virtual and non-virtual member
2088  // functions. If the current target's C++ ABI requires this and this is a
2089  // member function, set its alignment accordingly.
2090  if (getTarget().getCXXABI().areMemberFunctionsAligned()) {
2091  if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
2092  F->setAlignment(llvm::Align(2));
2093  }
2094 
2095  // In the cross-dso CFI mode with canonical jump tables, we want !type
2096  // attributes on definitions only.
2097  if (CodeGenOpts.SanitizeCfiCrossDso &&
2098  CodeGenOpts.SanitizeCfiCanonicalJumpTables) {
2099  if (auto *FD = dyn_cast<FunctionDecl>(D)) {
2100  // Skip available_externally functions. They won't be codegen'ed in the
2101  // current module anyway.
2102  if (getContext().GetGVALinkageForFunction(FD) != GVA_AvailableExternally)
 2103  CreateFunctionTypeMetadataForIcall(FD, F);
 2104  }
2105  }
2106 
2107  // Emit type metadata on member functions for member function pointer checks.
2108  // These are only ever necessary on definitions; we're guaranteed that the
2109  // definition will be present in the LTO unit as a result of LTO visibility.
2110  auto *MD = dyn_cast<CXXMethodDecl>(D);
2111  if (MD && requiresMemberFunctionPointerTypeMetadata(*this, MD)) {
2112  for (const CXXRecordDecl *Base : getMostBaseClasses(MD->getParent())) {
2113  llvm::Metadata *Id =
 2114  CreateMetadataIdentifierForType(Context.getMemberPointerType(
 2115  MD->getType(), Context.getRecordType(Base).getTypePtr()));
2116  F->addTypeMetadata(0, Id);
2117  }
2118  }
2119 }
2120 
 2121 void CodeGenModule::setLLVMFunctionFEnvAttributes(const FunctionDecl *D,
 2122  llvm::Function *F) {
2123  if (D->hasAttr<StrictFPAttr>()) {
2124  llvm::AttrBuilder FuncAttrs(F->getContext());
2125  FuncAttrs.addAttribute("strictfp");
2126  F->addFnAttrs(FuncAttrs);
2127  }
2128 }
2129 
2130 void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
2131  const Decl *D = GD.getDecl();
2132  if (isa_and_nonnull<NamedDecl>(D))
2133  setGVProperties(GV, GD);
2134  else
2135  GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
2136 
2137  if (D && D->hasAttr<UsedAttr>())
 2138  addUsedOrCompilerUsedGlobal(GV);
 2139 
2140  if (CodeGenOpts.KeepStaticConsts && D && isa<VarDecl>(D)) {
2141  const auto *VD = cast<VarDecl>(D);
2142  if (VD->getType().isConstQualified() &&
2143  VD->getStorageDuration() == SD_Static)
 2144  addUsedOrCompilerUsedGlobal(GV);
 2145  }
2146 }
2147 
2148 bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
2149  llvm::AttrBuilder &Attrs) {
2150  // Add target-cpu and target-features attributes to functions. If
2151  // we have a decl for the function and it has a target attribute then
2152  // parse that and add it to the feature set.
2153  StringRef TargetCPU = getTarget().getTargetOpts().CPU;
2154  StringRef TuneCPU = getTarget().getTargetOpts().TuneCPU;
2155  std::vector<std::string> Features;
2156  const auto *FD = dyn_cast_or_null<FunctionDecl>(GD.getDecl());
2157  FD = FD ? FD->getMostRecentDecl() : FD;
2158  const auto *TD = FD ? FD->getAttr<TargetAttr>() : nullptr;
2159  const auto *SD = FD ? FD->getAttr<CPUSpecificAttr>() : nullptr;
2160  const auto *TC = FD ? FD->getAttr<TargetClonesAttr>() : nullptr;
2161  bool AddedAttr = false;
2162  if (TD || SD || TC) {
2163  llvm::StringMap<bool> FeatureMap;
2164  getContext().getFunctionFeatureMap(FeatureMap, GD);
2165 
2166  // Produce the canonical string for this set of features.
2167  for (const llvm::StringMap<bool>::value_type &Entry : FeatureMap)
2168  Features.push_back((Entry.getValue() ? "+" : "-") + Entry.getKey().str());
2169 
2170  // Now add the target-cpu and target-features to the function.
2171  // While we populated the feature map above, we still need to
2172  // get and parse the target attribute so we can get the cpu for
2173  // the function.
2174  if (TD) {
 2175  ParsedTargetAttr ParsedAttr =
 2176  Target.parseTargetAttr(TD->getFeaturesStr());
2177  if (!ParsedAttr.CPU.empty() &&
2178  getTarget().isValidCPUName(ParsedAttr.CPU)) {
2179  TargetCPU = ParsedAttr.CPU;
2180  TuneCPU = ""; // Clear the tune CPU.
2181  }
2182  if (!ParsedAttr.Tune.empty() &&
2183  getTarget().isValidCPUName(ParsedAttr.Tune))
2184  TuneCPU = ParsedAttr.Tune;
2185  }
2186 
2187  if (SD) {
2188  // Apply the given CPU name as the 'tune-cpu' so that the optimizer can
2189  // favor this processor.
2190  TuneCPU = getTarget().getCPUSpecificTuneName(
2191  SD->getCPUName(GD.getMultiVersionIndex())->getName());
2192  }
2193  } else {
2194  // Otherwise just add the existing target cpu and target features to the
2195  // function.
2196  Features = getTarget().getTargetOpts().Features;
2197  }
2198 
2199  if (!TargetCPU.empty()) {
2200  Attrs.addAttribute("target-cpu", TargetCPU);
2201  AddedAttr = true;
2202  }
2203  if (!TuneCPU.empty()) {
2204  Attrs.addAttribute("tune-cpu", TuneCPU);
2205  AddedAttr = true;
2206  }
2207  if (!Features.empty()) {
2208  llvm::sort(Features);
2209  Attrs.addAttribute("target-features", llvm::join(Features, ","));
2210  AddedAttr = true;
2211  }
2212 
2213  return AddedAttr;
2214 }
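 // Illustrative example (not from the original source): on x86-64, a function
 // declared as
 //
 //   __attribute__((target("avx2"))) void f();
 //
 // typically ends up with attributes along the lines of
 //
 //   "target-cpu"="x86-64" "target-features"="+avx,+avx2,+sse4.2,..."
 //
 // where the exact feature list depends on the target and on the
 // implied-feature expansion performed by getFunctionFeatureMap().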
2215 
2216 void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
2217  llvm::GlobalObject *GO) {
2218  const Decl *D = GD.getDecl();
2219  SetCommonAttributes(GD, GO);
2220 
2221  if (D) {
2222  if (auto *GV = dyn_cast<llvm::GlobalVariable>(GO)) {
2223  if (D->hasAttr<RetainAttr>())
2224  addUsedGlobal(GV);
2225  if (auto *SA = D->getAttr<PragmaClangBSSSectionAttr>())
2226  GV->addAttribute("bss-section", SA->getName());
2227  if (auto *SA = D->getAttr<PragmaClangDataSectionAttr>())
2228  GV->addAttribute("data-section", SA->getName());
2229  if (auto *SA = D->getAttr<PragmaClangRodataSectionAttr>())
2230  GV->addAttribute("rodata-section", SA->getName());
2231  if (auto *SA = D->getAttr<PragmaClangRelroSectionAttr>())
2232  GV->addAttribute("relro-section", SA->getName());
2233  }
2234 
2235  if (auto *F = dyn_cast<llvm::Function>(GO)) {
2236  if (D->hasAttr<RetainAttr>())
2237  addUsedGlobal(F);
2238  if (auto *SA = D->getAttr<PragmaClangTextSectionAttr>())
2239  if (!D->getAttr<SectionAttr>())
2240  F->addFnAttr("implicit-section-name", SA->getName());
2241 
2242  llvm::AttrBuilder Attrs(F->getContext());
2243  if (GetCPUAndFeaturesAttributes(GD, Attrs)) {
2244  // We know that GetCPUAndFeaturesAttributes will always have the
2245  // newest set, since it has the newest possible FunctionDecl, so the
2246  // new ones should replace the old.
2247  llvm::AttributeMask RemoveAttrs;
2248  RemoveAttrs.addAttribute("target-cpu");
2249  RemoveAttrs.addAttribute("target-features");
2250  RemoveAttrs.addAttribute("tune-cpu");
2251  F->removeFnAttrs(RemoveAttrs);
2252  F->addFnAttrs(Attrs);
2253  }
2254  }
2255 
2256  if (const auto *CSA = D->getAttr<CodeSegAttr>())
2257  GO->setSection(CSA->getName());
2258  else if (const auto *SA = D->getAttr<SectionAttr>())
2259  GO->setSection(SA->getName());
2260  }
2261 
2262  getTargetCodeGenInfo().setTargetAttributes(D, GO, *this);
2263 }
2264 
 2265 void CodeGenModule::SetInternalFunctionAttributes(GlobalDecl GD,
 2266  llvm::Function *F,
2267  const CGFunctionInfo &FI) {
2268  const Decl *D = GD.getDecl();
2269  SetLLVMFunctionAttributes(GD, FI, F, /*IsThunk=*/false);
 2270  SetLLVMFunctionAttributesForDefinition(D, F);
 2271 
2272  F->setLinkage(llvm::Function::InternalLinkage);
2273 
2274  setNonAliasAttributes(GD, F);
2275 }
2276 
2277 static void setLinkageForGV(llvm::GlobalValue *GV, const NamedDecl *ND) {
2278  // Set linkage and visibility in case we never see a definition.
 2279  LinkageInfo LV = ND->getLinkageAndVisibility();
 2280  // Don't set internal linkage on declarations.
2281  // "extern_weak" is overloaded in LLVM; we probably should have
2282  // separate linkage types for this.
2283  if (isExternallyVisible(LV.getLinkage()) &&
2284  (ND->hasAttr<WeakAttr>() || ND->isWeakImported()))
2285  GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
2286 }
2287 
 2288 void CodeGenModule::CreateFunctionTypeMetadataForIcall(const FunctionDecl *FD,
 2289  llvm::Function *F) {
2290  // Only if we are checking indirect calls.
2291  if (!LangOpts.Sanitize.has(SanitizerKind::CFIICall))
2292  return;
2293 
2294  // Non-static class methods are handled via vtable or member function pointer
2295  // checks elsewhere.
2296  if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic())
2297  return;
2298 
2299  llvm::Metadata *MD = CreateMetadataIdentifierForType(FD->getType());
2300  F->addTypeMetadata(0, MD);
2301  F->addTypeMetadata(0, CreateMetadataIdentifierGeneralized(FD->getType()));
2302 
2303  // Emit a hash-based bit set entry for cross-DSO calls.
2304  if (CodeGenOpts.SanitizeCfiCrossDso)
2305  if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD))
2306  F->addTypeMetadata(0, llvm::ConstantAsMetadata::get(CrossDsoTypeId));
2307 }
2308 
2309 void CodeGenModule::setKCFIType(const FunctionDecl *FD, llvm::Function *F) {
2310  if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic())
2311  return;
2312 
2313  llvm::LLVMContext &Ctx = F->getContext();
2314  llvm::MDBuilder MDB(Ctx);
2315  F->setMetadata(llvm::LLVMContext::MD_kcfi_type,
2316  llvm::MDNode::get(
2317  Ctx, MDB.createConstant(CreateKCFITypeId(FD->getType()))));
2318 }
2319 
2320 static bool allowKCFIIdentifier(StringRef Name) {
2321  // KCFI type identifier constants are only necessary for external assembly
2322  // functions, which means it's safe to skip unusual names. Subset of
2323  // MCAsmInfo::isAcceptableChar() and MCAsmInfoXCOFF::isAcceptableChar().
2324  return llvm::all_of(Name, [](const char &C) {
2325  return llvm::isAlnum(C) || C == '_' || C == '.';
2326  });
2327 }
2328 
 2329 void CodeGenModule::finalizeKCFITypes() {
 2330  llvm::Module &M = getModule();
2331  for (auto &F : M.functions()) {
2332  // Remove KCFI type metadata from non-address-taken local functions.
2333  bool AddressTaken = F.hasAddressTaken();
2334  if (!AddressTaken && F.hasLocalLinkage())
2335  F.eraseMetadata(llvm::LLVMContext::MD_kcfi_type);
2336 
2337  // Generate a constant with the expected KCFI type identifier for all
2338  // address-taken function declarations to support annotating indirectly
2339  // called assembly functions.
2340  if (!AddressTaken || !F.isDeclaration())
2341  continue;
2342 
2343  const llvm::ConstantInt *Type;
2344  if (const llvm::MDNode *MD = F.getMetadata(llvm::LLVMContext::MD_kcfi_type))
2345  Type = llvm::mdconst::extract<llvm::ConstantInt>(MD->getOperand(0));
2346  else
2347  continue;
2348 
2349  StringRef Name = F.getName();
2350  if (!allowKCFIIdentifier(Name))
2351  continue;
2352 
2353  std::string Asm = (".weak __kcfi_typeid_" + Name + "\n.set __kcfi_typeid_" +
2354  Name + ", " + Twine(Type->getZExtValue()) + "\n")
2355  .str();
2356  M.appendModuleInlineAsm(Asm);
2357  }
2358 }
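 // Illustrative sketch (not from the original source): for an address-taken
 // declaration of an external assembly function, this appends module inline
 // asm roughly like
 //
 //   .weak __kcfi_typeid_my_asm_func
 //   .set __kcfi_typeid_my_asm_func, 1234567890
 //
 // so that hand-written assembly can expose the KCFI type id that indirect
 // call checks expect. "my_asm_func" and the constant are made up for the
 // example.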
2359 
2360 void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
2361  bool IsIncompleteFunction,
2362  bool IsThunk) {
2363 
2364  if (llvm::Intrinsic::ID IID = F->getIntrinsicID()) {
2365  // If this is an intrinsic function, set the function's attributes
2366  // to the intrinsic's attributes.
2367  F->setAttributes(llvm::Intrinsic::getAttributes(getLLVMContext(), IID));
2368  return;
2369  }
2370 
2371  const auto *FD = cast<FunctionDecl>(GD.getDecl());
2372 
2373  if (!IsIncompleteFunction)
2374  SetLLVMFunctionAttributes(GD, getTypes().arrangeGlobalDeclaration(GD), F,
2375  IsThunk);
2376 
2377  // Add the Returned attribute for "this", except for iOS 5 and earlier
2378  // where substantial code, including the libstdc++ dylib, was compiled with
2379  // GCC and does not actually return "this".
2380  if (!IsThunk && getCXXABI().HasThisReturn(GD) &&
2381  !(getTriple().isiOS() && getTriple().isOSVersionLT(6))) {
2382  assert(!F->arg_empty() &&
2383  F->arg_begin()->getType()
2384  ->canLosslesslyBitCastTo(F->getReturnType()) &&
2385  "unexpected this return");
2386  F->addParamAttr(0, llvm::Attribute::Returned);
2387  }
2388 
2389  // Only a few attributes are set on declarations; these may later be
2390  // overridden by a definition.
2391 
2392  setLinkageForGV(F, FD);
2393  setGVProperties(F, FD);
2394 
2395  // Setup target-specific attributes.
2396  if (!IsIncompleteFunction && F->isDeclaration())
2397  getTargetCodeGenInfo().setTargetAttributes(FD, F, *this);
2398 
2399  if (const auto *CSA = FD->getAttr<CodeSegAttr>())
2400  F->setSection(CSA->getName());
2401  else if (const auto *SA = FD->getAttr<SectionAttr>())
2402  F->setSection(SA->getName());
2403 
2404  if (const auto *EA = FD->getAttr<ErrorAttr>()) {
2405  if (EA->isError())
2406  F->addFnAttr("dontcall-error", EA->getUserDiagnostic());
2407  else if (EA->isWarning())
2408  F->addFnAttr("dontcall-warn", EA->getUserDiagnostic());
2409  }
2410 
2411  // If we plan on emitting this inline builtin, we can't treat it as a builtin.
2412  if (FD->isInlineBuiltinDeclaration()) {
2413  const FunctionDecl *FDBody;
2414  bool HasBody = FD->hasBody(FDBody);
2415  (void)HasBody;
2416  assert(HasBody && "Inline builtin declarations should always have an "
2417  "available body!");
2418  if (shouldEmitFunction(FDBody))
2419  F->addFnAttr(llvm::Attribute::NoBuiltin);
2420  }
2421 
 2422  if (FD->isReplaceableGlobalAllocationFunction()) {
 2423  // A replaceable global allocation function does not act like a builtin by
2424  // default, only if it is invoked by a new-expression or delete-expression.
2425  F->addFnAttr(llvm::Attribute::NoBuiltin);
2426  }
2427 
2428  if (isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD))
2429  F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2430  else if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
2431  if (MD->isVirtual())
2432  F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2433 
2434  // Don't emit entries for function declarations in the cross-DSO mode. This
2435  // is handled with better precision by the receiving DSO. But if jump tables
2436  // are non-canonical then we need type metadata in order to produce the local
2437  // jump table.
2438  if (!CodeGenOpts.SanitizeCfiCrossDso ||
2439  !CodeGenOpts.SanitizeCfiCanonicalJumpTables)
 2440  CreateFunctionTypeMetadataForIcall(FD, F);
 2441 
2442  if (LangOpts.Sanitize.has(SanitizerKind::KCFI))
2443  setKCFIType(FD, F);
2444 
2445  if (getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>())
 2446  getOpenMPRuntime().emitDeclareSimdFunction(FD, F);
 2447 
2448  if (CodeGenOpts.InlineMaxStackSize != UINT_MAX)
2449  F->addFnAttr("inline-max-stacksize", llvm::utostr(CodeGenOpts.InlineMaxStackSize));
2450 
2451  if (const auto *CB = FD->getAttr<CallbackAttr>()) {
2452  // Annotate the callback behavior as metadata:
2453  // - The callback callee (as argument number).
2454  // - The callback payloads (as argument numbers).
2455  llvm::LLVMContext &Ctx = F->getContext();
2456  llvm::MDBuilder MDB(Ctx);
2457 
2458  // The payload indices are all but the first one in the encoding. The first
2459  // identifies the callback callee.
2460  int CalleeIdx = *CB->encoding_begin();
2461  ArrayRef<int> PayloadIndices(CB->encoding_begin() + 1, CB->encoding_end());
2462  F->addMetadata(llvm::LLVMContext::MD_callback,
2463  *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
2464  CalleeIdx, PayloadIndices,
2465  /* VarArgsArePassed */ false)}));
2466  }
2467 }
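 // Illustrative example (not from the original source): for a declaration
 // carrying the callback attribute, e.g.
 //
 //   __attribute__((callback(cb, data)))
 //   void run(void (*cb)(void *), void *data);
 //
 // the !callback metadata encodes the callee parameter index followed by the
 // payload indices, roughly !{i64 0, i64 1, i1 false}, where the trailing flag
 // records whether varargs are forwarded.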
2468 
2469 void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) {
2470  assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&
2471  "Only globals with definition can force usage.");
2472  LLVMUsed.emplace_back(GV);
2473 }
2474 
2475 void CodeGenModule::addCompilerUsedGlobal(llvm::GlobalValue *GV) {
2476  assert(!GV->isDeclaration() &&
2477  "Only globals with definition can force usage.");
2478  LLVMCompilerUsed.emplace_back(GV);
2479 }
2480 
2481 void CodeGenModule::addUsedOrCompilerUsedGlobal(llvm::GlobalValue *GV) {
2482  assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&
2483  "Only globals with definition can force usage.");
2484  if (getTriple().isOSBinFormatELF())
2485  LLVMCompilerUsed.emplace_back(GV);
2486  else
2487  LLVMUsed.emplace_back(GV);
2488 }
2489 
2490 static void emitUsed(CodeGenModule &CGM, StringRef Name,
2491  std::vector<llvm::WeakTrackingVH> &List) {
2492  // Don't create llvm.used if there is no need.
2493  if (List.empty())
2494  return;
2495 
2496  // Convert List to what ConstantArray needs.
 2497  SmallVector<llvm::Constant*, 8> UsedArray;
 2498  UsedArray.resize(List.size());
2499  for (unsigned i = 0, e = List.size(); i != e; ++i) {
2500  UsedArray[i] =
2501  llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
2502  cast<llvm::Constant>(&*List[i]), CGM.Int8PtrTy);
2503  }
2504 
2505  if (UsedArray.empty())
2506  return;
2507  llvm::ArrayType *ATy = llvm::ArrayType::get(CGM.Int8PtrTy, UsedArray.size());
2508 
2509  auto *GV = new llvm::GlobalVariable(
2510  CGM.getModule(), ATy, false, llvm::GlobalValue::AppendingLinkage,
2511  llvm::ConstantArray::get(ATy, UsedArray), Name);
2512 
2513  GV->setSection("llvm.metadata");
2514 }
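 // Illustrative sketch (not from the original source): a single retained
 // function produces IR roughly like
 //
 //   @llvm.used = appending global [1 x i8*]
 //     [i8* bitcast (void ()* @keep_me to i8*)], section "llvm.metadata"
 //
 // llvm.compiler.used has the same shape but only prevents the compiler (not
 // the linker) from dropping the referenced globals.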
2515 
2516 void CodeGenModule::emitLLVMUsed() {
2517  emitUsed(*this, "llvm.used", LLVMUsed);
2518  emitUsed(*this, "llvm.compiler.used", LLVMCompilerUsed);
2519 }
2520 
 2521 void CodeGenModule::AppendLinkerOptions(StringRef Opts) {
 2522  auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opts);
2523  LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
2524 }
2525 
2526 void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) {
 2527  llvm::SmallString<32> Opt;
 2528  getTargetCodeGenInfo().getDetectMismatchOption(Name, Value, Opt);
 2529  if (Opt.empty())
2530  return;
2531  auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
2532  LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
2533 }
2534 
2535 void CodeGenModule::AddDependentLib(StringRef Lib) {
2536  auto &C = getLLVMContext();
2537  if (getTarget().getTriple().isOSBinFormatELF()) {
2538  ELFDependentLibraries.push_back(
2539  llvm::MDNode::get(C, llvm::MDString::get(C, Lib)));
2540  return;
2541  }
2542 
 2543  llvm::SmallString<24> Opt;
 2544  getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt);
 2545  auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
2546  LinkerOptionsMetadata.push_back(llvm::MDNode::get(C, MDOpts));
2547 }
2548 
2549 /// Add link options implied by the given module, including modules
2550 /// it depends on, using a postorder walk.
 2551 static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
 2552  SmallVectorImpl<llvm::MDNode *> &Metadata,
 2553  llvm::SmallPtrSet<Module *, 16> &Visited) {
 2554  // Import this module's parent.
2555  if (Mod->Parent && Visited.insert(Mod->Parent).second) {
2556  addLinkOptionsPostorder(CGM, Mod->Parent, Metadata, Visited);
2557  }
2558 
2559  // Import this module's dependencies.
2560  for (Module *Import : llvm::reverse(Mod->Imports)) {
2561  if (Visited.insert(Import).second)
2562  addLinkOptionsPostorder(CGM, Import, Metadata, Visited);
2563  }
2564 
2565  // Add linker options to link against the libraries/frameworks
2566  // described by this module.
2567  llvm::LLVMContext &Context = CGM.getLLVMContext();
2568  bool IsELF = CGM.getTarget().getTriple().isOSBinFormatELF();
2569 
2570  // For modules that use export_as for linking, use that module
2571  // name instead.
2572  if (Mod->UseExportAsModuleLinkName)
2573  return;
2574 
2575  for (const Module::LinkLibrary &LL : llvm::reverse(Mod->LinkLibraries)) {
 2576  // Link against a framework. Frameworks are currently Darwin only, so we
 2577  // don't need to ask TargetCodeGenInfo for the spelling of the linker option.
2578  if (LL.IsFramework) {
2579  llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"),
2580  llvm::MDString::get(Context, LL.Library)};
2581 
2582  Metadata.push_back(llvm::MDNode::get(Context, Args));
2583  continue;
2584  }
2585 
2586  // Link against a library.
2587  if (IsELF) {
2588  llvm::Metadata *Args[2] = {
2589  llvm::MDString::get(Context, "lib"),
2590  llvm::MDString::get(Context, LL.Library),
2591  };
2592  Metadata.push_back(llvm::MDNode::get(Context, Args));
2593  } else {
 2594  llvm::SmallString<24> Opt;
 2595  CGM.getTargetCodeGenInfo().getDependentLibraryOption(LL.Library, Opt);
2596  auto *OptString = llvm::MDString::get(Context, Opt);
2597  Metadata.push_back(llvm::MDNode::get(Context, OptString));
2598  }
2599  }
2600 }
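 // Illustrative sketch (not from the original source): importing a module that
 // links against a framework (Darwin) or a library (ELF) yields named metadata
 // roughly like
 //
 //   !llvm.linker.options = !{!0, !1}
 //   !0 = !{!"-framework", !"Cocoa"}
 //   !1 = !{!"lib", !"z"}
 //
 // which EmitModuleLinkOptions later attaches to the module.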
2601 
2602 void CodeGenModule::EmitModuleInitializers(clang::Module *Primary) {
 2603  // Emit the initializers in the order that sub-modules appear in the
 2604  // source: first the Global Module Fragment, if present.
2605  if (auto GMF = Primary->getGlobalModuleFragment()) {
2606  for (Decl *D : getContext().getModuleInitializers(GMF)) {
2607  if (isa<ImportDecl>(D))
2608  continue;
2609  assert(isa<VarDecl>(D) && "GMF initializer decl is not a var?");
2610  EmitTopLevelDecl(D);
2611  }
2612  }
 2613  // Second, any associated with the module itself.
2614  for (Decl *D : getContext().getModuleInitializers(Primary)) {
2615  // Skip import decls, the inits for those are called explicitly.
2616  if (isa<ImportDecl>(D))
2617  continue;
2618  EmitTopLevelDecl(D);
2619  }
 2620  // Third, any associated with the Private Module Fragment, if present.
2621  if (auto PMF = Primary->getPrivateModuleFragment()) {
2622  for (Decl *D : getContext().getModuleInitializers(PMF)) {
2623  assert(isa<VarDecl>(D) && "PMF initializer decl is not a var?");
2624  EmitTopLevelDecl(D);
2625  }
2626  }
2627 }
2628 
2629 void CodeGenModule::EmitModuleLinkOptions() {
2630  // Collect the set of all of the modules we want to visit to emit link
2631  // options, which is essentially the imported modules and all of their
2632  // non-explicit child modules.
2633  llvm::SetVector<clang::Module *> LinkModules;
 2634  llvm::SmallPtrSet<clang::Module *, 16> Visited;
 2635  SmallVector<clang::Module *, 16> Stack;
 2636 
2637  // Seed the stack with imported modules.
2638  for (Module *M : ImportedModules) {
2639  // Do not add any link flags when an implementation TU of a module imports
2640  // a header of that same module.
2641  if (M->getTopLevelModuleName() == getLangOpts().CurrentModule &&
2642  !getLangOpts().isCompilingModule())
2643  continue;
2644  if (Visited.insert(M).second)
2645  Stack.push_back(M);
2646  }
2647 
2648  // Find all of the modules to import, making a little effort to prune
2649  // non-leaf modules.
2650  while (!Stack.empty()) {
2651  clang::Module *Mod = Stack.pop_back_val();
2652 
2653  bool AnyChildren = false;
2654 
2655  // Visit the submodules of this module.
2656  for (const auto &SM : Mod->submodules()) {
2657  // Skip explicit children; they need to be explicitly imported to be
2658  // linked against.
2659  if (SM->IsExplicit)
2660  continue;
2661 
2662  if (Visited.insert(SM).second) {
2663  Stack.push_back(SM);
2664  AnyChildren = true;
2665  }
2666  }
2667 
2668  // We didn't find any children, so add this module to the list of
2669  // modules to link against.
2670  if (!AnyChildren) {
2671  LinkModules.insert(Mod);
2672  }
2673  }
2674 
2675  // Add link options for all of the imported modules in reverse topological
2676  // order. We don't do anything to try to order import link flags with respect
2677  // to linker options inserted by things like #pragma comment().
2678  SmallVector<llvm::MDNode *, 16> MetadataArgs;
2679  Visited.clear();
2680  for (Module *M : LinkModules)
2681  if (Visited.insert(M).second)
2682  addLinkOptionsPostorder(*this, M, MetadataArgs, Visited);
2683  std::reverse(MetadataArgs.begin(), MetadataArgs.end());
2684  LinkerOptionsMetadata.append(MetadataArgs.begin(), MetadataArgs.end());
2685 
2686  // Add the linker options metadata flag.
2687  auto *NMD = getModule().getOrInsertNamedMetadata("llvm.linker.options");
2688  for (auto *MD : LinkerOptionsMetadata)
2689  NMD->addOperand(MD);
2690 }
2691 
2692 void CodeGenModule::EmitDeferred() {
2693  // Emit deferred declare target declarations.
2694  if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
 2695  getOpenMPRuntime().emitDeferredTargetDecls();
 2696 
2697  // Emit code for any potentially referenced deferred decls. Since a
2698  // previously unused static decl may become used during the generation of code
2699  // for a static function, iterate until no changes are made.
2700 
2701  if (!DeferredVTables.empty()) {
2702  EmitDeferredVTables();
2703 
2704  // Emitting a vtable doesn't directly cause more vtables to
2705  // become deferred, although it can cause functions to be
2706  // emitted that then need those vtables.
2707  assert(DeferredVTables.empty());
2708  }
2709 
2710  // Emit CUDA/HIP static device variables referenced by host code only.
2711  // Note we should not clear CUDADeviceVarODRUsedByHost since it is still
2712  // needed for further handling.
2713  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
2714  llvm::append_range(DeferredDeclsToEmit,
2715  getContext().CUDADeviceVarODRUsedByHost);
2716 
2717  // Stop if we're out of both deferred vtables and deferred declarations.
2718  if (DeferredDeclsToEmit.empty())
2719  return;
2720 
2721  // Grab the list of decls to emit. If EmitGlobalDefinition schedules more
2722  // work, it will not interfere with this.
2723  std::vector<GlobalDecl> CurDeclsToEmit;
2724  CurDeclsToEmit.swap(DeferredDeclsToEmit);
2725 
2726  for (GlobalDecl &D : CurDeclsToEmit) {
2727  // We should call GetAddrOfGlobal with IsForDefinition set to true in order
2728  // to get GlobalValue with exactly the type we need, not something that
 2729  // might have been created for another decl with the same mangled name but
2730  // different type.
2731  llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>(
 2732  GetAddrOfGlobal(D, ForDefinition));
 2733 
2734  // In case of different address spaces, we may still get a cast, even with
2735  // IsForDefinition equal to true. Query mangled names table to get
2736  // GlobalValue.
2737  if (!GV)
2738  GV = GetGlobalValue(getMangledName(D));
2739 
2740  // Make sure GetGlobalValue returned non-null.
2741  assert(GV);
2742 
2743  // Check to see if we've already emitted this. This is necessary
2744  // for a couple of reasons: first, decls can end up in the
2745  // deferred-decls queue multiple times, and second, decls can end
2746  // up with definitions in unusual ways (e.g. by an extern inline
2747  // function acquiring a strong function redefinition). Just
2748  // ignore these cases.
2749  if (!GV->isDeclaration())
2750  continue;
2751 
2752  // If this is OpenMP, check if it is legal to emit this global normally.
2753  if (LangOpts.OpenMP && OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(D))
2754  continue;
2755 
2756  // Otherwise, emit the definition and move on to the next one.
2757  EmitGlobalDefinition(D, GV);
2758 
2759  // If we found out that we need to emit more decls, do that recursively.
2760  // This has the advantage that the decls are emitted in a DFS and related
2761  // ones are close together, which is convenient for testing.
2762  if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) {
2763  EmitDeferred();
2764  assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty());
2765  }
2766  }
2767 }
2768 
2769 void CodeGenModule::EmitVTablesOpportunistically() {
 2770  // Try to emit external vtables as available_externally if all of their
 2771  // inline virtual functions have been emitted. This runs after EmitDeferred()
 2772  // and therefore is not allowed to create new references to things that need
 2773  // to be emitted lazily. Note that it also relies on RTTI being emitted eagerly.
2774 
2775  assert((OpportunisticVTables.empty() || shouldOpportunisticallyEmitVTables())
2776  && "Only emit opportunistic vtables with optimizations");
2777 
2778  for (const CXXRecordDecl *RD : OpportunisticVTables) {
2779  assert(getVTables().isVTableExternal(RD) &&
2780  "This queue should only contain external vtables");
2781  if (getCXXABI().canSpeculativelyEmitVTable(RD))
2782  VTables.GenerateClassData(RD);
2783  }
2784  OpportunisticVTables.clear();
2785 }
2786 
 2787 void CodeGenModule::EmitGlobalAnnotations() {
 2788  if (Annotations.empty())
2789  return;
2790 
2791  // Create a new global variable for the ConstantStruct in the Module.
2792  llvm::Constant *Array = llvm::ConstantArray::get(llvm::ArrayType::get(
2793  Annotations[0]->getType(), Annotations.size()), Annotations);
2794  auto *gv = new llvm::GlobalVariable(getModule(), Array->getType(), false,
2795  llvm::GlobalValue::AppendingLinkage,
2796  Array, "llvm.global.annotations");
2797  gv->setSection(AnnotationSection);
2798 }
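 // Illustrative sketch (not from the original source): a declaration such as
 //
 //   __attribute__((annotate("my_note"))) int g;
 //
 // contributes one entry to llvm.global.annotations whose fields are, in
 // order: the annotated global, the annotation string ("my_note"), the
 // translation unit name, the line number, and a pointer to any constant
 // annotation arguments (or null).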
2799 
2800 llvm::Constant *CodeGenModule::EmitAnnotationString(StringRef Str) {
2801  llvm::Constant *&AStr = AnnotationStrings[Str];
2802  if (AStr)
2803  return AStr;
2804 
2805  // Not found yet, create a new global.
2806  llvm::Constant *s = llvm::ConstantDataArray::getString(getLLVMContext(), Str);
2807  auto *gv =
2808  new llvm::GlobalVariable(getModule(), s->getType(), true,
2809  llvm::GlobalValue::PrivateLinkage, s, ".str");
2810  gv->setSection(AnnotationSection);
2811  gv->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2812  AStr = gv;
2813  return gv;
2814 }
2815 
 2816 llvm::Constant *CodeGenModule::EmitAnnotationUnit(SourceLocation Loc) {
 2817  SourceManager &SM = getContext().getSourceManager();
 2818  PresumedLoc PLoc = SM.getPresumedLoc(Loc);
2819  if (PLoc.isValid())
2820  return EmitAnnotationString(PLoc.getFilename());
2821  return EmitAnnotationString(SM.getBufferName(Loc));
2822 }
2823 
 2824 llvm::Constant *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) {
 2825  SourceManager &SM = getContext().getSourceManager();
 2826  PresumedLoc PLoc = SM.getPresumedLoc(L);
2827  unsigned LineNo = PLoc.isValid() ? PLoc.getLine() :
2828  SM.getExpansionLineNumber(L);
2829  return llvm::ConstantInt::get(Int32Ty, LineNo);
2830 }
2831 
2832 llvm::Constant *CodeGenModule::EmitAnnotationArgs(const AnnotateAttr *Attr) {
2833  ArrayRef<Expr *> Exprs = {Attr->args_begin(), Attr->args_size()};
2834  if (Exprs.empty())
2835  return llvm::ConstantPointerNull::get(GlobalsInt8PtrTy);
2836 
2837  llvm::FoldingSetNodeID ID;
2838  for (Expr *E : Exprs) {
2839  ID.Add(cast<clang::ConstantExpr>(E)->getAPValueResult());
2840  }
2841  llvm::Constant *&Lookup = AnnotationArgs[ID.ComputeHash()];
2842  if (Lookup)
2843  return Lookup;
2844 
 2845  llvm::SmallVector<llvm::Constant *, 4> LLVMArgs;
 2846  LLVMArgs.reserve(Exprs.size());
2847  ConstantEmitter ConstEmiter(*this);
2848  llvm::transform(Exprs, std::back_inserter(LLVMArgs), [&](const Expr *E) {
2849  const auto *CE = cast<clang::ConstantExpr>(E);
2850  return ConstEmiter.emitAbstract(CE->getBeginLoc(), CE->getAPValueResult(),
2851  CE->getType());
2852  });
2853  auto *Struct = llvm::ConstantStruct::getAnon(LLVMArgs);
2854  auto *GV = new llvm::GlobalVariable(getModule(), Struct->getType(), true,
2855  llvm::GlobalValue::PrivateLinkage, Struct,
2856  ".args");
2857  GV->setSection(AnnotationSection);
2858  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2859  auto *Bitcasted = llvm::ConstantExpr::getBitCast(GV, GlobalsInt8PtrTy);
2860 
2861  Lookup = Bitcasted;
2862  return Bitcasted;
2863 }
2864 
2865 llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
2866  const AnnotateAttr *AA,
2867  SourceLocation L) {
2868  // Get the globals for file name, annotation, and the line number.
2869  llvm::Constant *AnnoGV = EmitAnnotationString(AA->getAnnotation()),
2870  *UnitGV = EmitAnnotationUnit(L),
2871  *LineNoCst = EmitAnnotationLineNo(L),
2872  *Args = EmitAnnotationArgs(AA);
2873 
2874  llvm::Constant *GVInGlobalsAS = GV;
2875  if (GV->getAddressSpace() !=
2876  getDataLayout().getDefaultGlobalsAddressSpace()) {
2877  GVInGlobalsAS = llvm::ConstantExpr::getAddrSpaceCast(
2878  GV, GV->getValueType()->getPointerTo(
2879  getDataLayout().getDefaultGlobalsAddressSpace()));
2880  }
2881 
2882  // Create the ConstantStruct for the global annotation.
2883  llvm::Constant *Fields[] = {
2884  llvm::ConstantExpr::getBitCast(GVInGlobalsAS, GlobalsInt8PtrTy),
2885  llvm::ConstantExpr::getBitCast(AnnoGV, GlobalsInt8PtrTy),
2886  llvm::ConstantExpr::getBitCast(UnitGV, GlobalsInt8PtrTy),
2887  LineNoCst,
2888  Args,
2889  };
2890  return llvm::ConstantStruct::getAnon(Fields);
2891 }
2892 
 2893 void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D,
 2894  llvm::GlobalValue *GV) {
2895  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2896  // Get the struct elements for these annotations.
2897  for (const auto *I : D->specific_attrs<AnnotateAttr>())
2898  Annotations.push_back(EmitAnnotateAttr(GV, I, D->getLocation()));
2899 }
2900 
 2901 bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind, llvm::Function *Fn,
 2902  SourceLocation Loc) const {
2903  const auto &NoSanitizeL = getContext().getNoSanitizeList();
2904  // NoSanitize by function name.
2905  if (NoSanitizeL.containsFunction(Kind, Fn->getName()))
2906  return true;
2907  // NoSanitize by location. Check "mainfile" prefix.
2908  auto &SM = Context.getSourceManager();
2909  const FileEntry &MainFile = *SM.getFileEntryForID(SM.getMainFileID());
2910  if (NoSanitizeL.containsMainFile(Kind, MainFile.getName()))
2911  return true;
2912 
2913  // Check "src" prefix.
2914  if (Loc.isValid())
2915  return NoSanitizeL.containsLocation(Kind, Loc);
2916  // If location is unknown, this may be a compiler-generated function. Assume
2917  // it's located in the main file.
2918  return NoSanitizeL.containsFile(Kind, MainFile.getName());
2919 }
2920 
 2921 bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind,
 2922  llvm::GlobalVariable *GV,
2923  SourceLocation Loc, QualType Ty,
2924  StringRef Category) const {
2925  const auto &NoSanitizeL = getContext().getNoSanitizeList();
2926  if (NoSanitizeL.containsGlobal(Kind, GV->getName(), Category))
2927  return true;
2928  auto &SM = Context.getSourceManager();
2929  if (NoSanitizeL.containsMainFile(
2930  Kind, SM.getFileEntryForID(SM.getMainFileID())->getName(), Category))
2931  return true;
2932  if (NoSanitizeL.containsLocation(Kind, Loc, Category))
2933  return true;
2934 
2935  // Check global type.
2936  if (!Ty.isNull()) {
 2937  // Drill down through array types: if a global variable of a given type is
 2938  // not sanitized, we don't instrument arrays of that type either.
2939  while (auto AT = dyn_cast<ArrayType>(Ty.getTypePtr()))
2940  Ty = AT->getElementType();
2942  // Only record types (classes, structs etc.) are ignored.
2943  if (Ty->isRecordType()) {
2944  std::string TypeStr = Ty.getAsString(getContext().getPrintingPolicy());
2945  if (NoSanitizeL.containsType(Kind, TypeStr, Category))
2946  return true;
2947  }
2948  }
2949  return false;
2950 }
2951 
2952 bool CodeGenModule::imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc,
2953  StringRef Category) const {
2954  const auto &XRayFilter = getContext().getXRayFilter();
2955  using ImbueAttr = XRayFunctionFilter::ImbueAttribute;
2956  auto Attr = ImbueAttr::NONE;
2957  if (Loc.isValid())
2958  Attr = XRayFilter.shouldImbueLocation(Loc, Category);
2959  if (Attr == ImbueAttr::NONE)
2960  Attr = XRayFilter.shouldImbueFunction(Fn->getName());
2961  switch (Attr) {
2962  case ImbueAttr::NONE:
2963  return false;
2964  case ImbueAttr::ALWAYS:
2965  Fn->addFnAttr("function-instrument", "xray-always");
2966  break;
2967  case ImbueAttr::ALWAYS_ARG1:
2968  Fn->addFnAttr("function-instrument", "xray-always");
2969  Fn->addFnAttr("xray-log-args", "1");
2970  break;
2971  case ImbueAttr::NEVER:
2972  Fn->addFnAttr("function-instrument", "xray-never");
2973  break;
2974  }
2975  return true;
2976 }
2977 
 2978 ProfileList::ExclusionType
 2979 CodeGenModule::isFunctionBlockedByProfileList(llvm::Function *Fn,
 2980  SourceLocation Loc) const {
2981  const auto &ProfileList = getContext().getProfileList();
2982  // If the profile list is empty, then instrument everything.
2983  if (ProfileList.isEmpty())
2984  return ProfileList::Allow;
 2985  CodeGenOptions::ProfileInstrKind Kind = getCodeGenOpts().getProfileInstr();
 2986  // First, check the function name.
2987  if (auto V = ProfileList.isFunctionExcluded(Fn->getName(), Kind))
2988  return *V;
2989  // Next, check the source location.
2990  if (Loc.isValid())
2991  if (auto V = ProfileList.isLocationExcluded(Loc, Kind))
2992  return *V;
2993  // If location is unknown, this may be a compiler-generated function. Assume
2994  // it's located in the main file.
2995  auto &SM = Context.getSourceManager();
2996  if (const auto *MainFile = SM.getFileEntryForID(SM.getMainFileID()))
2997  if (auto V = ProfileList.isFileExcluded(MainFile->getName(), Kind))
2998  return *V;
2999  return ProfileList.getDefault(Kind);
3000 }
3001 
 3002 ProfileList::ExclusionType
 3003 CodeGenModule::isFunctionBlockedFromProfileInstr(llvm::Function *Fn,
 3004  SourceLocation Loc) const {
3005  auto V = isFunctionBlockedByProfileList(Fn, Loc);
3006  if (V != ProfileList::Allow)
3007  return V;
3008 
3009  auto NumGroups = getCodeGenOpts().ProfileTotalFunctionGroups;
3010  if (NumGroups > 1) {
3011  auto Group = llvm::crc32(arrayRefFromStringRef(Fn->getName())) % NumGroups;
3012  if (Group != getCodeGenOpts().ProfileSelectedFunctionGroup)
3013  return ProfileList::Skip;
3014  }
3015  return ProfileList::Allow;
3016 }
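 // Illustrative example (not from the original source): with
 // ProfileTotalFunctionGroups == 3 and ProfileSelectedFunctionGroup == 1, a
 // function is instrumented only if crc32(its name) % 3 == 1; the other two
 // thirds of the functions are skipped, which lets large builds split
 // instrumentation overhead across separate runs.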
3017 
3018 bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
3019  // Never defer when EmitAllDecls is specified.
3020  if (LangOpts.EmitAllDecls)
3021  return true;
3022 
3023  if (CodeGenOpts.KeepStaticConsts) {
3024  const auto *VD = dyn_cast<VarDecl>(Global);
3025  if (VD && VD->getType().isConstQualified() &&
3026  VD->getStorageDuration() == SD_Static)
3027  return true;
3028  }
3029 
3030  return getContext().DeclMustBeEmitted(Global);
3031 }
3032 
3033 bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
 3034  // In OpenMP 5.0, variables and functions may be marked as
 3035  // device_type(host/nohost), and we should not emit them eagerly unless we
 3036  // are sure that they must be emitted on the host/device. To be sure, we need
 3037  // to have seen a declare target that explicitly mentions the function; we
 3038  // know we have one if the level of the declare target attribute is -1. Note
 3039  // that we check elsewhere whether we should emit this at all.
3040  if (LangOpts.OpenMP >= 50 && !LangOpts.OpenMPSimd) {
3041  llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
3042  OMPDeclareTargetDeclAttr::getActiveAttr(Global);
3043  if (!ActiveAttr || (*ActiveAttr)->getLevel() != (unsigned)-1)
3044  return false;
3045  }
3046 
3047  if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
3048  if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
3049  // Implicit template instantiations may change linkage if they are later
3050  // explicitly instantiated, so they should not be emitted eagerly.
3051  return false;
3052  }
3053  if (const auto *VD = dyn_cast<VarDecl>(Global)) {
3054  if (Context.getInlineVariableDefinitionKind(VD) ==
3055  ASTContext::InlineVariableDefinitionKind::WeakUnknown)
3056  // A definition of an inline constexpr static data member may change
3057  // linkage later if it's redeclared outside the class.
3058  return false;
3059  if (CXX20ModuleInits && VD->getOwningModule() &&
3060  !VD->getOwningModule()->isModuleMapModule()) {
3061  // For CXX20, module-owned initializers need to be deferred, since it is
3062  // not known at this point if they will be run for the current module or
3063  // as part of the initializer for an imported one.
3064  return false;
3065  }
3066  }
3067  // If OpenMP is enabled and threadprivates must be generated like TLS, delay
3068  // codegen for global variables, because they may be marked as threadprivate.
3069  if (LangOpts.OpenMP && LangOpts.OpenMPUseTLS &&
3070  getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Global) &&
3071  !isTypeConstant(Global->getType(), false) &&
3072  !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(Global))
3073  return false;
3074 
3075  return true;
3076 }
3077 
3078 ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) {
3079  StringRef Name = getMangledName(GD);
3080 
3081  // The UUID descriptor should be pointer aligned.
3082  CharUnits Alignment = CharUnits::fromQuantity(PointerAlignInBytes);
3083 
3084  // Look for an existing global.
3085  if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
3086  return ConstantAddress(GV, GV->getValueType(), Alignment);
3087 
3088  ConstantEmitter Emitter(*this);
3089  llvm::Constant *Init;
3090 
3091  APValue &V = GD->getAsAPValue();
3092  if (!V.isAbsent()) {
3093  // If possible, emit the APValue version of the initializer. In particular,
3094  // this gets the type of the constant right.
3095  Init = Emitter.emitForInitializer(
3096  GD->getAsAPValue(), GD->getType().getAddressSpace(), GD->getType());
3097  } else {
3098  // As a fallback, directly construct the constant.
3099  // FIXME: This may get padding wrong under esoteric struct layout rules.
3100  // MSVC appears to create a complete type 'struct __s_GUID' that it
3101  // presumably uses to represent these constants.
3102  MSGuidDecl::Parts Parts = GD->getParts();
3103  llvm::Constant *Fields[4] = {
3104  llvm::ConstantInt::get(Int32Ty, Parts.Part1),
3105  llvm::ConstantInt::get(Int16Ty, Parts.Part2),
3106  llvm::ConstantInt::get(Int16Ty, Parts.Part3),
3107  llvm::ConstantDataArray::getRaw(
3108  StringRef(reinterpret_cast<char *>(Parts.Part4And5), 8), 8,
3109  Int8Ty)};
3110  Init = llvm::ConstantStruct::getAnon(Fields);
3111  }
3112 
3113  auto *GV = new llvm::GlobalVariable(
3114  getModule(), Init->getType(),
3115  /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
3116  if (supportsCOMDAT())
3117  GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
3118  setDSOLocal(GV);
3119 
3120  if (!V.isAbsent()) {
3121  Emitter.finalize(GV);
3122  return ConstantAddress(GV, GV->getValueType(), Alignment);
3123  }
3124 
3125  llvm::Type *Ty = getTypes().ConvertTypeForMem(GD->getType());
3126  llvm::Constant *Addr = llvm::ConstantExpr::getBitCast(
3127  GV, Ty->getPointerTo(GV->getAddressSpace()));
3128  return ConstantAddress(Addr, Ty, Alignment);
3129 }
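// Example (illustrative user code, not from this file): under -fms-extensions,
//
//   struct __declspec(uuid("12345678-1234-1234-1234-1234567890ab")) S;
//
// a use of __uuidof(S) reaches this path and yields a linkonce_odr constant
// holding the GUID bytes, placed in a COMDAT when the target supports it.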
3130 
3131 ConstantAddress CodeGenModule::GetAddrOfUnnamedGlobalConstantDecl(
3132  const UnnamedGlobalConstantDecl *GCD) {
3133  CharUnits Alignment = getContext().getTypeAlignInChars(GCD->getType());
3134 
3135  llvm::GlobalVariable **Entry = nullptr;
3136  Entry = &UnnamedGlobalConstantDeclMap[GCD];
3137  if (*Entry)
3138  return ConstantAddress(*Entry, (*Entry)->getValueType(), Alignment);
3139 
3140  ConstantEmitter Emitter(*this);
3141  llvm::Constant *Init;
3142 
3143  const APValue &V = GCD->getValue();
3144 
3145  assert(!V.isAbsent());
3146  Init = Emitter.emitForInitializer(V, GCD->getType().getAddressSpace(),
3147  GCD->getType());
3148 
3149  auto *GV = new llvm::GlobalVariable(getModule(), Init->getType(),
3150  /*isConstant=*/true,
3151  llvm::GlobalValue::PrivateLinkage, Init,
3152  ".constant");
3153  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3154  GV->setAlignment(Alignment.getAsAlign());
3155 
3156  Emitter.finalize(GV);
3157 
3158  *Entry = GV;
3159  return ConstantAddress(GV, GV->getValueType(), Alignment);
3160 }
3161 
3162 ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
3163  const TemplateParamObjectDecl *TPO) {
3164  StringRef Name = getMangledName(TPO);
3165  CharUnits Alignment = getNaturalTypeAlignment(TPO->getType());
3166 
3167  if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
3168  return ConstantAddress(GV, GV->getValueType(), Alignment);
3169 
3170  ConstantEmitter Emitter(*this);
3171  llvm::Constant *Init = Emitter.emitForInitializer(
3172  TPO->getValue(), TPO->getType().getAddressSpace(), TPO->getType());
3173 
3174  if (!Init) {
3175  ErrorUnsupported(TPO, "template parameter object");
3176  return ConstantAddress::invalid();
3177  }
3178 
3179  auto *GV = new llvm::GlobalVariable(
3180  getModule(), Init->getType(),
3181  /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
3182  if (supportsCOMDAT())
3183  GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
3184  Emitter.finalize(GV);
3185 
3186  return ConstantAddress(GV, GV->getValueType(), Alignment);
3187 }
3188 
3189 ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
3190  const AliasAttr *AA = VD->getAttr<AliasAttr>();
3191  assert(AA && "No alias?");
3192 
3193  CharUnits Alignment = getContext().getDeclAlign(VD);
3194  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());
3195 
3196  // See if there is already something with the target's name in the module.
3197  llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
3198  if (Entry) {
3199  unsigned AS = getTypes().getTargetAddressSpace(VD->getType());
3200  auto Ptr = llvm::ConstantExpr::getBitCast(Entry, DeclTy->getPointerTo(AS));
3201  return ConstantAddress(Ptr, DeclTy, Alignment);
3202  }
3203 
3204  llvm::Constant *Aliasee;
3205  if (isa<llvm::FunctionType>(DeclTy))
3206  Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy,
3207  GlobalDecl(cast<FunctionDecl>(VD)),
3208  /*ForVTable=*/false);
3209  else
3210  Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(), DeclTy, LangAS::Default,
3211  nullptr);
3212 
3213  auto *F = cast<llvm::GlobalValue>(Aliasee);
3214  F->setLinkage(llvm::Function::ExternalWeakLinkage);
3215  WeakRefReferences.insert(F);
3216 
3217  return ConstantAddress(Aliasee, DeclTy, Alignment);
3218 }
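// Example (illustrative user code, names invented): a declaration such as
//
//   static int fallback(void) __attribute__((weakref("real_impl")));
//
// is resolved here: the aliasee "real_impl" is looked up or created, given
// extern_weak linkage, and recorded in WeakRefReferences.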
3219 
3220 void CodeGenModule::EmitGlobal(GlobalDecl GD) {
3221  const auto *Global = cast<ValueDecl>(GD.getDecl());
3222 
3223  // Weak references don't produce any output by themselves.
3224  if (Global->hasAttr<WeakRefAttr>())
3225  return;
3226 
3227  // If this is an alias definition (which otherwise looks like a declaration)
3228  // emit it now.
3229  if (Global->hasAttr<AliasAttr>())
3230  return EmitAliasDefinition(GD);
3231 
3232  // An IFunc is like an alias whose value is resolved at runtime by calling a resolver.
3233  if (Global->hasAttr<IFuncAttr>())
3234  return emitIFuncDefinition(GD);
3235 
3236  // If this is a cpu_dispatch multiversion function, emit the resolver.
3237  if (Global->hasAttr<CPUDispatchAttr>())
3238  return emitCPUDispatchDefinition(GD);
3239 
3240  // If this is CUDA, be selective about which declarations we emit.
3241  if (LangOpts.CUDA) {
3242  if (LangOpts.CUDAIsDevice) {
3243  if (!Global->hasAttr<CUDADeviceAttr>() &&
3244  !Global->hasAttr<CUDAGlobalAttr>() &&
3245  !Global->hasAttr<CUDAConstantAttr>() &&
3246  !Global->hasAttr<CUDASharedAttr>() &&
3247  !Global->getType()->isCUDADeviceBuiltinSurfaceType() &&
3248  !Global->getType()->isCUDADeviceBuiltinTextureType())
3249  return;
3250  } else {
3251  // We need to emit host-side 'shadows' for all global
3252  // device-side variables because the CUDA runtime needs their
3253  // size and host-side address in order to provide access to
3254  // their device-side incarnations.
3255 
3256  // So device-only functions are the only things we skip.
3257  if (isa<FunctionDecl>(Global) && !Global->hasAttr<CUDAHostAttr>() &&
3258  Global->hasAttr<CUDADeviceAttr>())
3259  return;
3260 
3261  assert((isa<FunctionDecl>(Global) || isa<VarDecl>(Global)) &&
3262  "Expected Variable or Function");
3263  }
3264  }
3265 
3266  if (LangOpts.OpenMP) {
3267  // If this is OpenMP, check if it is legal to emit this global normally.
3268  if (OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(GD))
3269  return;
3270  if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(Global)) {
3271  if (MustBeEmitted(Global))
3272  EmitOMPDeclareReduction(DRD);
3273  return;
3274  } else if (auto *DMD = dyn_cast<OMPDeclareMapperDecl>(Global)) {
3275  if (MustBeEmitted(Global))
3276  EmitOMPDeclareMapper(DMD);
3277  return;
3278  }
3279  }
3280 
3281  // Ignore declarations, they will be emitted on their first use.
3282  if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
3283  // Forward declarations are emitted lazily on first use.
3284  if (!FD->doesThisDeclarationHaveABody()) {
3285  if (!FD->doesDeclarationForceExternallyVisibleDefinition())
3286  return;
3287 
3288  StringRef MangledName = getMangledName(GD);
3289 
3290  // Compute the function info and LLVM type.
3291  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
3292  llvm::Type *Ty = getTypes().GetFunctionType(FI);
3293 
3294  GetOrCreateLLVMFunction(MangledName, Ty, GD, /*ForVTable=*/false,
3295  /*DontDefer=*/false);
3296  return;
3297  }
3298  } else {
3299  const auto *VD = cast<VarDecl>(Global);
3300  assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
3301  if (VD->isThisDeclarationADefinition() != VarDecl::Definition &&
3302  !Context.isMSStaticDataMemberInlineDefinition(VD)) {
3303  if (LangOpts.OpenMP) {
3304  // Emit declaration of the must-be-emitted declare target variable.
3305  if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
3306  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
3307  bool UnifiedMemoryEnabled =
3308  getOpenMPRuntime().hasRequiresUnifiedSharedMemory();
3309  if ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
3310  *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
3311  !UnifiedMemoryEnabled) {
3312  (void)GetAddrOfGlobalVar(VD);
3313  } else {
3314  assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
3315  ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
3316  *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
3317  UnifiedMemoryEnabled)) &&
3318  "Link clause or to clause with unified memory expected.");
3319  (void)getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
3320  }
3321 
3322  return;
3323  }
3324  }
3325  // If this declaration may have caused an inline variable definition to
3326  // change linkage, make sure that it's emitted.
3327  if (Context.getInlineVariableDefinitionKind(VD) ==
3328  ASTContext::InlineVariableDefinitionKind::Strong)
3329  GetAddrOfGlobalVar(VD);
3330  return;
3331  }
3332  }
3333 
3334  // Defer code generation to first use when possible, e.g. if this is an inline
3335  // function. If the global must always be emitted, do it eagerly if possible
3336  // to benefit from cache locality.
3337  if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) {
3338  // Emit the definition if it can't be deferred.
3339  EmitGlobalDefinition(GD);
3340  return;
3341  }
3342 
3343  // If we're deferring emission of a C++ variable with an
3344  // initializer, remember the order in which it appeared in the file.
3345  if (getLangOpts().CPlusPlus && isa<VarDecl>(Global) &&
3346  cast<VarDecl>(Global)->hasInit()) {
3347  DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
3348  CXXGlobalInits.push_back(nullptr);
3349  }
3350 
3351  StringRef MangledName = getMangledName(GD);
3352  if (GetGlobalValue(MangledName) != nullptr) {
3353  // The value has already been used and should therefore be emitted.
3354  addDeferredDeclToEmit(GD);
3355  } else if (MustBeEmitted(Global)) {
3356  // The value must be emitted, but cannot be emitted eagerly.
3357  assert(!MayBeEmittedEagerly(Global));
3358  addDeferredDeclToEmit(GD);
3359  EmittedDeferredDecls[MangledName] = GD;
3360  } else {
3361  // Otherwise, remember that we saw a deferred decl with this name. The
3362  // first use of the mangled name will cause it to move into
3363  // DeferredDeclsToEmit.
3364  DeferredDecls[MangledName] = GD;
3365  }
3366 }
3367 
3368 // Check if T is a class type with a destructor that's not dllimport.
3369 static bool HasNonDllImportDtor(QualType T) {
3370  if (const auto *RT = T->getBaseElementTypeUnsafe()->getAs<RecordType>())
3371  if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
3372  if (RD->getDestructor() && !RD->getDestructor()->hasAttr<DLLImportAttr>())
3373  return true;
3374 
3375  return false;
3376 }
3377 
3378 namespace {
3379  struct FunctionIsDirectlyRecursive
3380  : public ConstStmtVisitor<FunctionIsDirectlyRecursive, bool> {
3381  const StringRef Name;
3382  const Builtin::Context &BI;
3383  FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C)
3384  : Name(N), BI(C) {}
3385 
3386  bool VisitCallExpr(const CallExpr *E) {
3387  const FunctionDecl *FD = E->getDirectCallee();
3388  if (!FD)
3389  return false;
3390  AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
3391  if (Attr && Name == Attr->getLabel())
3392  return true;
3393  unsigned BuiltinID = FD->getBuiltinID();
3394  if (!BuiltinID || !BI.isLibFunction(BuiltinID))
3395  return false;
3396  StringRef BuiltinName = BI.getName(BuiltinID);
3397  if (BuiltinName.startswith("__builtin_") &&
3398  Name == BuiltinName.slice(strlen("__builtin_"), StringRef::npos)) {
3399  return true;
3400  }
3401  return false;
3402  }
3403 
3404  bool VisitStmt(const Stmt *S) {
3405  for (const Stmt *Child : S->children())
3406  if (Child && this->Visit(Child))
3407  return true;
3408  return false;
3409  }
3410  };
3411 
3412  // Make sure we're not referencing non-imported vars or functions.
3413  struct DLLImportFunctionVisitor
3414  : public RecursiveASTVisitor<DLLImportFunctionVisitor> {
3415  bool SafeToInline = true;
3416 
3417  bool shouldVisitImplicitCode() const { return true; }
3418 
3419  bool VisitVarDecl(VarDecl *VD) {
3420  if (VD->getTLSKind()) {
3421  // A thread-local variable cannot be imported.
3422  SafeToInline = false;
3423  return SafeToInline;
3424  }
3425 
3426  // A variable definition might imply a destructor call.
3427  if (VD->isThisDeclarationADefinition())
3428  SafeToInline = !HasNonDllImportDtor(VD->getType());
3429 
3430  return SafeToInline;
3431  }
3432 
3433  bool VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
3434  if (const auto *D = E->getTemporary()->getDestructor())
3435  SafeToInline = D->hasAttr<DLLImportAttr>();
3436  return SafeToInline;
3437  }
3438 
3439  bool VisitDeclRefExpr(DeclRefExpr *E) {
3440  ValueDecl *VD = E->getDecl();
3441  if (isa<FunctionDecl>(VD))
3442  SafeToInline = VD->hasAttr<DLLImportAttr>();
3443  else if (VarDecl *V = dyn_cast<VarDecl>(VD))
3444  SafeToInline = !V->hasGlobalStorage() || V->hasAttr<DLLImportAttr>();
3445  return SafeToInline;
3446  }
3447 
3448  bool VisitCXXConstructExpr(CXXConstructExpr *E) {
3449  SafeToInline = E->getConstructor()->hasAttr<DLLImportAttr>();
3450  return SafeToInline;
3451  }
3452 
3453  bool VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
3454  CXXMethodDecl *M = E->getMethodDecl();
3455  if (!M) {
3456  // Call through a pointer to member function. This is safe to inline.
3457  SafeToInline = true;
3458  } else {
3459  SafeToInline = M->hasAttr<DLLImportAttr>();
3460  }
3461  return SafeToInline;
3462  }
3463 
3464  bool VisitCXXDeleteExpr(CXXDeleteExpr *E) {
3465  SafeToInline = E->getOperatorDelete()->hasAttr<DLLImportAttr>();
3466  return SafeToInline;
3467  }
3468 
3469  bool VisitCXXNewExpr(CXXNewExpr *E) {
3470  SafeToInline = E->getOperatorNew()->hasAttr<DLLImportAttr>();
3471  return SafeToInline;
3472  }
3473  };
3474 }
3475 
3476 // isTriviallyRecursive - Check if this function calls another
3477 // decl that, because of the asm attribute or the other decl being a builtin,
3478 // ends up pointing to itself.
3479 bool
3480 CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) {
3481  StringRef Name;
3482  if (getCXXABI().getMangleContext().shouldMangleDeclName(FD)) {
3483  // asm labels are a special kind of mangling we have to support.
3484  AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
3485  if (!Attr)
3486  return false;
3487  Name = Attr->getLabel();
3488  } else {
3489  Name = FD->getName();
3490  }
3491 
3492  FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo);
3493  const Stmt *Body = FD->getBody();
3494  return Body ? Walker.Visit(Body) : false;
3495 }
3496 
3497 bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
3498  if (getFunctionLinkage(GD) != llvm::Function::AvailableExternallyLinkage)
3499  return true;
3500  const auto *F = cast<FunctionDecl>(GD.getDecl());
3501  if (CodeGenOpts.OptimizationLevel == 0 && !F->hasAttr<AlwaysInlineAttr>())
3502  return false;
3503 
3504  if (F->hasAttr<DLLImportAttr>() && !F->hasAttr<AlwaysInlineAttr>()) {
3505  // Check whether it would be safe to inline this dllimport function.
3506  DLLImportFunctionVisitor Visitor;
3507  Visitor.TraverseFunctionDecl(const_cast<FunctionDecl*>(F));
3508  if (!Visitor.SafeToInline)
3509  return false;
3510 
3511  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(F)) {
3512  // Implicit destructor invocations aren't captured in the AST, so the
3513  // check above can't see them. Check for them manually here.
3514  for (const Decl *Member : Dtor->getParent()->decls())
3515  if (isa<FieldDecl>(Member))
3516  if (HasNonDllImportDtor(cast<FieldDecl>(Member)->getType()))
3517  return false;
3518  for (const CXXBaseSpecifier &B : Dtor->getParent()->bases())
3519  if (HasNonDllImportDtor(B.getType()))
3520  return false;
3521  }
3522  }
3523 
3524  // Inline builtin declarations must be emitted. They often are fortified
3525  // functions.
3526  if (F->isInlineBuiltinDeclaration())
3527  return true;
3528 
3529  // PR9614. Avoid cases where the source code is lying to us. An available
3530  // externally function should have an equivalent function somewhere else,
3531  // but a function that calls itself through asm label/`__builtin_` trickery is
3532  // clearly not equivalent to the real implementation.
3533  // This happens in glibc's btowc and in some configure checks.
3534  return !isTriviallyRecursive(F);
3535 }
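// Example of the "trivially recursive" case rejected above (illustrative
// sketch of the glibc-style pattern, not the exact source): a header provides
//
//   extern wint_t __btowc_alias(int) __asm__("btowc");
//   extern __inline wint_t btowc(int c) { return __btowc_alias(c); }
//
// The available_externally body of btowc would just call the symbol "btowc",
// i.e. itself, so it is not emitted.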
3536 
3537 bool CodeGenModule::shouldOpportunisticallyEmitVTables() {
3538  return CodeGenOpts.OptimizationLevel > 0;
3539 }
3540 
3541 void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD,
3542  llvm::GlobalValue *GV) {
3543  const auto *FD = cast<FunctionDecl>(GD.getDecl());
3544 
3545  if (FD->isCPUSpecificMultiVersion()) {
3546  auto *Spec = FD->getAttr<CPUSpecificAttr>();
3547  for (unsigned I = 0; I < Spec->cpus_size(); ++I)
3548  EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
3549  } else if (FD->isTargetClonesMultiVersion()) {
3550  auto *Clone = FD->getAttr<TargetClonesAttr>();
3551  for (unsigned I = 0; I < Clone->featuresStrs_size(); ++I)
3552  if (Clone->isFirstOfVersion(I))
3553  EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
3554  // Ensure that the resolver function is also emitted.
3555  GetOrCreateMultiVersionResolver(GD);
3556  } else
3557  EmitGlobalFunctionDefinition(GD, GV);
3558 }
3559 
3560 void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
3561  const auto *D = cast<ValueDecl>(GD.getDecl());
3562 
3563  PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(),
3564  Context.getSourceManager(),
3565  "Generating code for declaration");
3566 
3567  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
3568  // At -O0, don't generate IR for functions with available_externally
3569  // linkage.
3570  if (!shouldEmitFunction(GD))
3571  return;
3572 
3573  llvm::TimeTraceScope TimeScope("CodeGen Function", [&]() {
3574  std::string Name;
3575  llvm::raw_string_ostream OS(Name);
3576  FD->getNameForDiagnostic(OS, getContext().getPrintingPolicy(),
3577  /*Qualified=*/true);
3578  return Name;
3579  });
3580 
3581  if (const auto *Method = dyn_cast<CXXMethodDecl>(D)) {
3582  // Make sure to emit the definition(s) before we emit the thunks.
3583  // This is necessary for the generation of certain thunks.
3584  if (isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method))
3585  ABI->emitCXXStructor(GD);
3586  else if (FD->isMultiVersion())
3587  EmitMultiVersionFunctionDefinition(GD, GV);
3588  else
3589  EmitGlobalFunctionDefinition(GD, GV);
3590 
3591  if (Method->isVirtual())
3592  getVTables().EmitThunks(GD);
3593 
3594  return;
3595  }
3596 
3597  if (FD->isMultiVersion())
3598  return EmitMultiVersionFunctionDefinition(GD, GV);
3599  return EmitGlobalFunctionDefinition(GD, GV);
3600  }
3601 
3602  if (const auto *VD = dyn_cast<VarDecl>(D))
3603  return EmitGlobalVarDefinition(VD, !VD->hasDefinition());
3604 
3605  llvm_unreachable("Invalid argument to EmitGlobalDefinition()");
3606 }
3607 
3608 static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
3609  llvm::Function *NewFn);
3610 
3611 static unsigned
3612 TargetMVPriority(const TargetInfo &TI,
3613  const CodeGenFunction::MultiVersionResolverOption &RO) {
3614  unsigned Priority = 0;
3615  for (StringRef Feat : RO.Conditions.Features)
3616  Priority = std::max(Priority, TI.multiVersionSortPriority(Feat));
3617 
3618  if (!RO.Conditions.Architecture.empty())
3619  Priority = std::max(
3620  Priority, TI.multiVersionSortPriority(RO.Conditions.Architecture));
3621  return Priority;
3622 }
3623 
3624 // Multiversion functions should be at most 'WeakODRLinkage' so that a different
3625 // TU can forward declare the function without causing problems. Particularly
3626 // in the cases of CPUDispatch, this causes issues. This also makes sure we
3627 // work with internal linkage functions, so that the same function name can be
3628 // used with internal linkage in multiple TUs.
3629 llvm::GlobalValue::LinkageTypes getMultiversionLinkage(CodeGenModule &CGM,
3630  GlobalDecl GD) {
3631  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
3632  if (FD->getFormalLinkage() == InternalLinkage)
3633  return llvm::GlobalValue::InternalLinkage;
3634  return llvm::GlobalValue::WeakODRLinkage;
3635 }
3636 
3637 void CodeGenModule::emitMultiVersionFunctions() {
3638  std::vector<GlobalDecl> MVFuncsToEmit;
3639  MultiVersionFuncs.swap(MVFuncsToEmit);
3640  for (GlobalDecl GD : MVFuncsToEmit) {
3641  const auto *FD = cast<FunctionDecl>(GD.getDecl());
3642  assert(FD && "Expected a FunctionDecl");
3643 
3644  SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
3645  if (FD->isTargetMultiVersion()) {
3646  getContext().forEachMultiversionedFunctionVersion(
3647  FD, [this, &GD, &Options](const FunctionDecl *CurFD) {
3648  GlobalDecl CurGD{
3649  (CurFD->isDefined() ? CurFD->getDefinition() : CurFD)};
3650  StringRef MangledName = getMangledName(CurGD);
3651  llvm::Constant *Func = GetGlobalValue(MangledName);
3652  if (!Func) {
3653  if (CurFD->isDefined()) {
3654  EmitGlobalFunctionDefinition(CurGD, nullptr);
3655  Func = GetGlobalValue(MangledName);
3656  } else {
3657  const CGFunctionInfo &FI =
3658  getTypes().arrangeGlobalDeclaration(CurGD);
3659  llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
3660  Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
3661  /*DontDefer=*/false, ForDefinition);
3662  }
3663  assert(Func && "This should have just been created");
3664  }
3665 
3666  const auto *TA = CurFD->getAttr<TargetAttr>();
3667  llvm::SmallVector<StringRef, 8> Feats;
3668  TA->getAddedFeatures(Feats);
3669 
3670  Options.emplace_back(cast<llvm::Function>(Func),
3671  TA->getArchitecture(), Feats);
3672  });
3673  } else if (FD->isTargetClonesMultiVersion()) {
3674  const auto *TC = FD->getAttr<TargetClonesAttr>();
3675  for (unsigned VersionIndex = 0; VersionIndex < TC->featuresStrs_size();
3676  ++VersionIndex) {
3677  if (!TC->isFirstOfVersion(VersionIndex))
3678  continue;
3679  GlobalDecl CurGD{(FD->isDefined() ? FD->getDefinition() : FD),
3680  VersionIndex};
3681  StringRef Version = TC->getFeatureStr(VersionIndex);
3682  StringRef MangledName = getMangledName(CurGD);
3683  llvm::Constant *Func = GetGlobalValue(MangledName);
3684  if (!Func) {
3685  if (FD->isDefined()) {
3686  EmitGlobalFunctionDefinition(CurGD, nullptr);
3687  Func = GetGlobalValue(MangledName);
3688  } else {
3689  const CGFunctionInfo &FI =
3690  getTypes().arrangeGlobalDeclaration(CurGD);
3691  llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
3692  Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
3693  /*DontDefer=*/false, ForDefinition);
3694  }
3695  assert(Func && "This should have just been created");
3696  }
3697 
3698  StringRef Architecture;
3699  llvm::SmallVector<StringRef, 1> Feature;
3700 
3701  if (Version.startswith("arch="))
3702  Architecture = Version.drop_front(sizeof("arch=") - 1);
3703  else if (Version != "default")
3704  Feature.push_back(Version);
3705 
3706  Options.emplace_back(cast<llvm::Function>(Func), Architecture, Feature);
3707  }
3708  } else {
3709  assert(0 && "Expected a target or target_clones multiversion function");
3710  continue;
3711  }
3712 
3713  llvm::Constant *ResolverConstant = GetOrCreateMultiVersionResolver(GD);
3714  if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(ResolverConstant))
3715  ResolverConstant = IFunc->getResolver();
3716  llvm::Function *ResolverFunc = cast<llvm::Function>(ResolverConstant);
3717 
3718  ResolverFunc->setLinkage(getMultiversionLinkage(*this, GD));
3719 
3720  if (supportsCOMDAT())
3721  ResolverFunc->setComdat(
3722  getModule().getOrInsertComdat(ResolverFunc->getName()));
3723 
3724  const TargetInfo &TI = getTarget();
3725  llvm::stable_sort(
3726  Options, [&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
3727  const CodeGenFunction::MultiVersionResolverOption &RHS) {
3728  return TargetMVPriority(TI, LHS) > TargetMVPriority(TI, RHS);
3729  });
3730  CodeGenFunction CGF(*this);
3731  CGF.EmitMultiVersionResolver(ResolverFunc, Options);
3732  }
3733 
3734  // Ensure that any additions to the deferred decls list caused by emitting a
3735  // variant are emitted. This can happen when the variant itself is inline and
3736  // calls a function without linkage.
3737  if (!MVFuncsToEmit.empty())
3738  EmitDeferred();
3739 
3740  // Ensure that any additions to the multiversion funcs list from either the
3741  // deferred decls or the multiversion functions themselves are emitted.
3742  if (!MultiVersionFuncs.empty())
3743  emitMultiVersionFunctions();
3744 }
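// Example (illustrative user code; the feature strings are only examples):
//
//   __attribute__((target_clones("avx2", "default")))
//   int dot(const int *a, const int *b, int n);
//
// gets one emitted body per clone plus the resolver handled above, with the
// options ordered by TargetMVPriority so the most specific version is tested
// first.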
3745 
3746 void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
3747  const auto *FD = cast<FunctionDecl>(GD.getDecl());
3748  assert(FD && "Not a FunctionDecl?");
3749  assert(FD->isCPUDispatchMultiVersion() && "Not a multiversion function?");
3750  const auto *DD = FD->getAttr<CPUDispatchAttr>();
3751  assert(DD && "Not a cpu_dispatch Function?");
3752 
3753  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
3754  llvm::FunctionType *DeclTy = getTypes().GetFunctionType(FI);
3755 
3756  StringRef ResolverName = getMangledName(GD);
3757  UpdateMultiVersionNames(GD, FD, ResolverName);
3758 
3759  llvm::Type *ResolverType;
3760  GlobalDecl ResolverGD;
3761  if (getTarget().supportsIFunc()) {
3762  ResolverType = llvm::FunctionType::get(
3763  llvm::PointerType::get(DeclTy,
3764  getTypes().getTargetAddressSpace(FD->getType())),
3765  false);
3766  }
3767  else {
3768  ResolverType = DeclTy;
3769  ResolverGD = GD;
3770  }
3771 
3772  auto *ResolverFunc = cast<llvm::Function>(GetOrCreateLLVMFunction(
3773  ResolverName, ResolverType, ResolverGD, /*ForVTable=*/false));
3774  ResolverFunc->setLinkage(getMultiversionLinkage(*this, GD));
3775  if (supportsCOMDAT())
3776  ResolverFunc->setComdat(
3777  getModule().getOrInsertComdat(ResolverFunc->getName()));
3778 
3779  SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
3780  const TargetInfo &Target = getTarget();
3781  unsigned Index = 0;
3782  for (const IdentifierInfo *II : DD->cpus()) {
3783  // Get the name of the target function so we can look it up/create it.
3784  std::string MangledName = getMangledNameImpl(*this, GD, FD, true) +
3785  getCPUSpecificMangling(*this, II->getName());
3786 
3787  llvm::Constant *Func = GetGlobalValue(MangledName);
3788 
3789  if (!Func) {
3790  GlobalDecl ExistingDecl = Manglings.lookup(MangledName);
3791  if (ExistingDecl.getDecl() &&
3792  ExistingDecl.getDecl()->getAsFunction()->isDefined()) {
3793  EmitGlobalFunctionDefinition(ExistingDecl, nullptr);
3794  Func = GetGlobalValue(MangledName);
3795  } else {
3796  if (!ExistingDecl.getDecl())
3797  ExistingDecl = GD.getWithMultiVersionIndex(Index);
3798 
3799  Func = GetOrCreateLLVMFunction(
3800  MangledName, DeclTy, ExistingDecl,
3801  /*ForVTable=*/false, /*DontDefer=*/true,
3802  /*IsThunk=*/false, llvm::AttributeList(), ForDefinition);
3803  }
3804  }
3805 
3806  llvm::SmallVector<StringRef, 32> Features;
3807  Target.getCPUSpecificCPUDispatchFeatures(II->getName(), Features);
3808  llvm::transform(Features, Features.begin(),
3809  [](StringRef Str) { return Str.substr(1); });
3810  llvm::erase_if(Features, [&Target](StringRef Feat) {
3811  return !Target.validateCpuSupports(Feat);
3812  });
3813  Options.emplace_back(cast<llvm::Function>(Func), StringRef{}, Features);
3814  ++Index;
3815  }
3816 
3817  llvm::stable_sort(
3818  Options, [](const CodeGenFunction::MultiVersionResolverOption &LHS,
3819  const CodeGenFunction::MultiVersionResolverOption &RHS) {
3820  return llvm::X86::getCpuSupportsMask(LHS.Conditions.Features) >
3821  llvm::X86::getCpuSupportsMask(RHS.Conditions.Features);
3822  });
3823 
3824  // If the list contains multiple 'default' versions, such as when it contains
3825  // 'pentium' and 'generic', don't emit the call to the generic one (since we
3826  // always run on at least a 'pentium'). We do this by deleting the 'least
3827  // advanced' (read, lowest mangling letter).
3828  while (Options.size() > 1 &&
3829  llvm::X86::getCpuSupportsMask(
3830  (Options.end() - 2)->Conditions.Features) == 0) {
3831  StringRef LHSName = (Options.end() - 2)->Function->getName();
3832  StringRef RHSName = (Options.end() - 1)->Function->getName();
3833  if (LHSName.compare(RHSName) < 0)
3834  Options.erase(Options.end() - 2);
3835  else
3836  Options.erase(Options.end() - 1);
3837  }
3838 
3839  CodeGenFunction CGF(*this);
3840  CGF.EmitMultiVersionResolver(ResolverFunc, Options);
3841 
3842  if (getTarget().supportsIFunc()) {
3843  llvm::GlobalValue::LinkageTypes Linkage = getMultiversionLinkage(*this, GD);
3844  auto *IFunc = cast<llvm::GlobalValue>(GetOrCreateMultiVersionResolver(GD));
3845 
3846  // Fix up function declarations that were created for cpu_specific before
3847  // cpu_dispatch was known
3848  if (!isa<llvm::GlobalIFunc>(IFunc)) {
3849  assert(cast<llvm::Function>(IFunc)->isDeclaration());
3850  auto *GI = llvm::GlobalIFunc::create(DeclTy, 0, Linkage, "", ResolverFunc,
3851  &getModule());
3852  GI->takeName(IFunc);
3853  IFunc->replaceAllUsesWith(GI);
3854  IFunc->eraseFromParent();
3855  IFunc = GI;
3856  }
3857 
3858  std::string AliasName = getMangledNameImpl(
3859  *this, GD, FD, /*OmitMultiVersionMangling=*/true);
3860  llvm::Constant *AliasFunc = GetGlobalValue(AliasName);
3861  if (!AliasFunc) {
3862  auto *GA = llvm::GlobalAlias::create(DeclTy, 0, Linkage, AliasName, IFunc,
3863  &getModule());
3864  SetCommonAttributes(GD, GA);
3865  }
3866  }
3867 }
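// Example (illustrative user code; the CPU names are only examples): the
// cpu_specific/cpu_dispatch pair served by this resolver looks roughly like
//
//   __attribute__((cpu_specific(atom))) void work(void);
//   __attribute__((cpu_specific(core_2nd_gen_avx))) void work(void);
//   __attribute__((cpu_dispatch(atom, core_2nd_gen_avx))) void work(void);
//
// The dispatcher emitted here selects among the cpu_specific bodies, through a
// GlobalIFunc when the target supports ifuncs.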
3868 
3869 /// If a dispatcher for the specified mangled name is not in the module, create
3870 /// and return an llvm Function with the specified type.
3871 llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(GlobalDecl GD) {
3872  const auto *FD = cast<FunctionDecl>(GD.getDecl());
3873  assert(FD && "Not a FunctionDecl?");
3874 
3875  std::string MangledName =
3876  getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
3877 
3878  // Holds the name of the resolver, in ifunc mode this is the ifunc (which has
3879  // a separate resolver).
3880  std::string ResolverName = MangledName;
3881  if (getTarget().supportsIFunc())
3882  ResolverName += ".ifunc";
3883  else if (FD->isTargetMultiVersion())
3884  ResolverName += ".resolver";
3885 
3886  // If the resolver has already been created, just return it.
3887  if (llvm::GlobalValue *ResolverGV = GetGlobalValue(ResolverName))
3888  return ResolverGV;
3889 
3890  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
3891  llvm::FunctionType *DeclTy = getTypes().GetFunctionType(FI);
3892 
3893  // The resolver needs to be created. For target and target_clones, defer
3894  // creation until the end of the TU.
3895  if (FD->isTargetMultiVersion() || FD->isTargetClonesMultiVersion())
3896  MultiVersionFuncs.push_back(GD);
3897 
3898  // For cpu_specific, don't create an ifunc yet because we don't know if the
3899  // cpu_dispatch will be emitted in this translation unit.
3900  if (getTarget().supportsIFunc() && !FD->isCPUSpecificMultiVersion()) {
3901  llvm::Type *ResolverType = llvm::FunctionType::get(
3902  llvm::PointerType::get(DeclTy,
3903  getTypes().getTargetAddressSpace(FD->getType())),
3904  false);
3905  llvm::Constant *Resolver = GetOrCreateLLVMFunction(
3906  MangledName + ".resolver", ResolverType, GlobalDecl{},
3907  /*ForVTable=*/false);
3908  llvm::GlobalIFunc *GIF =
3909  llvm::GlobalIFunc::create(DeclTy, 0, llvm::Function::ExternalLinkage,
3910  "", Resolver, &getModule());
3911  GIF->setName(ResolverName);
3912  SetCommonAttributes(FD, GIF);
3913 
3914  return GIF;
3915  }
3916 
3917  llvm::Constant *Resolver = GetOrCreateLLVMFunction(
3918  ResolverName, DeclTy, GlobalDecl{}, /*ForVTable=*/false);
3919  assert(isa<llvm::GlobalValue>(Resolver) &&
3920  "Resolver should be created for the first time");
3921  SetCommonAttributes(FD, cast<llvm::GlobalValue>(Resolver));
3922  return Resolver;
3923 }
3924 
3925 /// GetOrCreateLLVMFunction - If the specified mangled name is not in the
3926 /// module, create and return an llvm Function with the specified type. If there
3927 /// is something in the module with the specified name, return it potentially
3928 /// bitcasted to the right type.
3929 ///
3930 /// If D is non-null, it specifies a decl that corresponds to this. This is used
3931 /// to set the attributes on the function when it is first created.
3932 llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
3933  StringRef MangledName, llvm::Type *Ty, GlobalDecl GD, bool ForVTable,
3934  bool DontDefer, bool IsThunk, llvm::AttributeList ExtraAttrs,
3935  ForDefinition_t IsForDefinition) {
3936  const Decl *D = GD.getDecl();
3937 
3938  // Any attempts to use a MultiVersion function should result in retrieving
3939  // the iFunc instead. Name Mangling will handle the rest of the changes.
3940  if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(D)) {
3941  // For the device mark the function as one that should be emitted.
3942  if (getLangOpts().OpenMPIsDevice && OpenMPRuntime &&
3943  !OpenMPRuntime->markAsGlobalTarget(GD) && FD->isDefined() &&
3944  !DontDefer && !IsForDefinition) {
3945  if (const FunctionDecl *FDDef = FD->getDefinition()) {
3946  GlobalDecl GDDef;
3947  if (const auto *CD = dyn_cast<CXXConstructorDecl>(FDDef))
3948  GDDef = GlobalDecl(CD, GD.getCtorType());
3949  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(FDDef))
3950  GDDef = GlobalDecl(DD, GD.getDtorType());
3951  else
3952  GDDef = GlobalDecl(FDDef);
3953  EmitGlobal(GDDef);
3954  }
3955  }
3956 
3957  if (FD->isMultiVersion()) {
3958  UpdateMultiVersionNames(GD, FD, MangledName);
3959  if (!IsForDefinition)
3960  return GetOrCreateMultiVersionResolver(GD);
3961  }
3962  }
3963 
3964  // Lookup the entry, lazily creating it if necessary.
3965  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
3966  if (Entry) {
3967  if (WeakRefReferences.erase(Entry)) {
3968  const FunctionDecl *FD = cast_or_null<FunctionDecl>(D);
3969  if (FD && !FD->hasAttr<WeakAttr>())
3970  Entry->setLinkage(llvm::Function::ExternalLinkage);
3971  }
3972 
3973  // Handle dropped DLL attributes.
3974  if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>() &&
3975  !shouldMapVisibilityToDLLExport(cast_or_null<NamedDecl>(D))) {
3976  Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
3977  setDSOLocal(Entry);
3978  }
3979 
3980  // If there are two attempts to define the same mangled name, issue an
3981  // error.
3982  if (IsForDefinition && !Entry->isDeclaration()) {
3983  GlobalDecl OtherGD;
3984  // Check that GD is not yet in DiagnosedConflictingDefinitions is required
3985  // to make sure that we issue an error only once.
3986  if (lookupRepresentativeDecl(MangledName, OtherGD) &&
3987  (GD.getCanonicalDecl().getDecl() !=
3988  OtherGD.getCanonicalDecl().getDecl()) &&
3989  DiagnosedConflictingDefinitions.insert(GD).second) {
3990  getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name)
3991  << MangledName;
3992  getDiags().Report(OtherGD.getDecl()->getLocation(),
3993  diag::note_previous_definition);
3994  }
3995  }
3996 
3997  if ((isa<llvm::Function>(Entry) || isa<llvm::GlobalAlias>(Entry)) &&
3998  (Entry->getValueType() == Ty)) {
3999  return Entry;
4000  }
4001 
4002  // Make sure the result is of the correct type.
4003  // (If function is requested for a definition, we always need to create a new
4004  // function, not just return a bitcast.)
4005  if (!IsForDefinition)
4006  return llvm::ConstantExpr::getBitCast(
4007  Entry, Ty->getPointerTo(Entry->getAddressSpace()));
4008  }
4009 
4010  // This function doesn't have a complete type (for example, the return
4011  // type is an incomplete struct). Use a fake type instead, and make
4012  // sure not to try to set attributes.
4013  bool IsIncompleteFunction = false;
4014 
4015  llvm::FunctionType *FTy;
4016  if (isa<llvm::FunctionType>(Ty)) {
4017  FTy = cast<llvm::FunctionType>(Ty);
4018  } else {
4019  FTy = llvm::FunctionType::get(VoidTy, false);
4020  IsIncompleteFunction = true;
4021  }
4022 
4023  llvm::Function *F =
4024  llvm::Function::Create(FTy, llvm::Function::ExternalLinkage,
4025  Entry ? StringRef() : MangledName, &getModule());
4026 
4027  // If we already created a function with the same mangled name (but different
4028  // type) before, take its name and add it to the list of functions to be
4029  // replaced with F at the end of CodeGen.
4030  //
4031  // This happens if there is a prototype for a function (e.g. "int f()") and
4032  // then a definition of a different type (e.g. "int f(int x)").
4033  if (Entry) {
4034  F->takeName(Entry);
4035 
4036  // This might be an implementation of a function without a prototype, in
4037  // which case, try to do special replacement of calls which match the new
4038  // prototype. The really key thing here is that we also potentially drop
4039  // arguments from the call site so as to make a direct call, which makes the
4040  // inliner happier and suppresses a number of optimizer warnings (!) about
4041  // dropping arguments.
4042  if (!Entry->use_empty()) {
4043  ReplaceUsesOfNonProtoTypeWithRealFunction(Entry, F);
4044  Entry->removeDeadConstantUsers();
4045  }
4046 
4047  llvm::Constant *BC = llvm::ConstantExpr::getBitCast(
4048  F, Entry->getValueType()->getPointerTo());
4049  addGlobalValReplacement(Entry, BC);
4050  }
4051 
4052  assert(F->getName() == MangledName && "name was uniqued!");
4053  if (D)
4054  SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk);
4055  if (ExtraAttrs.hasFnAttrs()) {
4056  llvm::AttrBuilder B(F->getContext(), ExtraAttrs.getFnAttrs());
4057  F->addFnAttrs(B);
4058  }
4059 
4060  if (!DontDefer) {
4061  // All MSVC dtors other than the base dtor are linkonce_odr and delegate to
4062  // each other bottoming out with the base dtor. Therefore we emit non-base
4063  // dtors on usage, even if there is no dtor definition in the TU.
4064  if (isa_and_nonnull<CXXDestructorDecl>(D) &&
4065  getCXXABI().useThunkForDtorVariant(cast<CXXDestructorDecl>(D),
4066  GD.getDtorType()))
4067  addDeferredDeclToEmit(GD);
4068 
4069  // This is the first use or definition of a mangled name. If there is a
4070  // deferred decl with this name, remember that we need to emit it at the end
4071  // of the file.
4072  auto DDI = DeferredDecls.find(MangledName);
4073  if (DDI != DeferredDecls.end()) {
4074  // Move the potentially referenced deferred decl to the
4075  // DeferredDeclsToEmit list, and remove it from DeferredDecls (since we
4076  // don't need it anymore).
4077  addDeferredDeclToEmit(DDI->second);
4078  EmittedDeferredDecls[DDI->first] = DDI->second;
4079  DeferredDecls.erase(DDI);
4080 
4081  // Otherwise, there are cases we have to worry about where we're
4082  // using a declaration for which we must emit a definition but where
4083  // we might not find a top-level definition:
4084  // - member functions defined inline in their classes
4085  // - friend functions defined inline in some class
4086  // - special member functions with implicit definitions
4087  // If we ever change our AST traversal to walk into class methods,
4088  // this will be unnecessary.
4089  //
4090  // We also don't emit a definition for a function if it's going to be an
4091  // entry in a vtable, unless it's already marked as used.
4092  } else if (getLangOpts().CPlusPlus && D) {
4093  // Look for a declaration that's lexically in a record.
4094  for (const auto *FD = cast<FunctionDecl>(D)->getMostRecentDecl(); FD;
4095  FD = FD->getPreviousDecl()) {
4096  if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
4097  if (FD->doesThisDeclarationHaveABody()) {
4098  addDeferredDeclToEmit(GD.getWithDecl(FD));
4099  break;
4100  }
4101  }
4102  }
4103  }
4104  }
4105 
4106  // Make sure the result is of the requested type.
4107  if (!IsIncompleteFunction) {
4108  assert(F->getFunctionType() == Ty);
4109  return F;
4110  }
4111 
4112  llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
4113  return llvm::ConstantExpr::getBitCast(F, PTy);
4114 }
4115 
4116 /// GetAddrOfFunction - Return the address of the given function. If Ty is
4117 /// non-null, then this function will use the specified type if it has to
4118 /// create it (this occurs when we see a definition of the function).
4119 llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
4120  llvm::Type *Ty,
4121  bool ForVTable,
4122  bool DontDefer,
4123  ForDefinition_t IsForDefinition) {
4124  assert(!cast<FunctionDecl>(GD.getDecl())->isConsteval() &&
4125  "consteval function should never be emitted");
4126  // If there was no specific requested type, just convert it now.
4127  if (!Ty) {
4128  const auto *FD = cast<FunctionDecl>(GD.getDecl());
4129  Ty = getTypes().ConvertType(FD->getType());
4130  }
4131 
4132  // Devirtualized destructor calls may come through here instead of via
4133  // getAddrOfCXXStructor. Make sure we use the MS ABI base destructor instead
4134  // of the complete destructor when necessary.
4135  if (const auto *DD = dyn_cast<CXXDestructorDecl>(GD.getDecl())) {
4136  if (getTarget().getCXXABI().isMicrosoft() &&
4137  GD.getDtorType() == Dtor_Complete &&
4138  DD->getParent()->getNumVBases() == 0)
4139  GD = GlobalDecl(DD, Dtor_Base);
4140  }
4141 
4142  StringRef MangledName = getMangledName(GD);
4143  auto *F = GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer,
4144  /*IsThunk=*/false, llvm::AttributeList(),
4145  IsForDefinition);
4146  // Returns kernel handle for HIP kernel stub function.
4147  if (LangOpts.CUDA && !LangOpts.CUDAIsDevice &&
4148  cast<FunctionDecl>(GD.getDecl())->hasAttr<CUDAGlobalAttr>()) {
4149  auto *Handle = getCUDARuntime().getKernelHandle(
4150  cast<llvm::Function>(F->stripPointerCasts()), GD);
4151  if (IsForDefinition)
4152  return F;
4153  return llvm::ConstantExpr::getBitCast(Handle, Ty->getPointerTo());
4154  }
4155  return F;
4156 }
4157 
4158 llvm::Constant *CodeGenModule::GetFunctionStart(const ValueDecl *Decl) {
4159  llvm::GlobalValue *F =
4160  cast<llvm::GlobalValue>(GetAddrOfFunction(Decl)->stripPointerCasts());
4161 
4162  return llvm::ConstantExpr::getBitCast(llvm::NoCFIValue::get(F),
4163  llvm::Type::getInt8PtrTy(VMContext));
4164 }
4165 
4166 static const FunctionDecl *
4167 GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) {
4168  TranslationUnitDecl *TUDecl = C.getTranslationUnitDecl();
4169  DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
4170 
4171  IdentifierInfo &CII = C.Idents.get(Name);
4172  for (const auto *Result : DC->lookup(&CII))
4173  if (const auto *FD = dyn_cast<FunctionDecl>(Result))
4174  return FD;
4175 
4176  if (!C.getLangOpts().CPlusPlus)
4177  return nullptr;
4178 
4179  // Demangle the premangled name from getTerminateFn()
4180  IdentifierInfo &CXXII =
4181  (Name == "_ZSt9terminatev" || Name == "?terminate@@YAXXZ")
4182  ? C.Idents.get("terminate")
4183  : C.Idents.get(Name);
4184 
4185  for (const auto &N : {"__cxxabiv1", "std"}) {
4186  IdentifierInfo &NS = C.Idents.get(N);
4187  for (const auto *Result : DC->lookup(&NS)) {
4188  const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(Result);
4189  if (auto *LSD = dyn_cast<LinkageSpecDecl>(Result))
4190  for (const auto *Result : LSD->lookup(&NS))
4191  if ((ND = dyn_cast<NamespaceDecl>(Result)))
4192  break;
4193 
4194  if (ND)
4195  for (const auto *Result : ND->lookup(&CXXII))
4196  if (const auto *FD = dyn_cast<FunctionDecl>(Result))
4197  return FD;
4198  }
4199  }
4200 
4201  return nullptr;
4202 }
4203 
4204 /// CreateRuntimeFunction - Create a new runtime function with the specified
4205 /// type and name.
4206 llvm::FunctionCallee
4207 CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
4208  llvm::AttributeList ExtraAttrs, bool Local,
4209  bool AssumeConvergent) {
4210  if (AssumeConvergent) {
4211  ExtraAttrs =
4212  ExtraAttrs.addFnAttribute(VMContext, llvm::Attribute::Convergent);
4213  }
4214 
4215  llvm::Constant *C =
4216  GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
4217  /*DontDefer=*/false, /*IsThunk=*/false,
4218  ExtraAttrs);
4219 
4220  if (auto *F = dyn_cast<llvm::Function>(C)) {
4221  if (F->empty()) {
4222  F->setCallingConv(getRuntimeCC());
4223 
4224  // In Windows Itanium environments, try to mark runtime functions
4225  // dllimport. For Mingw and MSVC, don't. We don't really know if the user
4226  // will link their standard library statically or dynamically. Marking
4227  // functions imported when they are not imported can cause linker errors
4228  // and warnings.
4229  if (!Local && getTriple().isWindowsItaniumEnvironment() &&
4230  !getCodeGenOpts().LTOVisibilityPublicStd) {
4231  const FunctionDecl *FD = GetRuntimeFunctionDecl(Context, Name);
4232  if (!FD || FD->hasAttr<DLLImportAttr>()) {
4233  F->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
4234  F->setLinkage(llvm::GlobalValue::ExternalLinkage);
4235  }
4236  }
4237  setDSOLocal(F);
4238  }
4239  }
4240 
4241  return {FTy, C};
4242 }
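// Example (illustrative sketch; the helper chosen depends on the caller): a
// typical call site looks like
//
//   llvm::FunctionCallee Terminate = CGM.CreateRuntimeFunction(
//       llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false),
//       "_ZSt9terminatev");
//
// which returns the callee with the runtime calling convention applied and,
// on Windows Itanium, possibly marked dllimport by the logic above.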
4243 
4244 /// isTypeConstant - Determine whether an object of this type can be emitted
4245 /// as a constant.
4246 ///
4247 /// If ExcludeCtor is true, the duration when the object's constructor runs
4248 /// will not be considered. The caller will need to verify that the object is
4249 /// not written to during its construction.
4250 bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) {
4251  if (!Ty.isConstant(Context) && !Ty->isReferenceType())
4252  return false;
4253 
4254  if (Context.getLangOpts().CPlusPlus) {
4255  if (const CXXRecordDecl *Record
4256  = Context.getBaseElementType(Ty)->getAsCXXRecordDecl())
4257  return ExcludeCtor && !Record->hasMutableFields() &&
4258  Record->hasTrivialDestructor();
4259  }
4260 
4261  return true;
4262 }
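// Example (illustrative): isTypeConstant returns true for "const int N = 42;",
// while a const C++ object with mutable fields or a non-trivial destructor is
// rejected, and one with neither is accepted only when ExcludeCtor says that
// construction-time writes may be ignored.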
4263 
4264 /// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
4265 /// create and return an llvm GlobalVariable with the specified type and address
4266 /// space. If there is something in the module with the specified name, return
4267 /// it potentially bitcasted to the right type.
4268 ///
4269 /// If D is non-null, it specifies a decl that corresponds to this. This is used
4270 /// to set the attributes on the global when it is first created.
4271 ///
4272 /// If IsForDefinition is true, it is guaranteed that an actual global with
4273 /// type Ty will be returned, not conversion of a variable with the same
4274 /// mangled name but some other type.
4275 llvm::Constant *
4276 CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
4277  LangAS AddrSpace, const VarDecl *D,
4278  ForDefinition_t IsForDefinition) {
4279  // Lookup the entry, lazily creating it if necessary.
4280  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
4281  unsigned TargetAS = getContext().getTargetAddressSpace(AddrSpace);
4282  if (Entry) {
4283  if (WeakRefReferences.erase(Entry)) {
4284  if (D && !D->hasAttr<WeakAttr>())
4285  Entry->setLinkage(llvm::Function::ExternalLinkage);
4286  }
4287 
4288  // Handle dropped DLL attributes.
4289  if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>() &&
4290  !shouldMapVisibilityToDLLExport(D))
4291  Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
4292 
4293  if (LangOpts.OpenMP && !LangOpts.OpenMPSimd && D)
4294  getOpenMPRuntime().registerTargetGlobalVariable(D, Entry);
4295 
4296  if (Entry->getValueType() == Ty && Entry->getAddressSpace() == TargetAS)
4297  return Entry;
4298 
4299  // If there are two attempts to define the same mangled name, issue an
4300  // error.
4301  if (IsForDefinition && !Entry->isDeclaration()) {
4302  GlobalDecl OtherGD;
4303  const VarDecl *OtherD;
4304 
4305  // Check that D is not yet in DiagnosedConflictingDefinitions is required
4306  // to make sure that we issue an error only once.
4307  if (D && lookupRepresentativeDecl(MangledName, OtherGD) &&
4308  (D->getCanonicalDecl() != OtherGD.getCanonicalDecl().getDecl()) &&
4309  (OtherD = dyn_cast<VarDecl>(OtherGD.getDecl())) &&
4310  OtherD->hasInit() &&
4311  DiagnosedConflictingDefinitions.insert(D).second) {
4312  getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name)
4313  << MangledName;
4314  getDiags().Report(OtherGD.getDecl()->getLocation(),
4315  diag::note_previous_definition);
4316  }
4317  }
4318 
4319  // Make sure the result is of the correct type.
4320  if (Entry->getType()->getAddressSpace() != TargetAS) {
4321  return llvm::ConstantExpr::getAddrSpaceCast(Entry,
4322  Ty->getPointerTo(TargetAS));
4323  }
4324 
4325  // (If global is requested for a definition, we always need to create a new
4326  // global, not just return a bitcast.)
4327  if (!IsForDefinition)
4328  return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo(TargetAS));
4329  }
4330 
4331  auto DAddrSpace = GetGlobalVarAddressSpace(D);
4332 
4333  auto *GV = new llvm::GlobalVariable(
4334  getModule(), Ty, false, llvm::GlobalValue::ExternalLinkage, nullptr,
4335  MangledName, nullptr, llvm::GlobalVariable::NotThreadLocal,
4336  getContext().getTargetAddressSpace(DAddrSpace));
4337 
4338  // If we already created a global with the same mangled name (but different
4339  // type) before, take its name and remove it from its parent.
4340  if (Entry) {
4341  GV->takeName(Entry);
4342 
4343  if (!Entry->use_empty()) {
4344  llvm::Constant *NewPtrForOldDecl =
4345  llvm::ConstantExpr::getBitCast(GV, Entry->getType());
4346  Entry->replaceAllUsesWith(NewPtrForOldDecl);
4347  }
4348 
4349  Entry->eraseFromParent();
4350  }
4351 
4352  // This is the first use or definition of a mangled name. If there is a
4353  // deferred decl with this name, remember that we need to emit it at the end
4354  // of the file.
4355  auto DDI = DeferredDecls.find(MangledName);
4356  if (DDI != DeferredDecls.end()) {
4357  // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
4358  // list, and remove it from DeferredDecls (since we don't need it anymore).
4359  addDeferredDeclToEmit(DDI->second);
4360  EmittedDeferredDecls[DDI->first] = DDI->second;
4361  DeferredDecls.erase(DDI);
4362  }
4363 
4364  // Handle things which are present even on external declarations.
4365  if (D) {
4366  if (LangOpts.OpenMP && !LangOpts.OpenMPSimd)
4367  getOpenMPRuntime().registerTargetGlobalVariable(D, GV);
4368 
4369  // FIXME: This code is overly simple and should be merged with other global
4370  // handling.
4371  GV->setConstant(isTypeConstant(D->getType(), false));
4372 
4373  GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());
4374 
4375  setLinkageForGV(GV, D);
4376 
4377  if (D->getTLSKind()) {
4378  if (D->getTLSKind() == VarDecl::TLS_Dynamic)
4379  CXXThreadLocals.push_back(D);
4380  setTLSMode(GV, *D);
4381  }
4382 
4383  setGVProperties(GV, D);
4384 
4385  // If required by the ABI, treat declarations of static data members with
4386  // inline initializers as definitions.
4387  if (getContext().isMSStaticDataMemberInlineDefinition(D)) {
4388  EmitGlobalVarDefinition(D);
4389  }
4390 
4391  // Emit section information for extern variables.
4392  if (D->hasExternalStorage()) {
4393  if (const SectionAttr *SA = D->getAttr<SectionAttr>())
4394  GV->setSection(SA->getName());
4395  }
4396 
4397  // Handle XCore specific ABI requirements.
4398  if (getTriple().getArch() == llvm::Triple::xcore &&
4399  D->getLanguageLinkage() == CLanguageLinkage &&
4400  D->getType().isConstant(Context) &&
4401  isExternallyVisible(D->getLinkageAndVisibility().getLinkage()))
4402  GV->setSection(".cp.rodata");
4403 
4404  // Check if we have a const declaration with an initializer; we may be
4405  // able to emit it as available_externally to expose its value to the
4406  // optimizer.
4407  if (Context.getLangOpts().CPlusPlus && GV->hasExternalLinkage() &&
4408  D->getType().isConstQualified() && !GV->hasInitializer() &&
4409  !D->hasDefinition() && D->hasInit() && !D->hasAttr<DLLImportAttr>()) {
4410  const auto *Record =
4411  Context.getBaseElementType(D->getType())->getAsCXXRecordDecl();
4412  bool HasMutableFields = Record && Record->hasMutableFields();
4413  if (!HasMutableFields) {
4414  const VarDecl *InitDecl;
4415  const Expr *InitExpr = D->getAnyInitializer(InitDecl);
4416  if (InitExpr) {
4417  ConstantEmitter emitter(*this);
4418  llvm::Constant *Init = emitter.tryEmitForInitializer(*InitDecl);
4419  if (Init) {
4420  auto *InitType = Init->getType();
4421  if (GV->getValueType() != InitType) {
4422  // The type of the initializer does not match the definition.
4423  // This happens when an initializer has a different type from
4424  // the type of the global (because of padding at the end of a
4425  // structure for instance).
4426  GV->setName(StringRef());
4427  // Make a new global with the correct type, this is now guaranteed
4428  // to work.
4429  auto *NewGV = cast<llvm::GlobalVariable>(
4430  GetAddrOfGlobalVar(D, InitType, IsForDefinition)
4431  ->stripPointerCasts());
4432 
4433  // Erase the old global, since it is no longer used.
4434  GV->eraseFromParent();
4435  GV = NewGV;
4436  } else {
4437  GV->setInitializer(Init);
4438  GV->setConstant(true);
4439  GV->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
4440  }
4441  emitter.finalize(GV);
4442  }
4443  }
4444  }
4445  }
4446  }
4447 
4448  if (GV->isDeclaration()) {
4449  getTargetCodeGenInfo().setTargetAttributes(D, GV, *this);
4450  // External HIP managed variables need to be recorded for transformation
4451  // in both device and host compilations.
4452  if (getLangOpts().CUDA && D && D->hasAttr<HIPManagedAttr>() &&
4453  D->hasExternalStorage())
4454  getCUDARuntime().handleVarRegistration(D, *GV);
4455  }
4456 
4457  if (D)
4458  SanitizerMD->reportGlobal(GV, *D);
4459 
4460  LangAS ExpectedAS =
4461  D ? D->getType().getAddressSpace()
4462  : (LangOpts.OpenCL ? LangAS::opencl_global : LangAS::Default);
4463  assert(getContext().getTargetAddressSpace(ExpectedAS) == TargetAS);
4464  if (DAddrSpace != ExpectedAS) {
4465  return getTargetCodeGenInfo().performAddrSpaceCast(
4466  *this, GV, DAddrSpace, ExpectedAS, Ty->getPointerTo(TargetAS));
4467  }
4468 
4469  return GV;
4470 }
4471 
4472 llvm::Constant *
4473 CodeGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) {
4474  const Decl *D = GD.getDecl();
4475 
4476  if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
4477  return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr,
4478  /*DontDefer=*/false, IsForDefinition);
4479 
4480  if (isa<CXXMethodDecl>(D)) {
4481  auto FInfo =
4482  &getTypes().arrangeCXXMethodDeclaration(cast<CXXMethodDecl>(D));
4483  auto Ty = getTypes().GetFunctionType(*FInfo);
4484  return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
4485  IsForDefinition);
4486  }
4487 
4488  if (isa<FunctionDecl>(D)) {
4489  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
4490  llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
4491  return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
4492  IsForDefinition);
4493  }
4494 
4495  return GetAddrOfGlobalVar(cast<VarDecl>(D), /*Ty=*/nullptr, IsForDefinition);
4496 }
4497 
4498 llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable(
4499  StringRef Name, llvm::Type *Ty, llvm::GlobalValue::LinkageTypes Linkage,
4500  unsigned Alignment) {
4501  llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name);
4502  llvm::GlobalVariable *OldGV = nullptr;
4503 
4504  if (GV) {
4505  // Check if the variable has the right type.
4506  if (GV->getValueType() == Ty)
4507  return GV;
4508 
4509  // Because of C++ name mangling, the only way we can end up with an already
4510  // existing global with the same name is if it has been declared extern "C".
4511  assert(GV->isDeclaration() && "Declaration has wrong type!");
4512  OldGV = GV;
4513  }
4514 
4515  // Create a new variable.
4516  GV = new llvm::GlobalVariable(getModule(), Ty, /*isConstant=*/true,
4517  Linkage, nullptr, Name);
4518 
4519  if (OldGV) {
4520  // Replace occurrences of the old variable if needed.
4521  GV->takeName(OldGV);
4522 
4523  if (!OldGV->use_empty()) {
4524  llvm::Constant *NewPtrForOldDecl =
4525  llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
4526  OldGV->replaceAllUsesWith(NewPtrForOldDecl);
4527  }
4528 
4529  OldGV->eraseFromParent();
4530  }
4531 
4532  if (supportsCOMDAT() && GV->isWeakForLinker() &&
4533  !GV->hasAvailableExternallyLinkage())
4534  GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
4535 
4536  GV->setAlignment(llvm::MaybeAlign(Alignment));
4537 
4538  return GV;
4539 }
4540 
4541 /// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
4542 /// given global variable. If Ty is non-null and if the global doesn't exist,
4543 /// then it will be created with the specified type instead of whatever the
4544 /// normal requested type would be. If IsForDefinition is true, it is guaranteed
4545 /// that an actual global with type Ty will be returned, not a conversion of a
4546 /// variable with the same mangled name but some other type.
4547 llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
4548  llvm::Type *Ty,
4549  ForDefinition_t IsForDefinition) {
4550  assert(D->hasGlobalStorage() && "Not a global variable");
4551  QualType ASTTy = D->getType();
4552  if (!Ty)
4553  Ty = getTypes().ConvertTypeForMem(ASTTy);
4554 
4555  StringRef MangledName = getMangledName(D);
4556  return GetOrCreateLLVMGlobal(MangledName, Ty, ASTTy.getAddressSpace(), D,
4557  IsForDefinition);
4558 }
4559 
4560 /// CreateRuntimeVariable - Create a new runtime global variable with the
4561 /// specified type and name.
4562 llvm::Constant *
4563 CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
4564  StringRef Name) {
4565  LangAS AddrSpace = getContext().getLangOpts().OpenCL ? LangAS::opencl_global
4566  : LangAS::Default;
4567  auto *Ret = GetOrCreateLLVMGlobal(Name, Ty, AddrSpace, nullptr);
4568  setDSOLocal(cast<llvm::GlobalValue>(Ret->stripPointerCasts()));
4569  return Ret;
4570 }
4571 
4572 void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
4573  assert(!D->getInit() && "Cannot emit definite definitions here!");
4574 
4575  StringRef MangledName = getMangledName(D);
4576  llvm::GlobalValue *GV = GetGlobalValue(MangledName);
4577 
4578  // We already have a definition, not a declaration, with the same mangled
4579  // name. Emitting the declaration is not required (and would actually
4580  // overwrite the emitted definition).
4581  if (GV && !GV->isDeclaration())
4582  return;
4583 
4584  // If we have not seen a reference to this variable yet, place it into the
4585  // deferred declarations table to be emitted if needed later.
4586  if (!MustBeEmitted(D) && !GV) {
4587  DeferredDecls[MangledName] = D;
4588  return;
4589  }
4590 
4591  // The tentative definition is the only definition.
4592  EmitGlobalVarDefinition(D);
4593 }
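// Illustrative example (not from the original source): in C,
//
//   int counter;   // tentative definition
//   int counter;   // still only tentative
//
// with no initialized definition elsewhere in the translation unit, the
// variable is emitted here with an implicit zero initializer.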
4594 
4595 void CodeGenModule::EmitExternalDeclaration(const VarDecl *D) {
4596  EmitExternalVarDeclaration(D);
4597 }
4598 
4599 CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
4600  return Context.toCharUnitsFromBits(
4601  getDataLayout().getTypeStoreSizeInBits(Ty));
4602 }
4603 
4604 LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) {
4605  if (LangOpts.OpenCL) {
4606  LangAS AS = D ? D->getType().getAddressSpace() : LangAS::opencl_global;
4607  assert(AS == LangAS::opencl_global ||
4608  AS == LangAS::opencl_global_device ||
4609  AS == LangAS::opencl_global_host ||
4610  AS == LangAS::opencl_constant ||
4611  AS == LangAS::opencl_local ||
4612  AS >= LangAS::FirstTargetAddressSpace);
4613  return AS;
4614  }
4615 
4616  if (LangOpts.SYCLIsDevice &&
4617  (!D || D->getType().getAddressSpace() == LangAS::Default))
4618  return LangAS::sycl_global;
4619 
4620  if (LangOpts.CUDA && LangOpts.CUDAIsDevice) {
4621  if (D && D->hasAttr<CUDAConstantAttr>())
4622  return LangAS::cuda_constant;
4623  else if (D && D->hasAttr<CUDASharedAttr>())
4624  return LangAS::cuda_shared;
4625  else if (D && D->hasAttr<CUDADeviceAttr>())
4626  return LangAS::cuda_device;
4627  else if (D && D->getType().isConstQualified())
4628  return LangAS::cuda_constant;
4629  else
4630  return LangAS::cuda_device;
4631  }
4632 
4633  if (LangOpts.OpenMP) {
4634  LangAS AS;
4635  if (OpenMPRuntime->hasAllocateAttributeForGlobalVar(D, AS))
4636  return AS;
4637  }
4638  return getTargetCodeGenInfo().getGlobalVarAddressSpace(*this, D);
4639 }
4640 
4641 LangAS CodeGenModule::GetGlobalConstantAddressSpace() const {
4642  // OpenCL v1.2 s6.5.3: a string literal is in the constant address space.
4643  if (LangOpts.OpenCL)
4644  return LangAS::opencl_constant;
4645  if (LangOpts.SYCLIsDevice)
4646  return LangAS::sycl_global;
4647  if (LangOpts.HIP && LangOpts.CUDAIsDevice && getTriple().isSPIRV())
4648  // For HIPSPV map literals to cuda_device (maps to CrossWorkGroup in SPIR-V)
4649  // instead of default AS (maps to Generic in SPIR-V). Otherwise, we end up
4650  // with OpVariable instructions with Generic storage class which is not
4651  // allowed (SPIR-V V1.6 s3.42.8). Also, mapping literals to SPIR-V
4652  // UniformConstant storage class is not viable as pointers to it may not be
4653  // cast to Generic pointers, which are used to model HIP's "flat" pointers.
4654  return LangAS::cuda_device;
4655  if (auto AS = getTarget().getConstantAddressSpace())
4656  return *AS;
4657  return LangAS::Default;
4658 }
4659 
4660 // In address space agnostic languages, string literals are in default address
4661 // space in AST. However, certain targets (e.g. amdgcn) request them to be
4662 // emitted in constant address space in LLVM IR. To be consistent with other
4663 // parts of AST, string literal global variables in constant address space
4664 // need to be cast to the default address space before being put into the
4665 // address map and referenced by other parts of CodeGen.
4666 // In OpenCL, string literals are already in the constant address space in the
4667 // AST, so they should not be cast to the default address space.
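// Illustrative example (not from the original source): on amdgcn, where the
// constant address space is addrspace(4), the literal in
//
//   const char *p = "hi";
//
// may be emitted roughly as
//
//   @.str = private unnamed_addr addrspace(4) constant [3 x i8] c"hi\00"
//
// and uses are routed through an addrspacecast back to the default (generic)
// address space, which is what the helper below performs.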
4668 static llvm::Constant *
4669 castStringLiteralToDefaultAddressSpace(CodeGenModule &CGM,
4670  llvm::GlobalVariable *GV) {
4671  llvm::Constant *Cast = GV;
4672  if (!CGM.getLangOpts().OpenCL) {
4673  auto AS = CGM.GetGlobalConstantAddressSpace();
4674  if (AS != LangAS::Default)
4675  Cast = CGM.getTargetCodeGenInfo().performAddrSpaceCast(
4676  CGM, GV, AS, LangAS::Default,
4677  GV->getValueType()->getPointerTo(
4678  CGM.getContext().getTargetAddressSpace(LangAS::Default)));
4679  }
4680  return Cast;
4681 }
4682 
4683 template<typename SomeDecl>
4684 void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D,
4685  llvm::GlobalValue *GV) {
4686  if (!getLangOpts().CPlusPlus)
4687  return;
4688 
4689  // Must have 'used' attribute, or else inline assembly can't rely on
4690  // the name existing.
4691  if (!D->template hasAttr<UsedAttr>())
4692  return;
4693 
4694  // Must have internal linkage and an ordinary name.
4695  if (!D->getIdentifier() || D->getFormalLinkage() != InternalLinkage)
4696  return;
4697 
4698  // Must be in an extern "C" context. Entities declared directly within
4699  // a record are not extern "C" even if the record is in such a context.
4700  const SomeDecl *First = D->getFirstDecl();
4701  if (First->getDeclContext()->isRecord() || !First->isInExternCContext())
4702  return;
4703 
4704  // OK, this is an internal linkage entity inside an extern "C" linkage
4705  // specification. Make a note of that so we can give it the "expected"
4706  // mangled name if nothing else is using that name.
4707  std::pair<StaticExternCMap::iterator, bool> R =
4708  StaticExternCValues.insert(std::make_pair(D->getIdentifier(), GV));
4709 
4710  // If we have multiple internal linkage entities with the same name
4711  // in extern "C" regions, none of them gets that name.
4712  if (!R.second)
4713  R.first->second = nullptr;
4714 }
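// Illustrative example (not from the original source):
//
//   extern "C" {
//     __attribute__((used)) static int handler(void) { return 0; }
//   }
//
// handler has internal linkage but lives in an extern "C" context, so it is
// recorded in StaticExternCValues; if nothing else claims the name "handler",
// it can later be given that unmangled name for the benefit of inline asm.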
4715 
4716 static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) {
4717  if (!CGM.supportsCOMDAT())
4718  return false;
4719 
4720  if (D.hasAttr<SelectAnyAttr>())
4721  return true;
4722 
4723  GVALinkage Linkage;
4724  if (auto *VD = dyn_cast<VarDecl>(&D))
4725  Linkage = CGM.getContext().GetGVALinkageForVariable(VD);
4726  else
4727  Linkage = CGM.getContext().GetGVALinkageForFunction(cast<FunctionDecl>(&D));
4728 
4729  switch (Linkage) {
4730  case GVA_Internal:
4731  case GVA_AvailableExternally:
4732  case GVA_StrongExternal:
4733  return false;
4734  case GVA_DiscardableODR:
4735  case GVA_StrongODR:
4736  return true;
4737  }
4738  llvm_unreachable("No such linkage");
4739 }
4740 
4741 void CodeGenModule::maybeSetTrivialComdat(const Decl &D,
4742  llvm::GlobalObject &GO) {
4743  if (!shouldBeInCOMDAT(*this, D))
4744  return;
4745  GO.setComdat(TheModule.getOrInsertComdat(GO.getName()));
4746 }
4747 
4748 /// Pass IsTentative as true if you want to create a tentative definition.
4749 void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
4750  bool IsTentative) {
4751  // OpenCL global variables of sampler type are translated to function calls,
4752  // therefore no need to be translated.
4753  QualType ASTTy = D->getType();
4754  if (getLangOpts().OpenCL && ASTTy->isSamplerT())
4755  return;
4756 
4757  // If this is OpenMP device, check if it is legal to emit this global
4758  // normally.
4759  if (LangOpts.OpenMPIsDevice && OpenMPRuntime &&
4760  OpenMPRuntime->emitTargetGlobalVariable(D))
4761  return;
4762 
4763  llvm::TrackingVH<llvm::Constant> Init;
4764  bool NeedsGlobalCtor = false;
4765  bool NeedsGlobalDtor =
4766  D->needsDestruction(getContext()) == QualType::DK_cxx_destructor;
4767
4768  const VarDecl *InitDecl;
4769  const Expr *InitExpr = D->getAnyInitializer(InitDecl);
4770 
4771  Optional<ConstantEmitter> emitter;
4772 
4773  // CUDA E.2.4.1 "__shared__ variables cannot have an initialization
4774  // as part of their declaration." Sema has already checked for
4775  // error cases, so we just need to set Init to UndefValue.
4776  bool IsCUDASharedVar =
4777  getLangOpts().CUDAIsDevice && D->hasAttr<CUDASharedAttr>();
4778  // Shadows of initialized device-side global variables are also left
4779  // undefined.
4780  // Managed Variables should be initialized on both host side and device side.
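// Illustrative example (not from the original source):
//
//   __shared__ float tile[256];   // device compile: initializer is undef
//   __device__ int counter = 1;   // host compile: the shadow is also undef
//
// The real initial value of counter is only emitted on the device side.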
4781  bool IsCUDAShadowVar =
4782  !getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() &&
4783  (D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
4784  D->hasAttr<CUDASharedAttr>());
4785  bool IsCUDADeviceShadowVar =
4786  getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() &&
4787  (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
4788  D->getType()->isCUDADeviceBuiltinTextureType());
4789  if (getLangOpts().CUDA &&
4790  (IsCUDASharedVar || IsCUDAShadowVar || IsCUDADeviceShadowVar))
4791  Init = llvm::UndefValue::get(getTypes().ConvertTypeForMem(ASTTy));
4792  else if (D->hasAttr<LoaderUninitializedAttr>())
4793  Init = llvm::UndefValue::get(getTypes().ConvertTypeForMem(ASTTy));
4794  else if (!InitExpr) {
4795  // This is a tentative definition; tentative definitions are
4796  // implicitly initialized with { 0 }.
4797  //
4798  // Note that tentative definitions are only emitted at the end of
4799  // a translation unit, so they should never have incomplete
4800  // type. In addition, EmitTentativeDefinition makes sure that we
4801  // never attempt to emit a tentative definition if a real one
4802  // exists. A use may still exist, however, so we may still need
4803  // to do a RAUW.
4804  assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
4805  Init = EmitNullConstant(D->getType());
4806  } else {
4807  initializedGlobalDecl = GlobalDecl(D);
4808  emitter.emplace(*this);
4809  llvm::Constant *Initializer = emitter->tryEmitForInitializer(*InitDecl);
4810  if (!Initializer) {
4811  QualType T = InitExpr->getType();
4812  if (D->getType()->isReferenceType())
4813  T = D->getType();
4814 
4815  if (getLangOpts().CPlusPlus) {
4816  if (InitDecl->hasFlexibleArrayInit(getContext()))
4817  ErrorUnsupported(D, "flexible array initializer");
4818  Init = EmitNullConstant(T);
4819  NeedsGlobalCtor = true;
4820  } else {
4821  ErrorUnsupported(D, "static initializer");
4822  Init = llvm::UndefValue::get(getTypes().ConvertType(T));
4823  }
4824  } else {
4825  Init = Initializer;
4826  // We don't need an initializer, so remove the entry for the delayed
4827  // initializer position (just in case this entry was delayed) if we
4828  // also don't need to register a destructor.
4829  if (getLangOpts().CPlusPlus && !NeedsGlobalDtor)
4830  DelayedCXXInitPosition.erase(D);
4831 
4832 #ifndef NDEBUG
4833  CharUnits VarSize = getContext().getTypeSizeInChars(ASTTy) +
4834  InitDecl->getFlexibleArrayInitChars(getContext());
4835  CharUnits CstSize = CharUnits::fromQuantity(
4836  getDataLayout().getTypeAllocSize(Init->getType()));
4837  assert(VarSize == CstSize && "Emitted constant has unexpected size");
4838 #endif
4839  }
4840  }
4841 
4842  llvm::Type* InitType = Init->getType();
4843  llvm::Constant *Entry =
4844  GetAddrOfGlobalVar(D, InitType, ForDefinition_t(!IsTentative));
4845 
4846  // Strip off pointer casts if we got them.
4847  Entry = Entry->stripPointerCasts();
4848 
4849  // Entry is now either a Function or GlobalVariable.
4850  auto *GV = dyn_cast<llvm::GlobalVariable>(Entry);
4851 
4852  // We have a definition after a declaration with the wrong type.
4853  // We must make a new GlobalVariable* and update everything that used OldGV
4854  // (a declaration or tentative definition) with the new GlobalVariable*
4855  // (which will be a definition).
4856  //
4857  // This happens if there is a prototype for a global (e.g.
4858  // "extern int x[];") and then a definition of a different type (e.g.
4859  // "int x[10];"). This also happens when an initializer has a different type
4860  // from the type of the global (this happens with unions).
4861  if (!GV || GV->getValueType() != InitType ||
4862  GV->getType()->getAddressSpace() !=
4863  getContext().getTargetAddressSpace(GetGlobalVarAddressSpace(D))) {
4864 
4865  // Move the old entry aside so that we'll create a new one.
4866  Entry->setName(StringRef());
4867 
4868  // Make a new global with the correct type, this is now guaranteed to work.
4869  GV = cast<llvm::GlobalVariable>(
4870  GetAddrOfGlobalVar(D, InitType, ForDefinition_t(!IsTentative))
4871  ->stripPointerCasts());
4872 
4873  // Replace all uses of the old global with the new global
4874  llvm::Constant *NewPtrForOldDecl =
4875  llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
4876  Entry->getType());
4877  Entry->replaceAllUsesWith(NewPtrForOldDecl);
4878 
4879  // Erase the old global, since it is no longer used.
4880  cast<llvm::GlobalValue>(Entry)->eraseFromParent();
4881  }
4882 
4883  MaybeHandleStaticInExternC(D, GV);
4884
4885  if (D->hasAttr<AnnotateAttr>())
4886  AddGlobalAnnotations(D, GV);
4887 
4888  // Set the llvm linkage type as appropriate.
4889  llvm::GlobalValue::LinkageTypes Linkage =
4890  getLLVMLinkageVarDefinition(D, GV->isConstant());
4891 
4892  // CUDA B.2.1 "The __device__ qualifier declares a variable that resides on
4893  // the device. [...]"
4894  // CUDA B.2.2 "The __constant__ qualifier, optionally used together with
4895  // __device__, declares a variable that: [...]
4896  // Is accessible from all the threads within the grid and from the host
4897  // through the runtime library (cudaGetSymbolAddress() / cudaGetSymbolSize()
4898  // / cudaMemcpyToSymbol() / cudaMemcpyFromSymbol())."
4899  if (GV && LangOpts.CUDA) {
4900  if (LangOpts.CUDAIsDevice) {
4901  if (Linkage != llvm::GlobalValue::InternalLinkage &&
4902  (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
4903  D->getType()->isCUDADeviceBuiltinSurfaceType() ||
4904  D->getType()->isCUDADeviceBuiltinTextureType()))
4905  GV->setExternallyInitialized(true);
4906  } else {
4907  getCUDARuntime().internalizeDeviceSideVar(D, Linkage);
4908  }
4909  getCUDARuntime().handleVarRegistration(D, *GV);
4910  }
4911 
4912  GV->setInitializer(Init);
4913  if (emitter)
4914  emitter->finalize(GV);
4915 
4916  // If it is safe to mark the global 'constant', do so now.
4917  GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
4918  isTypeConstant(D->getType(), true));
4919 
4920  // If it is in a read-only section, mark it 'constant'.
4921  if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
4922  const ASTContext::SectionInfo &SI = Context.SectionInfos[SA->getName()];
4923  if ((SI.SectionFlags & ASTContext::PSF_Write) == 0)
4924  GV->setConstant(true);
4925  }
4926 
4927  CharUnits AlignVal = getContext().getDeclAlign(D);
4928  // Check for alignment specified in an 'omp allocate' directive.
4929  if (llvm::Optional<CharUnits> AlignValFromAllocate =
4930  getOMPAllocateAlignment(D))
4931  AlignVal = *AlignValFromAllocate;
4932  GV->setAlignment(AlignVal.getAsAlign());
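// Illustrative example (not from the original source):
//
//   int buf[64];
//   #pragma omp allocate(buf) align(64)
//
// Here getOMPAllocateAlignment would report 64 bytes, overriding the default
// alignment computed from the declaration above.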
4933 
4934  // On Darwin, unlike other Itanium C++ ABI platforms, the thread-wrapper
4935  // function is only defined alongside the variable, not also alongside
4936  // callers. Normally, all accesses to a thread_local go through the
4937  // thread-wrapper in order to ensure initialization has occurred, so the
4938  // underlying variable will never be used other than through the
4939  // thread-wrapper and can therefore be converted to internal linkage.
4940  //
4941  // However, if the variable has the 'constinit' attribute, it _can_ be
4942  // referenced directly, without calling the thread-wrapper, so the linkage
4943  // must not be changed.
4944  //
4945  // Additionally, if the variable isn't plain external linkage, e.g. if it's
4946  // weak or linkonce, the de-duplication semantics are important to preserve,
4947  // so we don't change the linkage.
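// Illustrative example (not from the original source):
//
//   thread_local int tls_count = compute();   // dynamic TLS initialization
//
// On Darwin every access goes through the thread wrapper, so tls_count itself
// can be internalized here; marking it constinit (or giving it weak/linkonce
// linkage) keeps its original linkage instead.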
4948  if (D->getTLSKind() == VarDecl::TLS_Dynamic &&
4949  Linkage == llvm::GlobalValue::ExternalLinkage &&
4950  Context.getTargetInfo().getTriple().isOSDarwin() &&
4951  !D->hasAttr<ConstInitAttr>())
4952  Linkage = llvm::GlobalValue::InternalLinkage;
4953
4954  GV->setLinkage(Linkage);
4955  if (D->hasAttr<DLLImportAttr>())
4956  GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
4957  else if (D->hasAttr<DLLExportAttr>())
4958  GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
4959  else
4960  GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
4961 
4962  if (Linkage == llvm::GlobalVariable::CommonLinkage) {
4963  // common vars aren't constant even if declared const.
4964  GV->setConstant(false);
4965  // Tentative definitions of global variables may be initialized with
4966  // non-zero null pointers. In that case they should have weak linkage
4967  // instead, since common linkage requires a zero initializer and no
4968  // explicit section, and therefore cannot carry a non-zero initial value.
4969  if (!GV->getInitializer()->isNullValue())
4970  GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
4971  }
4972 
4973  setNonAliasAttributes(D, GV);
4974 
4975  if (D->getTLSKind() && !GV->isThreadLocal()) {
4976  if (D->getTLSKind() == VarDecl::TLS_Dynamic)
4977  CXXThreadLocals.push_back(D);
4978  setTLSMode(GV, *D);
4979  }
4980 
4981  maybeSetTrivialComdat(*D, *GV);
4982 
4983  // Emit the initializer function if necessary.
4984  if (NeedsGlobalCtor || NeedsGlobalDtor)
4985  EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
4986 
4987  SanitizerMD->reportGlobal(GV, *D, NeedsGlobalCtor);
4988 
4989  // Emit global variable debug information.
4990  if (CGDebugInfo *DI = getModuleDebugInfo())
4991  if (getCodeGenOpts().hasReducedDebugInfo())
4992  DI->EmitGlobalVariable(GV, D);
4993 }
4994 
4995 void CodeGenModule::EmitExternalVarDeclaration(const VarDecl *D) {
4996  if (CGDebugInfo *DI = getModuleDebugInfo())
4997  if (getCodeGenOpts().hasReducedDebugInfo()) {
4998  QualType ASTTy = D->getType();
4999  llvm::Type *Ty = getTypes().ConvertTypeForMem(D->getType());
5000  llvm::Constant *GV =
5001  GetOrCreateLLVMGlobal(D->getName(), Ty, ASTTy.getAddressSpace(), D);
5002  DI->EmitExternalVariable(
5003  cast<llvm::GlobalVariable>(GV->stripPointerCasts()), D);
5004  }
5005 }
5006 
5007 static bool isVarDeclStrongDefinition(const ASTContext &Context,
5008  CodeGenModule &CGM, const VarDecl *D,
5009  bool NoCommon) {
5010  // Don't give variables common linkage if -fno-common was specified unless it
5011  // was overridden by a NoCommon attribute.
5012  if ((NoCommon || D->hasAttr<NoCommonAttr>()) && !D->hasAttr<CommonAttr>())
5013  return true;
5014 
5015  // C11 6.9.2/2:
5016  // A declaration of an identifier for an object that has file scope without
5017  // an initializer, and without a storage-class specifier or with the
5018  // storage-class specifier static, constitutes a tentative definition.
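// Illustrative example (not from the original source):
//
//   int x;        // tentative definition: may get common linkage
//   int y = 0;    // has an initializer:   always a strong definition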
5019  if (D->getInit() || D->hasExternalStorage())
5020  return true;
5021 
5022  // A variable cannot be both common and exist in a section.
5023  if (D->hasAttr<SectionAttr>())
5024  return true;
5025 
5026  // A variable cannot be both common and exist in a section.
5027  // We don't try to determine which is the right section in the front-end.
5028  // If no specialized section name is applicable, it will resort to default.
5029  if (D->hasAttr<PragmaClangBSSSectionAttr>() ||
5030  D->hasAttr<PragmaClangDataSectionAttr>() ||
5031  D->hasAttr<PragmaClangRelroSectionAttr>() ||
5032  D->hasAttr<PragmaClangRodataSectionAttr>())
5033  return true;
5034 
5035  // Thread local vars aren't considered common linkage.
5036  if (D->getTLSKind())
5037  return true;
5038 
5039  // Tentative definitions marked with WeakImportAttr are true definitions.
5040  if (D->hasAttr<WeakImportAttr>())
5041  return true;
5042 
5043  // A variable cannot be both common and exist in a comdat.
5044  if (shouldBeInCOMDAT(CGM, *D))
5045  return true;
5046 
5047  // Declarations with a required alignment do not have common linkage in MSVC
5048  // mode.
5049  if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
5050  if (D->hasAttr<AlignedAttr>())
5051  return true;
5052  QualType VarType = D->getType();
5053  if (Context.isAlignmentRequired(VarType))
5054  return true;
5055 
5056  if (const auto *RT = VarType->getAs<RecordType>()) {
5057  const RecordDecl *RD = RT->getDecl();
5058  for (const FieldDecl *FD : RD->fields()) {
5059  if (FD->isBitField())
5060  continue;
5061  if (FD->hasAttr<AlignedAttr>())
5062  return true;
5063  if (Context.isAlignmentRequired(FD->getType()))
5064  return true;
5065  }
5066  }
5067  }
5068 
5069  // Microsoft's link.exe doesn't support alignments greater than 32 bytes for
5070  // common symbols, so symbols with greater alignment requirements cannot be
5071  // common.
5072  // Other COFF linkers (ld.bfd and LLD) support arbitrary power-of-two
5073  // alignments for common symbols via the aligncomm directive, so this
5074  // restriction only applies to MSVC environments.
5075  if (Context.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() &&
5076  Context.getTypeAlignIfKnown(D->getType()) >
5077  Context.toBits(CharUnits::fromQuantity(32)))
5078  return true;
5079 
5080  return false;
5081 }
5082 
5083 llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
5084  const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable) {
5085  if (Linkage == GVA_Internal)
5086  return llvm::GlobalValue::InternalLinkage;
5087
5088  if (D->hasAttr<WeakAttr>())
5089  return llvm::GlobalVariable::WeakAnyLinkage;
5090 
5091  if (const auto *FD = D->getAsFunction())
5092  if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally)
5093  return llvm::GlobalVariable::LinkOnceAnyLinkage;
5094 
5095  // We are guaranteed to have a strong definition somewhere else,
5096  // so we can use available_externally linkage.
5097  if (Linkage == GVA_AvailableExternally)
5098  return llvm::GlobalValue::AvailableExternallyLinkage;
5099 
5100  // Note that Apple's kernel linker doesn't support symbol
5101  // coalescing, so we need to avoid linkonce and weak linkages there.
5102  // Normally, this means we just map to internal, but for explicit
5103  // instantiations we'll map to external.
5104 
5105  // In C++, the compiler has to emit a definition in every translation unit
5106  // that references the function. We should use linkonce_odr because
5107  // a) if all references in this translation unit are optimized away, we
5108  // don't need to codegen it. b) if the function persists, it needs to be
5109  // merged with other definitions. c) C++ has the ODR, so we know the
5110  // definition is dependable.
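// Illustrative example (not from the original source):
//
//   inline int twice(int x) { return 2 * x; }   // GVA_DiscardableODR
//
// is emitted as linkonce_odr here (or internal under -fapple-kext).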
5111  if (Linkage == GVA_DiscardableODR)
5112  return !Context.getLangOpts().AppleKext ? llvm::Function::LinkOnceODRLinkage
5113  : llvm::Function::InternalLinkage;
5114
5115  // An explicit instantiation of a template has weak linkage, since
5116  // explicit instantiations can occur in multiple translation units
5117  // and must all be equivalent. However, we are not allowed to
5118  // throw away these explicit instantiations.
5119  //
5120  // CUDA/HIP: For -fno-gpu-rdc case, device code is limited to one TU,
5121  // so say that CUDA templates are either external (for kernels) or internal.
5122  // This lets llvm perform aggressive inter-procedural optimizations. For
5123  // -fgpu-rdc case, device function calls across multiple TU's are allowed,
5124  // therefore we need to follow the normal linkage paradigm.
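// Illustrative example (not from the original source):
//
//   template <class T> T square(T v) { return v * v; }
//   template int square<int>(int);   // explicit instantiation definition
//
// gets GVA_StrongODR and is typically emitted as weak_odr.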
5125  if (Linkage == GVA_StrongODR) {
5126  if (getLangOpts().AppleKext)
5127  return llvm::Function::ExternalLinkage;
5128  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
5129  !getLangOpts().GPURelocatableDeviceCode)
5130  return D->hasAttr<CUDAGlobalAttr>() ? llvm::Function::ExternalLinkage
5131  : llvm::Function::InternalLinkage;
5132  return llvm::Function::WeakODRLinkage;
5133  }
5134 
5135  // C++ doesn't have tentative definitions and thus cannot have common
5136  // linkage.
5137  if (!getLangOpts().CPlusPlus && isa<VarDecl>(D) &&
5138  !isVarDeclStrongDefinition(Context, *this, cast<VarDecl>(D),
5139  CodeGenOpts.NoCommon))
5140  return llvm::GlobalVariable::CommonLinkage;
5141 
5142  // selectany symbols are externally visible, so use weak instead of
5143  // linkonce. MSVC optimizes away references to const selectany globals, so
5144  // all definitions should be the same and ODR linkage should be used.
5145  // http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx
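// Illustrative example (not from the original source):
//
//   __declspec(selectany) int config_flag = 1;
//
// is emitted as a weak_odr definition that remains externally visible.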
5146  if (D->hasAttr<SelectAnyAttr>())
5147  return llvm::GlobalVariable::WeakODRLinkage;
5148 
5149  // Otherwise, we have strong external linkage.
5150  assert(Linkage == GVA_StrongExternal);
5151  return llvm::GlobalVariable::ExternalLinkage;
5152 }
5153 
5154 llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageVarDefinition(
5155  const VarDecl *VD, bool IsConstant) {
5156  GVALinkage Linkage = getContext().GetGVALinkageForVariable(VD);
5157  return getLLVMLinkageForDeclarator(VD, Linkage, IsConstant);
5158 }
5159 
5160 /// Replace the uses of a function that was declared with a non-proto type.
5161 /// We want to silently drop extra arguments from call sites
5162 static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
5163  llvm::Function *newFn) {
5164  // Fast path.
5165  if (old->use_empty()) return;
5166 
5167  llvm::Type *newRetTy = newFn->getReturnType();
5168  SmallVector<llvm::Value *, 4> newArgs;
5169
5170  for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end();
5171  ui != ue; ) {
5172  llvm::Value::use_iterator use = ui++; // Increment before the use is erased.
5173  llvm::User *user = use->getUser();
5174 
5175  // Recognize and replace uses of bitcasts. Most calls to
5176  // unprototyped functions will use bitcasts.
5177  if (auto *bitcast = dyn_cast<llvm::ConstantExpr>(user)) {
5178  if (bitcast->getOpcode() == llvm::Instruction::BitCast)
5179  replaceUsesOfNonProtoConstant(bitcast, newFn);
5180  continue;
5181  }
5182 
5183  // Recognize calls to the function.
5184  llvm::CallBase *callSite = dyn_cast<llvm::CallBase>(user);
5185  if (!callSite) continue;
5186  if (!callSite->isCallee(&*use))
5187  continue;
5188 
5189  // If the return types don't match exactly, then we can't
5190  // transform this call unless it's dead.
5191  if (callSite->getType() != newRetTy && !callSite->use_empty())
5192  continue;
5193 
5194  // Get the call site's attribute list.
5195  SmallVector<llvm::AttributeSet, 8> newArgAttrs;
5196  llvm::AttributeList oldAttrs = callSite->getAttributes();
5197 
5198  // If the function was passed too few arguments, don't transform.
5199  unsigned newNumArgs = newFn->arg_size();
5200  if (callSite->arg_size() < newNumArgs)
5201  continue;
5202 
5203  // If extra arguments were passed, we silently drop them.
5204  // If any of the types mismatch, we don't transform.
5205  unsigned argNo = 0;
5206  bool dontTransform = false;
5207  for (llvm::Argument &A : newFn->args()) {
5208  if (callSite->getArgOperand(argNo)->getType() != A.getType()) {
5209  dontTransform = true;
5210  break;
5211  }
5212 
5213  // Add any parameter attributes.
5214  newArgAttrs.push_back(oldAttrs.getParamAttrs(argNo));
5215  argNo++;
5216  }
5217  if (dontTransform)
5218  continue;
5219 
5220  // Okay, we can transform this. Create the new call instruction and copy
5221  // over the required information.
5222  newArgs.append(callSite->arg_begin(), callSite->arg_begin() + argNo);
5223 
5224  // Copy over any operand bundles.
5225  SmallVector<llvm::OperandBundleDef, 1> newBundles;
5226  callSite->getOperandBundlesAsDefs(newBundles);
5227 
5228  llvm::CallBase *newCall;
5229  if (isa<llvm::CallInst>(callSite)) {
5230  newCall =
5231  llvm::CallInst::Create(newFn, newArgs, newBundles, "", callSite);
5232  } else {
5233  auto *oldInvoke = cast<llvm::InvokeInst>(callSite);
5234  newCall = llvm::InvokeInst::Create(newFn, oldInvoke->getNormalDest(),
5235  oldInvoke->getUnwindDest(), newArgs,
5236  newBundles, "", callSite);
5237  }
5238  newArgs.clear(); // for the next iteration
5239 
5240  if (!newCall->getType()->isVoidTy())
5241  newCall->takeName(callSite);
5242  newCall->setAttributes(
5243  llvm::AttributeList::get(newFn->getContext(), oldAttrs.getFnAttrs(),
5244  oldAttrs.getRetAttrs(), newArgAttrs));
5245  newCall->setCallingConv(callSite->getCallingConv());
5246 
5247  // Finally, remove the old call, replacing any uses with the new one.
5248  if (!callSite->use_empty())
5249  callSite->replaceAllUsesWith(newCall);
5250 
5251  // Copy debug location attached to CI.
5252  if (callSite->getDebugLoc())
5253  newCall->setDebugLoc(callSite->getDebugLoc());
5254 
5255  callSite->eraseFromParent();
5256  }
5257 }
5258 
5259 /// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
5260 /// implement a function with no prototype, e.g. "int foo() {}". If there are
5261 /// existing call uses of the old function in the module, this adjusts them to
5262 /// call the new function directly.
5263 ///
5264 /// This is not just a cleanup: the always_inline pass requires direct calls to
5265 /// functions to be able to inline them. If there is a bitcast in the way, it
5266 /// won't inline them. Instcombine normally deletes these calls, but it isn't
5267 /// run at -O0.
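// Illustrative example (not from the original source), in C:
//
//   int foo();                            // non-prototype declaration
//   int use(void) { return foo(1, 2); }   // call goes through a bitcast
//   int foo(void) { return 42; }          // definition supplies the type
//
// When the definition is emitted, the earlier call is rewritten to call the
// new function directly and the extra arguments are silently dropped.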
5268 static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
5269  llvm::Function *NewFn) {
5270  // If we're redefining a global as a function, don't transform it.
5271  if (!isa<llvm::Function>(Old)) return;
5272 
5273  replaceUsesOfNonProtoConstant(Old, NewFn);
5274 }
5275 
5276 void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
5277  auto DK = VD->isThisDeclarationADefinition();
5278  if (DK == VarDecl::Definition && VD->hasAttr<DLLImportAttr>())
5279  return;
5280 
5281  TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind();
5282  // If we have a definition, this might be a deferred decl. If the
5283  // instantiation is explicit, make sure we emit it at the end.
5284  if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition)
5285  GetAddrOfGlobalVar(VD);
5286 
5287  EmitTopLevelDecl(VD);
5288 }
5289 
5290 void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
5291  llvm::GlobalValue *GV) {
5292  const auto *D = cast<FunctionDecl>(GD.getDecl());
5293 
5294  // Compute the function info and LLVM type.
5295  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
5296  llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
5297 
5298  // Get or create the prototype for the function.
5299  if (!GV || (GV->getValueType() != Ty))
5300  GV = cast<llvm::GlobalValue>(GetAddrOfFunction(GD, Ty, /*ForVTable=*/false,
5301  /*DontDefer=*/true,
5302  ForDefinition));
5303 
5304  // Already emitted.
5305  if (!GV->isDeclaration())
5306  return;
5307 
5308  // We need to set linkage and visibility on the function before
5309  // generating code for it because various parts of IR generation
5310  // want to propagate this information down (e.g. to local static
5311  // declarations).
5312  auto *Fn = cast<llvm::Function>(GV);
5313  setFunctionLinkage(GD, Fn);
5314 
5315  // FIXME: this is redundant with part of setFunctionDefinitionAttributes
5316  setGVProperties(Fn, GD);
5317 
5318  MaybeHandleStaticInExternC(D, Fn);
5319
5320  maybeSetTrivialComdat(*D, *Fn);
5321 
5322  // Set CodeGen attributes that represent floating point environment.
5323  setLLVMFunctionFEnvAttributes(D, Fn);
5324
5325  CodeGenFunction(*this).GenerateCode(GD, Fn, FI);
5326 
5327  setNonAliasAttributes(GD, Fn);
5328  SetLLVMFunctionAttributesForDefinition(D, Fn);
5329
5330  if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
5331  AddGlobalCtor(Fn, CA->getPriority());
5332  if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
5333  AddGlobalDtor(Fn, DA->getPriority(), true);
5334  if (D->hasAttr<AnnotateAttr>())
5335  AddGlobalAnnotations(D, Fn);
5336 }
5337 
5338 void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
5339  const auto *D = cast<ValueDecl>(GD.getDecl());
5340  const AliasAttr *AA = D->getAttr<AliasAttr>();
5341  assert(AA && "Not an alias?");
5342 
5343  StringRef MangledName = getMangledName(GD);
5344 
5345  if (AA->getAliasee() == MangledName) {
5346  Diags.Report(AA->getLocation(), diag::err_cyclic_alias) << 0;
5347  return;
5348  }
5349 
5350  // If there is a definition in the module, then it wins over the alias.
5351  // This is dubious, but we allow it to be safe; just ignore the alias.
5352  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
5353  if (Entry && !Entry->isDeclaration())
5354  return;
5355 
5356  Aliases.push_back(GD);
5357 
5358  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
5359 
5360  // Create a reference to the named value. This ensures that it is emitted
5361  // if a deferred decl.
5362  llvm::Constant *Aliasee;
5363  llvm::GlobalValue::LinkageTypes LT;
5364  if (isa<llvm::FunctionType>(DeclTy)) {
5365  Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GD,
5366  /*ForVTable=*/false);
5367  LT = getFunctionLinkage(GD);
5368  } else {
5369  Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(), DeclTy, LangAS::Default,
5370  /*D=*/nullptr);
5371  if (const auto *VD = dyn_cast<VarDecl>(GD.getDecl()))
5372  LT = getLLVMLinkageVarDefinition(VD, D->getType().isConstQualified());
5373  else
5374  LT = getFunctionLinkage(GD);
5375  }
5376 
5377  // Create the new alias itself, but don't set a name yet.
5378  unsigned AS = Aliasee->getType()->getPointerAddressSpace();
5379  auto *GA =
5380  llvm::GlobalAlias::create(DeclTy, AS, LT, "", Aliasee, &getModule());
5381 
5382  if (Entry) {
5383  if (GA->getAliasee() == Entry) {
5384  Diags.Report(AA->getLocation(), diag::err_cyclic_alias) << 0;
5385  return;
5386  }
5387 
5388  assert(Entry->isDeclaration());
5389 
5390  // If there is a declaration in the module, then we had an extern followed
5391  // by the alias, as in:
5392  // extern int test6();
5393  // ...
5394  // int test6() __attribute__((alias("test7")));
5395  //
5396  // Remove it and replace uses of it with the alias.
5397  GA->takeName(Entry);
5398 
5399  Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
5400  Entry->getType()));
5401  Entry->eraseFromParent();
5402  } else {
5403  GA->setName(MangledName);
5404  }
5405 
5406  // Set attributes which are particular to an alias; this is a
5407  // specialization of the attributes which may be set on a global
5408  // variable/function.
5409  if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakRefAttr>() ||
5410  D->isWeakImported()) {
5411  GA->setLinkage(llvm::Function::WeakAnyLinkage);
5412  }
5413 
5414  if (const auto *VD = dyn_cast<VarDecl>(D))
5415  if (VD->getTLSKind())
5416  setTLSMode(GA, *VD);
5417 
5418  SetCommonAttributes(GD, GA);
5419 
5420  // Emit global alias debug information.
5421  if (isa<VarDecl>(D))
5422  if (CGDebugInfo *DI = getModuleDebugInfo())
5423  DI->EmitGlobalAlias(cast<llvm::GlobalValue>(GA->getAliasee()->stripPointerCasts()), GD);
5424 }
5425 
5426 void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) {
5427  const auto *D = cast<ValueDecl>(GD.getDecl());
5428  const IFuncAttr *IFA = D->getAttr<IFuncAttr>();
5429  assert(IFA && "Not an ifunc?");
5430 
5431  StringRef MangledName = getMangledName(GD);
5432 
5433  if (IFA->getResolver() == MangledName) {
5434  Diags.Report(IFA->getLocation(), diag::err_cyclic_alias) << 1;
5435  return;
5436  }
5437 
5438  // Report an error if some definition overrides ifunc.
5439  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
5440  if (Entry && !Entry->isDeclaration()) {
5441  GlobalDecl OtherGD;
5442  if (lookupRepresentativeDecl(MangledName, OtherGD) &&
5443  DiagnosedConflictingDefinitions.insert(GD).second) {
5444  Diags.Report(D->getLocation(), diag::err_duplicate_mangled_name)
5445  << MangledName;
5446  Diags.Report(OtherGD.getDecl()->getLocation(),
5447  diag::note_previous_definition);
5448  }
5449  return;
5450  }
5451 
5452  Aliases.push_back(GD);
5453 
5454  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
5455  llvm::Type *ResolverTy = llvm::GlobalIFunc::getResolverFunctionType(DeclTy);
5456  llvm::Constant *Resolver =
5457  GetOrCreateLLVMFunction(IFA->getResolver(), ResolverTy, {},
5458  /*ForVTable=*/false);
5459  llvm::GlobalIFunc *GIF =
5461  "", Resolver, &getModule());
5462  if (Entry) {
5463  if (GIF->getResolver() == Entry) {
5464  Diags.Report(IFA->getLocation(), diag::err_cyclic_alias) << 1;
5465  return;
5466  }
5467  assert(Entry->isDeclaration());
5468 
5469  // If there is a declaration in the module, then we had an extern followed
5470  // by the ifunc, as in:
5471  // extern int test();
5472  // ...
5473  // int test() __attribute__((ifunc("resolver")));
5474  //
5475  // Remove it and replace uses of it with the ifunc.
5476  GIF->takeName(Entry);
5477 
5478  Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GIF,
5479  Entry->getType()));
5480  Entry->eraseFromParent();
5481  } else
5482  GIF->setName(MangledName);
5483 
5484  SetCommonAttributes(GD, GIF);
5485 }
5486 
5487 llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
5488  ArrayRef<llvm::Type*> Tys) {
5489  return llvm::Intrinsic::getDeclaration(&getModule(), (llvm::Intrinsic::ID)IID,
5490  Tys);
5491 }
5492 
5493 static llvm::StringMapEntry<llvm::GlobalVariable *> &
5494 GetConstantCFStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
5495  const StringLiteral *Literal, bool TargetIsLSB,
5496  bool &IsUTF16, unsigned &StringLength) {
5497  StringRef String = Literal->getString();
5498  unsigned NumBytes = String.size();
5499 
5500  // Check for simple case.
5501  if (!Literal->containsNonAsciiOrNull()) {
5502  StringLength = NumBytes;
5503  return *Map.insert(std::make_pair(String, nullptr)).first;
5504  }
5505 
5506  // Otherwise, convert the UTF8 literals into a string of shorts.
5507  IsUTF16 = true;
5508 
5509  SmallVector<llvm::UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls.
5510  const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
5511  llvm::UTF16 *ToPtr = &ToBuf[0];
5512 
5513  (void)llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
5514  ToPtr + NumBytes, llvm::strictConversion);
5515 
5516  // ConvertUTF8toUTF16 returns the length in ToPtr.
5517  StringLength = ToPtr - &ToBuf[0];
5518 
5519  // Add an explicit null.
5520  *ToPtr = 0;
5521  return *Map.insert(std::make_pair(
5522  StringRef(reinterpret_cast<const char *>(ToBuf.data()),
5523  (StringLength + 1) * 2),
5524  nullptr)).first;
5525 }
5526 
5527 ConstantAddress
5528 CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
5529  unsigned StringLength = 0;
5530  bool isUTF16 = false;
5531  llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
5532  GetConstantCFStringEntry(CFConstantStringMap, Literal,
5533  getDataLayout().isLittleEndian(), isUTF16,
5534  StringLength);
5535 
5536  if (auto *C = Entry.second)
5537  return ConstantAddress(
5538  C, C->getValueType(), CharUnits::fromQuantity(C->getAlignment()));
5539 
5540  llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
5541  llvm::Constant *Zeros[] = { Zero, Zero };
5542 
5543  const ASTContext &Context = getContext();
5544  const llvm::Triple &Triple = getTriple();
5545 
5546  const auto CFRuntime = getLangOpts().CFRuntime;
5547  const bool IsSwiftABI =
5548  static_cast<unsigned>(CFRuntime) >=
5549  static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift);
5550  const bool IsSwift4_1 = CFRuntime == LangOptions::CoreFoundationABI::Swift4_1;
5551 
5552  // If we don't already have it, get __CFConstantStringClassReference.
5553  if (!CFConstantStringClassRef) {
5554  const char *CFConstantStringClassName = "__CFConstantStringClassReference";
5555  llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
5556  Ty = llvm::ArrayType::get(Ty, 0);
5557 
5558  switch (CFRuntime) {
5559  default: break;
5560  case LangOptions::CoreFoundationABI::Swift: [[fallthrough]];
5561  case LangOptions::CoreFoundationABI::Swift5_0:
5562  CFConstantStringClassName =
5563  Triple.isOSDarwin() ? "$s15SwiftFoundation19_NSCFConstantStringCN"
5564  : "$s10Foundation19_NSCFConstantStringCN";
5565  Ty = IntPtrTy;
5566  break;
5567  case LangOptions::CoreFoundationABI::Swift4_2:
5568  CFConstantStringClassName =
5569  Triple.isOSDarwin() ? "$S15SwiftFoundation19_NSCFConstantStringCN"
5570  : "$S10Foundation19_NSCFConstantStringCN";
5571  Ty = IntPtrTy;
5572  break;
5573  case LangOptions::CoreFoundationABI::Swift4_1:
5574  CFConstantStringClassName =
5575  Triple.isOSDarwin() ? "__T015SwiftFoundation19_NSCFConstantStringCN"
5576  : "__T010Foundation19_NSCFConstantStringCN";
5577  Ty = IntPtrTy;
5578  break;
5579  }
5580 
5581  llvm::Constant *C = CreateRuntimeVariable(Ty, CFConstantStringClassName);
5582 
5583  if (Triple.isOSBinFormatELF() || Triple.isOSBinFormatCOFF()) {
5584  llvm::GlobalValue *GV = nullptr;
5585 
5586  if ((GV = dyn_cast<llvm::GlobalValue>(C))) {
5587  IdentifierInfo &II = Context.Idents.get(GV->getName());
5588  TranslationUnitDecl *TUDecl = Context.getTranslationUnitDecl();
5589  DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
5590
5591  const VarDecl *VD = nullptr;
5592  for (const auto *Result : DC->lookup(&II))
5593  if ((VD = dyn_cast<VarDecl>(Result)))
5594  break;
5595 
5596  if (Triple.isOSBinFormatELF()) {
5597  if (!VD)
5598  GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
5599  } else {
5600  GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
5601  if (!VD || !VD->