CodeGenModule.cpp
1 //===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This coordinates the per-module state used while generating code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CodeGenModule.h"
14 #include "CGBlocks.h"
15 #include "CGCUDARuntime.h"
16 #include "CGCXXABI.h"
17 #include "CGCall.h"
18 #include "CGDebugInfo.h"
19 #include "CGObjCRuntime.h"
20 #include "CGOpenCLRuntime.h"
21 #include "CGOpenMPRuntime.h"
22 #include "CGOpenMPRuntimeGPU.h"
23 #include "CodeGenFunction.h"
24 #include "CodeGenPGO.h"
25 #include "ConstantEmitter.h"
26 #include "CoverageMappingGen.h"
27 #include "TargetInfo.h"
28 #include "clang/AST/ASTContext.h"
29 #include "clang/AST/CharUnits.h"
30 #include "clang/AST/DeclCXX.h"
31 #include "clang/AST/DeclObjC.h"
32 #include "clang/AST/DeclTemplate.h"
33 #include "clang/AST/Mangle.h"
34 #include "clang/AST/RecordLayout.h"
36 #include "clang/AST/StmtVisitor.h"
37 #include "clang/Basic/Builtins.h"
38 #include "clang/Basic/CharInfo.h"
40 #include "clang/Basic/Diagnostic.h"
42 #include "clang/Basic/Module.h"
44 #include "clang/Basic/TargetInfo.h"
45 #include "clang/Basic/Version.h"
48 #include "llvm/ADT/StringSwitch.h"
49 #include "llvm/ADT/Triple.h"
50 #include "llvm/Analysis/TargetLibraryInfo.h"
51 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
52 #include "llvm/IR/CallingConv.h"
53 #include "llvm/IR/DataLayout.h"
54 #include "llvm/IR/Intrinsics.h"
55 #include "llvm/IR/LLVMContext.h"
56 #include "llvm/IR/Module.h"
57 #include "llvm/IR/ProfileSummary.h"
58 #include "llvm/ProfileData/InstrProfReader.h"
59 #include "llvm/Support/CodeGen.h"
60 #include "llvm/Support/CommandLine.h"
61 #include "llvm/Support/ConvertUTF.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Support/MD5.h"
64 #include "llvm/Support/TimeProfiler.h"
65 #include "llvm/Support/X86TargetParser.h"
66 
67 using namespace clang;
68 using namespace CodeGen;
69 
70 static llvm::cl::opt<bool> LimitedCoverage(
71  "limited-coverage-experimental", llvm::cl::ZeroOrMore, llvm::cl::Hidden,
72  llvm::cl::desc("Emit limited coverage mapping information (experimental)"),
73  llvm::cl::init(false));
74 
75 static const char AnnotationSection[] = "llvm.metadata";
76 
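// Pick the C++ ABI lowering for this module: every Itanium-family ABI kind
// shares the Itanium implementation, while the Microsoft ABI gets its own.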
77 static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
78  switch (CGM.getContext().getCXXABIKind()) {
79  case TargetCXXABI::AppleARM64:
80  case TargetCXXABI::Fuchsia:
81  case TargetCXXABI::GenericAArch64:
82  case TargetCXXABI::GenericARM:
83  case TargetCXXABI::iOS:
84  case TargetCXXABI::WatchOS:
85  case TargetCXXABI::GenericMIPS:
86  case TargetCXXABI::GenericItanium:
87  case TargetCXXABI::WebAssembly:
88  case TargetCXXABI::XL:
89  return CreateItaniumCXXABI(CGM);
90  case TargetCXXABI::Microsoft:
91  return CreateMicrosoftCXXABI(CGM);
92  }
93 
94  llvm_unreachable("invalid C++ ABI kind");
95 }
96 
97 CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
98  const PreprocessorOptions &PPO,
99  const CodeGenOptions &CGO, llvm::Module &M,
100  DiagnosticsEngine &diags,
101  CoverageSourceInfo *CoverageInfo)
102  : Context(C), LangOpts(C.getLangOpts()), HeaderSearchOpts(HSO),
103  PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
104  Target(C.getTargetInfo()), ABI(createCXXABI(*this)),
105  VMContext(M.getContext()), Types(*this), VTables(*this),
106  SanitizerMD(new SanitizerMetadata(*this)) {
107 
108  // Initialize the type cache.
109  llvm::LLVMContext &LLVMContext = M.getContext();
110  VoidTy = llvm::Type::getVoidTy(LLVMContext);
111  Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
112  Int16Ty = llvm::Type::getInt16Ty(LLVMContext);
113  Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
114  Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
115  HalfTy = llvm::Type::getHalfTy(LLVMContext);
116  BFloatTy = llvm::Type::getBFloatTy(LLVMContext);
117  FloatTy = llvm::Type::getFloatTy(LLVMContext);
118  DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
119  PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
120  PointerAlignInBytes =
121  C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
122  SizeSizeInBytes =
123  C.toCharUnitsFromBits(C.getTargetInfo().getMaxPointerWidth()).getQuantity();
124  IntAlignInBytes =
125  C.toCharUnitsFromBits(C.getTargetInfo().getIntAlign()).getQuantity();
126  CharTy =
127  llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getCharWidth());
128  IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
129  IntPtrTy = llvm::IntegerType::get(LLVMContext,
130  C.getTargetInfo().getMaxPointerWidth());
131  Int8PtrTy = Int8Ty->getPointerTo(0);
132  Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
133  const llvm::DataLayout &DL = M.getDataLayout();
134  AllocaInt8PtrTy = Int8Ty->getPointerTo(DL.getAllocaAddrSpace());
135  GlobalsInt8PtrTy = Int8Ty->getPointerTo(DL.getDefaultGlobalsAddressSpace());
136  ASTAllocaAddressSpace = getTargetCodeGenInfo().getASTAllocaAddressSpace();
137 
138  RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();
139 
140  if (LangOpts.ObjC)
141  createObjCRuntime();
142  if (LangOpts.OpenCL)
143  createOpenCLRuntime();
144  if (LangOpts.OpenMP)
145  createOpenMPRuntime();
146  if (LangOpts.CUDA)
147  createCUDARuntime();
148 
149  // Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0.
150  if (LangOpts.Sanitize.has(SanitizerKind::Thread) ||
151  (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
152  TBAA.reset(new CodeGenTBAA(Context, TheModule, CodeGenOpts, getLangOpts(),
153  getCXXABI().getMangleContext()));
154 
155  // If debug info or coverage generation is enabled, create the CGDebugInfo
156  // object.
157  if (CodeGenOpts.getDebugInfo() != codegenoptions::NoDebugInfo ||
158  CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes)
159  DebugInfo.reset(new CGDebugInfo(*this));
160 
161  Block.GlobalUniqueCount = 0;
162 
163  if (C.getLangOpts().ObjC)
164  ObjCData.reset(new ObjCEntrypoints());
165 
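  // With -fprofile-instr-use, open the indexed profile up front so PGO data is
  // available during IRGen; a failure to read it is reported as an error.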
166  if (CodeGenOpts.hasProfileClangUse()) {
167  auto ReaderOrErr = llvm::IndexedInstrProfReader::create(
168  CodeGenOpts.ProfileInstrumentUsePath, CodeGenOpts.ProfileRemappingFile);
169  if (auto E = ReaderOrErr.takeError()) {
170  unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
171  "Could not read profile %0: %1");
172  llvm::handleAllErrors(std::move(E), [&](const llvm::ErrorInfoBase &EI) {
173  getDiags().Report(DiagID) << CodeGenOpts.ProfileInstrumentUsePath
174  << EI.message();
175  });
176  } else
177  PGOReader = std::move(ReaderOrErr.get());
178  }
179 
180  // If coverage mapping generation is enabled, create the
181  // CoverageMappingModuleGen object.
182  if (CodeGenOpts.CoverageMapping)
183  CoverageMapping.reset(new CoverageMappingModuleGen(*this, *CoverageInfo));
184 
185  // Generate the module name hash here if needed.
186  if (CodeGenOpts.UniqueInternalLinkageNames &&
187  !getModule().getSourceFileName().empty()) {
188  std::string Path = getModule().getSourceFileName();
189  // Check if a path substitution is needed from the MacroPrefixMap.
190  for (const auto &Entry : LangOpts.MacroPrefixMap)
191  if (Path.rfind(Entry.first, 0) != std::string::npos) {
192  Path = Entry.second + Path.substr(Entry.first.size());
193  break;
194  }
195  llvm::MD5 Md5;
196  Md5.update(Path);
197  llvm::MD5::MD5Result R;
198  Md5.final(R);
199  SmallString<32> Str;
200  llvm::MD5::stringifyResult(R, Str);
201  // Convert the MD5 hash to decimal. Demangler suffixes can contain either
202  // numbers or characters, but not both.
203  llvm::APInt IntHash(128, Str.str(), 16);
204  // Prepend "__uniq" before the hash for tools like profilers to understand
205  // that this symbol is of internal linkage type. The "__uniq" is the
206  // pre-determined prefix that is used to tell tools that this symbol was
207  // created with -funique-internal-linkage-names and the tools can strip or
208  // keep the prefix as needed.
209  ModuleNameHash = (Twine(".__uniq.") +
210  Twine(toString(IntHash, /* Radix = */ 10, /* Signed = */false))).str();
211  }
212 }
213 
214 CodeGenModule::~CodeGenModule() {}
215 
216 void CodeGenModule::createObjCRuntime() {
217  // This is just isGNUFamily(), but we want to force implementors of
218  // new ABIs to decide how best to do this.
219  switch (LangOpts.ObjCRuntime.getKind()) {
220  case ObjCRuntime::GNUstep:
221  case ObjCRuntime::GCC:
222  case ObjCRuntime::ObjFW:
223  ObjCRuntime.reset(CreateGNUObjCRuntime(*this));
224  return;
225 
226  case ObjCRuntime::FragileMacOSX:
227  case ObjCRuntime::MacOSX:
228  case ObjCRuntime::iOS:
229  case ObjCRuntime::WatchOS:
230  ObjCRuntime.reset(CreateMacObjCRuntime(*this));
231  return;
232  }
233  llvm_unreachable("bad runtime kind");
234 }
235 
236 void CodeGenModule::createOpenCLRuntime() {
237  OpenCLRuntime.reset(new CGOpenCLRuntime(*this));
238 }
239 
240 void CodeGenModule::createOpenMPRuntime() {
241  // Select a specialized code generation class based on the target, if any.
242  // If it does not exist use the default implementation.
243  switch (getTriple().getArch()) {
244  case llvm::Triple::nvptx:
245  case llvm::Triple::nvptx64:
246  case llvm::Triple::amdgcn:
247  assert(getLangOpts().OpenMPIsDevice &&
248  "OpenMP AMDGPU/NVPTX is only prepared to deal with device code.");
249  OpenMPRuntime.reset(new CGOpenMPRuntimeGPU(*this));
250  break;
251  default:
252  if (LangOpts.OpenMPSimd)
253  OpenMPRuntime.reset(new CGOpenMPSIMDRuntime(*this));
254  else
255  OpenMPRuntime.reset(new CGOpenMPRuntime(*this));
256  break;
257  }
258 }
259 
260 void CodeGenModule::createCUDARuntime() {
261  CUDARuntime.reset(CreateNVCUDARuntime(*this));
262 }
263 
264 void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) {
265  Replacements[Name] = C;
266 }
267 
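// Replace every function registered through addReplacement with its
// replacement constant, preserving the original position in the function list.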
268 void CodeGenModule::applyReplacements() {
269  for (auto &I : Replacements) {
270  StringRef MangledName = I.first();
271  llvm::Constant *Replacement = I.second;
272  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
273  if (!Entry)
274  continue;
275  auto *OldF = cast<llvm::Function>(Entry);
276  auto *NewF = dyn_cast<llvm::Function>(Replacement);
277  if (!NewF) {
278  if (auto *Alias = dyn_cast<llvm::GlobalAlias>(Replacement)) {
279  NewF = dyn_cast<llvm::Function>(Alias->getAliasee());
280  } else {
281  auto *CE = cast<llvm::ConstantExpr>(Replacement);
282  assert(CE->getOpcode() == llvm::Instruction::BitCast ||
283  CE->getOpcode() == llvm::Instruction::GetElementPtr);
284  NewF = dyn_cast<llvm::Function>(CE->getOperand(0));
285  }
286  }
287 
288  // Replace old with new, but keep the old order.
289  OldF->replaceAllUsesWith(Replacement);
290  if (NewF) {
291  NewF->removeFromParent();
292  OldF->getParent()->getFunctionList().insertAfter(OldF->getIterator(),
293  NewF);
294  }
295  OldF->eraseFromParent();
296  }
297 }
298 
299 void CodeGenModule::addGlobalValReplacement(llvm::GlobalValue *GV, llvm::Constant *C) {
300  GlobalValReplacements.push_back(std::make_pair(GV, C));
301 }
302 
303 void CodeGenModule::applyGlobalValReplacements() {
304  for (auto &I : GlobalValReplacements) {
305  llvm::GlobalValue *GV = I.first;
306  llvm::Constant *C = I.second;
307 
308  GV->replaceAllUsesWith(C);
309  GV->eraseFromParent();
310  }
311 }
312 
313 // This is only used in aliases that we created and we know they have a
314 // linear structure.
315 static const llvm::GlobalValue *getAliasedGlobal(const llvm::GlobalValue *GV) {
316  const llvm::Constant *C;
317  if (auto *GA = dyn_cast<llvm::GlobalAlias>(GV))
318  C = GA->getAliasee();
319  else if (auto *GI = dyn_cast<llvm::GlobalIFunc>(GV))
320  C = GI->getResolver();
321  else
322  return GV;
323 
324  const auto *AliaseeGV = dyn_cast<llvm::GlobalValue>(C->stripPointerCasts());
325  if (!AliaseeGV)
326  return nullptr;
327 
328  const llvm::GlobalValue *FinalGV = AliaseeGV->getAliaseeObject();
329  if (FinalGV == GV)
330  return nullptr;
331 
332  return FinalGV;
333 }
334 
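// Verify that an alias or ifunc refers to a defined, non-cyclic global and,
// for ifuncs, that the resolver returns a pointer; diagnoses and returns
// false otherwise.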
335 static bool checkAliasedGlobal(DiagnosticsEngine &Diags,
336  SourceLocation Location, bool IsIFunc,
337  const llvm::GlobalValue *Alias,
338  const llvm::GlobalValue *&GV) {
339  GV = getAliasedGlobal(Alias);
340  if (!GV) {
341  Diags.Report(Location, diag::err_cyclic_alias) << IsIFunc;
342  return false;
343  }
344 
345  if (GV->isDeclaration()) {
346  Diags.Report(Location, diag::err_alias_to_undefined) << IsIFunc << IsIFunc;
347  return false;
348  }
349 
350  if (IsIFunc) {
351  // Check resolver function type.
352  const auto *F = dyn_cast<llvm::Function>(GV);
353  if (!F) {
354  Diags.Report(Location, diag::err_alias_to_undefined)
355  << IsIFunc << IsIFunc;
356  return false;
357  }
358 
359  llvm::FunctionType *FTy = F->getFunctionType();
360  if (!FTy->getReturnType()->isPointerTy()) {
361  Diags.Report(Location, diag::err_ifunc_resolver_return);
362  return false;
363  }
364  }
365 
366  return true;
367 }
368 
369 void CodeGenModule::checkAliases() {
370  // Check if the constructed aliases are well formed. It is really unfortunate
371  // that we have to do this in CodeGen, but we only construct mangled names
372  // and aliases during codegen.
373  bool Error = false;
374  DiagnosticsEngine &Diags = getDiags();
375  for (const GlobalDecl &GD : Aliases) {
376  const auto *D = cast<ValueDecl>(GD.getDecl());
377  SourceLocation Location;
378  bool IsIFunc = D->hasAttr<IFuncAttr>();
379  if (const Attr *A = D->getDefiningAttr())
380  Location = A->getLocation();
381  else
382  llvm_unreachable("Not an alias or ifunc?");
383 
384  StringRef MangledName = getMangledName(GD);
385  llvm::GlobalValue *Alias = GetGlobalValue(MangledName);
386  const llvm::GlobalValue *GV = nullptr;
387  if (!checkAliasedGlobal(Diags, Location, IsIFunc, Alias, GV)) {
388  Error = true;
389  continue;
390  }
391 
392  llvm::Constant *Aliasee =
393  IsIFunc ? cast<llvm::GlobalIFunc>(Alias)->getResolver()
394  : cast<llvm::GlobalAlias>(Alias)->getAliasee();
395 
396  llvm::GlobalValue *AliaseeGV;
397  if (auto CE = dyn_cast<llvm::ConstantExpr>(Aliasee))
398  AliaseeGV = cast<llvm::GlobalValue>(CE->getOperand(0));
399  else
400  AliaseeGV = cast<llvm::GlobalValue>(Aliasee);
401 
402  if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
403  StringRef AliasSection = SA->getName();
404  if (AliasSection != AliaseeGV->getSection())
405  Diags.Report(SA->getLocation(), diag::warn_alias_with_section)
406  << AliasSection << IsIFunc << IsIFunc;
407  }
408 
409  // We have to handle alias to weak aliases in here. LLVM itself disallows
410  // this since the object semantics would not match the IL one. For
411  // compatibility with gcc we implement it by just pointing the alias
412  // to its aliasee's aliasee. We also warn, since the user is probably
413  // expecting the link to be weak.
414  if (auto *GA = dyn_cast<llvm::GlobalAlias>(AliaseeGV)) {
415  if (GA->isInterposable()) {
416  Diags.Report(Location, diag::warn_alias_to_weak_alias)
417  << GV->getName() << GA->getName() << IsIFunc;
418  Aliasee = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
419  GA->getAliasee(), Alias->getType());
420 
421  if (IsIFunc)
422  cast<llvm::GlobalIFunc>(Alias)->setResolver(Aliasee);
423  else
424  cast<llvm::GlobalAlias>(Alias)->setAliasee(Aliasee);
425  }
426  }
427  }
428  if (!Error)
429  return;
430 
431  for (const GlobalDecl &GD : Aliases) {
432  StringRef MangledName = getMangledName(GD);
433  llvm::GlobalValue *Alias = GetGlobalValue(MangledName);
434  Alias->replaceAllUsesWith(llvm::UndefValue::get(Alias->getType()));
435  Alias->eraseFromParent();
436  }
437 }
438 
439 void CodeGenModule::clear() {
440  DeferredDeclsToEmit.clear();
441  if (OpenMPRuntime)
442  OpenMPRuntime->clear();
443 }
444 
445 void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
446  StringRef MainFile) {
447  if (!hasDiagnostics())
448  return;
449  if (VisitedInMainFile > 0 && VisitedInMainFile == MissingInMainFile) {
450  if (MainFile.empty())
451  MainFile = "<stdin>";
452  Diags.Report(diag::warn_profile_data_unprofiled) << MainFile;
453  } else {
454  if (Mismatched > 0)
455  Diags.Report(diag::warn_profile_data_out_of_date) << Visited << Mismatched;
456 
457  if (Missing > 0)
458  Diags.Report(diag::warn_profile_data_missing) << Visited << Missing;
459  }
460 }
461 
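// Under -fvisibility-from-dllstorageclass, derive each global's visibility
// from its DLL storage class and then clear the storage class itself.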
462 static void setVisibilityFromDLLStorageClass(const clang::LangOptions &LO,
463  llvm::Module &M) {
464  if (!LO.VisibilityFromDLLStorageClass)
465  return;
466 
467  llvm::GlobalValue::VisibilityTypes DLLExportVisibility =
468  CodeGenModule::GetLLVMVisibility(LO.getDLLExportVisibility());
469  llvm::GlobalValue::VisibilityTypes NoDLLStorageClassVisibility =
470  CodeGenModule::GetLLVMVisibility(LO.getNoDLLStorageClassVisibility());
471  llvm::GlobalValue::VisibilityTypes ExternDeclDLLImportVisibility =
472  CodeGenModule::GetLLVMVisibility(LO.getExternDeclDLLImportVisibility());
473  llvm::GlobalValue::VisibilityTypes ExternDeclNoDLLStorageClassVisibility =
474  CodeGenModule::GetLLVMVisibility(
475  LO.getExternDeclNoDLLStorageClassVisibility());
476 
477  for (llvm::GlobalValue &GV : M.global_values()) {
478  if (GV.hasAppendingLinkage() || GV.hasLocalLinkage())
479  continue;
480 
481  // Reset DSO locality before setting the visibility. This removes
482  // any effects that visibility options and annotations may have
483  // had on the DSO locality. Setting the visibility will implicitly set
484  // appropriate globals to DSO Local; however, this will be pessimistic
485  // w.r.t. to the normal compiler IRGen.
486  GV.setDSOLocal(false);
487 
488  if (GV.isDeclarationForLinker()) {
489  GV.setVisibility(GV.getDLLStorageClass() ==
490  llvm::GlobalValue::DLLImportStorageClass
491  ? ExternDeclDLLImportVisibility
492  : ExternDeclNoDLLStorageClassVisibility);
493  } else {
494  GV.setVisibility(GV.getDLLStorageClass() ==
495  llvm::GlobalValue::DLLExportStorageClass
496  ? DLLExportVisibility
497  : NoDLLStorageClassVisibility);
498  }
499 
500  GV.setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
501  }
502 }
503 
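// Release() finalizes IR generation for the module: deferred definitions,
// global ctors/dtors, runtime-specific registration, and the module flags the
// backend and linker expect.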
504 void CodeGenModule::Release() {
505  EmitDeferred();
506  EmitVTablesOpportunistically();
507  applyGlobalValReplacements();
508  applyReplacements();
509  checkAliases();
510  emitMultiVersionFunctions();
511  EmitCXXGlobalInitFunc();
512  EmitCXXGlobalCleanUpFunc();
513  registerGlobalDtorsWithAtExit();
514  EmitCXXThreadLocalInitFunc();
515  if (ObjCRuntime)
516  if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
517  AddGlobalCtor(ObjCInitFunction);
518  if (Context.getLangOpts().CUDA && CUDARuntime) {
519  if (llvm::Function *CudaCtorFunction = CUDARuntime->finalizeModule())
520  AddGlobalCtor(CudaCtorFunction);
521  }
522  if (OpenMPRuntime) {
523  if (llvm::Function *OpenMPRequiresDirectiveRegFun =
524  OpenMPRuntime->emitRequiresDirectiveRegFun()) {
525  AddGlobalCtor(OpenMPRequiresDirectiveRegFun, 0);
526  }
527  OpenMPRuntime->createOffloadEntriesAndInfoMetadata();
528  OpenMPRuntime->clear();
529  }
530  if (PGOReader) {
531  getModule().setProfileSummary(
532  PGOReader->getSummary(/* UseCS */ false).getMD(VMContext),
533  llvm::ProfileSummary::PSK_Instr);
534  if (PGOStats.hasDiagnostics())
535  PGOStats.reportDiagnostics(getDiags(), getCodeGenOpts().MainFileName);
536  }
537  EmitCtorList(GlobalCtors, "llvm.global_ctors");
538  EmitCtorList(GlobalDtors, "llvm.global_dtors");
539  EmitGlobalAnnotations();
540  EmitStaticExternCAliases();
541  EmitDeferredUnusedCoverageMappings();
542  CodeGenPGO(*this).setValueProfilingFlag(getModule());
543  if (CoverageMapping)
544  CoverageMapping->emit();
545  if (CodeGenOpts.SanitizeCfiCrossDso) {
546  CodeGenFunction(*this).EmitCfiCheckFail();
547  CodeGenFunction(*this).EmitCfiCheckStub();
548  }
549  emitAtAvailableLinkGuard();
550  if (Context.getTargetInfo().getTriple().isWasm() &&
551  !Context.getTargetInfo().getTriple().isOSEmscripten()) {
552  EmitMainVoidAlias();
553  }
554 
555  // Emit reference of __amdgpu_device_library_preserve_asan_functions to
556  // preserve ASAN functions in bitcode libraries.
557  if (LangOpts.Sanitize.has(SanitizerKind::Address) && getTriple().isAMDGPU()) {
558  auto *FT = llvm::FunctionType::get(VoidTy, {});
559  auto *F = llvm::Function::Create(
560  FT, llvm::GlobalValue::ExternalLinkage,
561  "__amdgpu_device_library_preserve_asan_functions", &getModule());
562  auto *Var = new llvm::GlobalVariable(
563  getModule(), FT->getPointerTo(),
564  /*isConstant=*/true, llvm::GlobalValue::WeakAnyLinkage, F,
565  "__amdgpu_device_library_preserve_asan_functions_ptr", nullptr,
566  llvm::GlobalVariable::NotThreadLocal);
567  addCompilerUsedGlobal(Var);
568  getModule().addModuleFlag(llvm::Module::Override, "amdgpu_hostcall", 1);
569  }
570 
571  emitLLVMUsed();
572  if (SanStats)
573  SanStats->finish();
574 
575  if (CodeGenOpts.Autolink &&
576  (Context.getLangOpts().Modules || !LinkerOptionsMetadata.empty())) {
577  EmitModuleLinkOptions();
578  }
579 
580  // On ELF we pass the dependent library specifiers directly to the linker
581  // without manipulating them. This is in contrast to other platforms where
582  // they are mapped to a specific linker option by the compiler. This
583  // difference is a result of the greater variety of ELF linkers and the fact
584  // that ELF linkers tend to handle libraries in a more complicated fashion
585  // than on other platforms. This forces us to defer handling the dependent
586  // libs to the linker.
587  //
588  // CUDA/HIP device and host libraries are different. Currently there is no
589  // way to differentiate dependent libraries for host or device. Existing
590  // usage of #pragma comment(lib, *) is intended for host libraries on
591  // Windows. Therefore emit llvm.dependent-libraries only for host.
592  if (!ELFDependentLibraries.empty() && !Context.getLangOpts().CUDAIsDevice) {
593  auto *NMD = getModule().getOrInsertNamedMetadata("llvm.dependent-libraries");
594  for (auto *MD : ELFDependentLibraries)
595  NMD->addOperand(MD);
596  }
597 
598  // Record mregparm value now so it is visible through rest of codegen.
599  if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
600  getModule().addModuleFlag(llvm::Module::Error, "NumRegisterParameters",
601  CodeGenOpts.NumRegisterParameters);
602 
603  if (CodeGenOpts.DwarfVersion) {
604  getModule().addModuleFlag(llvm::Module::Max, "Dwarf Version",
605  CodeGenOpts.DwarfVersion);
606  }
607 
608  if (CodeGenOpts.Dwarf64)
609  getModule().addModuleFlag(llvm::Module::Max, "DWARF64", 1);
610 
611  if (Context.getLangOpts().SemanticInterposition)
612  // Require various optimization to respect semantic interposition.
613  getModule().setSemanticInterposition(true);
614 
615  if (CodeGenOpts.EmitCodeView) {
616  // Indicate that we want CodeView in the metadata.
617  getModule().addModuleFlag(llvm::Module::Warning, "CodeView", 1);
618  }
619  if (CodeGenOpts.CodeViewGHash) {
620  getModule().addModuleFlag(llvm::Module::Warning, "CodeViewGHash", 1);
621  }
622  if (CodeGenOpts.ControlFlowGuard) {
623  // Function ID tables and checks for Control Flow Guard (cfguard=2).
624  getModule().addModuleFlag(llvm::Module::Warning, "cfguard", 2);
625  } else if (CodeGenOpts.ControlFlowGuardNoChecks) {
626  // Function ID tables for Control Flow Guard (cfguard=1).
627  getModule().addModuleFlag(llvm::Module::Warning, "cfguard", 1);
628  }
629  if (CodeGenOpts.EHContGuard) {
630  // Function ID tables for EH Continuation Guard.
631  getModule().addModuleFlag(llvm::Module::Warning, "ehcontguard", 1);
632  }
633  if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) {
634  // We don't support LTO of two modules built with different
635  // StrictVTablePointers settings. FIXME: we could support it by stripping
636  // all the information introduced by StrictVTablePointers.
637 
638  getModule().addModuleFlag(llvm::Module::Error, "StrictVTablePointers", 1);
639 
640  llvm::Metadata *Ops[2] = {
641  llvm::MDString::get(VMContext, "StrictVTablePointers"),
642  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
643  llvm::Type::getInt32Ty(VMContext), 1))};
644 
645  getModule().addModuleFlag(llvm::Module::Require,
646  "StrictVTablePointersRequirement",
647  llvm::MDNode::get(VMContext, Ops));
648  }
649  if (getModuleDebugInfo())
650  // We support a single version in the linked module. The LLVM
651  // parser will drop debug info with a different version number
652  // (and warn about it, too).
653  getModule().addModuleFlag(llvm::Module::Warning, "Debug Info Version",
654  llvm::DEBUG_METADATA_VERSION);
655 
656  // We need to record the widths of enums and wchar_t, so that we can generate
657  // the correct build attributes in the ARM backend. wchar_size is also used by
658  // TargetLibraryInfo.
659  uint64_t WCharWidth =
660  Context.getTypeSizeInChars(Context.getWideCharType()).getQuantity();
661  getModule().addModuleFlag(llvm::Module::Error, "wchar_size", WCharWidth);
662 
663  llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
664  if ( Arch == llvm::Triple::arm
665  || Arch == llvm::Triple::armeb
666  || Arch == llvm::Triple::thumb
667  || Arch == llvm::Triple::thumbeb) {
668  // The minimum width of an enum in bytes
669  uint64_t EnumWidth = Context.getLangOpts().ShortEnums ? 1 : 4;
670  getModule().addModuleFlag(llvm::Module::Error, "min_enum_size", EnumWidth);
671  }
672 
673  if (Arch == llvm::Triple::riscv32 || Arch == llvm::Triple::riscv64) {
674  StringRef ABIStr = Target.getABI();
675  llvm::LLVMContext &Ctx = TheModule.getContext();
676  getModule().addModuleFlag(llvm::Module::Error, "target-abi",
677  llvm::MDString::get(Ctx, ABIStr));
678  }
679 
680  if (CodeGenOpts.SanitizeCfiCrossDso) {
681  // Indicate that we want cross-DSO control flow integrity checks.
682  getModule().addModuleFlag(llvm::Module::Override, "Cross-DSO CFI", 1);
683  }
684 
685  if (CodeGenOpts.WholeProgramVTables) {
686  // Indicate whether VFE was enabled for this module, so that the
687  // vcall_visibility metadata added under whole program vtables is handled
688  // appropriately in the optimizer.
689  getModule().addModuleFlag(llvm::Module::Error, "Virtual Function Elim",
690  CodeGenOpts.VirtualFunctionElimination);
691  }
692 
693  if (LangOpts.Sanitize.has(SanitizerKind::CFIICall)) {
694  getModule().addModuleFlag(llvm::Module::Override,
695  "CFI Canonical Jump Tables",
696  CodeGenOpts.SanitizeCfiCanonicalJumpTables);
697  }
698 
699  if (CodeGenOpts.CFProtectionReturn &&
700  Target.checkCFProtectionReturnSupported(getDiags())) {
701  // Indicate that we want to instrument return control flow protection.
702  getModule().addModuleFlag(llvm::Module::Override, "cf-protection-return",
703  1);
704  }
705 
706  if (CodeGenOpts.CFProtectionBranch &&
707  Target.checkCFProtectionBranchSupported(getDiags())) {
708  // Indicate that we want to instrument branch control flow protection.
709  getModule().addModuleFlag(llvm::Module::Override, "cf-protection-branch",
710  1);
711  }
712 
713  // Add module metadata for return address signing (ignoring
714  // non-leaf/all) and stack tagging. These are actually turned on by function
715  // attributes, but we use module metadata to emit build attributes. This is
716  // needed for LTO, where the function attributes are inside bitcode
717  // serialised into a global variable by the time build attributes are
718  // emitted, so we can't access them.
719  if (Context.getTargetInfo().hasFeature("ptrauth") &&
720  LangOpts.getSignReturnAddressScope() !=
721  LangOptions::SignReturnAddressScopeKind::None)
722  getModule().addModuleFlag(llvm::Module::Override,
723  "sign-return-address-buildattr", 1);
724  if (LangOpts.Sanitize.has(SanitizerKind::MemTag))
725  getModule().addModuleFlag(llvm::Module::Override,
726  "tag-stack-memory-buildattr", 1);
727 
728  if (Arch == llvm::Triple::thumb || Arch == llvm::Triple::thumbeb ||
729  Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 ||
730  Arch == llvm::Triple::aarch64_be) {
731  getModule().addModuleFlag(llvm::Module::Error, "branch-target-enforcement",
732  LangOpts.BranchTargetEnforcement);
733 
734  getModule().addModuleFlag(llvm::Module::Error, "sign-return-address",
735  LangOpts.hasSignReturnAddress());
736 
737  getModule().addModuleFlag(llvm::Module::Error, "sign-return-address-all",
738  LangOpts.isSignReturnAddressScopeAll());
739 
740  if (Arch != llvm::Triple::thumb && Arch != llvm::Triple::thumbeb) {
741  getModule().addModuleFlag(llvm::Module::Error,
742  "sign-return-address-with-bkey",
743  !LangOpts.isSignReturnAddressWithAKey());
744  }
745  }
746 
747  if (!CodeGenOpts.MemoryProfileOutput.empty()) {
748  llvm::LLVMContext &Ctx = TheModule.getContext();
749  getModule().addModuleFlag(
750  llvm::Module::Error, "MemProfProfileFilename",
751  llvm::MDString::get(Ctx, CodeGenOpts.MemoryProfileOutput));
752  }
753 
754  if (LangOpts.CUDAIsDevice && getTriple().isNVPTX()) {
755  // Indicate whether __nvvm_reflect should be configured to flush denormal
756  // floating point values to 0. (This corresponds to its "__CUDA_FTZ"
757  // property.)
758  getModule().addModuleFlag(llvm::Module::Override, "nvvm-reflect-ftz",
759  CodeGenOpts.FP32DenormalMode.Output !=
760  llvm::DenormalMode::IEEE);
761  }
762 
763  if (LangOpts.EHAsynch)
764  getModule().addModuleFlag(llvm::Module::Warning, "eh-asynch", 1);
765 
766  // Indicate whether this Module was compiled with -fopenmp
767  if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
768  getModule().addModuleFlag(llvm::Module::Max, "openmp", LangOpts.OpenMP);
769  if (getLangOpts().OpenMPIsDevice)
770  getModule().addModuleFlag(llvm::Module::Max, "openmp-device",
771  LangOpts.OpenMP);
772 
773  // Emit OpenCL specific module metadata: OpenCL/SPIR version.
774  if (LangOpts.OpenCL) {
775  EmitOpenCLMetadata();
776  // Emit SPIR version.
777  if (getTriple().isSPIR()) {
778  // SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the
779  // opencl.spir.version named metadata.
780  // C++ for OpenCL has a distinct mapping for version compatibility with
781  // OpenCL.
782  auto Version = LangOpts.getOpenCLCompatibleVersion();
783  llvm::Metadata *SPIRVerElts[] = {
784  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
785  Int32Ty, Version / 100)),
786  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
787  Int32Ty, (Version / 100 > 1) ? 0 : 2))};
788  llvm::NamedMDNode *SPIRVerMD =
789  TheModule.getOrInsertNamedMetadata("opencl.spir.version");
790  llvm::LLVMContext &Ctx = TheModule.getContext();
791  SPIRVerMD->addOperand(llvm::MDNode::get(Ctx, SPIRVerElts));
792  }
793  }
794 
795  if (uint32_t PLevel = Context.getLangOpts().PICLevel) {
796  assert(PLevel < 3 && "Invalid PIC Level");
797  getModule().setPICLevel(static_cast<llvm::PICLevel::Level>(PLevel));
798  if (Context.getLangOpts().PIE)
799  getModule().setPIELevel(static_cast<llvm::PIELevel::Level>(PLevel));
800  }
801 
802  if (getCodeGenOpts().CodeModel.size() > 0) {
803  unsigned CM = llvm::StringSwitch<unsigned>(getCodeGenOpts().CodeModel)
804  .Case("tiny", llvm::CodeModel::Tiny)
805  .Case("small", llvm::CodeModel::Small)
806  .Case("kernel", llvm::CodeModel::Kernel)
807  .Case("medium", llvm::CodeModel::Medium)
808  .Case("large", llvm::CodeModel::Large)
809  .Default(~0u);
810  if (CM != ~0u) {
811  llvm::CodeModel::Model codeModel = static_cast<llvm::CodeModel::Model>(CM);
812  getModule().setCodeModel(codeModel);
813  }
814  }
815 
816  if (CodeGenOpts.NoPLT)
817  getModule().setRtLibUseGOT();
818  if (CodeGenOpts.UnwindTables)
819  getModule().setUwtable();
820 
821  switch (CodeGenOpts.getFramePointer()) {
822  case CodeGenOptions::FramePointerKind::None:
823  // 0 ("none") is the default.
824  break;
825  case CodeGenOptions::FramePointerKind::NonLeaf:
826  getModule().setFramePointer(llvm::FramePointerKind::NonLeaf);
827  break;
828  case CodeGenOptions::FramePointerKind::All:
829  getModule().setFramePointer(llvm::FramePointerKind::All);
830  break;
831  }
832 
833  SimplifyPersonality();
834 
835  if (getCodeGenOpts().EmitDeclMetadata)
836  EmitDeclMetadata();
837 
838  if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes)
839  EmitCoverageFile();
840 
841  if (CGDebugInfo *DI = getModuleDebugInfo())
842  DI->finalize();
843 
844  if (getCodeGenOpts().EmitVersionIdentMetadata)
845  EmitVersionIdentMetadata();
846 
847  if (!getCodeGenOpts().RecordCommandLine.empty())
848  EmitCommandLineMetadata();
849 
850  if (!getCodeGenOpts().StackProtectorGuard.empty())
851  getModule().setStackProtectorGuard(getCodeGenOpts().StackProtectorGuard);
852  if (!getCodeGenOpts().StackProtectorGuardReg.empty())
853  getModule().setStackProtectorGuardReg(
854  getCodeGenOpts().StackProtectorGuardReg);
855  if (getCodeGenOpts().StackProtectorGuardOffset != INT_MAX)
856  getModule().setStackProtectorGuardOffset(
857  getCodeGenOpts().StackProtectorGuardOffset);
858  if (getCodeGenOpts().StackAlignment)
859  getModule().setOverrideStackAlignment(getCodeGenOpts().StackAlignment);
860  if (getCodeGenOpts().SkipRaxSetup)
861  getModule().addModuleFlag(llvm::Module::Override, "SkipRaxSetup", 1);
862 
863  getTargetCodeGenInfo().emitTargetMetadata(*this, MangledDeclNames);
864 
865  EmitBackendOptionsMetadata(getCodeGenOpts());
866 
867  // Set visibility from DLL storage class
868  // We do this at the end of LLVM IR generation; after any operation
869  // that might affect the DLL storage class or the visibility, and
870  // before anything that might act on these.
871  setVisibilityFromDLLStorageClass(LangOpts, getModule());
872 }
873 
874 void CodeGenModule::EmitOpenCLMetadata() {
875  // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
876  // opencl.ocl.version named metadata node.
877  // C++ for OpenCL has a distinct mapping for versions compatible with OpenCL.
878  auto Version = LangOpts.getOpenCLCompatibleVersion();
879  llvm::Metadata *OCLVerElts[] = {
880  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
881  Int32Ty, Version / 100)),
882  llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
883  Int32Ty, (Version % 100) / 10))};
884  llvm::NamedMDNode *OCLVerMD =
885  TheModule.getOrInsertNamedMetadata("opencl.ocl.version");
886  llvm::LLVMContext &Ctx = TheModule.getContext();
887  OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
888 }
889 
890 void CodeGenModule::EmitBackendOptionsMetadata(
891  const CodeGenOptions CodeGenOpts) {
892  switch (getTriple().getArch()) {
893  default:
894  break;
895  case llvm::Triple::riscv32:
896  case llvm::Triple::riscv64:
897  getModule().addModuleFlag(llvm::Module::Error, "SmallDataLimit",
898  CodeGenOpts.SmallDataLimit);
899  break;
900  }
901 }
902 
903 void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
904  // Make sure that this type is translated.
905  Types.UpdateCompletedType(TD);
906 }
907 
908 void CodeGenModule::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
909  // Make sure that this type is translated.
910  Types.RefreshTypeCacheForClass(RD);
911 }
912 
913 llvm::MDNode *CodeGenModule::getTBAATypeInfo(QualType QTy) {
914  if (!TBAA)
915  return nullptr;
916  return TBAA->getTypeInfo(QTy);
917 }
918 
919 TBAAAccessInfo CodeGenModule::getTBAAAccessInfo(QualType AccessType) {
920  if (!TBAA)
921  return TBAAAccessInfo();
922  if (getLangOpts().CUDAIsDevice) {
923  // As CUDA builtin surface/texture types are replaced, skip generating TBAA
924  // access info.
925  if (AccessType->isCUDADeviceBuiltinSurfaceType()) {
926  if (getTargetCodeGenInfo().getCUDADeviceBuiltinSurfaceDeviceType() !=
927  nullptr)
928  return TBAAAccessInfo();
929  } else if (AccessType->isCUDADeviceBuiltinTextureType()) {
930  if (getTargetCodeGenInfo().getCUDADeviceBuiltinTextureDeviceType() !=
931  nullptr)
932  return TBAAAccessInfo();
933  }
934  }
935  return TBAA->getAccessInfo(AccessType);
936 }
937 
938 TBAAAccessInfo
939 CodeGenModule::getTBAAVTablePtrAccessInfo(llvm::Type *VTablePtrType) {
940  if (!TBAA)
941  return TBAAAccessInfo();
942  return TBAA->getVTablePtrAccessInfo(VTablePtrType);
943 }
944 
945 llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) {
946  if (!TBAA)
947  return nullptr;
948  return TBAA->getTBAAStructInfo(QTy);
949 }
950 
951 llvm::MDNode *CodeGenModule::getTBAABaseTypeInfo(QualType QTy) {
952  if (!TBAA)
953  return nullptr;
954  return TBAA->getBaseTypeInfo(QTy);
955 }
956 
957 llvm::MDNode *CodeGenModule::getTBAAAccessTagInfo(TBAAAccessInfo Info) {
958  if (!TBAA)
959  return nullptr;
960  return TBAA->getAccessTagInfo(Info);
961 }
962 
963 TBAAAccessInfo CodeGenModule::mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo,
964  TBAAAccessInfo TargetInfo) {
965  if (!TBAA)
966  return TBAAAccessInfo();
967  return TBAA->mergeTBAAInfoForCast(SourceInfo, TargetInfo);
968 }
969 
970 TBAAAccessInfo
971 CodeGenModule::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA,
972  TBAAAccessInfo InfoB) {
973  if (!TBAA)
974  return TBAAAccessInfo();
975  return TBAA->mergeTBAAInfoForConditionalOperator(InfoA, InfoB);
976 }
977 
978 TBAAAccessInfo
979 CodeGenModule::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo,
980  TBAAAccessInfo SrcInfo) {
981  if (!TBAA)
982  return TBAAAccessInfo();
983  return TBAA->mergeTBAAInfoForConditionalOperator(DestInfo, SrcInfo);
984 }
985 
986 void CodeGenModule::DecorateInstructionWithTBAA(llvm::Instruction *Inst,
987  TBAAAccessInfo TBAAInfo) {
988  if (llvm::MDNode *Tag = getTBAAAccessTagInfo(TBAAInfo))
989  Inst->setMetadata(llvm::LLVMContext::MD_tbaa, Tag);
990 }
991 
992 void CodeGenModule::DecorateInstructionWithInvariantGroup(
993  llvm::Instruction *I, const CXXRecordDecl *RD) {
994  I->setMetadata(llvm::LLVMContext::MD_invariant_group,
995  llvm::MDNode::get(getLLVMContext(), {}));
996 }
997 
998 void CodeGenModule::Error(SourceLocation loc, StringRef message) {
999  unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "%0");
1000  getDiags().Report(Context.getFullLoc(loc), diagID) << message;
1001 }
1002 
1003 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1004 /// specified stmt yet.
1005 void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type) {
1007  "cannot compile this %0 yet");
1008  std::string Msg = Type;
1009  getDiags().Report(Context.getFullLoc(S->getBeginLoc()), DiagID)
1010  << Msg << S->getSourceRange();
1011 }
1012 
1013 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1014 /// specified decl yet.
1015 void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type) {
1017  "cannot compile this %0 yet");
1018  std::string Msg = Type;
1019  getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
1020 }
1021 
1022 llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
1023  return llvm::ConstantInt::get(SizeTy, size.getQuantity());
1024 }
1025 
1026 void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
1027  const NamedDecl *D) const {
1028  if (GV->hasDLLImportStorageClass())
1029  return;
1030  // Internal definitions always have default visibility.
1031  if (GV->hasLocalLinkage()) {
1032  GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
1033  return;
1034  }
1035  if (!D)
1036  return;
1037  // Set visibility for definitions, and for declarations if requested globally
1038  // or set explicitly.
1039  LinkageInfo LV = D->getLinkageAndVisibility();
1040  if (LV.isVisibilityExplicit() || getLangOpts().SetVisibilityForExternDecls ||
1041  !GV->isDeclarationForLinker())
1042  GV->setVisibility(GetLLVMVisibility(LV.getVisibility()));
1043 }
1044 
1045 static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
1046  llvm::GlobalValue *GV) {
1047  if (GV->hasLocalLinkage())
1048  return true;
1049 
1050  if (!GV->hasDefaultVisibility() && !GV->hasExternalWeakLinkage())
1051  return true;
1052 
1053  // DLLImport explicitly marks the GV as external.
1054  if (GV->hasDLLImportStorageClass())
1055  return false;
1056 
1057  const llvm::Triple &TT = CGM.getTriple();
1058  if (TT.isWindowsGNUEnvironment()) {
1059  // In MinGW, variables without DLLImport can still be automatically
1060  // imported from a DLL by the linker; don't mark variables that
1061  // potentially could come from another DLL as DSO local.
1062 
1063  // With EmulatedTLS, TLS variables can be autoimported from other DLLs
1064  // (and this actually happens in the public interface of libstdc++), so
1065  // such variables can't be marked as DSO local. (Native TLS variables
1066  // can't be dllimported at all, though.)
1067  if (GV->isDeclarationForLinker() && isa<llvm::GlobalVariable>(GV) &&
1068  (!GV->isThreadLocal() || CGM.getCodeGenOpts().EmulatedTLS))
1069  return false;
1070  }
1071 
1072  // On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols
1073  // remain unresolved in the link, they can be resolved to zero, which is
1074  // outside the current DSO.
1075  if (TT.isOSBinFormatCOFF() && GV->hasExternalWeakLinkage())
1076  return false;
1077 
1078  // Every other GV is local on COFF.
1079  // Make an exception for windows OS in the triple: Some firmware builds use
1080  // *-win32-macho triples. This (accidentally?) produced windows relocations
1081  // without GOT tables in older clang versions; Keep this behaviour.
1082  // FIXME: even thread local variables?
1083  if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO()))
1084  return true;
1085 
1086  // Only handle COFF and ELF for now.
1087  if (!TT.isOSBinFormatELF())
1088  return false;
1089 
1090  // If this is not an executable, don't assume anything is local.
1091  const auto &CGOpts = CGM.getCodeGenOpts();
1092  llvm::Reloc::Model RM = CGOpts.RelocationModel;
1093  const auto &LOpts = CGM.getLangOpts();
1094  if (RM != llvm::Reloc::Static && !LOpts.PIE) {
1095  // On ELF, if -fno-semantic-interposition is specified and the target
1096  // supports local aliases, there will be neither CC1
1097  // -fsemantic-interposition nor -fhalf-no-semantic-interposition. Set
1098  // dso_local on the function if using a local alias is preferable (can avoid
1099  // PLT indirection).
1100  if (!(isa<llvm::Function>(GV) && GV->canBenefitFromLocalAlias()))
1101  return false;
1102  return !(CGM.getLangOpts().SemanticInterposition ||
1103  CGM.getLangOpts().HalfNoSemanticInterposition);
1104  }
1105 
1106  // A definition cannot be preempted from an executable.
1107  if (!GV->isDeclarationForLinker())
1108  return true;
1109 
1110  // Most PIC code sequences that assume that a symbol is local cannot produce a
1111  // 0 if it turns out the symbol is undefined. While this is ABI- and
1112  // relocation-dependent, it seems worth it to handle it here.
1113  if (RM == llvm::Reloc::PIC_ && GV->hasExternalWeakLinkage())
1114  return false;
1115 
1116  // PowerPC64 prefers TOC indirection to avoid copy relocations.
1117  if (TT.isPPC64())
1118  return false;
1119 
1120  if (CGOpts.DirectAccessExternalData) {
1121  // If -fdirect-access-external-data (default for -fno-pic), set dso_local
1122  // for non-thread-local variables. If the symbol is not defined in the
1123  // executable, a copy relocation will be needed at link time. dso_local is
1124  // excluded for thread-local variables because they generally don't support
1125  // copy relocations.
1126  if (auto *Var = dyn_cast<llvm::GlobalVariable>(GV))
1127  if (!Var->isThreadLocal())
1128  return true;
1129 
1130  // -fno-pic sets dso_local on a function declaration to allow direct
1131  // accesses when taking its address (similar to a data symbol). If the
1132  // function is not defined in the executable, a canonical PLT entry will be
1133  // needed at link time. -fno-direct-access-external-data can avoid the
1134  // canonical PLT entry. We don't generalize this condition to -fpie/-fpic as
1135  // it could just cause trouble without providing perceptible benefits.
1136  if (isa<llvm::Function>(GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static)
1137  return true;
1138  }
1139 
1140  // If we can use copy relocations we can assume it is local.
1141 
1142  // Otherwise don't assume it is local.
1143  return false;
1144 }
1145 
1146 void CodeGenModule::setDSOLocal(llvm::GlobalValue *GV) const {
1147  GV->setDSOLocal(shouldAssumeDSOLocal(*this, GV));
1148 }
1149 
1150 void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
1151  GlobalDecl GD) const {
1152  const auto *D = dyn_cast<NamedDecl>(GD.getDecl());
1153  // C++ destructors have a few C++ ABI specific special cases.
1154  if (const auto *Dtor = dyn_cast_or_null<CXXDestructorDecl>(D)) {
1155  getCXXABI().setCXXDestructorDLLStorage(GV, Dtor, GD.getDtorType());
1156  return;
1157  }
1158  setDLLImportDLLExport(GV, D);
1159 }
1160 
1161 void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
1162  const NamedDecl *D) const {
1163  if (D && D->isExternallyVisible()) {
1164  if (D->hasAttr<DLLImportAttr>())
1165  GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
1166  else if (D->hasAttr<DLLExportAttr>() && !GV->isDeclarationForLinker())
1167  GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
1168  }
1169 }
1170 
1171 void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
1172  GlobalDecl GD) const {
1173  setDLLImportDLLExport(GV, GD);
1174  setGVPropertiesAux(GV, dyn_cast<NamedDecl>(GD.getDecl()));
1175 }
1176 
1177 void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
1178  const NamedDecl *D) const {
1179  setDLLImportDLLExport(GV, D);
1180  setGVPropertiesAux(GV, D);
1181 }
1182 
1183 void CodeGenModule::setGVPropertiesAux(llvm::GlobalValue *GV,
1184  const NamedDecl *D) const {
1185  setGlobalVisibility(GV, D);
1186  setDSOLocal(GV);
1187  GV->setPartition(CodeGenOpts.SymbolPartition);
1188 }
1189 
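// Map the tls_model attribute spellings onto LLVM's ThreadLocalMode values.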
1190 static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
1191  return llvm::StringSwitch<llvm::GlobalVariable::ThreadLocalMode>(S)
1192  .Case("global-dynamic", llvm::GlobalVariable::GeneralDynamicTLSModel)
1193  .Case("local-dynamic", llvm::GlobalVariable::LocalDynamicTLSModel)
1194  .Case("initial-exec", llvm::GlobalVariable::InitialExecTLSModel)
1195  .Case("local-exec", llvm::GlobalVariable::LocalExecTLSModel);
1196 }
1197 
1198 llvm::GlobalVariable::ThreadLocalMode
1199 CodeGenModule::GetDefaultLLVMTLSModel() const {
1200  switch (CodeGenOpts.getDefaultTLSModel()) {
1201  case CodeGenOptions::GeneralDynamicTLSModel:
1202  return llvm::GlobalVariable::GeneralDynamicTLSModel;
1203  case CodeGenOptions::LocalDynamicTLSModel:
1204  return llvm::GlobalVariable::LocalDynamicTLSModel;
1205  case CodeGenOptions::InitialExecTLSModel:
1206  return llvm::GlobalVariable::InitialExecTLSModel;
1207  case CodeGenOptions::LocalExecTLSModel:
1208  return llvm::GlobalVariable::LocalExecTLSModel;
1209  }
1210  llvm_unreachable("Invalid TLS model!");
1211 }
1212 
1213 void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const {
1214  assert(D.getTLSKind() && "setting TLS mode on non-TLS var!");
1215 
1216  llvm::GlobalValue::ThreadLocalMode TLM;
1217  TLM = GetDefaultLLVMTLSModel();
1218 
1219  // Override the TLS model if it is explicitly specified.
1220  if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) {
1221  TLM = GetLLVMTLSModel(Attr->getModel());
1222  }
1223 
1224  GV->setThreadLocalMode(TLM);
1225 }
1226 
1227 static std::string getCPUSpecificMangling(const CodeGenModule &CGM,
1228  StringRef Name) {
1229  const TargetInfo &Target = CGM.getTarget();
1230  return (Twine('.') + Twine(Target.CPUSpecificManglingCharacter(Name))).str();
1231 }
1232 
1233 static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM,
1234  const CPUSpecificAttr *Attr,
1235  unsigned CPUIndex,
1236  raw_ostream &Out) {
1237  // cpu_specific gets the current name, dispatch gets the resolver if IFunc is
1238  // supported.
1239  if (Attr)
1240  Out << getCPUSpecificMangling(CGM, Attr->getCPUName(CPUIndex)->getName());
1241  else if (CGM.getTarget().supportsIFunc())
1242  Out << ".resolver";
1243 }
1244 
1245 static void AppendTargetMangling(const CodeGenModule &CGM,
1246  const TargetAttr *Attr, raw_ostream &Out) {
1247  if (Attr->isDefaultVersion())
1248  return;
1249 
1250  Out << '.';
1251  const TargetInfo &Target = CGM.getTarget();
1252  ParsedTargetAttr Info =
1253  Attr->parse([&Target](StringRef LHS, StringRef RHS) {
1254  // Multiversioning doesn't allow "no-${feature}", so we can
1255  // only have "+" prefixes here.
1256  assert(LHS.startswith("+") && RHS.startswith("+") &&
1257  "Features should always have a prefix.");
1258  return Target.multiVersionSortPriority(LHS.substr(1)) >
1259  Target.multiVersionSortPriority(RHS.substr(1));
1260  });
1261 
1262  bool IsFirst = true;
1263 
1264  if (!Info.Architecture.empty()) {
1265  IsFirst = false;
1266  Out << "arch_" << Info.Architecture;
1267  }
1268 
1269  for (StringRef Feat : Info.Features) {
1270  if (!IsFirst)
1271  Out << '_';
1272  IsFirst = false;
1273  Out << Feat.substr(1);
1274  }
1275 }
1276 
1277 // Returns true if GD is a function decl with internal linkage and
1278 // needs a unique suffix after the mangled name.
1279 static bool isUniqueInternalLinkageDecl(GlobalDecl GD,
1280  CodeGenModule &CGM) {
1281  const Decl *D = GD.getDecl();
1282  return !CGM.getModuleNameHash().empty() && isa<FunctionDecl>(D) &&
1283  (CGM.getFunctionLinkage(GD) == llvm::GlobalValue::InternalLinkage);
1284 }
1285 
1286 static void AppendTargetClonesMangling(const CodeGenModule &CGM,
1287  const TargetClonesAttr *Attr,
1288  unsigned VersionIndex,
1289  raw_ostream &Out) {
1290  Out << '.';
1291  StringRef FeatureStr = Attr->getFeatureStr(VersionIndex);
1292  if (FeatureStr.startswith("arch="))
1293  Out << "arch_" << FeatureStr.substr(sizeof("arch=") - 1);
1294  else
1295  Out << FeatureStr;
1296 
1297  Out << '.' << Attr->getMangledIndex(VersionIndex);
1298 }
1299 
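// Compute the mangled name for a declaration, adding the module hash for
// unique internal-linkage names and any multiversioning or CUDA/HIP suffixes.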
1300 static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
1301  const NamedDecl *ND,
1302  bool OmitMultiVersionMangling = false) {
1303  SmallString<256> Buffer;
1304  llvm::raw_svector_ostream Out(Buffer);
1305  MangleContext &MC = CGM.getCXXABI().getMangleContext();
1306  if (!CGM.getModuleNameHash().empty())
1307  MC.needsUniqueInternalLinkageNames();
1308  bool ShouldMangle = MC.shouldMangleDeclName(ND);
1309  if (ShouldMangle)
1310  MC.mangleName(GD.getWithDecl(ND), Out);
1311  else {
1312  IdentifierInfo *II = ND->getIdentifier();
1313  assert(II && "Attempt to mangle unnamed decl.");
1314  const auto *FD = dyn_cast<FunctionDecl>(ND);
1315 
1316  if (FD &&
1317  FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) {
1318  Out << "__regcall3__" << II->getName();
1319  } else if (FD && FD->hasAttr<CUDAGlobalAttr>() &&
1320  GD.getKernelReferenceKind() == KernelReferenceKind::Stub) {
1321  Out << "__device_stub__" << II->getName();
1322  } else {
1323  Out << II->getName();
1324  }
1325  }
1326 
1327  // Check if the module name hash should be appended for internal linkage
1328  // symbols. This should come before multi-version target suffixes are
1329  // appended. This is to keep the name and module hash suffix of the
1330  // internal linkage function together. The unique suffix should only be
1331  // added when name mangling is done to make sure that the final name can
1332  // be properly demangled. For example, for C functions without prototypes,
1333  // name mangling is not done and the unique suffix should not be appended
1334  // then.
1335  if (ShouldMangle && isUniqueInternalLinkageDecl(GD, CGM)) {
1336  assert(CGM.getCodeGenOpts().UniqueInternalLinkageNames &&
1337  "Hash computed when not explicitly requested");
1338  Out << CGM.getModuleNameHash();
1339  }
1340 
1341  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
1342  if (FD->isMultiVersion() && !OmitMultiVersionMangling) {
1343  switch (FD->getMultiVersionKind()) {
1344  case MultiVersionKind::CPUDispatch:
1345  case MultiVersionKind::CPUSpecific:
1346  AppendCPUSpecificCPUDispatchMangling(CGM,
1347  FD->getAttr<CPUSpecificAttr>(),
1348  GD.getMultiVersionIndex(), Out);
1349  break;
1350  case MultiVersionKind::Target:
1351  AppendTargetMangling(CGM, FD->getAttr<TargetAttr>(), Out);
1352  break;
1353  case MultiVersionKind::TargetClones:
1354  AppendTargetClonesMangling(CGM, FD->getAttr<TargetClonesAttr>(),
1355  GD.getMultiVersionIndex(), Out);
1356  break;
1357  case MultiVersionKind::None:
1358  llvm_unreachable("None multiversion type isn't valid here");
1359  }
1360  }
1361 
1362  // Make unique name for device side static file-scope variable for HIP.
1363  if (CGM.getContext().shouldExternalizeStaticVar(ND) &&
1364  CGM.getLangOpts().GPURelocatableDeviceCode &&
1365  CGM.getLangOpts().CUDAIsDevice && !CGM.getLangOpts().CUID.empty())
1366  Out << ".static." << CGM.getContext().getCUIDHash();
1367  return std::string(Out.str());
1368 }
1369 
1370 void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
1371  const FunctionDecl *FD,
1372  StringRef &CurName) {
1373  if (!FD->isMultiVersion())
1374  return;
1375 
1376  // Get the name of what this would be without the 'target' attribute. This
1377  // allows us to lookup the version that was emitted when this wasn't a
1378  // multiversion function.
1379  std::string NonTargetName =
1380  getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
1381  GlobalDecl OtherGD;
1382  if (lookupRepresentativeDecl(NonTargetName, OtherGD)) {
1383  assert(OtherGD.getCanonicalDecl()
1384  .getDecl()
1385  ->getAsFunction()
1386  ->isMultiVersion() &&
1387  "Other GD should now be a multiversioned function");
1388  // OtherFD is the version of this function that was mangled BEFORE
1389  // becoming a MultiVersion function. It potentially needs to be updated.
1390  const FunctionDecl *OtherFD = OtherGD.getCanonicalDecl()
1391  .getDecl()
1392  ->getAsFunction()
1393  ->getMostRecentDecl();
1394  std::string OtherName = getMangledNameImpl(*this, OtherGD, OtherFD);
1395  // This is so that if the initial version was already the 'default'
1396  // version, we don't try to update it.
1397  if (OtherName != NonTargetName) {
1398  // Remove instead of erase, since others may have stored the StringRef
1399  // to this.
1400  const auto ExistingRecord = Manglings.find(NonTargetName);
1401  if (ExistingRecord != std::end(Manglings))
1402  Manglings.remove(&(*ExistingRecord));
1403  auto Result = Manglings.insert(std::make_pair(OtherName, OtherGD));
1404  StringRef OtherNameRef = MangledDeclNames[OtherGD.getCanonicalDecl()] =
1405  Result.first->first();
1406  // If the current decl is being created, make sure we update the name.
1407  if (GD.getCanonicalDecl() == OtherGD.getCanonicalDecl())
1408  CurName = OtherNameRef;
1409  if (llvm::GlobalValue *Entry = GetGlobalValue(NonTargetName))
1410  Entry->setName(OtherName);
1411  }
1412  }
1413 }
1414 
1415 StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
1416  GlobalDecl CanonicalGD = GD.getCanonicalDecl();
1417 
1418  // Some ABIs don't have constructor variants. Make sure that base and
1419  // complete constructors get mangled the same.
1420  if (const auto *CD = dyn_cast<CXXConstructorDecl>(CanonicalGD.getDecl())) {
1421  if (!getTarget().getCXXABI().hasConstructorVariants()) {
1422  CXXCtorType OrigCtorType = GD.getCtorType();
1423  assert(OrigCtorType == Ctor_Base || OrigCtorType == Ctor_Complete);
1424  if (OrigCtorType == Ctor_Base)
1425  CanonicalGD = GlobalDecl(CD, Ctor_Complete);
1426  }
1427  }
1428 
1429  // In CUDA/HIP device compilation with -fgpu-rdc, the mangled name of a
1430  // static device variable depends on whether the variable is referenced by
1431  // a host or a device function. Therefore the mangled name cannot be
1432  // cached.
1433  if (!LangOpts.CUDAIsDevice ||
1434  !getContext().mayExternalizeStaticVar(GD.getDecl())) {
1435  auto FoundName = MangledDeclNames.find(CanonicalGD);
1436  if (FoundName != MangledDeclNames.end())
1437  return FoundName->second;
1438  }
1439 
1440  // Keep the first result in the case of a mangling collision.
1441  const auto *ND = cast<NamedDecl>(GD.getDecl());
1442  std::string MangledName = getMangledNameImpl(*this, GD, ND);
1443 
1444  // Ensure that either the host and device compilations use different ABIs
1445  // (say, the host compilation follows the MSVC ABI while the device
1446  // compilation follows the Itanium C++ ABI) or, if they follow the same ABI,
1447  // kernel names after mangling are identical after name stubbing. The latter
1448  // check matters because the device kernel name mangled during host
1449  // compilation is used to resolve the device binaries to be executed.
1450  // Inconsistent naming results in undefined behavior. Even though we cannot
1451  // compare the names between host and device compilations directly, comparing
1452  // the host- and device-mangling within the host compilation catches some cases.
1453  assert(!isa<FunctionDecl>(ND) || !ND->hasAttr<CUDAGlobalAttr>() ||
1454  getLangOpts().CUDAIsDevice ||
1455  (getContext().getAuxTargetInfo() &&
1456  (getContext().getAuxTargetInfo()->getCXXABI() !=
1457  getContext().getTargetInfo().getCXXABI())) ||
1458  getCUDARuntime().getDeviceSideName(ND) ==
1459  getMangledNameImpl(
1460  *this,
1461  GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel),
1462  ND));
1463 
1464  auto Result = Manglings.insert(std::make_pair(MangledName, GD));
1465  return MangledDeclNames[CanonicalGD] = Result.first->first();
1466 }
1467 
1468 StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD,
1469  const BlockDecl *BD) {
1470  MangleContext &MangleCtx = getCXXABI().getMangleContext();
1471  const Decl *D = GD.getDecl();
1472 
1473  SmallString<256> Buffer;
1474  llvm::raw_svector_ostream Out(Buffer);
1475  if (!D)
1476  MangleCtx.mangleGlobalBlock(BD,
1477  dyn_cast_or_null<VarDecl>(initializedGlobalDecl.getDecl()), Out);
1478  else if (const auto *CD = dyn_cast<CXXConstructorDecl>(D))
1479  MangleCtx.mangleCtorBlock(CD, GD.getCtorType(), BD, Out);
1480  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(D))
1481  MangleCtx.mangleDtorBlock(DD, GD.getDtorType(), BD, Out);
1482  else
1483  MangleCtx.mangleBlock(cast<DeclContext>(D), BD, Out);
1484 
1485  auto Result = Manglings.insert(std::make_pair(Out.str(), BD));
1486  return Result.first->first();
1487 }
1488 
1489 llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
1490  return getModule().getNamedValue(Name);
1491 }
1492 
1493 /// AddGlobalCtor - Add a function to the list that will be called before
1494 /// main() runs.
1495 void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority,
1496  llvm::Constant *AssociatedData) {
1497  // FIXME: Type coercion of void()* types.
1498  GlobalCtors.push_back(Structor(Priority, Ctor, AssociatedData));
1499 }
1500 
1501 /// AddGlobalDtor - Add a function to the list that will be called
1502 /// when the module is unloaded.
1503 void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority,
1504  bool IsDtorAttrFunc) {
1505  if (CodeGenOpts.RegisterGlobalDtorsWithAtExit &&
1506  (!getContext().getTargetInfo().getTriple().isOSAIX() || IsDtorAttrFunc)) {
1507  DtorsUsingAtExit[Priority].push_back(Dtor);
1508  return;
1509  }
1510 
1511  // FIXME: Type coercion of void()* types.
1512  GlobalDtors.push_back(Structor(Priority, Dtor, nullptr));
1513 }
1514 
1515 void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) {
1516  if (Fns.empty()) return;
1517 
1518  // Ctor function type is void()*.
1519  llvm::FunctionType* CtorFTy = llvm::FunctionType::get(VoidTy, false);
1520  llvm::Type *CtorPFTy = llvm::PointerType::get(CtorFTy,
1521  TheModule.getDataLayout().getProgramAddressSpace());
1522 
1523  // Get the type of a ctor entry, { i32, void ()*, i8* }.
1524  llvm::StructType *CtorStructTy = llvm::StructType::get(
1525  Int32Ty, CtorPFTy, VoidPtrTy);
1526 
1527  // Construct the constructor and destructor arrays.
1528  ConstantInitBuilder builder(*this);
1529  auto ctors = builder.beginArray(CtorStructTy);
1530  for (const auto &I : Fns) {
1531  auto ctor = ctors.beginStruct(CtorStructTy);
1532  ctor.addInt(Int32Ty, I.Priority);
1533  ctor.add(llvm::ConstantExpr::getBitCast(I.Initializer, CtorPFTy));
1534  if (I.AssociatedData)
1535  ctor.add(llvm::ConstantExpr::getBitCast(I.AssociatedData, VoidPtrTy));
1536  else
1537  ctor.addNullPointer(VoidPtrTy);
1538  ctor.finishAndAddTo(ctors);
1539  }
1540 
1541  auto list =
1542  ctors.finishAndCreateGlobal(GlobalName, getPointerAlign(),
1543  /*constant*/ false,
1544  llvm::GlobalValue::AppendingLinkage);
1545 
1546  // The LTO linker doesn't seem to like it when we set an alignment
1547  // on appending variables. Take it off as a workaround.
1548  list->setAlignment(llvm::None);
1549 
1550  Fns.clear();
1551 }
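For illustration, a constructor registered through AddGlobalCtor is emitted by this routine as one entry of an appending-linkage array using the { i32, void ()*, i8* } layout built above. A rough sketch of the resulting IR (the function name is a placeholder and the exact pointer spellings depend on the target):

    @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }]
        [{ i32, void ()*, i8* } { i32 65535, void ()* @my_ctor, i8* null }]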
1552 
1553 llvm::GlobalValue::LinkageTypes
1554 CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
1555  const auto *D = cast<FunctionDecl>(GD.getDecl());
1556 
1557  GVALinkage Linkage = getContext().GetGVALinkageForFunction(D);
1558 
1559  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(D))
1560  return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor, GD.getDtorType());
1561 
1562  if (isa<CXXConstructorDecl>(D) &&
1563  cast<CXXConstructorDecl>(D)->isInheritingConstructor() &&
1564  Context.getTargetInfo().getCXXABI().isMicrosoft()) {
1565  // Our approach to inheriting constructors is fundamentally different from
1566  // that used by the MS ABI, so keep our inheriting constructor thunks
1567  // internal rather than trying to pick an unambiguous mangling for them.
1568  return llvm::GlobalValue::InternalLinkage;
1569  }
1570 
1571  return getLLVMLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false);
1572 }
1573 
1574 llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
1575  llvm::MDString *MDS = dyn_cast<llvm::MDString>(MD);
1576  if (!MDS) return nullptr;
1577 
1578  return llvm::ConstantInt::get(Int64Ty, llvm::MD5Hash(MDS->getString()));
1579 }
1580 
1581 void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD,
1582  const CGFunctionInfo &Info,
1583  llvm::Function *F, bool IsThunk) {
1584  unsigned CallingConv;
1585  llvm::AttributeList PAL;
1586  ConstructAttributeList(F->getName(), Info, GD, PAL, CallingConv,
1587  /*AttrOnCallSite=*/false, IsThunk);
1588  F->setAttributes(PAL);
1589  F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
1590 }
1591 
1592 static void removeImageAccessQualifier(std::string& TyName) {
1593  std::string ReadOnlyQual("__read_only");
1594  std::string::size_type ReadOnlyPos = TyName.find(ReadOnlyQual);
1595  if (ReadOnlyPos != std::string::npos)
1596  // "+ 1" for the space after access qualifier.
1597  TyName.erase(ReadOnlyPos, ReadOnlyQual.size() + 1);
1598  else {
1599  std::string WriteOnlyQual("__write_only");
1600  std::string::size_type WriteOnlyPos = TyName.find(WriteOnlyQual);
1601  if (WriteOnlyPos != std::string::npos)
1602  TyName.erase(WriteOnlyPos, WriteOnlyQual.size() + 1);
1603  else {
1604  std::string ReadWriteQual("__read_write");
1605  std::string::size_type ReadWritePos = TyName.find(ReadWriteQual);
1606  if (ReadWritePos != std::string::npos)
1607  TyName.erase(ReadWritePos, ReadWriteQual.size() + 1);
1608  }
1609  }
1610 }
1611 
1612 // Returns the address space id that should be emitted in the
1613 // kernel_arg_addr_space metadata. This is always fixed to the ids
1614 // specified in the SPIR 2.0 specification in order to differentiate,
1615 // for example in the clGetKernelArgInfo() implementation, between address
1616 // spaces on targets without a unique mapping to the OpenCL address spaces
1617 // (basically all single-AS CPUs).
1618 static unsigned ArgInfoAddressSpace(LangAS AS) {
1619  switch (AS) {
1620  case LangAS::opencl_global:
1621  return 1;
1622  case LangAS::opencl_constant:
1623  return 2;
1624  case LangAS::opencl_local:
1625  return 3;
1626  case LangAS::opencl_generic:
1627  return 4; // Not in SPIR 2.0 specs.
1628  case LangAS::opencl_global_device:
1629  return 5;
1630  case LangAS::opencl_global_host:
1631  return 6;
1632  default:
1633  return 0; // Assume private.
1634  }
1635 }
1636 
1637 void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
1638  const FunctionDecl *FD,
1639  CodeGenFunction *CGF) {
1640  assert(((FD && CGF) || (!FD && !CGF)) &&
1641  "Incorrect use - FD and CGF should either be both null or not!");
1642  // Create MDNodes that represent the kernel arg metadata.
1643  // Each MDNode is a list in the form of "key", followed by N values, where N
1644  // is the same as the number of kernel arguments.
1645 
1646  const PrintingPolicy &Policy = Context.getPrintingPolicy();
1647 
1648  // MDNode for the kernel argument address space qualifiers.
1649  SmallVector<llvm::Metadata *, 8> addressQuals;
1650 
1651  // MDNode for the kernel argument access qualifiers (images only).
1652  SmallVector<llvm::Metadata *, 8> accessQuals;
1653 
1654  // MDNode for the kernel argument type names.
1655  SmallVector<llvm::Metadata *, 8> argTypeNames;
1656 
1657  // MDNode for the kernel argument base type names.
1658  SmallVector<llvm::Metadata *, 8> argBaseTypeNames;
1659 
1660  // MDNode for the kernel argument type qualifiers.
1661  SmallVector<llvm::Metadata *, 8> argTypeQuals;
1662 
1663  // MDNode for the kernel argument names.
1664  SmallVector<llvm::Metadata *, 8> argNames;
1665 
1666  if (FD && CGF)
1667  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
1668  const ParmVarDecl *parm = FD->getParamDecl(i);
1669  QualType ty = parm->getType();
1670  std::string typeQuals;
1671 
1672  // Get image and pipe access qualifier:
1673  if (ty->isImageType() || ty->isPipeType()) {
1674  const Decl *PDecl = parm;
1675  if (auto *TD = dyn_cast<TypedefType>(ty))
1676  PDecl = TD->getDecl();
1677  const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
1678  if (A && A->isWriteOnly())
1679  accessQuals.push_back(llvm::MDString::get(VMContext, "write_only"));
1680  else if (A && A->isReadWrite())
1681  accessQuals.push_back(llvm::MDString::get(VMContext, "read_write"));
1682  else
1683  accessQuals.push_back(llvm::MDString::get(VMContext, "read_only"));
1684  } else
1685  accessQuals.push_back(llvm::MDString::get(VMContext, "none"));
1686 
1687  // Get argument name.
1688  argNames.push_back(llvm::MDString::get(VMContext, parm->getName()));
1689 
1690  auto getTypeSpelling = [&](QualType Ty) {
1691  auto typeName = Ty.getUnqualifiedType().getAsString(Policy);
1692 
1693  if (Ty.isCanonical()) {
1694  StringRef typeNameRef = typeName;
1695  // Turn "unsigned type" to "utype"
1696  if (typeNameRef.consume_front("unsigned "))
1697  return std::string("u") + typeNameRef.str();
1698  if (typeNameRef.consume_front("signed "))
1699  return typeNameRef.str();
1700  }
1701 
1702  return typeName;
1703  };
1704 
1705  if (ty->isPointerType()) {
1706  QualType pointeeTy = ty->getPointeeType();
1707 
1708  // Get address qualifier.
1709  addressQuals.push_back(
1710  llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(
1711  ArgInfoAddressSpace(pointeeTy.getAddressSpace()))));
1712 
1713  // Get argument type name.
1714  std::string typeName = getTypeSpelling(pointeeTy) + "*";
1715  std::string baseTypeName =
1716  getTypeSpelling(pointeeTy.getCanonicalType()) + "*";
1717  argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
1718  argBaseTypeNames.push_back(
1719  llvm::MDString::get(VMContext, baseTypeName));
1720 
1721  // Get argument type qualifiers:
1722  if (ty.isRestrictQualified())
1723  typeQuals = "restrict";
1724  if (pointeeTy.isConstQualified() ||
1725  (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
1726  typeQuals += typeQuals.empty() ? "const" : " const";
1727  if (pointeeTy.isVolatileQualified())
1728  typeQuals += typeQuals.empty() ? "volatile" : " volatile";
1729  } else {
1730  uint32_t AddrSpc = 0;
1731  bool isPipe = ty->isPipeType();
1732  if (ty->isImageType() || isPipe)
1733  AddrSpc = ArgInfoAddressSpace(LangAS::opencl_global);
1734 
1735  addressQuals.push_back(
1736  llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(AddrSpc)));
1737 
1738  // Get argument type name.
1739  ty = isPipe ? ty->castAs<PipeType>()->getElementType() : ty;
1740  std::string typeName = getTypeSpelling(ty);
1741  std::string baseTypeName = getTypeSpelling(ty.getCanonicalType());
1742 
1743  // Remove access qualifiers on images
1744  // (as they are inseparable from type in clang implementation,
1745  // but OpenCL spec provides a special query to get access qualifier
1746  // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
1747  if (ty->isImageType()) {
1748  removeImageAccessQualifier(typeName);
1749  removeImageAccessQualifier(baseTypeName);
1750  }
1751 
1752  argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
1753  argBaseTypeNames.push_back(
1754  llvm::MDString::get(VMContext, baseTypeName));
1755 
1756  if (isPipe)
1757  typeQuals = "pipe";
1758  }
1759  argTypeQuals.push_back(llvm::MDString::get(VMContext, typeQuals));
1760  }
1761 
1762  Fn->setMetadata("kernel_arg_addr_space",
1763  llvm::MDNode::get(VMContext, addressQuals));
1764  Fn->setMetadata("kernel_arg_access_qual",
1765  llvm::MDNode::get(VMContext, accessQuals));
1766  Fn->setMetadata("kernel_arg_type",
1767  llvm::MDNode::get(VMContext, argTypeNames));
1768  Fn->setMetadata("kernel_arg_base_type",
1769  llvm::MDNode::get(VMContext, argBaseTypeNames));
1770  Fn->setMetadata("kernel_arg_type_qual",
1771  llvm::MDNode::get(VMContext, argTypeQuals));
1772  if (getCodeGenOpts().EmitOpenCLArgMetadata)
1773  Fn->setMetadata("kernel_arg_name",
1774  llvm::MDNode::get(VMContext, argNames));
1775 }
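As a rough sketch of the metadata this produces, an OpenCL kernel such as "kernel void foo(global int *dst)" is annotated approximately as follows (metadata node numbers are placeholders):

    define spir_kernel void @foo(i32 addrspace(1)* %dst)
        !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3
        !kernel_arg_base_type !4 !kernel_arg_type_qual !5
    !1 = !{i32 1}          ; global address space, per ArgInfoAddressSpace()
    !2 = !{!"none"}        ; not an image or pipe
    !3 = !{!"int*"}
    !4 = !{!"int*"}
    !5 = !{!""}

kernel_arg_name is only attached when arg-info emission is requested, as guarded by EmitOpenCLArgMetadata above.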
1776 
1777 /// Determines whether the language options require us to model
1778 /// unwind exceptions. We treat -fexceptions as mandating this
1779 /// except under the fragile ObjC ABI with only ObjC exceptions
1780 /// enabled. This means, for example, that C with -fexceptions
1781 /// enables this.
1782 static bool hasUnwindExceptions(const LangOptions &LangOpts) {
1783  // If exceptions are completely disabled, obviously this is false.
1784  if (!LangOpts.Exceptions) return false;
1785 
1786  // If C++ exceptions are enabled, this is true.
1787  if (LangOpts.CXXExceptions) return true;
1788 
1789  // If ObjC exceptions are enabled, this depends on the ABI.
1790  if (LangOpts.ObjCExceptions) {
1791  return LangOpts.ObjCRuntime.hasUnwindExceptions();
1792  }
1793 
1794  return true;
1795 }
1796 
1797 static bool requiresMemberFunctionPointerTypeMetadata(CodeGenModule &CGM,
1798  const CXXMethodDecl *MD) {
1799  // Check that the type metadata can ever actually be used by a call.
1800  if (!CGM.getCodeGenOpts().LTOUnit ||
1801  !CGM.HasHiddenLTOVisibility(MD->getParent()))
1802  return false;
1803 
1804  // Only functions whose address can be taken with a member function pointer
1805  // need this sort of type metadata.
1806  return !MD->isStatic() && !MD->isVirtual() && !isa<CXXConstructorDecl>(MD) &&
1807  !isa<CXXDestructorDecl>(MD);
1808 }
1809 
1810 std::vector<const CXXRecordDecl *>
1811 CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) {
1812  llvm::SetVector<const CXXRecordDecl *> MostBases;
1813 
1814  std::function<void (const CXXRecordDecl *)> CollectMostBases;
1815  CollectMostBases = [&](const CXXRecordDecl *RD) {
1816  if (RD->getNumBases() == 0)
1817  MostBases.insert(RD);
1818  for (const CXXBaseSpecifier &B : RD->bases())
1819  CollectMostBases(B.getType()->getAsCXXRecordDecl());
1820  };
1821  CollectMostBases(RD);
1822  return MostBases.takeVector();
1823 }
1824 
1825 void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
1826  llvm::Function *F) {
1827  llvm::AttrBuilder B(F->getContext());
1828 
1829  if (CodeGenOpts.UnwindTables)
1830  B.addAttribute(llvm::Attribute::UWTable);
1831 
1832  if (CodeGenOpts.StackClashProtector)
1833  B.addAttribute("probe-stack", "inline-asm");
1834 
1835  if (!hasUnwindExceptions(LangOpts))
1836  B.addAttribute(llvm::Attribute::NoUnwind);
1837 
1838  if (!D || !D->hasAttr<NoStackProtectorAttr>()) {
1839  if (LangOpts.getStackProtector() == LangOptions::SSPOn)
1840  B.addAttribute(llvm::Attribute::StackProtect);
1841  else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
1842  B.addAttribute(llvm::Attribute::StackProtectStrong);
1843  else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
1844  B.addAttribute(llvm::Attribute::StackProtectReq);
1845  }
1846 
1847  if (!D) {
1848  // If we don't have a declaration to control inlining, the function isn't
1849  // explicitly marked as alwaysinline for semantic reasons, and inlining is
1850  // disabled, mark the function as noinline.
1851  if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline) &&
1852  CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining)
1853  B.addAttribute(llvm::Attribute::NoInline);
1854 
1855  F->addFnAttrs(B);
1856  return;
1857  }
1858 
1859  // Track whether we need to add the optnone LLVM attribute,
1860  // starting with the default for this optimization level.
1861  bool ShouldAddOptNone =
1862  !CodeGenOpts.DisableO0ImplyOptNone && CodeGenOpts.OptimizationLevel == 0;
1863  // We can't add optnone in the following cases, it won't pass the verifier.
1864  ShouldAddOptNone &= !D->hasAttr<MinSizeAttr>();
1865  ShouldAddOptNone &= !D->hasAttr<AlwaysInlineAttr>();
1866 
1867  // Add optnone, but do so only if the function isn't always_inline.
1868  if ((ShouldAddOptNone || D->hasAttr<OptimizeNoneAttr>()) &&
1869  !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
1870  B.addAttribute(llvm::Attribute::OptimizeNone);
1871 
1872  // OptimizeNone implies noinline; we should not be inlining such functions.
1873  B.addAttribute(llvm::Attribute::NoInline);
1874 
1875  // We still need to handle naked functions even though optnone subsumes
1876  // much of their semantics.
1877  if (D->hasAttr<NakedAttr>())
1878  B.addAttribute(llvm::Attribute::Naked);
1879 
1880  // OptimizeNone wins over OptimizeForSize and MinSize.
1881  F->removeFnAttr(llvm::Attribute::OptimizeForSize);
1882  F->removeFnAttr(llvm::Attribute::MinSize);
1883  } else if (D->hasAttr<NakedAttr>()) {
1884  // Naked implies noinline: we should not be inlining such functions.
1885  B.addAttribute(llvm::Attribute::Naked);
1886  B.addAttribute(llvm::Attribute::NoInline);
1887  } else if (D->hasAttr<NoDuplicateAttr>()) {
1888  B.addAttribute(llvm::Attribute::NoDuplicate);
1889  } else if (D->hasAttr<NoInlineAttr>() && !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
1890  // Add noinline if the function isn't always_inline.
1891  B.addAttribute(llvm::Attribute::NoInline);
1892  } else if (D->hasAttr<AlwaysInlineAttr>() &&
1893  !F->hasFnAttribute(llvm::Attribute::NoInline)) {
1894  // (noinline wins over always_inline, and we can't specify both in IR)
1895  B.addAttribute(llvm::Attribute::AlwaysInline);
1896  } else if (CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
1897  // If we're not inlining, then force everything that isn't always_inline to
1898  // carry an explicit noinline attribute.
1899  if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline))
1900  B.addAttribute(llvm::Attribute::NoInline);
1901  } else {
1902  // Otherwise, propagate the inline hint attribute and potentially use its
1903  // absence to mark things as noinline.
1904  if (auto *FD = dyn_cast<FunctionDecl>(D)) {
1905  // Search function and template pattern redeclarations for inline.
1906  auto CheckForInline = [](const FunctionDecl *FD) {
1907  auto CheckRedeclForInline = [](const FunctionDecl *Redecl) {
1908  return Redecl->isInlineSpecified();
1909  };
1910  if (any_of(FD->redecls(), CheckRedeclForInline))
1911  return true;
1912  const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern();
1913  if (!Pattern)
1914  return false;
1915  return any_of(Pattern->redecls(), CheckRedeclForInline);
1916  };
1917  if (CheckForInline(FD)) {
1918  B.addAttribute(llvm::Attribute::InlineHint);
1919  } else if (CodeGenOpts.getInlining() ==
1920  CodeGenOptions::OnlyHintInlining &&
1921  !FD->isInlined() &&
1922  !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
1923  B.addAttribute(llvm::Attribute::NoInline);
1924  }
1925  }
1926  }
1927 
1928  // Add other optimization related attributes if we are optimizing this
1929  // function.
1930  if (!D->hasAttr<OptimizeNoneAttr>()) {
1931  if (D->hasAttr<ColdAttr>()) {
1932  if (!ShouldAddOptNone)
1933  B.addAttribute(llvm::Attribute::OptimizeForSize);
1934  B.addAttribute(llvm::Attribute::Cold);
1935  }
1936  if (D->hasAttr<HotAttr>())
1937  B.addAttribute(llvm::Attribute::Hot);
1938  if (D->hasAttr<MinSizeAttr>())
1939  B.addAttribute(llvm::Attribute::MinSize);
1940  }
1941 
1942  F->addFnAttrs(B);
1943 
1944  unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
1945  if (alignment)
1946  F->setAlignment(llvm::Align(alignment));
1947 
1948  if (!D->hasAttr<AlignedAttr>())
1949  if (LangOpts.FunctionAlignment)
1950  F->setAlignment(llvm::Align(1ull << LangOpts.FunctionAlignment));
1951 
1952  // Some C++ ABIs require 2-byte alignment for member functions, in order to
1953  // reserve a bit for differentiating between virtual and non-virtual member
1954  // functions. If the current target's C++ ABI requires this and this is a
1955  // member function, set its alignment accordingly.
1956  if (getTarget().getCXXABI().areMemberFunctionsAligned()) {
1957  if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
1958  F->setAlignment(llvm::Align(2));
1959  }
1960 
1961  // In the cross-dso CFI mode with canonical jump tables, we want !type
1962  // attributes on definitions only.
1963  if (CodeGenOpts.SanitizeCfiCrossDso &&
1964  CodeGenOpts.SanitizeCfiCanonicalJumpTables) {
1965  if (auto *FD = dyn_cast<FunctionDecl>(D)) {
1966  // Skip available_externally functions. They won't be codegen'ed in the
1967  // current module anyway.
1968  if (getContext().GetGVALinkageForFunction(FD) != GVA_AvailableExternally)
1969  CreateFunctionTypeMetadataForIcall(FD, F);
1970  }
1971  }
1972 
1973  // Emit type metadata on member functions for member function pointer checks.
1974  // These are only ever necessary on definitions; we're guaranteed that the
1975  // definition will be present in the LTO unit as a result of LTO visibility.
1976  auto *MD = dyn_cast<CXXMethodDecl>(D);
1977  if (MD && requiresMemberFunctionPointerTypeMetadata(*this, MD)) {
1978  for (const CXXRecordDecl *Base : getMostBaseClasses(MD->getParent())) {
1979  llvm::Metadata *Id =
1980  CreateMetadataIdentifierForType(Context.getMemberPointerType(
1981  MD->getType(), Context.getRecordType(Base).getTypePtr()));
1982  F->addTypeMetadata(0, Id);
1983  }
1984  }
1985 }
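As an informal example of the net effect at -O0 (and without -disable-O0-optnone), an ordinary definition with no inline-related attributes typically ends up carrying roughly

    attributes #0 = { noinline nounwind optnone }

where nounwind applies only when hasUnwindExceptions() is false; the unwind-table, stack-protector, cold/hot and alignment attributes selected above are layered on top depending on the options and target.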
1986 
1987 void CodeGenModule::setLLVMFunctionFEnvAttributes(const FunctionDecl *D,
1988  llvm::Function *F) {
1989  if (D->hasAttr<StrictFPAttr>()) {
1990  llvm::AttrBuilder FuncAttrs(F->getContext());
1991  FuncAttrs.addAttribute("strictfp");
1992  F->addFnAttrs(FuncAttrs);
1993  }
1994 }
1995 
1996 void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
1997  const Decl *D = GD.getDecl();
1998  if (isa_and_nonnull<NamedDecl>(D))
1999  setGVProperties(GV, GD);
2000  else
2001  GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
2002 
2003  if (D && D->hasAttr<UsedAttr>())
2004  addUsedOrCompilerUsedGlobal(GV);
2005 
2006  if (CodeGenOpts.KeepStaticConsts && D && isa<VarDecl>(D)) {
2007  const auto *VD = cast<VarDecl>(D);
2008  if (VD->getType().isConstQualified() &&
2009  VD->getStorageDuration() == SD_Static)
2010  addUsedOrCompilerUsedGlobal(GV);
2011  }
2012 }
2013 
2014 bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
2015  llvm::AttrBuilder &Attrs) {
2016  // Add target-cpu and target-features attributes to functions. If
2017  // we have a decl for the function and it has a target attribute then
2018  // parse that and add it to the feature set.
2019  StringRef TargetCPU = getTarget().getTargetOpts().CPU;
2020  StringRef TuneCPU = getTarget().getTargetOpts().TuneCPU;
2021  std::vector<std::string> Features;
2022  const auto *FD = dyn_cast_or_null<FunctionDecl>(GD.getDecl());
2023  FD = FD ? FD->getMostRecentDecl() : FD;
2024  const auto *TD = FD ? FD->getAttr<TargetAttr>() : nullptr;
2025  const auto *SD = FD ? FD->getAttr<CPUSpecificAttr>() : nullptr;
2026  const auto *TC = FD ? FD->getAttr<TargetClonesAttr>() : nullptr;
2027  bool AddedAttr = false;
2028  if (TD || SD || TC) {
2029  llvm::StringMap<bool> FeatureMap;
2030  getContext().getFunctionFeatureMap(FeatureMap, GD);
2031 
2032  // Produce the canonical string for this set of features.
2033  for (const llvm::StringMap<bool>::value_type &Entry : FeatureMap)
2034  Features.push_back((Entry.getValue() ? "+" : "-") + Entry.getKey().str());
2035 
2036  // Now add the target-cpu and target-features to the function.
2037  // While we populated the feature map above, we still need to
2038  // get and parse the target attribute so we can get the cpu for
2039  // the function.
2040  if (TD) {
2041  ParsedTargetAttr ParsedAttr = TD->parse();
2042  if (!ParsedAttr.Architecture.empty() &&
2043  getTarget().isValidCPUName(ParsedAttr.Architecture)) {
2044  TargetCPU = ParsedAttr.Architecture;
2045  TuneCPU = ""; // Clear the tune CPU.
2046  }
2047  if (!ParsedAttr.Tune.empty() &&
2048  getTarget().isValidCPUName(ParsedAttr.Tune))
2049  TuneCPU = ParsedAttr.Tune;
2050  }
2051  } else {
2052  // Otherwise just add the existing target cpu and target features to the
2053  // function.
2054  Features = getTarget().getTargetOpts().Features;
2055  }
2056 
2057  if (!TargetCPU.empty()) {
2058  Attrs.addAttribute("target-cpu", TargetCPU);
2059  AddedAttr = true;
2060  }
2061  if (!TuneCPU.empty()) {
2062  Attrs.addAttribute("tune-cpu", TuneCPU);
2063  AddedAttr = true;
2064  }
2065  if (!Features.empty()) {
2066  llvm::sort(Features);
2067  Attrs.addAttribute("target-features", llvm::join(Features, ","));
2068  AddedAttr = true;
2069  }
2070 
2071  return AddedAttr;
2072 }
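For example, a plain x86-64 compilation typically yields string attributes of roughly this shape on each function (the feature list is abbreviated and illustrative; the exact CPU and features come from the target options or from a target/cpu_specific/target_clones attribute):

    "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic"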
2073 
2074 void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
2075  llvm::GlobalObject *GO) {
2076  const Decl *D = GD.getDecl();
2077  SetCommonAttributes(GD, GO);
2078 
2079  if (D) {
2080  if (auto *GV = dyn_cast<llvm::GlobalVariable>(GO)) {
2081  if (D->hasAttr<RetainAttr>())
2082  addUsedGlobal(GV);
2083  if (auto *SA = D->getAttr<PragmaClangBSSSectionAttr>())
2084  GV->addAttribute("bss-section", SA->getName());
2085  if (auto *SA = D->getAttr<PragmaClangDataSectionAttr>())
2086  GV->addAttribute("data-section", SA->getName());
2087  if (auto *SA = D->getAttr<PragmaClangRodataSectionAttr>())
2088  GV->addAttribute("rodata-section", SA->getName());
2089  if (auto *SA = D->getAttr<PragmaClangRelroSectionAttr>())
2090  GV->addAttribute("relro-section", SA->getName());
2091  }
2092 
2093  if (auto *F = dyn_cast<llvm::Function>(GO)) {
2094  if (D->hasAttr<RetainAttr>())
2095  addUsedGlobal(F);
2096  if (auto *SA = D->getAttr<PragmaClangTextSectionAttr>())
2097  if (!D->getAttr<SectionAttr>())
2098  F->addFnAttr("implicit-section-name", SA->getName());
2099 
2100  llvm::AttrBuilder Attrs(F->getContext());
2101  if (GetCPUAndFeaturesAttributes(GD, Attrs)) {
2102  // We know that GetCPUAndFeaturesAttributes will always have the
2103  // newest set, since it has the newest possible FunctionDecl, so the
2104  // new ones should replace the old.
2105  llvm::AttributeMask RemoveAttrs;
2106  RemoveAttrs.addAttribute("target-cpu");
2107  RemoveAttrs.addAttribute("target-features");
2108  RemoveAttrs.addAttribute("tune-cpu");
2109  F->removeFnAttrs(RemoveAttrs);
2110  F->addFnAttrs(Attrs);
2111  }
2112  }
2113 
2114  if (const auto *CSA = D->getAttr<CodeSegAttr>())
2115  GO->setSection(CSA->getName());
2116  else if (const auto *SA = D->getAttr<SectionAttr>())
2117  GO->setSection(SA->getName());
2118  }
2119 
2120  getTargetCodeGenInfo().setTargetAttributes(D, GO, *this);
2121 }
2122 
2123 void CodeGenModule::SetInternalFunctionAttributes(GlobalDecl GD,
2124  llvm::Function *F,
2125  const CGFunctionInfo &FI) {
2126  const Decl *D = GD.getDecl();
2127  SetLLVMFunctionAttributes(GD, FI, F, /*IsThunk=*/false);
2128  SetLLVMFunctionAttributesForDefinition(D, F);
2129 
2130  F->setLinkage(llvm::Function::InternalLinkage);
2131 
2132  setNonAliasAttributes(GD, F);
2133 }
2134 
2135 static void setLinkageForGV(llvm::GlobalValue *GV, const NamedDecl *ND) {
2136  // Set linkage and visibility in case we never see a definition.
2137  LinkageInfo LV = ND->getLinkageAndVisibility();
2138  // Don't set internal linkage on declarations.
2139  // "extern_weak" is overloaded in LLVM; we probably should have
2140  // separate linkage types for this.
2141  if (isExternallyVisible(LV.getLinkage()) &&
2142  (ND->hasAttr<WeakAttr>() || ND->isWeakImported()))
2143  GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
2144 }
2145 
2146 void CodeGenModule::CreateFunctionTypeMetadataForIcall(const FunctionDecl *FD,
2147  llvm::Function *F) {
2148  // Only if we are checking indirect calls.
2149  if (!LangOpts.Sanitize.has(SanitizerKind::CFIICall))
2150  return;
2151 
2152  // Non-static class methods are handled via vtable or member function pointer
2153  // checks elsewhere.
2154  if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic())
2155  return;
2156 
2157  llvm::Metadata *MD = CreateMetadataIdentifierForType(FD->getType());
2158  F->addTypeMetadata(0, MD);
2159  F->addTypeMetadata(0, CreateMetadataIdentifierGeneralized(FD->getType()));
2160 
2161  // Emit a hash-based bit set entry for cross-DSO calls.
2162  if (CodeGenOpts.SanitizeCfiCrossDso)
2163  if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD))
2164  F->addTypeMetadata(0, llvm::ConstantAsMetadata::get(CrossDsoTypeId));
2165 }
2166 
2167 void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
2168  bool IsIncompleteFunction,
2169  bool IsThunk) {
2170 
2171  if (llvm::Intrinsic::ID IID = F->getIntrinsicID()) {
2172  // If this is an intrinsic function, set the function's attributes
2173  // to the intrinsic's attributes.
2174  F->setAttributes(llvm::Intrinsic::getAttributes(getLLVMContext(), IID));
2175  return;
2176  }
2177 
2178  const auto *FD = cast<FunctionDecl>(GD.getDecl());
2179 
2180  if (!IsIncompleteFunction)
2181  SetLLVMFunctionAttributes(GD, getTypes().arrangeGlobalDeclaration(GD), F,
2182  IsThunk);
2183 
2184  // Add the Returned attribute for "this", except for iOS 5 and earlier
2185  // where substantial code, including the libstdc++ dylib, was compiled with
2186  // GCC and does not actually return "this".
2187  if (!IsThunk && getCXXABI().HasThisReturn(GD) &&
2188  !(getTriple().isiOS() && getTriple().isOSVersionLT(6))) {
2189  assert(!F->arg_empty() &&
2190  F->arg_begin()->getType()
2191  ->canLosslesslyBitCastTo(F->getReturnType()) &&
2192  "unexpected this return");
2193  F->addParamAttr(0, llvm::Attribute::Returned);
2194  }
2195 
2196  // Only a few attributes are set on declarations; these may later be
2197  // overridden by a definition.
2198 
2199  setLinkageForGV(F, FD);
2200  setGVProperties(F, FD);
2201 
2202  // Setup target-specific attributes.
2203  if (!IsIncompleteFunction && F->isDeclaration())
2204  getTargetCodeGenInfo().setTargetAttributes(FD, F, *this);
2205 
2206  if (const auto *CSA = FD->getAttr<CodeSegAttr>())
2207  F->setSection(CSA->getName());
2208  else if (const auto *SA = FD->getAttr<SectionAttr>())
2209  F->setSection(SA->getName());
2210 
2211  if (const auto *EA = FD->getAttr<ErrorAttr>()) {
2212  if (EA->isError())
2213  F->addFnAttr("dontcall-error", EA->getUserDiagnostic());
2214  else if (EA->isWarning())
2215  F->addFnAttr("dontcall-warn", EA->getUserDiagnostic());
2216  }
2217 
2218  // If we plan on emitting this inline builtin, we can't treat it as a builtin.
2219  if (FD->isInlineBuiltinDeclaration()) {
2220  const FunctionDecl *FDBody;
2221  bool HasBody = FD->hasBody(FDBody);
2222  (void)HasBody;
2223  assert(HasBody && "Inline builtin declarations should always have an "
2224  "available body!");
2225  if (shouldEmitFunction(FDBody))
2226  F->addFnAttr(llvm::Attribute::NoBuiltin);
2227  }
2228 
2229  if (FD->isReplaceableGlobalAllocationFunction()) {
2230  // A replaceable global allocation function does not act like a builtin by
2231  // default, only if it is invoked by a new-expression or delete-expression.
2232  F->addFnAttr(llvm::Attribute::NoBuiltin);
2233  }
2234 
2235  if (isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD))
2236  F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2237  else if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
2238  if (MD->isVirtual())
2239  F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2240 
2241  // Don't emit entries for function declarations in the cross-DSO mode. This
2242  // is handled with better precision by the receiving DSO. But if jump tables
2243  // are non-canonical then we need type metadata in order to produce the local
2244  // jump table.
2245  if (!CodeGenOpts.SanitizeCfiCrossDso ||
2246  !CodeGenOpts.SanitizeCfiCanonicalJumpTables)
2247  CreateFunctionTypeMetadataForIcall(FD, F);
2248 
2249  if (getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>())
2250  getOpenMPRuntime().emitDeclareSimdFunction(FD, F);
2251 
2252  if (const auto *CB = FD->getAttr<CallbackAttr>()) {
2253  // Annotate the callback behavior as metadata:
2254  // - The callback callee (as argument number).
2255  // - The callback payloads (as argument numbers).
2256  llvm::LLVMContext &Ctx = F->getContext();
2257  llvm::MDBuilder MDB(Ctx);
2258 
2259  // The payload indices are all but the first one in the encoding. The first
2260  // identifies the callback callee.
2261  int CalleeIdx = *CB->encoding_begin();
2262  ArrayRef<int> PayloadIndices(CB->encoding_begin() + 1, CB->encoding_end());
2263  F->addMetadata(llvm::LLVMContext::MD_callback,
2264  *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
2265  CalleeIdx, PayloadIndices,
2266  /* VarArgsArePassed */ false)}));
2267  }
2268 }
2269 
2270 void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) {
2271  assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&
2272  "Only globals with definition can force usage.");
2273  LLVMUsed.emplace_back(GV);
2274 }
2275 
2276 void CodeGenModule::addCompilerUsedGlobal(llvm::GlobalValue *GV) {
2277  assert(!GV->isDeclaration() &&
2278  "Only globals with definition can force usage.");
2279  LLVMCompilerUsed.emplace_back(GV);
2280 }
2281 
2282 void CodeGenModule::addUsedOrCompilerUsedGlobal(llvm::GlobalValue *GV) {
2283  assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&
2284  "Only globals with definition can force usage.");
2285  if (getTriple().isOSBinFormatELF())
2286  LLVMCompilerUsed.emplace_back(GV);
2287  else
2288  LLVMUsed.emplace_back(GV);
2289 }
2290 
2291 static void emitUsed(CodeGenModule &CGM, StringRef Name,
2292  std::vector<llvm::WeakTrackingVH> &List) {
2293  // Don't create llvm.used if there is no need.
2294  if (List.empty())
2295  return;
2296 
2297  // Convert List to what ConstantArray needs.
2298  SmallVector<llvm::Constant*, 8> UsedArray;
2299  UsedArray.resize(List.size());
2300  for (unsigned i = 0, e = List.size(); i != e; ++i) {
2301  UsedArray[i] =
2302  llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
2303  cast<llvm::Constant>(&*List[i]), CGM.Int8PtrTy);
2304  }
2305 
2306  if (UsedArray.empty())
2307  return;
2308  llvm::ArrayType *ATy = llvm::ArrayType::get(CGM.Int8PtrTy, UsedArray.size());
2309 
2310  auto *GV = new llvm::GlobalVariable(
2311  CGM.getModule(), ATy, false, llvm::GlobalValue::AppendingLinkage,
2312  llvm::ConstantArray::get(ATy, UsedArray), Name);
2313 
2314  GV->setSection("llvm.metadata");
2315 }
2316 
2317 void CodeGenModule::emitLLVMUsed() {
2318  emitUsed(*this, "llvm.used", LLVMUsed);
2319  emitUsed(*this, "llvm.compiler.used", LLVMCompilerUsed);
2320 }
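For reference, the two arrays emitted here take the usual appending form, e.g. with a single retained function @f (a sketch; the i8* casts correspond to the pointer casts performed in emitUsed above):

    @llvm.used = appending global [1 x i8*]
        [i8* bitcast (void ()* @f to i8*)], section "llvm.metadata"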
2321 
2322 void CodeGenModule::AppendLinkerOptions(StringRef Opts) {
2323  auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opts);
2324  LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
2325 }
2326 
2327 void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) {
2328  llvm::SmallString<32> Opt;
2329  getTargetCodeGenInfo().getDetectMismatchOption(Name, Value, Opt);
2330  if (Opt.empty())
2331  return;
2332  auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
2333  LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
2334 }
2335 
2336 void CodeGenModule::AddDependentLib(StringRef Lib) {
2337  auto &C = getLLVMContext();
2338  if (getTarget().getTriple().isOSBinFormatELF()) {
2339  ELFDependentLibraries.push_back(
2340  llvm::MDNode::get(C, llvm::MDString::get(C, Lib)));
2341  return;
2342  }
2343 
2344  llvm::SmallString<24> Opt;
2345  getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt);
2346  auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
2347  LinkerOptionsMetadata.push_back(llvm::MDNode::get(C, MDOpts));
2348 }
2349 
2350 /// Add link options implied by the given module, including modules
2351 /// it depends on, using a postorder walk.
2352 static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
2353  SmallVectorImpl<llvm::MDNode *> &Metadata,
2354  llvm::SmallPtrSet<Module *, 16> &Visited) {
2355  // Import this module's parent.
2356  if (Mod->Parent && Visited.insert(Mod->Parent).second) {
2357  addLinkOptionsPostorder(CGM, Mod->Parent, Metadata, Visited);
2358  }
2359 
2360  // Import this module's dependencies.
2361  for (Module *Import : llvm::reverse(Mod->Imports)) {
2362  if (Visited.insert(Import).second)
2363  addLinkOptionsPostorder(CGM, Import, Metadata, Visited);
2364  }
2365 
2366  // Add linker options to link against the libraries/frameworks
2367  // described by this module.
2368  llvm::LLVMContext &Context = CGM.getLLVMContext();
2369  bool IsELF = CGM.getTarget().getTriple().isOSBinFormatELF();
2370 
2371  // For modules that use export_as for linking, use that module
2372  // name instead.
2373  if (Mod->UseExportAsModuleLinkName)
2374  return;
2375 
2376  for (const Module::LinkLibrary &LL : llvm::reverse(Mod->LinkLibraries)) {
2377  // Link against a framework. Frameworks are currently Darwin only, so we
2378  // don't need to ask TargetCodeGenInfo for the spelling of the linker option.
2379  if (LL.IsFramework) {
2380  llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"),
2381  llvm::MDString::get(Context, LL.Library)};
2382 
2383  Metadata.push_back(llvm::MDNode::get(Context, Args));
2384  continue;
2385  }
2386 
2387  // Link against a library.
2388  if (IsELF) {
2389  llvm::Metadata *Args[2] = {
2390  llvm::MDString::get(Context, "lib"),
2391  llvm::MDString::get(Context, LL.Library),
2392  };
2393  Metadata.push_back(llvm::MDNode::get(Context, Args));
2394  } else {
2395  llvm::SmallString<24> Opt;
2396  CGM.getTargetCodeGenInfo().getDependentLibraryOption(LL.Library, Opt);
2397  auto *OptString = llvm::MDString::get(Context, Opt);
2398  Metadata.push_back(llvm::MDNode::get(Context, OptString));
2399  }
2400  }
2401 }
2402 
2403 void CodeGenModule::EmitModuleLinkOptions() {
2404  // Collect the set of all of the modules we want to visit to emit link
2405  // options, which is essentially the imported modules and all of their
2406  // non-explicit child modules.
2407  llvm::SetVector<clang::Module *> LinkModules;
2408  llvm::SmallPtrSet<clang::Module *, 16> Visited;
2409  SmallVector<clang::Module *, 16> Stack;
2410 
2411  // Seed the stack with imported modules.
2412  for (Module *M : ImportedModules) {
2413  // Do not add any link flags when an implementation TU of a module imports
2414  // a header of that same module.
2415  if (M->getTopLevelModuleName() == getLangOpts().CurrentModule &&
2416  !getLangOpts().isCompilingModule())
2417  continue;
2418  if (Visited.insert(M).second)
2419  Stack.push_back(M);
2420  }
2421 
2422  // Find all of the modules to import, making a little effort to prune
2423  // non-leaf modules.
2424  while (!Stack.empty()) {
2425  clang::Module *Mod = Stack.pop_back_val();
2426 
2427  bool AnyChildren = false;
2428 
2429  // Visit the submodules of this module.
2430  for (const auto &SM : Mod->submodules()) {
2431  // Skip explicit children; they need to be explicitly imported to be
2432  // linked against.
2433  if (SM->IsExplicit)
2434  continue;
2435 
2436  if (Visited.insert(SM).second) {
2437  Stack.push_back(SM);
2438  AnyChildren = true;
2439  }
2440  }
2441 
2442  // We didn't find any children, so add this module to the list of
2443  // modules to link against.
2444  if (!AnyChildren) {
2445  LinkModules.insert(Mod);
2446  }
2447  }
2448 
2449  // Add link options for all of the imported modules in reverse topological
2450  // order. We don't do anything to try to order import link flags with respect
2451  // to linker options inserted by things like #pragma comment().
2452  SmallVector<llvm::MDNode *, 16> MetadataArgs;
2453  Visited.clear();
2454  for (Module *M : LinkModules)
2455  if (Visited.insert(M).second)
2456  addLinkOptionsPostorder(*this, M, MetadataArgs, Visited);
2457  std::reverse(MetadataArgs.begin(), MetadataArgs.end());
2458  LinkerOptionsMetadata.append(MetadataArgs.begin(), MetadataArgs.end());
2459 
2460  // Add the linker options metadata flag.
2461  auto *NMD = getModule().getOrInsertNamedMetadata("llvm.linker.options");
2462  for (auto *MD : LinkerOptionsMetadata)
2463  NMD->addOperand(MD);
2464 }
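The named metadata produced here ends up looking roughly like the following sketch; the entries are illustrative, with the "-framework" form coming from the Darwin-only branch above and the "lib" pair from the ELF branch (other object formats get a target-specific spelling from TargetCodeGenInfo):

    !llvm.linker.options = !{!0, !1}
    !0 = !{!"-framework", !"Cocoa"}
    !1 = !{!"lib", !"z"}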
2465 
2466 void CodeGenModule::EmitDeferred() {
2467  // Emit deferred declare target declarations.
2468  if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
2469  getOpenMPRuntime().emitDeferredTargetDecls();
2470 
2471  // Emit code for any potentially referenced deferred decls. Since a
2472  // previously unused static decl may become used during the generation of code
2473  // for a static function, iterate until no changes are made.
2474 
2475  if (!DeferredVTables.empty()) {
2476  EmitDeferredVTables();
2477 
2478  // Emitting a vtable doesn't directly cause more vtables to
2479  // become deferred, although it can cause functions to be
2480  // emitted that then need those vtables.
2481  assert(DeferredVTables.empty());
2482  }
2483 
2484  // Emit CUDA/HIP static device variables referenced by host code only.
2485  // Note we should not clear CUDADeviceVarODRUsedByHost since it is still
2486  // needed for further handling.
2487  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
2488  for (const auto *V : getContext().CUDADeviceVarODRUsedByHost)
2489  DeferredDeclsToEmit.push_back(V);
2490 
2491  // Stop if we're out of both deferred vtables and deferred declarations.
2492  if (DeferredDeclsToEmit.empty())
2493  return;
2494 
2495  // Grab the list of decls to emit. If EmitGlobalDefinition schedules more
2496  // work, it will not interfere with this.
2497  std::vector<GlobalDecl> CurDeclsToEmit;
2498  CurDeclsToEmit.swap(DeferredDeclsToEmit);
2499 
2500  for (GlobalDecl &D : CurDeclsToEmit) {
2501  // We should call GetAddrOfGlobal with IsForDefinition set to true in order
2502  // to get GlobalValue with exactly the type we need, not something that
2503  // might have been created for another decl with the same mangled name but
2504  // different type.
2505  llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>(
2506  GetAddrOfGlobal(D, ForDefinition));
2507 
2508  // In case of different address spaces, we may still get a cast, even with
2509  // IsForDefinition equal to true. Query mangled names table to get
2510  // GlobalValue.
2511  if (!GV)
2512  GV = GetGlobalValue(getMangledName(D));
2513 
2514  // Make sure GetGlobalValue returned non-null.
2515  assert(GV);
2516 
2517  // Check to see if we've already emitted this. This is necessary
2518  // for a couple of reasons: first, decls can end up in the
2519  // deferred-decls queue multiple times, and second, decls can end
2520  // up with definitions in unusual ways (e.g. by an extern inline
2521  // function acquiring a strong function redefinition). Just
2522  // ignore these cases.
2523  if (!GV->isDeclaration())
2524  continue;
2525 
2526  // If this is OpenMP, check if it is legal to emit this global normally.
2527  if (LangOpts.OpenMP && OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(D))
2528  continue;
2529 
2530  // Otherwise, emit the definition and move on to the next one.
2531  EmitGlobalDefinition(D, GV);
2532 
2533  // If we found out that we need to emit more decls, do that recursively.
2534  // This has the advantage that the decls are emitted in a DFS and related
2535  // ones are close together, which is convenient for testing.
2536  if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) {
2537  EmitDeferred();
2538  assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty());
2539  }
2540  }
2541 }
2542 
2543 void CodeGenModule::EmitVTablesOpportunistically() {
2544  // Try to emit external vtables as available_externally if all of their
2545  // inline virtual functions have been emitted. This runs after EmitDeferred()
2546  // and therefore must not create new references to things that need to be
2547  // emitted lazily. It also relies on the fact that we eagerly emit RTTI.
2548 
2549  assert((OpportunisticVTables.empty() || shouldOpportunisticallyEmitVTables())
2550  && "Only emit opportunistic vtables with optimizations");
2551 
2552  for (const CXXRecordDecl *RD : OpportunisticVTables) {
2553  assert(getVTables().isVTableExternal(RD) &&
2554  "This queue should only contain external vtables");
2555  if (getCXXABI().canSpeculativelyEmitVTable(RD))
2556  VTables.GenerateClassData(RD);
2557  }
2558  OpportunisticVTables.clear();
2559 }
2560 
2561 void CodeGenModule::EmitGlobalAnnotations() {
2562  if (Annotations.empty())
2563  return;
2564 
2565  // Create a new global variable for the ConstantStruct in the Module.
2566  llvm::Constant *Array = llvm::ConstantArray::get(llvm::ArrayType::get(
2567  Annotations[0]->getType(), Annotations.size()), Annotations);
2568  auto *gv = new llvm::GlobalVariable(getModule(), Array->getType(), false,
2569  llvm::GlobalValue::AppendingLinkage,
2570  Array, "llvm.global.annotations");
2571  gv->setSection(AnnotationSection);
2572 }
2573 
2574 llvm::Constant *CodeGenModule::EmitAnnotationString(StringRef Str) {
2575  llvm::Constant *&AStr = AnnotationStrings[Str];
2576  if (AStr)
2577  return AStr;
2578 
2579  // Not found yet, create a new global.
2580  llvm::Constant *s = llvm::ConstantDataArray::getString(getLLVMContext(), Str);
2581  auto *gv =
2582  new llvm::GlobalVariable(getModule(), s->getType(), true,
2583  llvm::GlobalValue::PrivateLinkage, s, ".str");
2584  gv->setSection(AnnotationSection);
2585  gv->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2586  AStr = gv;
2587  return gv;
2588 }
2589 
2590 llvm::Constant *CodeGenModule::EmitAnnotationUnit(SourceLocation Loc) {
2591  SourceManager &SM = getContext().getSourceManager();
2592  PresumedLoc PLoc = SM.getPresumedLoc(Loc);
2593  if (PLoc.isValid())
2594  return EmitAnnotationString(PLoc.getFilename());
2595  return EmitAnnotationString(SM.getBufferName(Loc));
2596 }
2597 
2598 llvm::Constant *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) {
2599  SourceManager &SM = getContext().getSourceManager();
2600  PresumedLoc PLoc = SM.getPresumedLoc(L);
2601  unsigned LineNo = PLoc.isValid() ? PLoc.getLine() :
2602  SM.getExpansionLineNumber(L);
2603  return llvm::ConstantInt::get(Int32Ty, LineNo);
2604 }
2605 
2606 llvm::Constant *CodeGenModule::EmitAnnotationArgs(const AnnotateAttr *Attr) {
2607  ArrayRef<Expr *> Exprs = {Attr->args_begin(), Attr->args_size()};
2608  if (Exprs.empty())
2609  return llvm::ConstantPointerNull::get(GlobalsInt8PtrTy);
2610 
2611  llvm::FoldingSetNodeID ID;
2612  for (Expr *E : Exprs) {
2613  ID.Add(cast<clang::ConstantExpr>(E)->getAPValueResult());
2614  }
2615  llvm::Constant *&Lookup = AnnotationArgs[ID.ComputeHash()];
2616  if (Lookup)
2617  return Lookup;
2618 
2619  llvm::SmallVector<llvm::Constant *, 4> LLVMArgs;
2620  LLVMArgs.reserve(Exprs.size());
2621  ConstantEmitter ConstEmiter(*this);
2622  llvm::transform(Exprs, std::back_inserter(LLVMArgs), [&](const Expr *E) {
2623  const auto *CE = cast<clang::ConstantExpr>(E);
2624  return ConstEmiter.emitAbstract(CE->getBeginLoc(), CE->getAPValueResult(),
2625  CE->getType());
2626  });
2627  auto *Struct = llvm::ConstantStruct::getAnon(LLVMArgs);
2628  auto *GV = new llvm::GlobalVariable(getModule(), Struct->getType(), true,
2629  llvm::GlobalValue::PrivateLinkage, Struct,
2630  ".args");
2631  GV->setSection(AnnotationSection);
2632  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2633  auto *Bitcasted = llvm::ConstantExpr::getBitCast(GV, GlobalsInt8PtrTy);
2634 
2635  Lookup = Bitcasted;
2636  return Bitcasted;
2637 }
2638 
2639 llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
2640  const AnnotateAttr *AA,
2641  SourceLocation L) {
2642  // Get the globals for file name, annotation, and the line number.
2643  llvm::Constant *AnnoGV = EmitAnnotationString(AA->getAnnotation()),
2644  *UnitGV = EmitAnnotationUnit(L),
2645  *LineNoCst = EmitAnnotationLineNo(L),
2646  *Args = EmitAnnotationArgs(AA);
2647 
2648  llvm::Constant *GVInGlobalsAS = GV;
2649  if (GV->getAddressSpace() !=
2650  getDataLayout().getDefaultGlobalsAddressSpace()) {
2651  GVInGlobalsAS = llvm::ConstantExpr::getAddrSpaceCast(
2652  GV, GV->getValueType()->getPointerTo(
2653  getDataLayout().getDefaultGlobalsAddressSpace()));
2654  }
2655 
2656  // Create the ConstantStruct for the global annotation.
2657  llvm::Constant *Fields[] = {
2658  llvm::ConstantExpr::getBitCast(GVInGlobalsAS, GlobalsInt8PtrTy),
2659  llvm::ConstantExpr::getBitCast(AnnoGV, GlobalsInt8PtrTy),
2660  llvm::ConstantExpr::getBitCast(UnitGV, GlobalsInt8PtrTy),
2661  LineNoCst,
2662  Args,
2663  };
2664  return llvm::ConstantStruct::getAnon(Fields);
2665 }
2666 
2667 void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D,
2668  llvm::GlobalValue *GV) {
2669  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2670  // Get the struct elements for these annotations.
2671  for (const auto *I : D->specific_attrs<AnnotateAttr>())
2672  Annotations.push_back(EmitAnnotateAttr(GV, I, D->getLocation()));
2673 }
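Putting the annotation pieces together: a declaration such as __attribute__((annotate("my_tag"))) int g; contributes one { global, annotation string, translation-unit name, line number, args } tuple via EmitAnnotateAttr(), which EmitGlobalAnnotations() later wraps into the appending llvm.global.annotations array, roughly (a sketch only; identifiers are placeholders and the args field is a null pointer when the attribute has no extra arguments):

    @llvm.global.annotations = appending global
        [1 x { i8*, i8*, i8*, i32, i8* }] [...], section "llvm.metadata"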
2674 
2675 bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind, llvm::Function *Fn,
2676  SourceLocation Loc) const {
2677  const auto &NoSanitizeL = getContext().getNoSanitizeList();
2678  // NoSanitize by function name.
2679  if (NoSanitizeL.containsFunction(Kind, Fn->getName()))
2680  return true;
2681  // NoSanitize by location.
2682  if (Loc.isValid())
2683  return NoSanitizeL.containsLocation(Kind, Loc);
2684  // If location is unknown, this may be a compiler-generated function. Assume
2685  // it's located in the main file.
2686  auto &SM = Context.getSourceManager();
2687  if (const auto *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
2688  return NoSanitizeL.containsFile(Kind, MainFile->getName());
2689  }
2690  return false;
2691 }
2692 
2693 bool CodeGenModule::isInNoSanitizeList(llvm::GlobalVariable *GV,
2694  SourceLocation Loc, QualType Ty,
2695  StringRef Category) const {
2696  // For now globals can be ignored only in ASan and KASan.
2697  const SanitizerMask EnabledAsanMask =
2698  LangOpts.Sanitize.Mask &
2699  (SanitizerKind::Address | SanitizerKind::KernelAddress |
2700  SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress |
2701  SanitizerKind::MemTag);
2702  if (!EnabledAsanMask)
2703  return false;
2704  const auto &NoSanitizeL = getContext().getNoSanitizeList();
2705  if (NoSanitizeL.containsGlobal(EnabledAsanMask, GV->getName(), Category))
2706  return true;
2707  if (NoSanitizeL.containsLocation(EnabledAsanMask, Loc, Category))
2708  return true;
2709  // Check global type.
2710  if (!Ty.isNull()) {
2711  // Drill down the array types: if global variable of a fixed type is
2712  // not sanitized, we also don't instrument arrays of them.
2713  while (auto AT = dyn_cast<ArrayType>(Ty.getTypePtr()))
2714  Ty = AT->getElementType();
2715  Ty = Ty.getCanonicalType().getUnqualifiedType();
2716  // Only record types (classes, structs etc.) are ignored.
2717  if (Ty->isRecordType()) {
2718  std::string TypeStr = Ty.getAsString(getContext().getPrintingPolicy());
2719  if (NoSanitizeL.containsType(EnabledAsanMask, TypeStr, Category))
2720  return true;
2721  }
2722  }
2723  return false;
2724 }
2725 
2726 bool CodeGenModule::imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc,
2727  StringRef Category) const {
2728  const auto &XRayFilter = getContext().getXRayFilter();
2729  using ImbueAttr = XRayFunctionFilter::ImbueAttribute;
2730  auto Attr = ImbueAttr::NONE;
2731  if (Loc.isValid())
2732  Attr = XRayFilter.shouldImbueLocation(Loc, Category);
2733  if (Attr == ImbueAttr::NONE)
2734  Attr = XRayFilter.shouldImbueFunction(Fn->getName());
2735  switch (Attr) {
2736  case ImbueAttr::NONE:
2737  return false;
2738  case ImbueAttr::ALWAYS:
2739  Fn->addFnAttr("function-instrument", "xray-always");
2740  break;
2741  case ImbueAttr::ALWAYS_ARG1:
2742  Fn->addFnAttr("function-instrument", "xray-always");
2743  Fn->addFnAttr("xray-log-args", "1");
2744  break;
2745  case ImbueAttr::NEVER:
2746  Fn->addFnAttr("function-instrument", "xray-never");
2747  break;
2748  }
2749  return true;
2750 }
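For instance, a function that the XRay attribute list selects as ALWAYS with first-argument logging ends up carrying exactly the string attributes added above:

    attributes #0 = { "function-instrument"="xray-always" "xray-log-args"="1" }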
2751 
2752 bool CodeGenModule::isFunctionBlockedByProfileList(llvm::Function *Fn,
2753  SourceLocation Loc) const {
2754  const auto &ProfileList = getContext().getProfileList();
2755  // If the profile list is empty, then instrument everything.
2756  if (ProfileList.isEmpty())
2757  return false;
2758  CodeGenOptions::ProfileInstrKind Kind = getCodeGenOpts().getProfileInstr();
2759  // First, check the function name.
2760  Optional<bool> V = ProfileList.isFunctionExcluded(Fn->getName(), Kind);
2761  if (V.hasValue())
2762  return *V;
2763  // Next, check the source location.
2764  if (Loc.isValid()) {
2765  Optional<bool> V = ProfileList.isLocationExcluded(Loc, Kind);
2766  if (V.hasValue())
2767  return *V;
2768  }
2769  // If location is unknown, this may be a compiler-generated function. Assume
2770  // it's located in the main file.
2771  auto &SM = Context.getSourceManager();
2772  if (const auto *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
2773  Optional<bool> V = ProfileList.isFileExcluded(MainFile->getName(), Kind);
2774  if (V.hasValue())
2775  return *V;
2776  }
2777  return ProfileList.getDefault();
2778 }
2779 
2780 bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
2781  // Never defer when EmitAllDecls is specified.
2782  if (LangOpts.EmitAllDecls)
2783  return true;
2784 
2785  if (CodeGenOpts.KeepStaticConsts) {
2786  const auto *VD = dyn_cast<VarDecl>(Global);
2787  if (VD && VD->getType().isConstQualified() &&
2788  VD->getStorageDuration() == SD_Static)
2789  return true;
2790  }
2791 
2792  return getContext().DeclMustBeEmitted(Global);
2793 }
2794 
2795 bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
2796  // In OpenMP 5.0 variables and functions may be marked as
2797  // device_type(host/nohost) and we should not emit them eagerly unless we are
2798  // sure that they must be emitted on the host/device. To be sure we need to
2799  // have seen a declare target that explicitly mentions the function; we know
2800  // we have if the level of the declare target attribute is -1. Note that we
2801  // check somewhere else if we should emit this at all.
2802  if (LangOpts.OpenMP >= 50 && !LangOpts.OpenMPSimd) {
2803  llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
2804  OMPDeclareTargetDeclAttr::getActiveAttr(Global);
2805  if (!ActiveAttr || (*ActiveAttr)->getLevel() != (unsigned)-1)
2806  return false;
2807  }
2808 
2809  if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
2810  if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
2811  // Implicit template instantiations may change linkage if they are later
2812  // explicitly instantiated, so they should not be emitted eagerly.
2813  return false;
2814  }
2815  if (const auto *VD = dyn_cast<VarDecl>(Global))
2816  if (Context.getInlineVariableDefinitionKind(VD) ==
2817  ASTContext::InlineVariableDefinitionKind::WeakUnknown)
2818  // A definition of an inline constexpr static data member may change
2819  // linkage later if it's redeclared outside the class.
2820  return false;
2821  // If OpenMP is enabled and threadprivates must be generated like TLS, delay
2822  // codegen for global variables, because they may be marked as threadprivate.
2823  if (LangOpts.OpenMP && LangOpts.OpenMPUseTLS &&
2824  getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Global) &&
2825  !isTypeConstant(Global->getType(), false) &&
2826  !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(Global))
2827  return false;
2828 
2829  return true;
2830 }
2831 
2832 ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) {
2833  StringRef Name = getMangledName(GD);
2834 
2835  // The UUID descriptor should be pointer aligned.
2836  CharUnits Alignment = CharUnits::fromQuantity(PointerAlignInBytes);
2837 
2838  // Look for an existing global.
2839  if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
2840  return ConstantAddress(GV, GV->getValueType(), Alignment);
2841 
2842  ConstantEmitter Emitter(*this);
2843  llvm::Constant *Init;
2844 
2845  APValue &V = GD->getAsAPValue();
2846  if (!V.isAbsent()) {
2847  // If possible, emit the APValue version of the initializer. In particular,
2848  // this gets the type of the constant right.
2849  Init = Emitter.emitForInitializer(
2850  GD->getAsAPValue(), GD->getType().getAddressSpace(), GD->getType());
2851  } else {
2852  // As a fallback, directly construct the constant.
2853  // FIXME: This may get padding wrong under esoteric struct layout rules.
2854  // MSVC appears to create a complete type 'struct __s_GUID' that it
2855  // presumably uses to represent these constants.
2856  MSGuidDecl::Parts Parts = GD->getParts();
2857  llvm::Constant *Fields[4] = {
2858  llvm::ConstantInt::get(Int32Ty, Parts.Part1),
2859  llvm::ConstantInt::get(Int16Ty, Parts.Part2),
2860  llvm::ConstantInt::get(Int16Ty, Parts.Part3),
2861  llvm::ConstantDataArray::getRaw(
2862  StringRef(reinterpret_cast<char *>(Parts.Part4And5), 8), 8,
2863  Int8Ty)};
2864  Init = llvm::ConstantStruct::getAnon(Fields);
2865  }
2866 
2867  auto *GV = new llvm::GlobalVariable(
2868  getModule(), Init->getType(),
2869  /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
2870  if (supportsCOMDAT())
2871  GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
2872  setDSOLocal(GV);
2873 
2874  if (!V.isAbsent()) {
2875  Emitter.finalize(GV);
2876  return ConstantAddress(GV, GV->getValueType(), Alignment);
2877  }
2878 
2879  llvm::Type *Ty = getTypes().ConvertTypeForMem(GD->getType());
2880  llvm::Constant *Addr = llvm::ConstantExpr::getBitCast(
2881  GV, Ty->getPointerTo(GV->getAddressSpace()));
2882  return ConstantAddress(Addr, Ty, Alignment);
2883 }
2884 
2885 ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
2886  const TemplateParamObjectDecl *TPO) {
2887  StringRef Name = getMangledName(TPO);
2888  CharUnits Alignment = getNaturalTypeAlignment(TPO->getType());
2889 
2890  if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
2891  return ConstantAddress(GV, GV->getValueType(), Alignment);
2892 
2893  ConstantEmitter Emitter(*this);
2894  llvm::Constant *Init = Emitter.emitForInitializer(
2895  TPO->getValue(), TPO->getType().getAddressSpace(), TPO->getType());
2896 
2897  if (!Init) {
2898  ErrorUnsupported(TPO, "template parameter object");
2899  return ConstantAddress::invalid();
2900  }
2901 
2902  auto *GV = new llvm::GlobalVariable(
2903  getModule(), Init->getType(),
2904  /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
2905  if (supportsCOMDAT())
2906  GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
2907  Emitter.finalize(GV);
2908 
2909  return ConstantAddress(GV, GV->getValueType(), Alignment);
2910 }
2911 
2912 ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
2913  const AliasAttr *AA = VD->getAttr<AliasAttr>();
2914  assert(AA && "No alias?");
2915 
2916  CharUnits Alignment = getContext().getDeclAlign(VD);
2917  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());
2918 
2919  // See if there is already something with the target's name in the module.
2920  llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
2921  if (Entry) {
2922  unsigned AS = getContext().getTargetAddressSpace(VD->getType());
2923  auto Ptr = llvm::ConstantExpr::getBitCast(Entry, DeclTy->getPointerTo(AS));
2924  return ConstantAddress(Ptr, DeclTy, Alignment);
2925  }
2926 
2927  llvm::Constant *Aliasee;
2928  if (isa<llvm::FunctionType>(DeclTy))
2929  Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy,
2930  GlobalDecl(cast<FunctionDecl>(VD)),
2931  /*ForVTable=*/false);
2932  else
2933  Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(), DeclTy, LangAS::Default,
2934  nullptr);
2935 
2936  auto *F = cast<llvm::GlobalValue>(Aliasee);
2937  F->setLinkage(llvm::Function::ExternalWeakLinkage);
2938  WeakRefReferences.insert(F);
2939 
2940  return ConstantAddress(Aliasee, DeclTy, Alignment);
2941 }
2942 
2943 void CodeGenModule::EmitGlobal(GlobalDecl GD) {
2944  const auto *Global = cast<ValueDecl>(GD.getDecl());
2945 
2946  // Weak references don't produce any output by themselves.
2947  if (Global->hasAttr<WeakRefAttr>())
2948  return;
2949 
2950  // If this is an alias definition (which otherwise looks like a declaration)
2951  // emit it now.
2952  if (Global->hasAttr<AliasAttr>())
2953  return EmitAliasDefinition(GD);
2954 
2955  // An ifunc is like an alias whose value is resolved at runtime by a resolver.
2956  if (Global->hasAttr<IFuncAttr>())
2957  return emitIFuncDefinition(GD);
2958 
2959  // If this is a cpu_dispatch multiversion function, emit the resolver.
2960  if (Global->hasAttr<CPUDispatchAttr>())
2961  return emitCPUDispatchDefinition(GD);
2962 
2963  // If this is CUDA, be selective about which declarations we emit.
2964  if (LangOpts.CUDA) {
2965  if (LangOpts.CUDAIsDevice) {
2966  if (!Global->hasAttr<CUDADeviceAttr>() &&
2967  !Global->hasAttr<CUDAGlobalAttr>() &&
2968  !Global->hasAttr<CUDAConstantAttr>() &&
2969  !Global->hasAttr<CUDASharedAttr>() &&
2970  !Global->getType()->isCUDADeviceBuiltinSurfaceType() &&
2971  !Global->getType()->isCUDADeviceBuiltinTextureType())
2972  return;
2973  } else {
2974  // We need to emit host-side 'shadows' for all global
2975  // device-side variables because the CUDA runtime needs their
2976  // size and host-side address in order to provide access to
2977  // their device-side incarnations.
2978 
2979  // So device-only functions are the only things we skip.
2980  if (isa<FunctionDecl>(Global) && !Global->hasAttr<CUDAHostAttr>() &&
2981  Global->hasAttr<CUDADeviceAttr>())
2982  return;
2983 
2984  assert((isa<FunctionDecl>(Global) || isa<VarDecl>(Global)) &&
2985  "Expected Variable or Function");
2986  }
2987  }
2988 
2989  if (LangOpts.OpenMP) {
2990  // If this is OpenMP, check if it is legal to emit this global normally.
2991  if (OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(GD))
2992  return;
2993  if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(Global)) {
2994  if (MustBeEmitted(Global))
2995  EmitOMPDeclareReduction(DRD);
2996  return;
2997  } else if (auto *DMD = dyn_cast<OMPDeclareMapperDecl>(Global)) {
2998  if (MustBeEmitted(Global))
2999  EmitOMPDeclareMapper(DMD);
3000  return;
3001  }
3002  }
3003 
3004  // Ignore declarations, they will be emitted on their first use.
3005  if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
3006  // Forward declarations are emitted lazily on first use.
3007  if (!FD->doesThisDeclarationHaveABody()) {
3008  if (!FD->doesDeclarationForceExternallyVisibleDefinition())
3009  return;
3010 
3011  StringRef MangledName = getMangledName(GD);
3012 
3013  // Compute the function info and LLVM type.
3014  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
3015  llvm::Type *Ty = getTypes().GetFunctionType(FI);
3016 
3017  GetOrCreateLLVMFunction(MangledName, Ty, GD, /*ForVTable=*/false,
3018  /*DontDefer=*/false);
3019  return;
3020  }
3021  } else {
3022  const auto *VD = cast<VarDecl>(Global);
3023  assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
3024  if (VD->isThisDeclarationADefinition() != VarDecl::Definition &&
3025  !Context.isMSStaticDataMemberInlineDefinition(VD)) {
3026  if (LangOpts.OpenMP) {
3027  // Emit declaration of the must-be-emitted declare target variable.
3028  if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
3029  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
3030  bool UnifiedMemoryEnabled =
3031  getOpenMPRuntime().hasRequiresUnifiedSharedMemory();
3032  if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
3033  !UnifiedMemoryEnabled) {
3034  (void)GetAddrOfGlobalVar(VD);
3035  } else {
3036  assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
3037  (*Res == OMPDeclareTargetDeclAttr::MT_To &&
3038  UnifiedMemoryEnabled)) &&
3039  "Link clause or to clause with unified memory expected.");
3040  (void)getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
3041  }
3042 
3043  return;
3044  }
3045  }
3046  // If this declaration may have caused an inline variable definition to
3047  // change linkage, make sure that it's emitted.
3048  if (Context.getInlineVariableDefinitionKind(VD) ==
3049  ASTContext::InlineVariableDefinitionKind::WeakUnknown) {
3050  GetAddrOfGlobalVar(VD);
3051  return;
3052  }
3053  }
3054 
3055  // Defer code generation to first use when possible, e.g. if this is an inline
3056  // function. If the global must always be emitted, do it eagerly if possible
3057  // to benefit from cache locality.
3058  if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) {
3059  // Emit the definition if it can't be deferred.
3060  EmitGlobalDefinition(GD);
3061  return;
3062  }
3063 
3064  // If we're deferring emission of a C++ variable with an
3065  // initializer, remember the order in which it appeared in the file.
3066  if (getLangOpts().CPlusPlus && isa<VarDecl>(Global) &&
3067  cast<VarDecl>(Global)->hasInit()) {
3068  DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
3069  CXXGlobalInits.push_back(nullptr);
3070  }
3071 
3072  StringRef MangledName = getMangledName(GD);
3073  if (GetGlobalValue(MangledName) != nullptr) {
3074  // The value has already been used and should therefore be emitted.
3075  addDeferredDeclToEmit(GD);
3076  } else if (MustBeEmitted(Global)) {
3077  // The value must be emitted, but cannot be emitted eagerly.
3078  assert(!MayBeEmittedEagerly(Global));
3079  addDeferredDeclToEmit(GD);
3080  } else {
3081  // Otherwise, remember that we saw a deferred decl with this name. The
3082  // first use of the mangled name will cause it to move into
3083  // DeferredDeclsToEmit.
3084  DeferredDecls[MangledName] = GD;
3085  }
3086 }
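// Illustrative example (not part of this file): declarations that take the
// attribute-specific early returns in EmitGlobal(); all names are hypothetical.
//
//   int impl(void) { return 0; }
//   int aliased(void) __attribute__((alias("impl")));     // EmitAliasDefinition
//   static int (*pick(void))(void) { return impl; }
//   int dispatched(void) __attribute__((ifunc("pick")));  // emitIFuncDefinition
//
// Everything else is either emitted eagerly (MustBeEmitted && MayBeEmittedEagerly)
// or parked in DeferredDecls until its mangled name is first referenced.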
3087 
3088 // Check if T is a class type with a destructor that's not dllimport.
3089 static bool HasNonDllImportDtor(QualType T) {
3090  if (const auto *RT = T->getBaseElementTypeUnsafe()->getAs<RecordType>())
3091  if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
3092  if (RD->getDestructor() && !RD->getDestructor()->hasAttr<DLLImportAttr>())
3093  return true;
3094 
3095  return false;
3096 }
3097 
3098 namespace {
3099  struct FunctionIsDirectlyRecursive
3100  : public ConstStmtVisitor<FunctionIsDirectlyRecursive, bool> {
3101  const StringRef Name;
3102  const Builtin::Context &BI;
3103  FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C)
3104  : Name(N), BI(C) {}
3105 
3106  bool VisitCallExpr(const CallExpr *E) {
3107  const FunctionDecl *FD = E->getDirectCallee();
3108  if (!FD)
3109  return false;
3110  AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
3111  if (Attr && Name == Attr->getLabel())
3112  return true;
3113  unsigned BuiltinID = FD->getBuiltinID();
3114  if (!BuiltinID || !BI.isLibFunction(BuiltinID))
3115  return false;
3116  StringRef BuiltinName = BI.getName(BuiltinID);
3117  if (BuiltinName.startswith("__builtin_") &&
3118  Name == BuiltinName.slice(strlen("__builtin_"), StringRef::npos)) {
3119  return true;
3120  }
3121  return false;
3122  }
3123 
3124  bool VisitStmt(const Stmt *S) {
3125  for (const Stmt *Child : S->children())
3126  if (Child && this->Visit(Child))
3127  return true;
3128  return false;
3129  }
3130  };
3131 
3132  // Make sure we're not referencing non-imported vars or functions.
3133  struct DLLImportFunctionVisitor
3134  : public RecursiveASTVisitor<DLLImportFunctionVisitor> {
3135  bool SafeToInline = true;
3136 
3137  bool shouldVisitImplicitCode() const { return true; }
3138 
3139  bool VisitVarDecl(VarDecl *VD) {
3140  if (VD->getTLSKind()) {
3141  // A thread-local variable cannot be imported.
3142  SafeToInline = false;
3143  return SafeToInline;
3144  }
3145 
3146  // A variable definition might imply a destructor call.
3147  if (VD->isThisDeclarationADefinition())
3148  SafeToInline = !HasNonDllImportDtor(VD->getType());
3149 
3150  return SafeToInline;
3151  }
3152 
3153  bool VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
3154  if (const auto *D = E->getTemporary()->getDestructor())
3155  SafeToInline = D->hasAttr<DLLImportAttr>();
3156  return SafeToInline;
3157  }
3158 
3159  bool VisitDeclRefExpr(DeclRefExpr *E) {
3160  ValueDecl *VD = E->getDecl();
3161  if (isa<FunctionDecl>(VD))
3162  SafeToInline = VD->hasAttr<DLLImportAttr>();
3163  else if (VarDecl *V = dyn_cast<VarDecl>(VD))
3164  SafeToInline = !V->hasGlobalStorage() || V->hasAttr<DLLImportAttr>();
3165  return SafeToInline;
3166  }
3167 
3168  bool VisitCXXConstructExpr(CXXConstructExpr *E) {
3169  SafeToInline = E->getConstructor()->hasAttr<DLLImportAttr>();
3170  return SafeToInline;
3171  }
3172 
3173  bool VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
3174  CXXMethodDecl *M = E->getMethodDecl();
3175  if (!M) {
3176  // Call through a pointer to member function. This is safe to inline.
3177  SafeToInline = true;
3178  } else {
3179  SafeToInline = M->hasAttr<DLLImportAttr>();
3180  }
3181  return SafeToInline;
3182  }
3183 
3184  bool VisitCXXDeleteExpr(CXXDeleteExpr *E) {
3185  SafeToInline = E->getOperatorDelete()->hasAttr<DLLImportAttr>();
3186  return SafeToInline;
3187  }
3188 
3189  bool VisitCXXNewExpr(CXXNewExpr *E) {
3190  SafeToInline = E->getOperatorNew()->hasAttr<DLLImportAttr>();
3191  return SafeToInline;
3192  }
3193  };
3194 }
3195 
3196 // isTriviallyRecursive - Check if this function calls another
3197 // decl that, because of the asm attribute or the other decl being a builtin,
3198 // ends up pointing to itself.
3199 bool
3200 CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) {
3201  StringRef Name;
3202  if (getCXXABI().getMangleContext().shouldMangleDeclName(FD)) {
3203  // asm labels are a special kind of mangling we have to support.
3204  AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
3205  if (!Attr)
3206  return false;
3207  Name = Attr->getLabel();
3208  } else {
3209  Name = FD->getName();
3210  }
3211 
3212  FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo);
3213  const Stmt *Body = FD->getBody();
3214  return Body ? Walker.Visit(Body) : false;
3215 }
3216 
3217 bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
3218  if (getFunctionLinkage(GD) != llvm::Function::AvailableExternallyLinkage)
3219  return true;
3220  const auto *F = cast<FunctionDecl>(GD.getDecl());
3221  if (CodeGenOpts.OptimizationLevel == 0 && !F->hasAttr<AlwaysInlineAttr>())
3222  return false;
3223 
3224  if (F->hasAttr<DLLImportAttr>() && !F->hasAttr<AlwaysInlineAttr>()) {
3225  // Check whether it would be safe to inline this dllimport function.
3226  DLLImportFunctionVisitor Visitor;
3227  Visitor.TraverseFunctionDecl(const_cast<FunctionDecl*>(F));
3228  if (!Visitor.SafeToInline)
3229  return false;
3230 
3231  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(F)) {
3232  // Implicit destructor invocations aren't captured in the AST, so the
3233  // check above can't see them. Check for them manually here.
3234  for (const Decl *Member : Dtor->getParent()->decls())
3235  if (isa<FieldDecl>(Member))
3236  if (HasNonDllImportDtor(cast<FieldDecl>(Member)->getType()))
3237  return false;
3238  for (const CXXBaseSpecifier &B : Dtor->getParent()->bases())
3239  if (HasNonDllImportDtor(B.getType()))
3240  return false;
3241  }
3242  }
3243 
3244  // Inline builtin declarations must be emitted. They are often fortified
3245  // functions.
3246  if (F->isInlineBuiltinDeclaration())
3247  return true;
3248 
3249  // PR9614. Avoid cases where the source code is lying to us. An available
3250  // externally function should have an equivalent function somewhere else,
3251  // but a function that calls itself through asm label/`__builtin_` trickery is
3252  // clearly not equivalent to the real implementation.
3253  // This happens in glibc's btowc and in some configure checks.
3254  return !isTriviallyRecursive(F);
3255 }
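// Illustrative, simplified glibc-style example (not part of this file) of the
// PR9614 pattern that isTriviallyRecursive() detects:
//
//   extern int impl(int) __asm__("towlower");
//   extern inline __attribute__((gnu_inline)) int towlower(int c) {
//     return impl(c);  // the asm label makes this a call to "towlower" itself
//   }
//
// The available_externally body would only call itself through the asm label,
// so shouldEmitFunction() refuses to emit it.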
3256 
3257 bool CodeGenModule::shouldOpportunisticallyEmitVTables() {
3258  return CodeGenOpts.OptimizationLevel > 0;
3259 }
3260 
3261 void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD,
3262  llvm::GlobalValue *GV) {
3263  const auto *FD = cast<FunctionDecl>(GD.getDecl());
3264 
3265  if (FD->isCPUSpecificMultiVersion()) {
3266  auto *Spec = FD->getAttr<CPUSpecificAttr>();
3267  for (unsigned I = 0; I < Spec->cpus_size(); ++I)
3268  EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
3269  // Requires multiple emits.
3270  } else if (FD->isTargetClonesMultiVersion()) {
3271  auto *Clone = FD->getAttr<TargetClonesAttr>();
3272  for (unsigned I = 0; I < Clone->featuresStrs_size(); ++I)
3273  if (Clone->isFirstOfVersion(I))
3274  EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
3275  EmitTargetClonesResolver(GD);
3276  } else
3277  EmitGlobalFunctionDefinition(GD, GV);
3278 }
3279 
3280 void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
3281  const auto *D = cast<ValueDecl>(GD.getDecl());
3282 
3283  PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(),
3284  Context.getSourceManager(),
3285  "Generating code for declaration");
3286 
3287  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
3288  // At -O0, don't generate IR for functions with available_externally
3289  // linkage.
3290  if (!shouldEmitFunction(GD))
3291  return;
3292 
3293  llvm::TimeTraceScope TimeScope("CodeGen Function", [&]() {
3294  std::string Name;
3295  llvm::raw_string_ostream OS(Name);
3296  FD->getNameForDiagnostic(OS, getContext().getPrintingPolicy(),
3297  /*Qualified=*/true);
3298  return Name;
3299  });
3300 
3301  if (const auto *Method = dyn_cast<CXXMethodDecl>(D)) {
3302  // Make sure to emit the definition(s) before we emit the thunks.
3303  // This is necessary for the generation of certain thunks.
3304  if (isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method))
3305  ABI->emitCXXStructor(GD);
3306  else if (FD->isMultiVersion())
3307  EmitMultiVersionFunctionDefinition(GD, GV);
3308  else
3309  EmitGlobalFunctionDefinition(GD, GV);
3310 
3311  if (Method->isVirtual())
3312  getVTables().EmitThunks(GD);
3313 
3314  return;
3315  }
3316 
3317  if (FD->isMultiVersion())
3318  return EmitMultiVersionFunctionDefinition(GD, GV);
3319  return EmitGlobalFunctionDefinition(GD, GV);
3320  }
3321 
3322  if (const auto *VD = dyn_cast<VarDecl>(D))
3323  return EmitGlobalVarDefinition(VD, !VD->hasDefinition());
3324 
3325  llvm_unreachable("Invalid argument to EmitGlobalDefinition()");
3326 }
3327 
3328 static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
3329  llvm::Function *NewFn);
3330 
3331 static unsigned
3332 TargetMVPriority(const TargetInfo &TI,
3333  const CodeGenFunction::MultiVersionResolverOption &RO) {
3334  unsigned Priority = 0;
3335  for (StringRef Feat : RO.Conditions.Features)
3336  Priority = std::max(Priority, TI.multiVersionSortPriority(Feat));
3337 
3338  if (!RO.Conditions.Architecture.empty())
3339  Priority = std::max(
3340  Priority, TI.multiVersionSortPriority(RO.Conditions.Architecture));
3341  return Priority;
3342 }
3343 
3344 // Multiversion functions should be at most 'WeakODRLinkage' so that a different
3345 // TU can forward declare the function without causing problems; this matters
3346 // particularly for CPUDispatch. It also makes sure we work with internal
3347 // linkage functions, so that the same function name can be used with internal
3348 // linkage in multiple TUs.
3349 llvm::GlobalValue::LinkageTypes getMultiversionLinkage(CodeGenModule &CGM,
3350  GlobalDecl GD) {
3351  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
3352  if (FD->getFormalLinkage() == InternalLinkage)
3353  return llvm::GlobalValue::InternalLinkage;
3354  return llvm::GlobalValue::WeakODRLinkage;
3355 }
3356 
3357 void CodeGenModule::EmitTargetClonesResolver(GlobalDecl GD) {
3358  const auto *FD = cast<FunctionDecl>(GD.getDecl());
3359  assert(FD && "Not a FunctionDecl?");
3360  const auto *TC = FD->getAttr<TargetClonesAttr>();
3361  assert(TC && "Not a target_clones Function?");
3362 
3363  QualType CanonTy = Context.getCanonicalType(FD->getType());
3364  llvm::Type *DeclTy = getTypes().ConvertType(CanonTy);
3365 
3366  if (const auto *CXXFD = dyn_cast<CXXMethodDecl>(FD)) {
3367  const CGFunctionInfo &FInfo = getTypes().arrangeCXXMethodDeclaration(CXXFD);
3368  DeclTy = getTypes().GetFunctionType(FInfo);
3369  }
3370 
3371  llvm::Function *ResolverFunc;
3372  if (getTarget().supportsIFunc()) {
3373  auto *IFunc = cast<llvm::GlobalIFunc>(
3374  GetOrCreateMultiVersionResolver(GD, DeclTy, FD));
3375  ResolverFunc = cast<llvm::Function>(IFunc->getResolver());
3376  } else
3377  ResolverFunc =
3378  cast<llvm::Function>(GetOrCreateMultiVersionResolver(GD, DeclTy, FD));
3379 
3380  SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
3381  for (unsigned VersionIndex = 0; VersionIndex < TC->featuresStrs_size();
3382  ++VersionIndex) {
3383  if (!TC->isFirstOfVersion(VersionIndex))
3384  continue;
3385  StringRef Version = TC->getFeatureStr(VersionIndex);
3386  StringRef MangledName =
3387  getMangledName(GD.getWithMultiVersionIndex(VersionIndex));
3388  llvm::Constant *Func = GetGlobalValue(MangledName);
3389  assert(Func &&
3390  "Should have already been created before calling resolver emit");
3391 
3392  StringRef Architecture;
3393  llvm::SmallVector<StringRef, 1> Feature;
3394 
3395  if (Version.startswith("arch="))
3396  Architecture = Version.drop_front(sizeof("arch=") - 1);
3397  else if (Version != "default")
3398  Feature.push_back(Version);
3399 
3400  Options.emplace_back(cast<llvm::Function>(Func), Architecture, Feature);
3401  }
3402 
3403  const TargetInfo &TI = getTarget();
3404  std::stable_sort(
3405  Options.begin(), Options.end(),
3406  [&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
3407  const CodeGenFunction::MultiVersionResolverOption &RHS) {
3408  return TargetMVPriority(TI, LHS) > TargetMVPriority(TI, RHS);
3409  });
3410  CodeGenFunction CGF(*this);
3411  CGF.EmitMultiVersionResolver(ResolverFunc, Options);
3412 }
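// Illustrative example (not part of this file): a target_clones function for
// which the resolver above is emitted; the function name is hypothetical.
//
//   __attribute__((target_clones("avx2", "arch=atom", "default")))
//   int clone_me(void) { return 1; }
//
// Each listed version gets its own definition, and the resolver (an ifunc on
// targets that support it) selects the highest-priority version at load time.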
3413 
3414 void CodeGenModule::emitMultiVersionFunctions() {
3415  std::vector<GlobalDecl> MVFuncsToEmit;
3416  MultiVersionFuncs.swap(MVFuncsToEmit);
3417  for (GlobalDecl GD : MVFuncsToEmit) {
3418  SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
3419  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
3420  getContext().forEachMultiversionedFunctionVersion(
3421  FD, [this, &GD, &Options](const FunctionDecl *CurFD) {
3422  GlobalDecl CurGD{
3423  (CurFD->isDefined() ? CurFD->getDefinition() : CurFD)};
3424  StringRef MangledName = getMangledName(CurGD);
3425  llvm::Constant *Func = GetGlobalValue(MangledName);
3426  if (!Func) {
3427  if (CurFD->isDefined()) {
3428  EmitGlobalFunctionDefinition(CurGD, nullptr);
3429  Func = GetGlobalValue(MangledName);
3430  } else {
3431  const CGFunctionInfo &FI =
3432  getTypes().arrangeGlobalDeclaration(GD);
3433  llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
3434  Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
3435  /*DontDefer=*/false, ForDefinition);
3436  }
3437  assert(Func && "This should have just been created");
3438  }
3439 
3440  const auto *TA = CurFD->getAttr<TargetAttr>();
3441  llvm::SmallVector<StringRef, 8> Feats;
3442  TA->getAddedFeatures(Feats);
3443 
3444  Options.emplace_back(cast<llvm::Function>(Func),
3445  TA->getArchitecture(), Feats);
3446  });
3447 
3448  llvm::Function *ResolverFunc;
3449  const TargetInfo &TI = getTarget();
3450 
3451  if (TI.supportsIFunc() || FD->isTargetMultiVersion()) {
3452  ResolverFunc = cast<llvm::Function>(
3453  GetGlobalValue((getMangledName(GD) + ".resolver").str()));
3454  ResolverFunc->setLinkage(getMultiversionLinkage(*this, GD));
3455  } else {
3456  ResolverFunc = cast<llvm::Function>(GetGlobalValue(getMangledName(GD)));
3457  }
3458 
3459  if (supportsCOMDAT())
3460  ResolverFunc->setComdat(
3461  getModule().getOrInsertComdat(ResolverFunc->getName()));
3462 
3463  llvm::stable_sort(
3464  Options, [&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
3465  const CodeGenFunction::MultiVersionResolverOption &RHS) {
3466  return TargetMVPriority(TI, LHS) > TargetMVPriority(TI, RHS);
3467  });
3468  CodeGenFunction CGF(*this);
3469  CGF.EmitMultiVersionResolver(ResolverFunc, Options);
3470  }
3471 
3472  // Ensure that any additions to the deferred decls list caused by emitting a
3473  // variant are emitted. This can happen when the variant itself is inline and
3474  // calls a function without linkage.
3475  if (!MVFuncsToEmit.empty())
3476  EmitDeferred();
3477 
3478  // Ensure that any additions to the multiversion funcs list from either the
3479  // deferred decls or the multiversion functions themselves are emitted.
3480  if (!MultiVersionFuncs.empty())
3481  emitMultiVersionFunctions();
3482 }
3483 
3484 void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
3485  const auto *FD = cast<FunctionDecl>(GD.getDecl());
3486  assert(FD && "Not a FunctionDecl?");
3487  assert(FD->isCPUDispatchMultiVersion() && "Not a multiversion function?");
3488  const auto *DD = FD->getAttr<CPUDispatchAttr>();
3489  assert(DD && "Not a cpu_dispatch Function?");
3490  llvm::Type *DeclTy = getTypes().ConvertType(FD->getType());
3491 
3492  if (const auto *CXXFD = dyn_cast<CXXMethodDecl>(FD)) {
3493  const CGFunctionInfo &FInfo = getTypes().arrangeCXXMethodDeclaration(CXXFD);
3494  DeclTy = getTypes().GetFunctionType(FInfo);
3495  }
3496 
3497  StringRef ResolverName = getMangledName(GD);
3498  UpdateMultiVersionNames(GD, FD, ResolverName);
3499 
3500  llvm::Type *ResolverType;
3501  GlobalDecl ResolverGD;
3502  if (getTarget().supportsIFunc()) {
3503  ResolverType = llvm::FunctionType::get(
3504  llvm::PointerType::get(DeclTy,
3505  Context.getTargetAddressSpace(FD->getType())),
3506  false);
3507  }
3508  else {
3509  ResolverType = DeclTy;
3510  ResolverGD = GD;
3511  }
3512 
3513  auto *ResolverFunc = cast<llvm::Function>(GetOrCreateLLVMFunction(
3514  ResolverName, ResolverType, ResolverGD, /*ForVTable=*/false));
3515  ResolverFunc->setLinkage(getMultiversionLinkage(*this, GD));
3516  if (supportsCOMDAT())
3517  ResolverFunc->setComdat(
3518  getModule().getOrInsertComdat(ResolverFunc->getName()));
3519 
3520  SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
3521  const TargetInfo &Target = getTarget();
3522  unsigned Index = 0;
3523  for (const IdentifierInfo *II : DD->cpus()) {
3524  // Get the name of the target function so we can look it up/create it.
3525  std::string MangledName = getMangledNameImpl(*this, GD, FD, true) +
3526  getCPUSpecificMangling(*this, II->getName());
3527 
3528  llvm::Constant *Func = GetGlobalValue(MangledName);
3529 
3530  if (!Func) {
3531  GlobalDecl ExistingDecl = Manglings.lookup(MangledName);
3532  if (ExistingDecl.getDecl() &&
3533  ExistingDecl.getDecl()->getAsFunction()->isDefined()) {
3534  EmitGlobalFunctionDefinition(ExistingDecl, nullptr);
3535  Func = GetGlobalValue(MangledName);
3536  } else {
3537  if (!ExistingDecl.getDecl())
3538  ExistingDecl = GD.getWithMultiVersionIndex(Index);
3539 
3540  Func = GetOrCreateLLVMFunction(
3541  MangledName, DeclTy, ExistingDecl,
3542  /*ForVTable=*/false, /*DontDefer=*/true,
3543  /*IsThunk=*/false, llvm::AttributeList(), ForDefinition);
3544  }
3545  }
3546 
3548  Target.getCPUSpecificCPUDispatchFeatures(II->getName(), Features);
3549  llvm::transform(Features, Features.begin(),
3550  [](StringRef Str) { return Str.substr(1); });
3551  llvm::erase_if(Features, [&Target](StringRef Feat) {
3552  return !Target.validateCpuSupports(Feat);
3553  });
3554  Options.emplace_back(cast<llvm::Function>(Func), StringRef{}, Features);
3555  ++Index;
3556  }
3557 
3558  llvm::stable_sort(
3559  Options, [](const CodeGenFunction::MultiVersionResolverOption &LHS,
3560  const CodeGenFunction::MultiVersionResolverOption &RHS) {
3561  return llvm::X86::getCpuSupportsMask(LHS.Conditions.Features) >
3562  llvm::X86::getCpuSupportsMask(RHS.Conditions.Features);
3563  });
3564 
3565  // If the list contains multiple 'default' versions, such as when it contains
3566  // 'pentium' and 'generic', don't emit the call to the generic one (since we
3567  // always run on at least a 'pentium'). We do this by deleting the 'least
3568  // advanced' (read, lowest mangling letter).
3569  while (Options.size() > 1 &&
3570  llvm::X86::getCpuSupportsMask(
3571  (Options.end() - 2)->Conditions.Features) == 0) {
3572  StringRef LHSName = (Options.end() - 2)->Function->getName();
3573  StringRef RHSName = (Options.end() - 1)->Function->getName();
3574  if (LHSName.compare(RHSName) < 0)
3575  Options.erase(Options.end() - 2);
3576  else
3577  Options.erase(Options.end() - 1);
3578  }
3579 
3580  CodeGenFunction CGF(*this);
3581  CGF.EmitMultiVersionResolver(ResolverFunc, Options);
3582 
3583  if (getTarget().supportsIFunc()) {
3584  std::string AliasName = getMangledNameImpl(
3585  *this, GD, FD, /*OmitMultiVersionMangling=*/true);
3586  llvm::Constant *AliasFunc = GetGlobalValue(AliasName);
3587  if (!AliasFunc) {
3588  auto *IFunc = cast<llvm::GlobalIFunc>(GetOrCreateLLVMFunction(
3589  AliasName, DeclTy, GD, /*ForVTable=*/false, /*DontDefer=*/true,
3590  /*IsThunk=*/false, llvm::AttributeList(), NotForDefinition));
3591  auto *GA = llvm::GlobalAlias::create(DeclTy, 0,
3592  getMultiversionLinkage(*this, GD),
3593  AliasName, IFunc, &getModule());
3594  SetCommonAttributes(GD, GA);
3595  }
3596  }
3597 }
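// Illustrative example (not part of this file) of the cpu_dispatch form handled
// above; the function name is hypothetical.
//
//   __attribute__((cpu_specific(ivybridge))) void work(void) { /* AVX path */ }
//   __attribute__((cpu_specific(atom)))      void work(void) { /* Atom path */ }
//   __attribute__((cpu_dispatch(ivybridge, atom))) void work(void);
//
// The resolver sorts candidates by their X86 cpu_supports mask and drops
// redundant 'default'-level versions, as implemented above.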
3598 
3599 /// If a dispatcher for the specified mangled name is not in the module, create
3600 /// and return an llvm Function with the specified type.
3601 llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(
3602  GlobalDecl GD, llvm::Type *DeclTy, const FunctionDecl *FD) {
3603  std::string MangledName =
3604  getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
3605 
3606  // Holds the name of the resolver, in ifunc mode this is the ifunc (which has
3607  // a separate resolver).
3608  std::string ResolverName = MangledName;
3609  if (getTarget().supportsIFunc())
3610  ResolverName += ".ifunc";
3611  else if (FD->isTargetMultiVersion())
3612  ResolverName += ".resolver";
3613 
3614  // If this already exists, just return that one.
3615  if (llvm::GlobalValue *ResolverGV = GetGlobalValue(ResolverName))
3616  return ResolverGV;
3617 
3618  // Since this is the first time we've created this IFunc, make sure
3619  // that we put this multiversioned function into the list to be
3620  // replaced later if necessary (target multiversioning only).
3621  if (FD->isTargetMultiVersion())
3622  MultiVersionFuncs.push_back(GD);
3623  else if (FD->isTargetClonesMultiVersion()) {
3624  // In target_clones multiversioning, make sure we emit this if used.
3625  auto DDI =
3626  DeferredDecls.find(getMangledName(GD.getWithMultiVersionIndex(0)));
3627  if (DDI != DeferredDecls.end()) {
3628  addDeferredDeclToEmit(GD);
3629  DeferredDecls.erase(DDI);
3630  } else {
3631  // Emit the symbol of the 1st variant, so that the deferred decls know we
3632  // need it; otherwise the only global value will be the resolver/ifunc,
3633  // which ends up getting broken if we search for it with GetGlobalValue.
3634  GetOrCreateLLVMFunction(
3635  getMangledName(GD.getWithMultiVersionIndex(0)), DeclTy, FD,
3636  /*ForVTable=*/false, /*DontDefer=*/true,
3637  /*IsThunk=*/false, llvm::AttributeList(), ForDefinition);
3638  }
3639  }
3640 
3641  if (getTarget().supportsIFunc()) {
3642  llvm::Type *ResolverType = llvm::FunctionType::get(
3643  llvm::PointerType::get(
3644  DeclTy, getContext().getTargetAddressSpace(FD->getType())),
3645  false);
3646  llvm::Constant *Resolver = GetOrCreateLLVMFunction(
3647  MangledName + ".resolver", ResolverType, GlobalDecl{},
3648  /*ForVTable=*/false);
3649  llvm::GlobalIFunc *GIF =
3650  llvm::GlobalIFunc::create(DeclTy, 0, llvm::Function::ExternalLinkage,
3651  "", Resolver, &getModule());
3652  GIF->setName(ResolverName);
3653  SetCommonAttributes(FD, GIF);
3654 
3655  return GIF;
3656  }
3657 
3658  llvm::Constant *Resolver = GetOrCreateLLVMFunction(
3659  ResolverName, DeclTy, GlobalDecl{}, /*ForVTable=*/false);
3660  assert(isa<llvm::GlobalValue>(Resolver) &&
3661  "Resolver should be created for the first time");
3662  SetCommonAttributes(FD, cast<llvm::GlobalValue>(Resolver));
3663  return Resolver;
3664 }
3665 
3666 /// GetOrCreateLLVMFunction - If the specified mangled name is not in the
3667 /// module, create and return an llvm Function with the specified type. If there
3668 /// is something in the module with the specified name, return it potentially
3669 /// bitcasted to the right type.
3670 ///
3671 /// If D is non-null, it specifies a decl that corresponds to this. This is used
3672 /// to set the attributes on the function when it is first created.
3673 llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
3674  StringRef MangledName, llvm::Type *Ty, GlobalDecl GD, bool ForVTable,
3675  bool DontDefer, bool IsThunk, llvm::AttributeList ExtraAttrs,
3676  ForDefinition_t IsForDefinition) {
3677  const Decl *D = GD.getDecl();
3678 
3679  // Any attempts to use a MultiVersion function should result in retrieving
3680  // the iFunc instead. Name Mangling will handle the rest of the changes.
3681  if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(D)) {
3682  // For the device mark the function as one that should be emitted.
3683  if (getLangOpts().OpenMPIsDevice && OpenMPRuntime &&
3684  !OpenMPRuntime->markAsGlobalTarget(GD) && FD->isDefined() &&
3685  !DontDefer && !IsForDefinition) {
3686  if (const FunctionDecl *FDDef = FD->getDefinition()) {
3687  GlobalDecl GDDef;
3688  if (const auto *CD = dyn_cast<CXXConstructorDecl>(FDDef))
3689  GDDef = GlobalDecl(CD, GD.getCtorType());
3690  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(FDDef))
3691  GDDef = GlobalDecl(DD, GD.getDtorType());
3692  else
3693  GDDef = GlobalDecl(FDDef);
3694  EmitGlobal(GDDef);
3695  }
3696  }
3697 
3698  if (FD->isMultiVersion()) {
3699  UpdateMultiVersionNames(GD, FD, MangledName);
3700  if (!IsForDefinition)
3701  return GetOrCreateMultiVersionResolver(GD, Ty, FD);
3702  }
3703  }
3704 
3705  // Lookup the entry, lazily creating it if necessary.
3706  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
3707  if (Entry) {
3708  if (WeakRefReferences.erase(Entry)) {
3709  const FunctionDecl *FD = cast_or_null<FunctionDecl>(D);
3710  if (FD && !FD->hasAttr<WeakAttr>())
3711  Entry->setLinkage(llvm::Function::ExternalLinkage);
3712  }
3713 
3714  // Handle dropped DLL attributes.
3715  if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>()) {
3716  Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
3717  setDSOLocal(Entry);
3718  }
3719 
3720  // If there are two attempts to define the same mangled name, issue an
3721  // error.
3722  if (IsForDefinition && !Entry->isDeclaration()) {
3723  GlobalDecl OtherGD;
3724  // Checking that GD is not yet in DiagnosedConflictingDefinitions is required
3725  // to make sure that we issue an error only once.
3726  if (lookupRepresentativeDecl(MangledName, OtherGD) &&
3727  (GD.getCanonicalDecl().getDecl() !=
3728  OtherGD.getCanonicalDecl().getDecl()) &&
3729  DiagnosedConflictingDefinitions.insert(GD).second) {
3730  getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name)
3731  << MangledName;
3732  getDiags().Report(OtherGD.getDecl()->getLocation(),
3733  diag::note_previous_definition);
3734  }
3735  }
3736 
3737  if ((isa<llvm::Function>(Entry) || isa<llvm::GlobalAlias>(Entry)) &&
3738  (Entry->getValueType() == Ty)) {
3739  return Entry;
3740  }
3741 
3742  // Make sure the result is of the correct type.
3743  // (If function is requested for a definition, we always need to create a new
3744  // function, not just return a bitcast.)
3745  if (!IsForDefinition)
3746  return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo());
3747  }
3748 
3749  // This function doesn't have a complete type (for example, the return
3750  // type is an incomplete struct). Use a fake type instead, and make
3751  // sure not to try to set attributes.
3752  bool IsIncompleteFunction = false;
3753 
3754  llvm::FunctionType *FTy;
3755  if (isa<llvm::FunctionType>(Ty)) {
3756  FTy = cast<llvm::FunctionType>(Ty);
3757  } else {
3758  FTy = llvm::FunctionType::get(VoidTy, false);
3759  IsIncompleteFunction = true;
3760  }
3761 
3762  llvm::Function *F =
3763  llvm::Function::Create(FTy, llvm::Function::ExternalLinkage,
3764  Entry ? StringRef() : MangledName, &getModule());
3765 
3766  // If we already created a function with the same mangled name (but different
3767  // type) before, take its name and add it to the list of functions to be
3768  // replaced with F at the end of CodeGen.
3769  //
3770  // This happens if there is a prototype for a function (e.g. "int f()") and
3771  // then a definition of a different type (e.g. "int f(int x)").
3772  if (Entry) {
3773  F->takeName(Entry);
3774 
3775  // This might be an implementation of a function without a prototype, in
3776  // which case, try to do special replacement of calls which match the new
3777  // prototype. The really key thing here is that we also potentially drop
3778  // arguments from the call site so as to make a direct call, which makes the
3779  // inliner happier and suppresses a number of optimizer warnings (!) about
3780  // dropping arguments.
3781  if (!Entry->use_empty()) {
3782  ReplaceUsesOfNonProtoTypeWithRealFunction(Entry, F);
3783  Entry->removeDeadConstantUsers();
3784  }
3785 
3786  llvm::Constant *BC = llvm::ConstantExpr::getBitCast(
3787  F, Entry->getValueType()->getPointerTo());
3788  addGlobalValReplacement(Entry, BC);
3789  }
3790 
3791  assert(F->getName() == MangledName && "name was uniqued!");
3792  if (D)
3793  SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk);
3794  if (ExtraAttrs.hasFnAttrs()) {
3795  llvm::AttrBuilder B(F->getContext(), ExtraAttrs.getFnAttrs());
3796  F->addFnAttrs(B);
3797  }
3798 
3799  if (!DontDefer) {
3800  // All MSVC dtors other than the base dtor are linkonce_odr and delegate to
3801  // each other bottoming out with the base dtor. Therefore we emit non-base
3802  // dtors on usage, even if there is no dtor definition in the TU.
3803  if (D && isa<CXXDestructorDecl>(D) &&
3804  getCXXABI().useThunkForDtorVariant(cast<CXXDestructorDecl>(D),
3805  GD.getDtorType()))
3806  addDeferredDeclToEmit(GD);
3807 
3808  // This is the first use or definition of a mangled name. If there is a
3809  // deferred decl with this name, remember that we need to emit it at the end
3810  // of the file.
3811  auto DDI = DeferredDecls.find(MangledName);
3812  if (DDI != DeferredDecls.end()) {
3813  // Move the potentially referenced deferred decl to the
3814  // DeferredDeclsToEmit list, and remove it from DeferredDecls (since we
3815  // don't need it anymore).
3816  addDeferredDeclToEmit(DDI->second);
3817  DeferredDecls.erase(DDI);
3818 
3819  // Otherwise, there are cases we have to worry about where we're
3820  // using a declaration for which we must emit a definition but where
3821  // we might not find a top-level definition:
3822  // - member functions defined inline in their classes
3823  // - friend functions defined inline in some class
3824  // - special member functions with implicit definitions
3825  // If we ever change our AST traversal to walk into class methods,
3826  // this will be unnecessary.
3827  //
3828  // We also don't emit a definition for a function if it's going to be an
3829  // entry in a vtable, unless it's already marked as used.
3830  } else if (getLangOpts().CPlusPlus && D) {
3831  // Look for a declaration that's lexically in a record.
3832  for (const auto *FD = cast<FunctionDecl>(D)->getMostRecentDecl(); FD;
3833  FD = FD->getPreviousDecl()) {
3834  if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
3835  if (FD->doesThisDeclarationHaveABody()) {
3836  addDeferredDeclToEmit(GD.getWithDecl(FD));
3837  break;
3838  }
3839  }
3840  }
3841  }
3842  }
3843 
3844  // Make sure the result is of the requested type.
3845  if (!IsIncompleteFunction) {
3846  assert(F->getFunctionType() == Ty);
3847  return F;
3848  }
3849 
3850  llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
3851  return llvm::ConstantExpr::getBitCast(F, PTy);
3852 }
3853 
3854 /// GetAddrOfFunction - Return the address of the given function. If Ty is
3855 /// non-null, then this function will use the specified type if it has to
3856 /// create it (this occurs when we see a definition of the function).
3857 llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
3858  llvm::Type *Ty,
3859  bool ForVTable,
3860  bool DontDefer,
3861  ForDefinition_t IsForDefinition) {
3862  assert(!cast<FunctionDecl>(GD.getDecl())->isConsteval() &&
3863  "consteval function should never be emitted");
3864  // If there was no specific requested type, just convert it now.
3865  if (!Ty) {
3866  const auto *FD = cast<FunctionDecl>(GD.getDecl());
3867  Ty = getTypes().ConvertType(FD->getType());
3868  }
3869 
3870  // Devirtualized destructor calls may come through here instead of via
3871  // getAddrOfCXXStructor. Make sure we use the MS ABI base destructor instead
3872  // of the complete destructor when necessary.
3873  if (const auto *DD = dyn_cast<CXXDestructorDecl>(GD.getDecl())) {
3874  if (getTarget().getCXXABI().isMicrosoft() &&
3875  GD.getDtorType() == Dtor_Complete &&
3876  DD->getParent()->getNumVBases() == 0)
3877  GD = GlobalDecl(DD, Dtor_Base);
3878  }
3879 
3880  StringRef MangledName = getMangledName(GD);
3881  auto *F = GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer,
3882  /*IsThunk=*/false, llvm::AttributeList(),
3883  IsForDefinition);
3884  // Returns kernel handle for HIP kernel stub function.
3885  if (LangOpts.CUDA && !LangOpts.CUDAIsDevice &&
3886  cast<FunctionDecl>(GD.getDecl())->hasAttr<CUDAGlobalAttr>()) {
3887  auto *Handle = getCUDARuntime().getKernelHandle(
3888  cast<llvm::Function>(F->stripPointerCasts()), GD);
3889  if (IsForDefinition)
3890  return F;
3891  return llvm::ConstantExpr::getBitCast(Handle, Ty->getPointerTo());
3892  }
3893  return F;
3894 }
3895 
3896 llvm::Constant *CodeGenModule::GetFunctionStart(const ValueDecl *Decl) {
3897  llvm::GlobalValue *F =
3898  cast<llvm::GlobalValue>(GetAddrOfFunction(Decl)->stripPointerCasts());
3899 
3900  return llvm::ConstantExpr::getBitCast(llvm::NoCFIValue::get(F),
3901  llvm::Type::getInt8PtrTy(VMContext));
3902 }
3903 
3904 static const FunctionDecl *
3905 GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) {
3906  TranslationUnitDecl *TUDecl = C.getTranslationUnitDecl();
3907  DeclContext *DC = TUDecl->getPrimaryContext();
3908 
3909  IdentifierInfo &CII = C.Idents.get(Name);
3910  for (const auto *Result : DC->lookup(&CII))
3911  if (const auto *FD = dyn_cast<FunctionDecl>(Result))
3912  return FD;
3913 
3914  if (!C.getLangOpts().CPlusPlus)
3915  return nullptr;
3916 
3917  // Demangle the premangled name from getTerminateFn()
3918  IdentifierInfo &CXXII =
3919  (Name == "_ZSt9terminatev" || Name == "?terminate@@YAXXZ")
3920  ? C.Idents.get("terminate")
3921  : C.Idents.get(Name);
3922 
3923  for (const auto &N : {"__cxxabiv1", "std"}) {
3924  IdentifierInfo &NS = C.Idents.get(N);
3925  for (const auto *Result : DC->lookup(&NS)) {
3926  const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(Result);
3927  if (auto *LSD = dyn_cast<LinkageSpecDecl>(Result))
3928  for (const auto *Result : LSD->lookup(&NS))
3929  if ((ND = dyn_cast<NamespaceDecl>(Result)))
3930  break;
3931 
3932  if (ND)
3933  for (const auto *Result : ND->lookup(&CXXII))
3934  if (const auto *FD = dyn_cast<FunctionDecl>(Result))
3935  return FD;
3936  }
3937  }
3938 
3939  return nullptr;
3940 }
3941 
3942 /// CreateRuntimeFunction - Create a new runtime function with the specified
3943 /// type and name.
3944 llvm::FunctionCallee
3945 CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
3946  llvm::AttributeList ExtraAttrs, bool Local,
3947  bool AssumeConvergent) {
3948  if (AssumeConvergent) {
3949  ExtraAttrs =
3950  ExtraAttrs.addFnAttribute(VMContext, llvm::Attribute::Convergent);
3951  }
3952 
3953  llvm::Constant *C =
3954  GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
3955  /*DontDefer=*/false, /*IsThunk=*/false,
3956  ExtraAttrs);
3957 
3958  if (auto *F = dyn_cast<llvm::Function>(C)) {
3959  if (F->empty()) {
3960  F->setCallingConv(getRuntimeCC());
3961 
3962  // In Windows Itanium environments, try to mark runtime functions
3963  // dllimport. For Mingw and MSVC, don't. We don't really know if the user
3964  // will link their standard library statically or dynamically. Marking
3965  // functions imported when they are not imported can cause linker errors
3966  // and warnings.
3967  if (!Local && getTriple().isWindowsItaniumEnvironment() &&
3968  !getCodeGenOpts().LTOVisibilityPublicStd) {
3969  const FunctionDecl *FD = GetRuntimeFunctionDecl(Context, Name);
3970  if (!FD || FD->hasAttr<DLLImportAttr>()) {
3971  F->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
3972  F->setLinkage(llvm::GlobalValue::ExternalLinkage);
3973  }
3974  }
3975  setDSOLocal(F);
3976  }
3977  }
3978 
3979  return {FTy, C};
3980 }
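// Minimal caller sketch (assumption: "__example_runtime_hook" is a hypothetical
// runtime entry point, and CGM/Builder/Arg stand for a CodeGenModule, an
// IRBuilder, and an i8* value available at the call site):
//
//   llvm::FunctionType *FTy =
//       llvm::FunctionType::get(CGM.VoidTy, {CGM.Int8PtrTy}, /*isVarArg=*/false);
//   llvm::FunctionCallee Hook =
//       CGM.CreateRuntimeFunction(FTy, "__example_runtime_hook");
//   Builder.CreateCall(Hook, {Arg});
//
// Repeated calls with the same name reuse the declaration already in the module.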
3981 
3982 /// isTypeConstant - Determine whether an object of this type can be emitted
3983 /// as a constant.
3984 ///
3985 /// If ExcludeCtor is true, the duration when the object's constructor runs
3986 /// will not be considered. The caller will need to verify that the object is
3987 /// not written to during its construction.
3988 bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) {
3989  if (!Ty.isConstant(Context) && !Ty->isReferenceType())
3990  return false;
3991 
3992  if (Context.getLangOpts().CPlusPlus) {
3993  if (const CXXRecordDecl *Record
3994  = Context.getBaseElementType(Ty)->getAsCXXRecordDecl())
3995  return ExcludeCtor && !Record->hasMutableFields() &&
3996  Record->hasTrivialDestructor();
3997  }
3998 
3999  return true;
4000 }
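// Illustrative example (not part of this file) of what the predicate above
// accepts and rejects; the type names are hypothetical.
//
//   const int K = 42;                  // constant: no mutable state, trivial dtor
//   struct Cache { mutable int hits; };
//   const Cache C{};                   // not constant: mutable field may be written
//
// With ExcludeCtor the constructor's stores are ignored, but mutable fields and
// non-trivial destructors still disqualify the object.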
4001 
4002 /// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
4003 /// create and return an llvm GlobalVariable with the specified type and address
4004 /// space. If there is something in the module with the specified name, return
4005 /// it potentially bitcasted to the right type.
4006 ///
4007 /// If D is non-null, it specifies a decl that corresponds to this. This is used
4008 /// to set the attributes on the global when it is first created.
4009 ///
4010 /// If IsForDefinition is true, it is guaranteed that an actual global with
4011 /// type Ty will be returned, not conversion of a variable with the same
4012 /// mangled name but some other type.
4013 llvm::Constant *
4014 CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
4015  LangAS AddrSpace, const VarDecl *D,
4016  ForDefinition_t IsForDefinition) {
4017  // Lookup the entry, lazily creating it if necessary.
4018  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
4019  unsigned TargetAS = getContext().getTargetAddressSpace(AddrSpace);
4020  if (Entry) {
4021  if (WeakRefReferences.erase(Entry)) {
4022  if (D && !D->hasAttr<WeakAttr>())
4023  Entry->setLinkage(llvm::Function::ExternalLinkage);
4024  }
4025 
4026  // Handle dropped DLL attributes.
4027  if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>())
4028  Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
4029 
4030  if (LangOpts.OpenMP && !LangOpts.OpenMPSimd && D)
4031  getOpenMPRuntime().registerTargetGlobalVariable(D, Entry);
4032 
4033  if (Entry->getValueType() == Ty && Entry->getAddressSpace() == TargetAS)
4034  return Entry;
4035 
4036  // If there are two attempts to define the same mangled name, issue an
4037  // error.
4038  if (IsForDefinition && !Entry->isDeclaration()) {
4039  GlobalDecl OtherGD;
4040  const VarDecl *OtherD;
4041 
4042  // Checking that D is not yet in DiagnosedConflictingDefinitions is required
4043  // to make sure that we issue an error only once.
4044  if (D && lookupRepresentativeDecl(MangledName, OtherGD) &&
4045  (D->getCanonicalDecl() != OtherGD.getCanonicalDecl().getDecl()) &&
4046  (OtherD = dyn_cast<VarDecl>(OtherGD.getDecl())) &&
4047  OtherD->hasInit() &&
4048  DiagnosedConflictingDefinitions.insert(D).second) {
4049  getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name)
4050  << MangledName;
4051  getDiags().Report(OtherGD.getDecl()->getLocation(),
4052  diag::note_previous_definition);
4053  }
4054  }
4055 
4056  // Make sure the result is of the correct type.
4057  if (Entry->getType()->getAddressSpace() != TargetAS) {
4058  return llvm::ConstantExpr::getAddrSpaceCast(Entry,
4059  Ty->getPointerTo(TargetAS));
4060  }
4061 
4062  // (If global is requested for a definition, we always need to create a new
4063  // global, not just return a bitcast.)
4064  if (!IsForDefinition)
4065  return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo(TargetAS));
4066  }
4067 
4068  auto DAddrSpace = GetGlobalVarAddressSpace(D);
4069 
4070  auto *GV = new llvm::GlobalVariable(
4071  getModule(), Ty, false, llvm::GlobalValue::ExternalLinkage, nullptr,
4072  MangledName, nullptr, llvm::GlobalVariable::NotThreadLocal,
4073  getContext().getTargetAddressSpace(DAddrSpace));
4074 
4075  // If we already created a global with the same mangled name (but different
4076  // type) before, take its name and remove it from its parent.
4077  if (Entry) {
4078  GV->takeName(Entry);
4079 
4080  if (!Entry->use_empty()) {
4081  llvm::Constant *NewPtrForOldDecl =
4082  llvm::ConstantExpr::getBitCast(GV, Entry->getType());
4083  Entry->replaceAllUsesWith(NewPtrForOldDecl);
4084  }
4085 
4086  Entry->eraseFromParent();
4087  }
4088 
4089  // This is the first use or definition of a mangled name. If there is a
4090  // deferred decl with this name, remember that we need to emit it at the end
4091  // of the file.
4092  auto DDI = DeferredDecls.find(MangledName);
4093  if (DDI != DeferredDecls.end()) {
4094  // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
4095  // list, and remove it from DeferredDecls (since we don't need it anymore).
4096  addDeferredDeclToEmit(DDI->second);
4097  DeferredDecls.erase(DDI);
4098  }
4099 
4100  // Handle things which are present even on external declarations.
4101  if (D) {
4102  if (LangOpts.OpenMP && !LangOpts.OpenMPSimd)
4103  getOpenMPRuntime().registerTargetGlobalVariable(D, GV);
4104 
4105  // FIXME: This code is overly simple and should be merged with other global
4106  // handling.
4107  GV->setConstant(isTypeConstant(D->getType(), false));
4108 
4109  GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());
4110 
4111  setLinkageForGV(GV, D);
4112 
4113  if (D->getTLSKind()) {
4114  if (D->getTLSKind() == VarDecl::TLS_Dynamic)
4115  CXXThreadLocals.push_back(D);
4116  setTLSMode(GV, *D);
4117  }
4118 
4119  setGVProperties(GV, D);
4120 
4121  // If required by the ABI, treat declarations of static data members with
4122  // inline initializers as definitions.
4123  if (getContext().isMSStaticDataMemberInlineDefinition(D)) {
4124  EmitGlobalVarDefinition(D);
4125  }
4126 
4127  // Emit section information for extern variables.
4128  if (D->hasExternalStorage()) {
4129  if (const SectionAttr *SA = D->getAttr<SectionAttr>())
4130  GV->setSection(SA->getName());
4131  }
4132 
4133  // Handle XCore specific ABI requirements.
4134  if (getTriple().getArch() == llvm::Triple::xcore &&
4135  D->getLanguageLinkage() == CLanguageLinkage &&
4136  D->getType().isConstant(Context) &&
4137  isExternallyVisible(D->getLinkageAndVisibility().getLinkage()))
4138  GV->setSection(".cp.rodata");
4139 
4140  // Check if we have a const declaration with an initializer; we may be
4141  // able to emit it as available_externally to expose its value to the
4142  // optimizer.
4143  if (Context.getLangOpts().CPlusPlus && GV->hasExternalLinkage() &&
4144  D->getType().isConstQualified() && !GV->hasInitializer() &&
4145  !D->hasDefinition() && D->hasInit() && !D->hasAttr<DLLImportAttr>()) {
4146  const auto *Record =
4147  Context.getBaseElementType(D->getType())->getAsCXXRecordDecl();
4148  bool HasMutableFields = Record && Record->hasMutableFields();
4149  if (!HasMutableFields) {
4150  const VarDecl *InitDecl;
4151  const Expr *InitExpr = D->getAnyInitializer(InitDecl);
4152  if (InitExpr) {
4153  ConstantEmitter emitter(*this);
4154  llvm::Constant *Init = emitter.tryEmitForInitializer(*InitDecl);
4155  if (Init) {
4156  auto *InitType = Init->getType();
4157  if (GV->getValueType() != InitType) {
4158  // The type of the initializer does not match the definition.
4159  // This happens when an initializer has a different type from
4160  // the type of the global (because of padding at the end of a
4161  // structure for instance).
4162  GV->setName(StringRef());
4163  // Make a new global with the correct type, this is now guaranteed
4164  // to work.
4165  auto *NewGV = cast<llvm::GlobalVariable>(
4166  GetAddrOfGlobalVar(D, InitType, IsForDefinition)
4167  ->stripPointerCasts());
4168 
4169  // Erase the old global, since it is no longer used.
4170  GV->eraseFromParent();
4171  GV = NewGV;
4172  } else {
4173  GV->setInitializer(Init);
4174  GV->setConstant(true);
4175  GV->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
4176  }
4177  emitter.finalize(GV);
4178  }
4179  }
4180  }
4181  }
4182  }
4183 
4184  if (GV->isDeclaration()) {
4185  getTargetCodeGenInfo().setTargetAttributes(D, GV, *this);
4186  // External HIP managed variables need to be recorded for transformation
4187  // in both device and host compilations.
4188  if (getLangOpts().CUDA && D && D->hasAttr<HIPManagedAttr>() &&
4189  D->hasExternalStorage())
4190  getCUDARuntime().handleVarRegistration(D, *GV);
4191  }
4192 
4193  LangAS ExpectedAS =
4194  D ? D->getType().getAddressSpace()
4195  : (LangOpts.OpenCL ? LangAS::opencl_global : LangAS::Default);
4196  assert(getContext().getTargetAddressSpace(ExpectedAS) == TargetAS);
4197  if (DAddrSpace != ExpectedAS) {
4198  return getTargetCodeGenInfo().performAddrSpaceCast(
4199  *this, GV, DAddrSpace, ExpectedAS, Ty->getPointerTo(TargetAS));
4200  }
4201 
4202  return GV;
4203 }
4204 
4205 llvm::Constant *
4206 CodeGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) {
4207  const Decl *D = GD.getDecl();
4208 
4209  if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
4210  return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr,
4211  /*DontDefer=*/false, IsForDefinition);
4212 
4213  if (isa<CXXMethodDecl>(D)) {
4214  auto FInfo =
4215  &getTypes().arrangeCXXMethodDeclaration(cast<CXXMethodDecl>(D));
4216  auto Ty = getTypes().GetFunctionType(*FInfo);
4217  return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
4218  IsForDefinition);
4219  }
4220 
4221  if (isa<FunctionDecl>(D)) {
4222  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
4223  llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
4224  return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
4225  IsForDefinition);
4226  }
4227 
4228  return GetAddrOfGlobalVar(cast<VarDecl>(D), /*Ty=*/nullptr, IsForDefinition);
4229 }
4230 
4231 llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable(
4232  StringRef Name, llvm::Type *Ty, llvm::GlobalValue::LinkageTypes Linkage,
4233  unsigned Alignment) {
4234  llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name);
4235  llvm::GlobalVariable *OldGV = nullptr;
4236 
4237  if (GV) {
4238  // Check if the variable has the right type.
4239  if (GV->getValueType() == Ty)
4240  return GV;
4241 
4242  // Because of C++ name mangling, the only way we can end up with an already
4243  // existing global with the same name is if it has been declared extern "C".
4244  assert(GV->isDeclaration() && "Declaration has wrong type!");
4245  OldGV = GV;
4246  }
4247 
4248  // Create a new variable.
4249  GV = new llvm::GlobalVariable(getModule(), Ty, /*isConstant=*/true,
4250  Linkage, nullptr, Name);
4251 
4252  if (OldGV) {
4253  // Replace occurrences of the old variable if needed.
4254  GV->takeName(OldGV);
4255 
4256  if (!OldGV->use_empty()) {
4257  llvm::Constant *NewPtrForOldDecl =
4258  llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
4259  OldGV->replaceAllUsesWith(NewPtrForOldDecl);
4260  }
4261 
4262  OldGV->eraseFromParent();
4263  }
4264 
4265  if (supportsCOMDAT() && GV->isWeakForLinker() &&
4266  !GV->hasAvailableExternallyLinkage())
4267  GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
4268 
4269  GV->setAlignment(llvm::MaybeAlign(Alignment));
4270 
4271  return GV;
4272 }
4273 
4274 /// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
4275 /// given global variable. If Ty is non-null and if the global doesn't exist,
4276 /// then it will be created with the specified type instead of whatever the
4277 /// normal requested type would be. If IsForDefinition is true, it is guaranteed
4278 /// that an actual global with type Ty will be returned, not conversion of a
4279 /// variable with the same mangled name but some other type.
4280 llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
4281  llvm::Type *Ty,
4282  ForDefinition_t IsForDefinition) {
4283  assert(D->hasGlobalStorage() && "Not a global variable");
4284  QualType ASTTy = D->getType();
4285  if (!Ty)
4286  Ty = getTypes().ConvertTypeForMem(ASTTy);
4287 
4288  StringRef MangledName = getMangledName(D);
4289  return GetOrCreateLLVMGlobal(MangledName, Ty, ASTTy.getAddressSpace(), D,
4290  IsForDefinition);
4291 }
4292 
4293 /// CreateRuntimeVariable - Create a new runtime global variable with the
4294 /// specified type and name.
4295 llvm::Constant *
4296 CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
4297  StringRef Name) {
4298  LangAS AddrSpace = getContext().getLangOpts().OpenCL ? LangAS::opencl_global
4299  : LangAS::Default;
4300  auto *Ret = GetOrCreateLLVMGlobal(Name, Ty, AddrSpace, nullptr);
4301  setDSOLocal(cast<llvm::GlobalValue>(Ret->stripPointerCasts()));
4302  return Ret;
4303 }
4304 
4305 void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
4306  assert(!D->getInit() && "Cannot emit definite definitions here!");
4307 
4308  StringRef MangledName = getMangledName(D);
4309  llvm::GlobalValue *GV = GetGlobalValue(MangledName);
4310 
4311  // We already have a definition, not declaration, with the same mangled name.
4312  // Emitting of declaration is not required (and actually overwrites emitted
4313  // definition).
4314  if (GV && !GV->isDeclaration())
4315  return;
4316 
4317  // If we have not seen a reference to this variable yet, place it into the
4318  // deferred declarations table to be emitted if needed later.
4319  if (!MustBeEmitted(D) && !GV) {
4320  DeferredDecls[MangledName] = D;
4321  return;
4322  }
4323 
4324  // The tentative definition is the only definition.
4325  EmitGlobalVarDefinition(D);
4326 }
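// Illustrative example (not part of this file), C translation unit:
//
//   static int counter;   // tentative definition, internal linkage
//   static int counter;   // repeating it is still just a tentative definition
//
// At end of TU this lands here: if the variable was never referenced and need
// not be emitted, it only goes into DeferredDecls; otherwise it is emitted as a
// zero-initialized ({ 0 }) definition by EmitGlobalVarDefinition().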
4327 
4328 void CodeGenModule::EmitExternalDeclaration(const VarDecl *D) {
4329  EmitExternalVarDeclaration(D);
4330 }
4331 
4332 CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
4333  return Context.toCharUnitsFromBits(
4334  getDataLayout().getTypeStoreSizeInBits(Ty));
4335 }
4336 
4337 LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) {
4338  if (LangOpts.OpenCL) {
4339  LangAS AS = D ? D->getType().getAddressSpace() : LangAS::opencl_global;
4340  assert(AS == LangAS::opencl_global ||
4341  AS == LangAS::opencl_global_device ||
4342  AS == LangAS::opencl_global_host ||
4343  AS == LangAS::opencl_constant ||
4344  AS == LangAS::opencl_local ||
4345  AS >= LangAS::FirstTargetAddressSpace);
4346  return AS;
4347  }
4348 
4349  if (LangOpts.SYCLIsDevice &&
4350  (!D || D->getType().getAddressSpace() == LangAS::Default))
4351  return LangAS::sycl_global;
4352 
4353  if (LangOpts.CUDA && LangOpts.CUDAIsDevice) {
4354  if (D && D->hasAttr<CUDAConstantAttr>())
4355  return LangAS::cuda_constant;
4356  else if (D && D->hasAttr<CUDASharedAttr>())
4357  return LangAS::cuda_shared;
4358  else if (D && D->hasAttr<CUDADeviceAttr>())
4359  return LangAS::cuda_device;
4360  else if (D && D->getType().isConstQualified())
4361  return LangAS::cuda_constant;
4362  else
4363  return LangAS::cuda_device;
4364  }
4365 
4366  if (LangOpts.OpenMP) {
4367  LangAS AS;
4368  if (OpenMPRuntime->hasAllocateAttributeForGlobalVar(D, AS))
4369  return AS;
4370  }
4371  return getTargetCodeGenInfo().getGlobalVarAddressSpace(*this, D);
4372 }
4373 
4374 LangAS CodeGenModule::GetGlobalConstantAddressSpace() const {
4375  // OpenCL v1.2 s6.5.3: a string literal is in the constant address space.
4376  if (LangOpts.OpenCL)
4377  return LangAS::opencl_constant;
4378  if (LangOpts.SYCLIsDevice)
4379  return LangAS::sycl_global;
4380  if (auto AS = getTarget().getConstantAddressSpace())
4381  return AS.getValue();
4382  return LangAS::Default;
4383 }
4384 
4385 // In address space agnostic languages, string literals are in default address
4386 // space in AST. However, certain targets (e.g. amdgcn) request them to be
4387 // emitted in constant address space in LLVM IR. To be consistent with other
4388 // parts of AST, string literal global variables in constant address space
4389 // need to be casted to default address space before being put into address
4390 // map and referenced by other part of CodeGen.
4391 // In OpenCL, string literals are in constant address space in AST, therefore
4392 // they should not be casted to default address space.
4393 static llvm::Constant *
4394 castStringLiteralToDefaultAddressSpace(CodeGenModule &CGM,
4395  llvm::GlobalVariable *GV) {
4396  llvm::Constant *Cast = GV;
4397  if (!CGM.getLangOpts().OpenCL) {
4398  auto AS = CGM.GetGlobalConstantAddressSpace();
4399  if (AS != LangAS::Default)
4400  Cast = CGM.getTargetCodeGenInfo().performAddrSpaceCast(
4401  CGM, GV, AS, LangAS::Default,
4402  GV->getValueType()->getPointerTo(
4403  CGM.getContext().getTargetAddressSpace(LangAS::Default)));
4404  }
4405  return Cast;
4406 }
4407 
4408 template<typename SomeDecl>
4409 void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D,
4410  llvm::GlobalValue *GV) {
4411  if (!getLangOpts().CPlusPlus)
4412  return;
4413 
4414  // Must have 'used' attribute, or else inline assembly can't rely on
4415  // the name existing.
4416  if (!D->template hasAttr<UsedAttr>())
4417  return;
4418 
4419  // Must have internal linkage and an ordinary name.
4420  if (!D->getIdentifier() || D->getFormalLinkage() != InternalLinkage)
4421  return;
4422 
4423  // Must be in an extern "C" context. Entities declared directly within
4424  // a record are not extern "C" even if the record is in such a context.
4425  const SomeDecl *First = D->getFirstDecl();
4426  if (First->getDeclContext()->isRecord() || !First->isInExternCContext())
4427  return;
4428 
4429  // OK, this is an internal linkage entity inside an extern "C" linkage
4430  // specification. Make a note of that so we can give it the "expected"
4431  // mangled name if nothing else is using that name.
4432  std::pair<StaticExternCMap::iterator, bool> R =
4433  StaticExternCValues.insert(std::make_pair(D->getIdentifier(), GV));
4434 
4435  // If we have multiple internal linkage entities with the same name
4436  // in extern "C" regions, none of them gets that name.
4437  if (!R.second)
4438  R.first->second = nullptr;
4439 }
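// Illustrative example (not part of this file) of the case handled above; the
// name is hypothetical.
//
//   extern "C" {
//     __attribute__((used)) static int counter;  // internal linkage, extern "C"
//   }
//
// If no other global claims the name "counter", this entity gets that unmangled
// name; if two such internal-linkage entities collide, neither gets it.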
4440 
4441 static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) {
4442  if (!CGM.supportsCOMDAT())
4443  return false;
4444 
4445  if (D.hasAttr<SelectAnyAttr>())
4446  return true;
4447 
4448  GVALinkage Linkage;
4449  if (auto *VD = dyn_cast<VarDecl>(&D))
4450  Linkage = CGM.getContext().GetGVALinkageForVariable(VD);
4451  else
4452  Linkage = CGM.getContext().GetGVALinkageForFunction(cast<FunctionDecl>(&D));
4453 
4454  switch (Linkage) {
4455  case GVA_Internal:
4456  case GVA_AvailableExternally:
4457  case GVA_StrongExternal:
4458  return false;
4459  case GVA_DiscardableODR:
4460  case GVA_StrongODR:
4461  return true;
4462  }
4463  llvm_unreachable("No such linkage");
4464 }
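// Illustrative example (not part of this file): entities that get a trivial
// COMDAT on targets that support it; names are hypothetical, and selectany
// requires Microsoft extensions.
//
//   inline int shared_once(void) { return 1; }    // GVA_DiscardableODR
//   __declspec(selectany) int shared_var = 0;     // SelectAnyAttr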
4465 
4466 void CodeGenModule::maybeSetTrivialComdat(const Decl &D,
4467  llvm::GlobalObject &GO) {
4468  if (!shouldBeInCOMDAT(*this, D))
4469  return;
4470  GO.setComdat(TheModule.getOrInsertComdat(GO.getName()));
4471 }
4472 
4473 /// Pass IsTentative as true if you want to create a tentative definition.
4474 void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
4475  bool IsTentative) {
4476  // OpenCL global variables of sampler type are translated to function calls,
4477  // therefore no need to be translated.
4478  QualType ASTTy = D->getType();
4479  if (getLangOpts().OpenCL && ASTTy->isSamplerT())
4480  return;
4481 
4482  // If this is OpenMP device, check if it is legal to emit this global
4483  // normally.
4484  if (LangOpts.OpenMPIsDevice && OpenMPRuntime &&
4485  OpenMPRuntime->emitTargetGlobalVariable(D))
4486  return;
4487 
4488  llvm::TrackingVH<llvm::Constant> Init;
4489  bool NeedsGlobalCtor = false;
4490  bool NeedsGlobalDtor =
4491  D->needsDestruction(getContext()) == QualType::DK_cxx_destructor;
4492 
4493  const VarDecl *InitDecl;
4494  const Expr *InitExpr = D->getAnyInitializer(InitDecl);
4495 
4496  Optional<ConstantEmitter> emitter;
4497 
4498  // CUDA E.2.4.1 "__shared__ variables cannot have an initialization
4499  // as part of their declaration." Sema has already checked for
4500  // error cases, so we just need to set Init to UndefValue.
4501  bool IsCUDASharedVar =
4502  getLangOpts().CUDAIsDevice && D->hasAttr<CUDASharedAttr>();
4503  // Shadows of initialized device-side global variables are also left
4504  // undefined.
4505  // Managed Variables should be initialized on both host side and device side.
4506  bool IsCUDAShadowVar =
4507  !getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() &&
4508  (D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
4509  D->hasAttr<CUDASharedAttr>());
4510  bool IsCUDADeviceShadowVar =
4511  getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() &&
4512  (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
4513  D->getType()->isCUDADeviceBuiltinTextureType());
4514  if (getLangOpts().CUDA &&
4515  (IsCUDASharedVar || IsCUDAShadowVar || IsCUDADeviceShadowVar))
4516  Init = llvm::UndefValue::get(getTypes().ConvertTypeForMem(ASTTy));
4517  else if (D->hasAttr<LoaderUninitializedAttr>())
4518  Init = llvm::UndefValue::get(getTypes().ConvertTypeForMem(ASTTy));
4519  else if (!InitExpr) {
4520  // This is a tentative definition; tentative definitions are
4521  // implicitly initialized with { 0 }.
4522  //
4523  // Note that tentative definitions are only emitted at the end of
4524  // a translation unit, so they should never have incomplete
4525  // type. In addition, EmitTentativeDefinition makes sure that we
4526  // never attempt to emit a tentative definition if a real one
4527  // exists. A use may still exists, however, so we still may need
4528  // to do a RAUW.
4529  assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
4530  Init = EmitNullConstant(D->getType());
4531  } else {
4532  initializedGlobalDecl = GlobalDecl(D);
4533  emitter.emplace(*this);
4534  llvm::Constant *Initializer = emitter->tryEmitForInitializer(*InitDecl);
4535  if (!Initializer) {
4536  QualType T = InitExpr->getType();
4537  if (D->getType()->isReferenceType())
4538  T = D->getType();
4539 
4540  if (getLangOpts().CPlusPlus) {
4541  Init = EmitNullConstant(T);
4542  NeedsGlobalCtor = true;
4543  } else {
4544  ErrorUnsupported(D, "static initializer");
4545  Init = llvm::UndefValue::get(getTypes().ConvertType(T));
4546  }
4547  } else {
4548  Init = Initializer;
4549  // We don't need an initializer, so remove the entry for the delayed
4550  // initializer position (just in case this entry was delayed) if we
4551  // also don't need to register a destructor.
4552  if (getLangOpts().CPlusPlus && !NeedsGlobalDtor)
4553  DelayedCXXInitPosition.erase(D);
4554  }
4555  }
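// To illustrate the initializer paths above (hypothetical user code, not part
// of this file):
//   int a;         // C tentative definition: zero-filled via EmitNullConstant
//   int b = 42;    // constant-evaluable: tryEmitForInitializer succeeds
//   int c = g();   // C++ dynamic init: null initializer now, NeedsGlobalCtor
//                  // makes a __cxx_global_var_init-style function run g()
// In C, an initializer that the constant emitter cannot fold ends up in the
// ErrorUnsupported("static initializer") branch instead.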
4556 
4557  llvm::Type* InitType = Init->getType();
4558  llvm::Constant *Entry =
4559  GetAddrOfGlobalVar(D, InitType, ForDefinition_t(!IsTentative));
4560 
4561  // Strip off pointer casts if we got them.
4562  Entry = Entry->stripPointerCasts();
4563 
4564  // Entry is now either a Function or GlobalVariable.
4565  auto *GV = dyn_cast<llvm::GlobalVariable>(Entry);
4566 
4567  // We have a definition after a declaration with the wrong type.
4568  // We must make a new GlobalVariable* and update everything that used OldGV
4569  // (a declaration or tentative definition) with the new GlobalVariable*
4570  // (which will be a definition).
4571  //
4572  // This happens if there is a prototype for a global (e.g.
4573  // "extern int x[];") and then a definition of a different type (e.g.
4574  // "int x[10];"). This also happens when an initializer has a different type
4575  // from the type of the global (this happens with unions).
4576  if (!GV || GV->getValueType() != InitType ||
4577  GV->getType()->getAddressSpace() !=
4578  getContext().getTargetAddressSpace(GetGlobalVarAddressSpace(D))) {
4579 
4580  // Move the old entry aside so that we'll create a new one.
4581  Entry->setName(StringRef());
4582 
4583  // Make a new global with the correct type, this is now guaranteed to work.
4584  GV = cast<llvm::GlobalVariable>(
4585  GetAddrOfGlobalVar(D, InitType, ForDefinition_t(!IsTentative))
4586  ->stripPointerCasts());
4587 
4588  // Replace all uses of the old global with the new global
4589  llvm::Constant *NewPtrForOldDecl =
4590  llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
4591  Entry->getType());
4592  Entry->replaceAllUsesWith(NewPtrForOldDecl);
4593 
4594  // Erase the old global, since it is no longer used.
4595  cast<llvm::GlobalValue>(Entry)->eraseFromParent();
4596  }
4597 
4598  MaybeHandleStaticInExternC(D, GV);
4599 
4600  if (D->hasAttr<AnnotateAttr>())
4601  AddGlobalAnnotations(D, GV);
4602 
4603  // Set the llvm linkage type as appropriate.
4604  llvm::GlobalValue::LinkageTypes Linkage =
4605  getLLVMLinkageVarDefinition(D, GV->isConstant());
4606 
4607  // CUDA B.2.1 "The __device__ qualifier declares a variable that resides on
4608  // the device. [...]"
4609  // CUDA B.2.2 "The __constant__ qualifier, optionally used together with
4610  // __device__, declares a variable that: [...]
4611  // Is accessible from all the threads within the grid and from the host
4612  // through the runtime library (cudaGetSymbolAddress() / cudaGetSymbolSize()
4613  // / cudaMemcpyToSymbol() / cudaMemcpyFromSymbol())."
4614  if (GV && LangOpts.CUDA) {
4615  if (LangOpts.CUDAIsDevice) {
4616  if (Linkage != llvm::GlobalValue::InternalLinkage &&
4617  (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
4618  D->getType()->isCUDADeviceBuiltinSurfaceType() ||
4619  D->getType()->isCUDADeviceBuiltinTextureType()))
4620  GV->setExternallyInitialized(true);
4621  } else {
4622  getCUDARuntime().internalizeDeviceSideVar(D, Linkage);
4623  }
4624  getCUDARuntime().handleVarRegistration(D, *GV);
4625  }
4626 
4627  GV->setInitializer(Init);
4628  if (emitter)
4629  emitter->finalize(GV);
4630 
4631  // If it is safe to mark the global 'constant', do so now.
4632  GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
4633  isTypeConstant(D->getType(), true));
4634 
4635  // If it is in a read-only section, mark it 'constant'.
4636  if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
4637  const ASTContext::SectionInfo &SI = Context.SectionInfos[SA->getName()];
4638  if ((SI.SectionFlags & ASTContext::PSF_Write) == 0)
4639  GV->setConstant(true);
4640  }
4641 
4642  GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());
4643 
4644  // On Darwin, unlike other Itanium C++ ABI platforms, the thread-wrapper
4645  // function is only defined alongside the variable, not also alongside
4646  // callers. Normally, all accesses to a thread_local go through the
4647  // thread-wrapper in order to ensure initialization has occurred, so the
4648  // underlying variable will never be used other than through the
4649  // thread-wrapper and can therefore be converted to internal linkage.
4650  //
4651  // However, if the variable has the 'constinit' attribute, it _can_ be
4652  // referenced directly, without calling the thread-wrapper, so the linkage
4653  // must not be changed.
4654  //
4655  // Additionally, if the variable isn't plain external linkage, e.g. if it's
4656  // weak or linkonce, the de-duplication semantics are important to preserve,
4657  // so we don't change the linkage.
4658  if (D->getTLSKind() == VarDecl::TLS_Dynamic &&
4659  Linkage == llvm::GlobalValue::ExternalLinkage &&
4660  Context.getTargetInfo().getTriple().isOSDarwin() &&
4661  !D->hasAttr<ConstInitAttr>())
4662  Linkage = llvm::GlobalValue::InternalLinkage;
4663 
4664  GV->setLinkage(Linkage);
4665  if (D->hasAttr<DLLImportAttr>())
4666  GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
4667  else if (D->hasAttr<DLLExportAttr>())
4668  GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
4669  else
4670  GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
4671 
4672  if (Linkage == llvm::GlobalVariable::CommonLinkage) {
4673  // common vars aren't constant even if declared const.
4674  GV->setConstant(false);
4675  // Tentative definitions of global variables may be initialized with
4676  // non-zero null pointers. In that case they should have weak linkage,
4677  // since common linkage requires a zero initializer and forbids an
4678  // explicit section, so it cannot carry a non-zero initial value.
4679  if (!GV->getInitializer()->isNullValue())
4680  GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
4681  }
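// For example (hypothetical C, compiled with -fcommon), the tentative
// definition
//   int t;
// is emitted roughly as "@t = common global i32 0, align 4". On a target
// whose null value is not all-zero bits (e.g. a non-zero null pointer), the
// initializer would be non-null, so the code above demotes the variable to
// weak linkage, because common symbols must be zero-filled and section-less.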
4682 
4683  setNonAliasAttributes(D, GV);
4684 
4685  if (D->getTLSKind() && !GV->isThreadLocal()) {
4686  if (D->getTLSKind() == VarDecl::TLS_Dynamic)
4687  CXXThreadLocals.push_back(D);
4688  setTLSMode(GV, *D);
4689  }
4690 
4691  maybeSetTrivialComdat(*D, *GV);
4692 
4693  // Emit the initializer function if necessary.
4694  if (NeedsGlobalCtor || NeedsGlobalDtor)
4695  EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
4696 
4697  SanitizerMD->reportGlobalToASan(GV, *D, NeedsGlobalCtor);
4698 
4699  // Emit global variable debug information.
4700  if (CGDebugInfo *DI = getModuleDebugInfo())
4701  if (getCodeGenOpts().hasReducedDebugInfo())
4702  DI->EmitGlobalVariable(GV, D);
4703 }
4704 
4705 void CodeGenModule::EmitExternalVarDeclaration(const VarDecl *D) {
4706  if (CGDebugInfo *DI = getModuleDebugInfo())
4707  if (getCodeGenOpts().hasReducedDebugInfo()) {
4708  QualType ASTTy = D->getType();
4709  llvm::Type *Ty = getTypes().ConvertTypeForMem(D->getType());
4710  llvm::Constant *GV =
4711  GetOrCreateLLVMGlobal(D->getName(), Ty, ASTTy.getAddressSpace(), D);
4712  DI->EmitExternalVariable(
4713  cast<llvm::GlobalVariable>(GV->stripPointerCasts()), D);
4714  }
4715 }
4716 
4717 static bool isVarDeclStrongDefinition(const ASTContext &Context,
4718  CodeGenModule &CGM, const VarDecl *D,
4719  bool NoCommon) {
4720  // Don't give variables common linkage if -fno-common was specified or they
4721  // have a NoCommon attribute, unless overridden by a Common attribute.
4722  if ((NoCommon || D->hasAttr<NoCommonAttr>()) && !D->hasAttr<CommonAttr>())
4723  return true;
4724 
4725  // C11 6.9.2/2:
4726  // A declaration of an identifier for an object that has file scope without
4727  // an initializer, and without a storage-class specifier or with the
4728  // storage-class specifier static, constitutes a tentative definition.
4729  if (D->getInit() || D->hasExternalStorage())
4730  return true;
4731 
4732  // A variable cannot be both common and exist in a section.
4733  if (D->hasAttr<SectionAttr>())
4734  return true;
4735 
4736  // A variable cannot be both common and exist in a section.
4737  // We don't try to determine which is the right section in the front-end.
4738  // If no specialized section name is applicable, it will resort to default.
4739  if (D->hasAttr<PragmaClangBSSSectionAttr>() ||
4740  D->hasAttr<PragmaClangDataSectionAttr>() ||
4741  D->hasAttr<PragmaClangRelroSectionAttr>() ||
4742  D->hasAttr<PragmaClangRodataSectionAttr>())
4743  return true;
4744 
4745  // Thread local vars aren't considered common linkage.
4746  if (D->getTLSKind())
4747  return true;
4748 
4749  // Tentative definitions marked with WeakImportAttr are true definitions.
4750  if (D->hasAttr<WeakImportAttr>())
4751  return true;
4752 
4753  // A variable cannot be both common and exist in a comdat.
4754  if (shouldBeInCOMDAT(CGM, *D))
4755  return true;
4756 
4757  // Declarations with a required alignment do not have common linkage in MSVC
4758  // mode.
4759  if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
4760  if (D->hasAttr<AlignedAttr>())
4761  return true;
4762  QualType VarType = D->getType();
4763  if (Context.isAlignmentRequired(VarType))
4764  return true;
4765 
4766  if (const auto *RT = VarType->getAs<RecordType>()) {
4767  const RecordDecl *RD = RT->getDecl();
4768  for (const FieldDecl *FD : RD->fields()) {
4769  if (FD->isBitField())
4770  continue;
4771  if (FD->hasAttr<AlignedAttr>())
4772  return true;
4773  if (Context.isAlignmentRequired(FD->getType()))
4774  return true;
4775  }
4776  }
4777  }
4778 
4779  // Microsoft's link.exe doesn't support alignments greater than 32 bytes for
4780  // common symbols, so symbols with greater alignment requirements cannot be
4781  // common.
4782  // Other COFF linkers (ld.bfd and LLD) support arbitrary power-of-two
4783  // alignments for common symbols via the aligncomm directive, so this
4784  // restriction only applies to MSVC environments.
4785  if (Context.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() &&
4786  Context.getTypeAlignIfKnown(D->getType()) >
4787  Context.toBits(CharUnits::fromQuantity(32)))
4788  return true;
4789 
4790  return false;
4791 }
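// A few file-scope examples of how these rules classify declarations
// (illustrative C, assuming -fcommon and a non-MSVC target):
//   int a;                                 // tentative, nothing forces strong -> may be common
//   int b = 0;                             // has an initializer              -> strong
//   __thread int c;                        // thread-local                    -> strong
//   int d __attribute__((section(".s")));  // placed in a section             -> strong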
4792 
4793 llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
4794  const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable) {
4795  if (Linkage == GVA_Internal)
4796  return llvm::Function::InternalLinkage;
4797 
4798  if (D->hasAttr<WeakAttr>()) {
4799  if (IsConstantVariable)
4800  return llvm::GlobalVariable::WeakODRLinkage;
4801  else
4802  return llvm::GlobalVariable::WeakAnyLinkage;
4803  }
4804 
4805  if (const auto *FD = D->getAsFunction())
4806  if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally)
4807  return llvm::GlobalVariable::LinkOnceAnyLinkage;
4808 
4809  // We are guaranteed to have a strong definition somewhere else,
4810  // so we can use available_externally linkage.
4811  if (Linkage == GVA_AvailableExternally)
4812  return llvm::GlobalValue::AvailableExternallyLinkage;
4813 
4814  // Note that Apple's kernel linker doesn't support symbol
4815  // coalescing, so we need to avoid linkonce and weak linkages there.
4816  // Normally, this means we just map to internal, but for explicit
4817  // instantiations we'll map to external.
4818 
4819  // In C++, the compiler has to emit a definition in every translation unit
4820  // that references the function. We should use linkonce_odr because
4821  // a) if all references in this translation unit are optimized away, we
4822  // don't need to codegen it. b) if the function persists, it needs to be
4823  // merged with other definitions. c) C++ has the ODR, so we know the
4824  // definition is dependable.
4825  if (Linkage == GVA_DiscardableODR)
4826  return !Context.getLangOpts().AppleKext ? llvm::Function::LinkOnceODRLinkage
4827  : llvm::Function::InternalLinkage;
4828 
4829  // An explicit instantiation of a template has weak linkage, since
4830  // explicit instantiations can occur in multiple translation units
4831  // and must all be equivalent. However, we are not allowed to
4832  // throw away these explicit instantiations.
4833  //
4834  // CUDA/HIP: For -fno-gpu-rdc case, device code is limited to one TU,
4835  // so say that CUDA templates are either external (for kernels) or internal.
4836  // This lets llvm perform aggressive inter-procedural optimizations. For
4837  // -fgpu-rdc case, device function calls across multiple TU's are allowed,
4838  // therefore we need to follow the normal linkage paradigm.
4839  if (Linkage == GVA_StrongODR) {
4840  if (getLangOpts().AppleKext)
4841  return llvm::Function::ExternalLinkage;
4842  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
4843  !getLangOpts().GPURelocatableDeviceCode)
4844  return D->hasAttr<CUDAGlobalAttr>() ? llvm::Function::ExternalLinkage
4845  : llvm::Function::InternalLinkage;
4846  return llvm::Function::WeakODRLinkage;
4847  }
4848 
4849  // C++ doesn't have tentative definitions and thus cannot have common
4850  // linkage.
4851  if (!getLangOpts().CPlusPlus && isa<VarDecl>(D) &&
4852  !isVarDeclStrongDefinition(Context, *this, cast<VarDecl>(D),
4853  CodeGenOpts.NoCommon))
4854  return llvm::GlobalVariable::CommonLinkage;
4855 
4856  // selectany symbols are externally visible, so use weak instead of
4857  // linkonce. MSVC optimizes away references to const selectany globals, so
4858  // all definitions should be the same and ODR linkage should be used.
4859  // http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx
4860  if (D->hasAttr<SelectAnyAttr>())
4861  return llvm::GlobalVariable::WeakODRLinkage;
4862 
4863  // Otherwise, we have strong external linkage.
4864  assert(Linkage == GVA_StrongExternal);
4865  return llvm::GlobalValue::ExternalLinkage;
4866 }
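// Putting the cases above together (illustrative C++, not from this file):
//   static int a;                     // GVA_Internal        -> internal
//   inline void f() {}                // GVA_DiscardableODR  -> linkonce_odr
//   template int g<int>();            // explicit inst. def. -> weak_odr (GVA_StrongODR)
//   __declspec(selectany) int s = 1;  // SelectAnyAttr       -> weak_odr
//   int h() { return 0; }             // GVA_StrongExternal  -> external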
4867 
4868 llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageVarDefinition(
4869  const VarDecl *VD, bool IsConstant) {
4870  GVALinkage Linkage = getContext().GetGVALinkageForVariable(VD);
4871  return getLLVMLinkageForDeclarator(VD, Linkage, IsConstant);
4872 }
4873 
4874 /// Replace the uses of a function that was declared with a non-proto type.
4875 /// We want to silently drop extra arguments from call sites.
4876 static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
4877  llvm::Function *newFn) {
4878  // Fast path.
4879  if (old->use_empty()) return;
4880 
4881  llvm::Type *newRetTy = newFn->getReturnType();
4882  SmallVector<llvm::Value *, 4> newArgs;
4883 
4884  for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end();
4885  ui != ue; ) {
4886  llvm::Value::use_iterator use = ui++; // Increment before the use is erased.
4887  llvm::User *user = use->getUser();
4888 
4889  // Recognize and replace uses of bitcasts. Most calls to
4890  // unprototyped functions will use bitcasts.
4891  if (auto *bitcast = dyn_cast<llvm::ConstantExpr>(user)) {
4892  if (bitcast->getOpcode() == llvm::Instruction::BitCast)
4893  replaceUsesOfNonProtoConstant(bitcast, newFn);
4894  continue;
4895  }
4896 
4897  // Recognize calls to the function.
4898  llvm::CallBase *callSite = dyn_cast<llvm::CallBase>(user);
4899  if (!callSite) continue;
4900  if (!callSite->isCallee(&*use))
4901  continue;
4902 
4903  // If the return types don't match exactly, then we can't
4904  // transform this call unless it's dead.
4905  if (callSite->getType() != newRetTy && !callSite->use_empty())
4906  continue;
4907 
4908  // Get the call site's attribute list.
4909  SmallVector<llvm::AttributeSet, 8> newArgAttrs;
4910  llvm::AttributeList oldAttrs = callSite->getAttributes();
4911 
4912  // If the function was passed too few arguments, don't transform.
4913  unsigned newNumArgs = newFn->arg_size();
4914  if (callSite->arg_size() < newNumArgs)
4915  continue;
4916 
4917  // If extra arguments were passed, we silently drop them.
4918  // If any of the types mismatch, we don't transform.
4919  unsigned argNo = 0;
4920  bool dontTransform = false;
4921  for (llvm::Argument &A : newFn->args()) {
4922  if (callSite->getArgOperand(argNo)->getType() != A.getType()) {
4923  dontTransform = true;
4924  break;
4925  }
4926 
4927  // Add any parameter attributes.
4928  newArgAttrs.push_back(oldAttrs.getParamAttrs(argNo));
4929  argNo++;
4930  }
4931  if (dontTransform)
4932  continue;
4933 
4934  // Okay, we can transform this. Create the new call instruction and copy
4935  // over the required information.
4936  newArgs.append(callSite->arg_begin(), callSite->arg_begin() + argNo);
4937 
4938  // Copy over any operand bundles.
4939  SmallVector<llvm::OperandBundleDef, 1> newBundles;
4940  callSite->getOperandBundlesAsDefs(newBundles);
4941 
4942  llvm::CallBase *newCall;
4943  if (isa<llvm::CallInst>(callSite)) {
4944  newCall =
4945  llvm::CallInst::Create(newFn, newArgs, newBundles, "", callSite);
4946  } else {
4947  auto *oldInvoke = cast<llvm::InvokeInst>(callSite);
4948  newCall = llvm::InvokeInst::Create(newFn, oldInvoke->getNormalDest(),
4949  oldInvoke->getUnwindDest(), newArgs,
4950  newBundles, "", callSite);
4951  }
4952  newArgs.clear(); // for the next iteration
4953 
4954  if (!newCall->getType()->isVoidTy())
4955  newCall->takeName(callSite);
4956  newCall->setAttributes(
4957  llvm::AttributeList::get(newFn->getContext(), oldAttrs.getFnAttrs(),
4958  oldAttrs.getRetAttrs(), newArgAttrs));
4959  newCall->setCallingConv(callSite->getCallingConv());
4960 
4961  // Finally, remove the old call, replacing any uses with the new one.
4962  if (!callSite->use_empty())
4963  callSite->replaceAllUsesWith(newCall);
4964 
4965  // Copy debug location attached to CI.
4966  if (callSite->getDebugLoc())
4967  newCall->setDebugLoc(callSite->getDebugLoc());
4968 
4969  callSite->eraseFromParent();
4970  }
4971 }
4972 
4973 /// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
4974 /// implement a function with no prototype, e.g. "int foo() {}". If there are
4975 /// existing call uses of the old function in the module, this adjusts them to
4976 /// call the new function directly.
4977 ///
4978 /// This is not just a cleanup: the always_inline pass requires direct calls to
4979 /// functions to be able to inline them. If there is a bitcast in the way, it
4980 /// won't inline them. Instcombine normally deletes these calls, but it isn't
4981 /// run at -O0.
4982 static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
4983  llvm::Function *NewFn) {
4984  // If we're redefining a global as a function, don't transform it.
4985  if (!isa<llvm::Function>(Old)) return;
4986 
4987  replaceUsesOfNonProtoConstant(Old, NewFn);
4988 }
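// A sketch of the situation this handles (hypothetical C translation unit):
//   void foo();                 // unprototyped declaration
//   void bar() { foo(1, 2); }   // call site goes through a bitcast of foo
//   void foo() {}               // definition with no prototype
// Once the definition is emitted, the bitcast call inside bar() is rewritten
// into a direct call to the new function and the extra arguments are silently
// dropped, which is what lets always_inline work even at -O0.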
4989 
4990 void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
4991  auto DK = VD->isThisDeclarationADefinition();
4992  if (DK == VarDecl::Definition && VD->hasAttr<DLLImportAttr>())
4993  return;
4994 
4995  TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind();
4996  // If we have a definition, this might be a deferred decl. If the
4997  // instantiation is explicit, make sure we emit it at the end.
4998  if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition)
4999  GetAddrOfGlobalVar(VD);
5000 
5001  EmitTopLevelDecl(VD);
5002 }
5003 
5004 void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
5005  llvm::GlobalValue *GV) {
5006  const auto *D = cast<FunctionDecl>(GD.getDecl());
5007 
5008  // Compute the function info and LLVM type.
5009  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
5010  llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
5011 
5012  // Get or create the prototype for the function.
5013  if (!GV || (GV->getValueType() != Ty))
5014  GV = cast<llvm::GlobalValue>(GetAddrOfFunction(GD, Ty, /*ForVTable=*/false,
5015  /*DontDefer=*/true,
5016  ForDefinition));
5017 
5018  // Already emitted.
5019  if (!GV->isDeclaration())
5020  return;
5021 
5022  // We need to set linkage and visibility on the function before
5023  // generating code for it because various parts of IR generation
5024  // want to propagate this information down (e.g. to local static
5025  // declarations).
5026  auto *Fn = cast<llvm::Function>(GV);
5027  setFunctionLinkage(GD, Fn);
5028 
5029  // FIXME: this is redundant with part of setFunctionDefinitionAttributes
5030  setGVProperties(Fn, GD);
5031 
5032  MaybeHandleStaticInExternC(D, Fn);
5033 
5034  maybeSetTrivialComdat(*D, *Fn);
5035 
5036  // Set CodeGen attributes that represent floating point environment.
5037  setLLVMFunctionFEnvAttributes(Fn, FI);
5038 
5039  CodeGenFunction(*this).GenerateCode(GD, Fn, FI);
5040 
5041  setNonAliasAttributes(GD, Fn);
5042  SetLLVMFunctionAttributesForDefinition(D, Fn);
5043 
5044  if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
5045  AddGlobalCtor(Fn, CA->getPriority());
5046  if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
5047  AddGlobalDtor(Fn, DA->getPriority(), true);
5048  if (D->hasAttr<AnnotateAttr>())
5049  AddGlobalAnnotations(D, Fn);
5050 }
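// For example (hypothetical C), a function annotated as
//   __attribute__((constructor(101))) static void init(void) {}
// is generated like any other body and then registered via AddGlobalCtor,
// which appends a priority-101 entry to the llvm.global_ctors array; the
// destructor attribute is routed to llvm.global_dtors the same way.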
5051 
5052 void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
5053  const auto *D = cast<ValueDecl>(GD.getDecl());
5054  const AliasAttr *AA = D->getAttr<AliasAttr>();
5055  assert(AA && "Not an alias?");
5056 
5057  StringRef MangledName = getMangledName(GD);
5058 
5059  if (AA->getAliasee() == MangledName) {
5060  Diags.Report(AA->getLocation(), diag::err_cyclic_alias) << 0;
5061  return;
5062  }
5063 
5064  // If there is a definition in the module, then it wins over the alias.
5065  // This is dubious, but allow it to be safe. Just ignore the alias.
5066  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
5067  if (Entry && !Entry->isDeclaration())
5068  return;
5069 
5070  Aliases.push_back(GD);
5071 
5072  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
5073 
5074  // Create a reference to the named value. This ensures that it is emitted
5075  // if it is a deferred decl.
5076  llvm::Constant *Aliasee;
5077  llvm::GlobalValue::LinkageTypes LT;
5078  if (isa<llvm::FunctionType>(DeclTy)) {
5079  Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GD,
5080  /*ForVTable=*/false);
5081  LT = getFunctionLinkage(GD);
5082  } else {
5083  Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(), DeclTy, LangAS::Default,
5084  /*D=*/nullptr);
5085  if (const auto *VD = dyn_cast<VarDecl>(GD.getDecl()))
5086  LT = getLLVMLinkageVarDefinition(VD, D->getType().isConstQualified());
5087  else
5088  LT = getFunctionLinkage(GD);
5089  }
5090 
5091  // Create the new alias itself, but don't set a name yet.
5092  unsigned AS = Aliasee->getType()->getPointerAddressSpace();
5093  auto *GA =
5094  llvm::GlobalAlias::create(DeclTy, AS, LT, "", Aliasee, &getModule());
5095 
5096  if (Entry) {
5097  if (GA->getAliasee() == Entry) {
5098  Diags.Report(AA->getLocation(), diag::err_cyclic_alias) << 0;
5099  return;
5100  }
5101 
5102  assert(Entry->isDeclaration());
5103 
5104  // If there is a declaration in the module, then we had an extern followed
5105  // by the alias, as in:
5106  // extern int test6();
5107  // ...
5108  // int test6() __attribute__((alias("test7")));
5109  //
5110  // Remove it and replace uses of it with the alias.
5111  GA->takeName(Entry);
5112 
5113  Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
5114  Entry->getType()));
5115  Entry->eraseFromParent();
5116  } else {
5117  GA->setName(MangledName);
5118  }
5119 
5120  // Set attributes which are particular to an alias; this is a
5121  // specialization of the attributes which may be set on a global
5122  // variable/function.
5123  if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakRefAttr>() ||
5124  D->isWeakImported()) {
5125  GA->setLinkage(llvm::Function::WeakAnyLinkage);
5126  }
5127 
5128  if (const auto *VD = dyn_cast<VarDecl>(D))
5129  if (VD->getTLSKind())
5130  setTLSMode(GA, *VD);
5131 
5132  SetCommonAttributes(GD, GA);
5133 }
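// A concrete illustration (hypothetical C) of the attribute handled here:
//   int target(void) { return 0; }
//   int wrapper(void) __attribute__((alias("target")));
// produces roughly
//   @wrapper = alias i32 (), i32 ()* @target
// with the alias getting weak linkage only if the declaration is weak,
// weakref, or weak-imported.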
5134 
5135 void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) {
5136  const auto *D = cast<ValueDecl>(GD.getDecl());
5137  const IFuncAttr *IFA = D->getAttr<IFuncAttr>();
5138  assert(IFA && "Not an ifunc?");
5139 
5140  StringRef MangledName = getMangledName(GD);
5141 
5142  if (IFA->getResolver() == MangledName) {
5143  Diags.Report(IFA->getLocation(), diag::err_cyclic_alias) << 1;
5144  return;
5145  }
5146 
5147  // Report an error if some definition overrides ifunc.
5148  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
5149  if (Entry && !Entry->isDeclaration()) {
5150  GlobalDecl OtherGD;
5151  if (lookupRepresentativeDecl(MangledName, OtherGD) &&
5152  DiagnosedConflictingDefinitions.insert(GD).second) {
5153  Diags.Report(D->getLocation(), diag::err_duplicate_mangled_name)
5154  << MangledName;
5155  Diags.Report(OtherGD.getDecl()->getLocation(),
5156  diag::note_previous_definition);
5157  }
5158  return;
5159  }
5160 
5161  Aliases.push_back(GD);
5162 
5163  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
5164  llvm::Type *ResolverTy = llvm::GlobalIFunc::getResolverFunctionType(DeclTy);
5165  llvm::Constant *Resolver =
5166  GetOrCreateLLVMFunction(IFA->getResolver(), ResolverTy, {},
5167  /*ForVTable=*/false);
5168  llvm::GlobalIFunc *GIF =
5170  "", Resolver, &getModule());
5171  if (Entry) {
5172  if (GIF->getResolver() == Entry) {
5173  Diags.Report(IFA->getLocation(), diag::err_cyclic_alias) << 1;
5174  return;
5175  }
5176  assert(Entry->isDeclaration());
5177 
5178  // If there is a declaration in the module, then we had an extern followed
5179  // by the ifunc, as in:
5180  // extern int test();
5181  // ...
5182  // int test() __attribute__((ifunc("resolver")));
5183  //
5184  // Remove it and replace uses of it with the ifunc.
5185  GIF->takeName(Entry);
5186 
5187  Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GIF,
5188  Entry->getType()));
5189  Entry->eraseFromParent();
5190  } else
5191  GIF->setName(MangledName);
5192 
5193  SetCommonAttributes(GD, GIF);
5194 }
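// Illustrative use of the ifunc attribute handled here (hypothetical C; the
// names impl_resolver and my_memcpy are made up for the example):
//   static void *impl_resolver(void);
//   void *my_memcpy(void *d, const void *s, unsigned long n)
//       __attribute__((ifunc("impl_resolver")));
// my_memcpy becomes an llvm::GlobalIFunc whose resolver function is found (or
// created) by name; a strong definition with the same mangled name is
// reported as a duplicate instead of silently overriding the ifunc.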
5195 
5196 llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
5197  ArrayRef<llvm::Type*> Tys) {
5198  return llvm::Intrinsic::getDeclaration(&getModule(), (llvm::Intrinsic::ID)IID,
5199  Tys);
5200 }
5201 
5202 static llvm::StringMapEntry<llvm::GlobalVariable *> &
5203 GetConstantCFStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
5204  const StringLiteral *Literal, bool TargetIsLSB,
5205  bool &IsUTF16, unsigned &StringLength) {
5206  StringRef String = Literal->getString();
5207  unsigned NumBytes = String.size();
5208 
5209  // Check for simple case.
5210  if (!Literal->containsNonAsciiOrNull()) {
5211  StringLength = NumBytes;
5212  return *Map.insert(std::make_pair(String, nullptr)).first;
5213  }
5214 
5215  // Otherwise, convert the UTF8 literals into a string of shorts.
5216  IsUTF16 = true;
5217 
5218  SmallVector<llvm::UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls.
5219  const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
5220  llvm::UTF16 *ToPtr = &ToBuf[0];
5221 
5222  (void)llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
5223  ToPtr + NumBytes, llvm::strictConversion);
5224 
5225  // ConvertUTF8toUTF16 returns the length in ToPtr.
5226  StringLength = ToPtr - &ToBuf[0];
5227 
5228  // Add an explicit null.
5229  *ToPtr = 0;
5230  return *Map.insert(std::make_pair(
5231  StringRef(reinterpret_cast<const char *>(ToBuf.data()),
5232  (StringLength + 1) * 2),
5233  nullptr)).first;
5234 }
5235 
5236 ConstantAddress
5237 CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
5238  unsigned StringLength = 0;
5239  bool isUTF16 = false;
5240  llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
5241  GetConstantCFStringEntry(CFConstantStringMap, Literal,
5242  getDataLayout().isLittleEndian(), isUTF16,
5243  StringLength);
5244 
5245  if (auto *C = Entry.second)
5246  return ConstantAddress(
5247  C, C->getValueType(), CharUnits::fromQuantity(C->getAlignment()));
5248 
5249  llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
5250  llvm::Constant *Zeros[] = { Zero, Zero };
5251 
5252  const ASTContext &Context = getContext();
5253  const llvm::Triple &Triple = getTriple();
5254 
5255  const auto CFRuntime = getLangOpts().CFRuntime;
5256  const bool IsSwiftABI =
5257  static_cast<unsigned>(CFRuntime) >=
5258  static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift);
5259  const bool IsSwift4_1 = CFRuntime == LangOptions::CoreFoundationABI::Swift4_1;
5260 
5261  // If we don't already have it, get __CFConstantStringClassReference.
5262  if (!CFConstantStringClassRef) {
5263  const char *CFConstantStringClassName = "__CFConstantStringClassReference";
5264  llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
5265  Ty = llvm::ArrayType::get(Ty, 0);
5266 
5267  switch (CFRuntime) {
5268  default: break;
5269  case LangOptions::CoreFoundationABI::Swift: LLVM_FALLTHROUGH;
5270  case LangOptions::CoreFoundationABI::Swift5_0:
5271  CFConstantStringClassName =
5272  Triple.isOSDarwin() ? "$s15SwiftFoundation19_NSCFConstantStringCN"
5273  : "$s10Foundation19_NSCFConstantStringCN";
5274  Ty = IntPtrTy;
5275  break;
5276  case LangOptions::CoreFoundationABI::Swift4_2:
5277  CFConstantStringClassName =
5278  Triple.isOSDarwin() ? "$S15SwiftFoundation19_NSCFConstantStringCN"
5279  : "$S10Foundation19_NSCFConstantStringCN";
5280  Ty = IntPtrTy;
5281  break;
5282  case LangOptions::CoreFoundationABI::Swift4_1:
5283  CFConstantStringClassName =
5284  Triple.isOSDarwin() ? "__T015SwiftFoundation19_NSCFConstantStringCN"
5285  : "__T010Foundation19_NSCFConstantStringCN";
5286  Ty = IntPtrTy;
5287  break;
5288  }
5289 
5290  llvm::Constant *C = CreateRuntimeVariable(Ty, CFConstantStringClassName);
5291 
5292  if (Triple.isOSBinFormatELF() || Triple.isOSBinFormatCOFF()) {
5293  llvm::GlobalValue *GV = nullptr;
5294 
5295  if ((GV = dyn_cast<llvm::GlobalValue>(C))) {
5296  IdentifierInfo &II = Context.Idents.get(GV->getName());
5297  TranslationUnitDecl *TUDecl = Context.getTranslationUnitDecl();
5298  DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
5299 
5300  const VarDecl *VD = nullptr;
5301  for (const auto *Result : DC->lookup(&II))
5302  if ((VD = dyn_cast<VarDecl>(Result)))
5303  break;
5304 
5305  if (Triple.isOSBinFormatELF()) {
5306  if (!VD)
5307  GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
5308  } else {
5309  GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
5310  if (!VD || !VD->hasAttr<DLLExportAttr>())
5311  GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
5312  else
5313  GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
5314  }
5315 
5316  setDSOLocal(GV);
5317  }
5318  }
5319 
5320  // Decay array -> ptr
5321  CFConstantStringClassRef =
5322  IsSwiftABI ? llvm::ConstantExpr::getPtrToInt(C, Ty)
5323  : llvm::ConstantExpr::getGetElementPtr(Ty, C, Zeros);
5324  }
5325 
5326  QualType CFTy = Context.getCFConstantStringType();
5327 
5328  auto *STy = cast<llvm::StructType>(getTypes().ConvertType(CFTy));
5329 
5330  ConstantInitBuilder Builder(*this);
5331  auto Fields = Builder.beginStruct(STy);
5332 
5333  // Class pointer.
5334  Fields.add(cast<llvm::ConstantExpr>(CFConstantStringClassRef));
5335 
5336  // Flags.
5337  if (IsSwiftABI) {
5338  Fields.addInt(IntPtrTy, IsSwift4_1 ? 0x05 : 0x01);
5339  Fields.addInt(Int64Ty, isUTF16 ? 0x07d0 : 0x07c8);
5340  } else {
5341  Fields.addInt(IntTy, isUTF16 ? 0x07d0 : 0x07C8);
5342  }
5343 
5344  // String pointer.
5345  llvm::Constant *C = nullptr;
5346  if (isUTF16) {
5347  auto Arr = llvm::makeArrayRef(
5348  reinterpret_cast<uint16_t *>(const_cast<char *>(Entry.first().data())),
5349  Entry.first().size() / 2);
5350  C = llvm::ConstantDataArray::get(VMContext, Arr);
5351  } else {
5352  C = llvm::ConstantDataArray::getString(VMContext, Entry.first());
5353  }
5354 
5355  // Note: -fwritable-strings doesn't make the backing store strings of
5356  // CFStrings writable. (See <rdar://problem/10657500>)
5357  auto *GV =
5358  new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true,
5359  llvm::GlobalValue::PrivateLinkage, C, ".str");
5360  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
5361  // Don't enforce the target's minimum global alignment, since the only use
5362  // of the string is via this class initializer.
5363  CharUnits Align = isUTF16 ? Context.getTypeAlignInChars(Context.ShortTy)
5364  : Context.getTypeAlignInChars(Context.CharTy);
5365  GV->setAlignment(Align.getAsAlign());
5366 
5367  // FIXME: We set the section explicitly to avoid a bug in ld64 224.1.
5368  // Without it LLVM can merge the string with a non unnamed_addr one during
5369  // LTO. Doing that changes the section it ends in, which surprises ld64.
5370  if (Triple.isOSBinFormatMachO())
5371  GV->setSection(isUTF16 ? "__TEXT,__ustring"
5372  : "__TEXT,__cstring,cstring_literals");
5373  // Make sure the literal ends up in .rodata to allow for safe ICF and for
5374  // the static linker to adjust permissions to read-only later on.
5375  else if (Triple.isOSBinFormatELF())
5376  GV->setSection(".rodata");
5377 
5378  // String.
5379  llvm::Constant *Str =
5380  llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);
5381 
5382  if (isUTF16)
5383  // Cast the UTF16 string to the correct type.
5384  Str = llvm::ConstantExpr::getBitCast(Str, Int8PtrTy);
5385  Fields.add(Str);
5386 
5387  // String length.
5388  llvm::IntegerType *LengthTy =
5389  llvm::IntegerType::get(getModule().getContext(),
5390  Context.getTargetInfo().getLongWidth());
5391  if (IsSwiftABI) {
5392  if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
5393  CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
5394  LengthTy = Int32Ty;
5395  else
5396  LengthTy = IntPtrTy;
5397  }
5398  Fields.addInt(LengthTy, StringLength);
5399 
5400  // Swift ABI requires 8-byte alignment to ensure that the _Atomic(uint64_t) is
5401  // properly aligned on 32-bit platforms.
5402  CharUnits Alignment =
5403  IsSwiftABI ? Context.toCharUnitsFromBits(64) : getPointerAlign();
5404 
5405  // The struct.
5406  GV = Fields.finishAndCreateGlobal("_unnamed_cfstring_", Alignment,
5407  /*isConstant=*/false,
5408  llvm::GlobalVariable::PrivateLinkage);
5409  GV->addAttribute("objc_arc_inert");
5410  switch (Triple.getObjectFormat()) {
5411  case llvm::Triple::UnknownObjectFormat:
5412  llvm_unreachable("unknown file format");
5413  case llvm::Triple::GOFF:
5414  llvm_unreachable("GOFF is not yet implemented");
5415  case llvm::Triple::XCOFF:
5416  llvm_unreachable("XCOFF is not yet implemented");
5417  case llvm::Triple::COFF:
5418  case llvm::Triple::ELF:
5419  case llvm::Triple::Wasm:
5420  GV->setSection("cfstring");
5421  break;
5422  case llvm::Triple::MachO:
5423  GV->setSection("__DATA,__cfstring");
5424  break;
5425  }
5426  Entry.second = GV;
5427 
5428  return ConstantAddress(GV, GV->getValueType(), Alignment);
5429 }
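// The struct assembled above corresponds to the classic (non-Swift) CFString
// layout, roughly (illustrative view, field names approximate):
//   struct __NSConstantString {
//     const int *isa;    // &__CFConstantStringClassReference
//     int flags;         // 0x07c8 for 8-bit data, 0x07d0 for UTF-16 data
//     const char *str;   // the private ".str" backing store emitted above
//     long length;       // length in code units, not bytes
//   };
// so each @"..." literal costs one "_unnamed_cfstring_" global plus one
// backing-store string global.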
5430 
5431 bool CodeGenModule::getExpressionLocationsEnabled() const {
5432  return !CodeGenOpts.EmitCodeView || CodeGenOpts.DebugColumnInfo;
5433 }
5434 
5435 QualType CodeGenModule::getObjCFastEnumerationStateType() {
5436  if (ObjCFastEnumerationStateType.isNull()) {
5437  RecordDecl *D = Context.buildImplicitRecord("__objcFastEnumerationState");
5438  D->startDefinition();
5439 
5440  QualType FieldTypes[] = {
5441  Context.UnsignedLongTy,
5442  Context.getPointerType(Context.getObjCIdType()),
5443  Context.getPointerType(Context.UnsignedLongTy),
5444  Context.getConstantArrayType(Context.UnsignedLongTy,
5445  llvm::APInt(32, 5), nullptr, ArrayType::Normal, 0)
5446  };
5447 
5448  for (size_t i = 0; i < 4; ++i) {
5449  FieldDecl *Field = FieldDecl::Create(Context,
5450  D,
5451  SourceLocation(),
5452  SourceLocation(), nullptr,
5453  FieldTypes[i], /*TInfo=*/nullptr,
5454  /*BitWidth=*/nullptr,
5455  /*Mutable=*/false,
5456  ICIS_NoInit);
5457  Field->setAccess(AS_public);
5458  D->addDecl(Field);
5459  }
5460 
5461  D->completeDefinition();
5462  ObjCFastEnumerationStateType = Context.getTagDeclType(D);
5463  }
5464 
5465  return ObjCFastEnumerationStateType;
5466 }
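// The implicit record built here mirrors the public NSFastEnumerationState
// struct (sketched; the Cocoa field names are shown only for orientation):
//   typedef struct {
//     unsigned long state;
//     id *itemsPtr;
//     unsigned long *mutationsPtr;
//     unsigned long extra[5];
//   } NSFastEnumerationState;
// which is the buffer that "for (id x in collection)" passes to
// countByEnumeratingWithState:objects:count:.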
5467 
5468 llvm::Constant *
5469 CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
5470  assert(!E->getType()->isPointerType() && "Strings are always arrays");
5471 
5472  // Don't emit it as the address of the string, emit the string data itself
5473  // as an inline array.
5474  if (E->getCharByteWidth() == 1) {
5475  SmallString<64> Str(E->getString());
5476 
5477  // Resize the string to the right size, which is indicated by its type.
5478  const ConstantArrayType *CAT = Context.getAsConstantArrayType(E->getType());
5479  Str.resize(CAT->getSize().getZExtValue());
5480  return llvm::ConstantDataArray::getString(VMContext, Str, false);
5481  }
5482 
5483  auto *AType = cast<llvm::ArrayType>(getTypes().ConvertType(E->getType()));
5484  llvm::Type *ElemTy = AType->getElementType();
5485  unsigned NumElements = AType->getNumElements();
5486 
5487  // Wide strings have either 2-byte or 4-byte elements.
5488  if (ElemTy->getPrimitiveSizeInBits() == 16) {
5489  SmallVector<uint16_t, 32> Elements;
5490  Elements.reserve(NumElements);
5491 
5492  for(unsigned i = 0, e = E->getLength(); i != e; ++i)
5493  Elements.push_back(E->getCodeUnit(i));
5494  Elements.resize(NumElements);
5495  return llvm::ConstantDataArray::get(VMContext, Elements);
5496  }
5497 
5498  assert(ElemTy->getPrimitiveSizeInBits() == 32);
5499  SmallVector<uint32_t, 32> Elements;
5500  Elements.reserve(NumElements);
5501 
5502  for(unsigned i = 0, e = E->getLength(); i != e; ++i)
5503  Elements.push_back(E->getCodeUnit(i));
5504  Elements.resize(NumElements);
5505  return llvm::ConstantDataArray::get(VMContext, Elements);
5506 }
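// For instance (hypothetical C++ declarations):
//   const char     a[] = "hi";    // -> [3 x i8]  c"hi\00"
//   const char16_t b[] = u"hi";   // -> [3 x i16] of code units, zero padded
//   const char32_t c[] = U"hi";   // -> [3 x i32] of code units, zero padded
// The array is always resized to the declared constant-array size, so extra
// elements beyond the literal come out as trailing zeros.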
5507 
5508 static llvm::GlobalVariable *
5509 GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
5510  CodeGenModule &CGM, StringRef GlobalName,
5511  CharUnits Alignment) {
5512  unsigned AddrSpace = CGM.getContext().getTargetAddressSpace(
5513  CGM.GetGlobalConstantAddressSpace());
5514 
5515  llvm::Module &M = CGM.getModule();
5516  // Create a global variable for this string
5517  auto *GV = new llvm::GlobalVariable(
5518  M, C->getType(), !CGM.getLangOpts().WritableStrings, LT, C, GlobalName,
5519  nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace);
5520  GV->setAlignment(Alignment.getAsAlign());
5521  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
5522  if (GV->isWeakForLinker()) {
5523  assert(CGM.supportsCOMDAT() && "Only COFF uses weak string literals");
5524  GV->setComdat(M.getOrInsertComdat(GV->getName()));
5525  }
5526  CGM.setDSOLocal(GV);
5527 
5528  return GV;
5529 }
5530 
5531 /// GetAddrOfConstantStringFromLiteral - Return a pointer to a
5532 /// constant array for the given string literal.
5533 ConstantAddress
5534 CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
5535  StringRef Name) {
5536  CharUnits Alignment = getContext().getAlignOfGlobalVarInChars(S->getType());
5537 
5538  llvm::Constant *C = GetConstantArrayFromStringLiteral(S);
5539  llvm::GlobalVariable **Entry = nullptr;
5540  if (!LangOpts.WritableStrings) {
5541  Entry = &ConstantStringMap[C];
5542  if (auto GV = *Entry) {
5543  if (uint64_t(Alignment.getQuantity()) > GV->getAlignment())
5544  GV->setAlignment(Alignment.getAsAlign());
5545  return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
5546  GV->getValueType(), Alignment);
5547  }
5548  }
5549 
5550  SmallString<256> MangledNameBuffer;
5551  StringRef GlobalVariableName;
5552  llvm::GlobalValue::LinkageTypes LT;
5553 
5554  // Mangle the string literal if that's how the ABI merges duplicate strings.
5555  // Don't do it if they are writable, since we don't want writes in one TU to
5556  // affect strings in another.
5557  if (getCXXABI().getMangleContext().shouldMangleStringLiteral(S) &&
5558  !LangOpts.WritableStrings) {
5559  llvm::raw_svector_ostream Out(MangledNameBuffer);
5560  getCXXABI().getMangleContext().mangleStringLiteral(S, Out);
5561  LT = llvm::GlobalValue::LinkOnceODRLinkage;
5562  GlobalVariableName = MangledNameBuffer;
5563  } else {
5564  LT = llvm::GlobalValue::PrivateLinkage;
5565  GlobalVariableName = Name;
5566  }
5567 
5568  auto GV = GenerateStringLiteral(C, LT, *this, GlobalVariableName, Alignment);
5569  if (Entry)
5570  *Entry = GV;
5571 
5572  SanitizerMD->reportGlobalToASan(GV, S->getStrTokenLoc(0), "<string literal>",
5573  QualType());
5574 
5575  return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
5576  GV->getValueType(), Alignment);
5577 }
5578 
5579 /// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
5580 /// array for the given ObjCEncodeExpr node.
5581 ConstantAddress
5582 CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
5583  std::string Str;
5584  getContext().getObjCEncodingForType(E->getEncodedType(), Str);
5585 
5586  return GetAddrOfConstantCString(Str);
5587 }
5588 
5589 /// GetAddrOfConstantCString - Returns a pointer to a character array containing
5590 /// the literal and a terminating '\0' character.
5591 /// The result has pointer to array type.
5592 ConstantAddress CodeGenModule::GetAddrOfConstantCString(
5593  const std::string &Str, const char *GlobalName) {
5594  StringRef StrWithNull(Str.c_str(), Str.size() + 1);
5595  CharUnits Alignment =
5596  getContext().getAlignOfGlobalVarInChars(getContext().CharTy);
5597 
5598  llvm::Constant *C =
5599  llvm::ConstantDataArray::getString(getLLVMContext(), StrWithNull, false);
5600 
5601  // Don't share any string literals if strings aren't constant.
5602  llvm::GlobalVariable **Entry = nullptr;
5603  if (!LangOpts.WritableStrings) {
5604  Entry = &ConstantStringMap[C];
5605  if (auto GV = *Entry) {
5606  if (uint64_t(Alignment.getQuantity()) > GV->getAlignment())
5607  GV->setAlignment(Alignment.getAsAlign());
5608  return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
5609  GV->getValueType(), Alignment);
5610  }
5611  }
5612 
5613  // Get the default prefix if a name wasn't specified.
5614  if (!GlobalName)
5615  GlobalName = ".str";
5616  // Create a global variable for this.
5617  auto GV = GenerateStringLiteral(C, llvm::GlobalValue::PrivateLinkage, *this,
5618  GlobalName, Alignment);
5619  if (Entry)
5620  *Entry = GV;
5621 
5622  return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
5623  GV->getValueType(), Alignment);
5624 }
5625 
5626 ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
5627  const MaterializeTemporaryExpr *E, const Expr *Init) {
5628  assert((E->getStorageDuration() == SD_Static ||
5629  E->getStorageDuration() == SD_Thread) && "not a global temporary");
5630  const auto *VD = cast<VarDecl>(E->getExtendingDecl());
5631 
5632  // If we're not materializing a subobject of the temporary, keep the
5633  // cv-qualifiers from the type of the MaterializeTemporaryExpr.
5634  QualType MaterializedType = Init->getType();