clang  9.0.0svn
ASTContext.cpp
1 //===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the ASTContext interface.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "clang/AST/ASTContext.h"
14 #include "CXXABI.h"
15 #include "clang/AST/APValue.h"
18 #include "clang/AST/Attr.h"
19 #include "clang/AST/AttrIterator.h"
20 #include "clang/AST/CharUnits.h"
21 #include "clang/AST/Comment.h"
22 #include "clang/AST/Decl.h"
23 #include "clang/AST/DeclBase.h"
24 #include "clang/AST/DeclCXX.h"
26 #include "clang/AST/DeclObjC.h"
27 #include "clang/AST/DeclOpenMP.h"
28 #include "clang/AST/DeclTemplate.h"
30 #include "clang/AST/Expr.h"
31 #include "clang/AST/ExprCXX.h"
33 #include "clang/AST/Mangle.h"
37 #include "clang/AST/RecordLayout.h"
39 #include "clang/AST/Stmt.h"
40 #include "clang/AST/TemplateBase.h"
41 #include "clang/AST/TemplateName.h"
42 #include "clang/AST/Type.h"
43 #include "clang/AST/TypeLoc.h"
47 #include "clang/Basic/Builtins.h"
50 #include "clang/Basic/FixedPoint.h"
52 #include "clang/Basic/LLVM.h"
54 #include "clang/Basic/Linkage.h"
59 #include "clang/Basic/Specifiers.h"
61 #include "clang/Basic/TargetInfo.h"
62 #include "clang/Basic/XRayLists.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/APSInt.h"
65 #include "llvm/ADT/ArrayRef.h"
66 #include "llvm/ADT/DenseMap.h"
67 #include "llvm/ADT/DenseSet.h"
68 #include "llvm/ADT/FoldingSet.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/PointerUnion.h"
72 #include "llvm/ADT/STLExtras.h"
73 #include "llvm/ADT/SmallPtrSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/StringExtras.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Triple.h"
78 #include "llvm/Support/Capacity.h"
79 #include "llvm/Support/Casting.h"
80 #include "llvm/Support/Compiler.h"
81 #include "llvm/Support/ErrorHandling.h"
82 #include "llvm/Support/MathExtras.h"
83 #include "llvm/Support/raw_ostream.h"
84 #include <algorithm>
85 #include <cassert>
86 #include <cstddef>
87 #include <cstdint>
88 #include <cstdlib>
89 #include <map>
90 #include <memory>
91 #include <string>
92 #include <tuple>
93 #include <utility>
94 
95 using namespace clang;
96 
109 
112 };
113 
114 RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
115  if (!CommentsLoaded && ExternalSource) {
116  ExternalSource->ReadComments();
117 
118 #ifndef NDEBUG
119  ArrayRef<RawComment *> RawComments = Comments.getComments();
120  assert(std::is_sorted(RawComments.begin(), RawComments.end(),
121  BeforeThanCompare<RawComment>(SourceMgr)));
122 #endif
123 
124  CommentsLoaded = true;
125  }
126 
127  assert(D);
128 
129  // Users cannot attach documentation to implicit declarations.
130  if (D->isImplicit())
131  return nullptr;
132 
133  // Users cannot attach documentation to implicit instantiations.
134  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
135  if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
136  return nullptr;
137  }
138 
139  if (const auto *VD = dyn_cast<VarDecl>(D)) {
140  if (VD->isStaticDataMember() &&
141  VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
142  return nullptr;
143  }
144 
145  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
146  if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
147  return nullptr;
148  }
149 
150  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
151  TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
152  if (TSK == TSK_ImplicitInstantiation ||
153  TSK == TSK_Undeclared)
154  return nullptr;
155  }
156 
157  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
158  if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
159  return nullptr;
160  }
161  if (const auto *TD = dyn_cast<TagDecl>(D)) {
162  // When a tag declaration (but not a definition!) is part of the
163  // decl-specifier-seq of some other declaration, it doesn't get a comment.
164  if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
165  return nullptr;
166  }
167  // TODO: handle comments for function parameters properly.
168  if (isa<ParmVarDecl>(D))
169  return nullptr;
170 
171  // TODO: we could look up template parameter documentation in the template
172  // documentation.
173  if (isa<TemplateTypeParmDecl>(D) ||
174  isa<NonTypeTemplateParmDecl>(D) ||
175  isa<TemplateTemplateParmDecl>(D))
176  return nullptr;
177 
178  ArrayRef<RawComment *> RawComments = Comments.getComments();
179 
180  // If there are no comments anywhere, we won't find anything.
181  if (RawComments.empty())
182  return nullptr;
183 
184  // Find declaration location.
185  // For Objective-C declarations we generally don't expect to have multiple
186  // declarators, thus use declaration starting location as the "declaration
187  // location".
188  // For all other declarations multiple declarators are used quite frequently,
189  // so we use the location of the identifier as the "declaration location".
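 // For example, in `int *a, b;` both declarators share the same starting
 // location, but each has its own identifier location, so the identifier
 // location is the better anchor for non-Objective-C declarations.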
190  SourceLocation DeclLoc;
191  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
192  isa<ObjCPropertyDecl>(D) ||
193  isa<RedeclarableTemplateDecl>(D) ||
194  isa<ClassTemplateSpecializationDecl>(D))
195  DeclLoc = D->getBeginLoc();
196  else {
197  DeclLoc = D->getLocation();
198  if (DeclLoc.isMacroID()) {
199  if (isa<TypedefDecl>(D)) {
200  // If the location of the typedef name is in a macro, it is because it is
201  // declared via a macro. Try using the declaration's starting location as
202  // the "declaration location".
203  DeclLoc = D->getBeginLoc();
204  } else if (const auto *TD = dyn_cast<TagDecl>(D)) {
205  // If location of the tag decl is inside a macro, but the spelling of
206  // the tag name comes from a macro argument, it looks like a special
207  // macro like NS_ENUM is being used to define the tag decl. In that
208  // case, adjust the source location to the expansion loc so that we can
209  // attach the comment to the tag decl.
210  if (SourceMgr.isMacroArgExpansion(DeclLoc) &&
211  TD->isCompleteDefinition())
212  DeclLoc = SourceMgr.getExpansionLoc(DeclLoc);
213  }
214  }
215  }
216 
217  // If the declaration doesn't map directly to a location in a file, we
218  // can't find the comment.
219  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
220  return nullptr;
221 
222  // Find the comment that occurs just after this declaration.
223  ArrayRef<RawComment *>::iterator Comment;
224  {
225  // When searching for comments during parsing, the comment we are looking
226  // for is usually among the last two comments we parsed -- check them
227  // first.
228  RawComment CommentAtDeclLoc(
229  SourceMgr, SourceRange(DeclLoc), LangOpts.CommentOpts, false);
230  BeforeThanCompare<RawComment> Compare(SourceMgr);
231  ArrayRef<RawComment *>::iterator MaybeBeforeDecl = RawComments.end() - 1;
232  bool Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
233  if (!Found && RawComments.size() >= 2) {
234  MaybeBeforeDecl--;
235  Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
236  }
237 
238  if (Found) {
239  Comment = MaybeBeforeDecl + 1;
240  assert(Comment == std::lower_bound(RawComments.begin(), RawComments.end(),
241  &CommentAtDeclLoc, Compare));
242  } else {
243  // Slow path.
244  Comment = std::lower_bound(RawComments.begin(), RawComments.end(),
245  &CommentAtDeclLoc, Compare);
246  }
247  }
248 
249  // Decompose the location for the declaration and find the beginning of the
250  // file buffer.
251  std::pair<FileID, unsigned> DeclLocDecomp = SourceMgr.getDecomposedLoc(DeclLoc);
252 
253  // First check whether we have a trailing comment.
254  if (Comment != RawComments.end() &&
255  ((*Comment)->isDocumentation() || LangOpts.CommentOpts.ParseAllComments)
256  && (*Comment)->isTrailingComment() &&
257  (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
258  isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {
259  std::pair<FileID, unsigned> CommentBeginDecomp
260  = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getBegin());
261  // Check that Doxygen trailing comment comes after the declaration, starts
262  // on the same line and in the same file as the declaration.
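 // For example: `int x; ///< doc for x` -- the trailing comment starts after
 // the declaration but on the same line, so it is attached to `x`.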
263  if (DeclLocDecomp.first == CommentBeginDecomp.first &&
264  SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second)
265  == SourceMgr.getLineNumber(CommentBeginDecomp.first,
266  CommentBeginDecomp.second)) {
267  return *Comment;
268  }
269  }
270 
271  // The comment just after the declaration was not a trailing comment.
272  // Let's look at the previous comment.
273  if (Comment == RawComments.begin())
274  return nullptr;
275  --Comment;
276 
277  // Check that we actually have a non-member Doxygen comment.
278  if (!((*Comment)->isDocumentation() ||
279  LangOpts.CommentOpts.ParseAllComments) ||
280  (*Comment)->isTrailingComment())
281  return nullptr;
282 
283  // Decompose the end of the comment.
284  std::pair<FileID, unsigned> CommentEndDecomp
285  = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getEnd());
286 
287  // If the comment and the declaration aren't in the same file, then they
288  // aren't related.
289  if (DeclLocDecomp.first != CommentEndDecomp.first)
290  return nullptr;
291 
292  // Get the corresponding buffer.
293  bool Invalid = false;
294  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
295  &Invalid).data();
296  if (Invalid)
297  return nullptr;
298 
299  // Extract text between the comment and declaration.
300  StringRef Text(Buffer + CommentEndDecomp.second,
301  DeclLocDecomp.second - CommentEndDecomp.second);
302 
303  // There should be no other declarations or preprocessor directives between
304  // comment and declaration.
305  if (Text.find_first_of(";{}#@") != StringRef::npos)
306  return nullptr;
307 
308  return *Comment;
309 }
310 
311 /// If we have a 'templated' declaration for a template, adjust 'D' to
312 /// refer to the actual template.
313 /// If we have an implicit instantiation, adjust 'D' to refer to template.
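 /// For example, given `template <typename T> void f(T);`, the FunctionDecl
 /// for `f` is the templated declaration, and this function returns the
 /// enclosing FunctionTemplateDecl instead.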
314 static const Decl *adjustDeclToTemplate(const Decl *D) {
315  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
316  // Is this function declaration part of a function template?
317  if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
318  return FTD;
319 
320  // Nothing to do if function is not an implicit instantiation.
321  if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
322  return D;
323 
324  // Function is an implicit instantiation of a function template?
325  if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
326  return FTD;
327 
328  // Function is instantiated from a member definition of a class template?
329  if (const FunctionDecl *MemberDecl =
330  FD->getInstantiatedFromMemberFunction())
331  return MemberDecl;
332 
333  return D;
334  }
335  if (const auto *VD = dyn_cast<VarDecl>(D)) {
336  // Static data member is instantiated from a member definition of a class
337  // template?
338  if (VD->isStaticDataMember())
339  if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
340  return MemberDecl;
341 
342  return D;
343  }
344  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
345  // Is this class declaration part of a class template?
346  if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
347  return CTD;
348 
349  // Class is an implicit instantiation of a class template or partial
350  // specialization?
351  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
352  if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
353  return D;
354  llvm::PointerUnion<ClassTemplateDecl *,
355  ClassTemplatePartialSpecializationDecl *>
356  PU = CTSD->getSpecializedTemplateOrPartial();
357  return PU.is<ClassTemplateDecl*>() ?
358  static_cast<const Decl*>(PU.get<ClassTemplateDecl *>()) :
359  static_cast<const Decl*>(
360  PU.get<ClassTemplatePartialSpecializationDecl *>());
361  }
362 
363  // Class is instantiated from a member definition of a class template?
364  if (const MemberSpecializationInfo *Info =
365  CRD->getMemberSpecializationInfo())
366  return Info->getInstantiatedFrom();
367 
368  return D;
369  }
370  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
371  // Enum is instantiated from a member definition of a class template?
372  if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
373  return MemberDecl;
374 
375  return D;
376  }
377  // FIXME: Adjust alias templates?
378  return D;
379 }
380 
381 const RawComment *ASTContext::getRawCommentForAnyRedecl(
382  const Decl *D,
383  const Decl **OriginalDecl) const {
384  D = adjustDeclToTemplate(D);
385 
386  // Check whether we have cached a comment for this declaration already.
387  {
388  llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
389  RedeclComments.find(D);
390  if (Pos != RedeclComments.end()) {
391  const RawCommentAndCacheFlags &Raw = Pos->second;
392  if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
393  if (OriginalDecl)
394  *OriginalDecl = Raw.getOriginalDecl();
395  return Raw.getRaw();
396  }
397  }
398  }
399 
400  // Search for comments attached to declarations in the redeclaration chain.
401  const RawComment *RC = nullptr;
402  const Decl *OriginalDeclForRC = nullptr;
403  for (auto I : D->redecls()) {
404  llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
405  RedeclComments.find(I);
406  if (Pos != RedeclComments.end()) {
407  const RawCommentAndCacheFlags &Raw = Pos->second;
408  if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
409  RC = Raw.getRaw();
410  OriginalDeclForRC = Raw.getOriginalDecl();
411  break;
412  }
413  } else {
414  RC = getRawCommentForDeclNoCache(I);
415  OriginalDeclForRC = I;
416  RawCommentAndCacheFlags Raw;
417  if (RC) {
418  // Call order swapped to work around ICE in VS2015 RTM (Release Win32)
419  // https://connect.microsoft.com/VisualStudio/feedback/details/1741530
420  Raw.setKind(RawCommentAndCacheFlags::FromDecl);
421  Raw.setRaw(RC);
422  } else
423  Raw.setKind(RawCommentAndCacheFlags::NoCommentInDecl);
424  Raw.setOriginalDecl(I);
425  RedeclComments[I] = Raw;
426  if (RC)
427  break;
428  }
429  }
430 
431  // If we found a comment, it should be a documentation comment.
432  assert(!RC || RC->isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
433 
434  if (OriginalDecl)
435  *OriginalDecl = OriginalDeclForRC;
436 
437  // Update cache for every declaration in the redeclaration chain.
438  RawCommentAndCacheFlags Raw;
439  Raw.setRaw(RC);
440  Raw.setKind(RawCommentAndCacheFlags::FromRedecl);
441  Raw.setOriginalDecl(OriginalDeclForRC);
442 
443  for (auto I : D->redecls()) {
444  RawCommentAndCacheFlags &R = RedeclComments[I];
445  if (R.getKind() == RawCommentAndCacheFlags::NoCommentInDecl)
446  R = Raw;
447  }
448 
449  return RC;
450 }
451 
452 static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
453  SmallVectorImpl<const NamedDecl *> &Redeclared) {
454  const DeclContext *DC = ObjCMethod->getDeclContext();
455  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
456  const ObjCInterfaceDecl *ID = IMD->getClassInterface();
457  if (!ID)
458  return;
459  // Add redeclared method here.
460  for (const auto *Ext : ID->known_extensions()) {
461  if (ObjCMethodDecl *RedeclaredMethod =
462  Ext->getMethod(ObjCMethod->getSelector(),
463  ObjCMethod->isInstanceMethod()))
464  Redeclared.push_back(RedeclaredMethod);
465  }
466  }
467 }
468 
469 comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
470  const Decl *D) const {
471  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
472  ThisDeclInfo->CommentDecl = D;
473  ThisDeclInfo->IsFilled = false;
474  ThisDeclInfo->fill();
475  ThisDeclInfo->CommentDecl = FC->getDecl();
476  if (!ThisDeclInfo->TemplateParameters)
477  ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
478  comments::FullComment *CFC =
479  new (*this) comments::FullComment(FC->getBlocks(),
480  ThisDeclInfo);
481  return CFC;
482 }
483 
484 comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
485  const RawComment *RC = getRawCommentForDeclNoCache(D);
486  return RC ? RC->parse(*this, nullptr, D) : nullptr;
487 }
488 
489 comments::FullComment *ASTContext::getCommentForDecl(
490  const Decl *D,
491  const Preprocessor *PP) const {
492  if (D->isInvalidDecl())
493  return nullptr;
494  D = adjustDeclToTemplate(D);
495 
496  const Decl *Canonical = D->getCanonicalDecl();
497  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
498  ParsedComments.find(Canonical);
499 
500  if (Pos != ParsedComments.end()) {
501  if (Canonical != D) {
502  comments::FullComment *FC = Pos->second;
503  comments::FullComment *CFC = cloneFullComment(FC, D);
504  return CFC;
505  }
506  return Pos->second;
507  }
508 
509  const Decl *OriginalDecl;
510 
511  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
512  if (!RC) {
513  if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
514  SmallVector<const NamedDecl*, 8> Overridden;
515  const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
516  if (OMD && OMD->isPropertyAccessor())
517  if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
518  if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
519  return cloneFullComment(FC, D);
520  if (OMD)
521  addRedeclaredMethods(OMD, Overridden);
522  getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
523  for (unsigned i = 0, e = Overridden.size(); i < e; i++)
524  if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
525  return cloneFullComment(FC, D);
526  }
527  else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
528  // Attach any tag type's documentation to its typedef if the latter
529  // does not have one of its own.
530  QualType QT = TD->getUnderlyingType();
531  if (const auto *TT = QT->getAs<TagType>())
532  if (const Decl *TD = TT->getDecl())
533  if (comments::FullComment *FC = getCommentForDecl(TD, PP))
534  return cloneFullComment(FC, D);
535  }
536  else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
537  while (IC->getSuperClass()) {
538  IC = IC->getSuperClass();
539  if (comments::FullComment *FC = getCommentForDecl(IC, PP))
540  return cloneFullComment(FC, D);
541  }
542  }
543  else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
544  if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
545  if (comments::FullComment *FC = getCommentForDecl(IC, PP))
546  return cloneFullComment(FC, D);
547  }
548  else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
549  if (!(RD = RD->getDefinition()))
550  return nullptr;
551  // Check non-virtual bases.
552  for (const auto &I : RD->bases()) {
553  if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
554  continue;
555  QualType Ty = I.getType();
556  if (Ty.isNull())
557  continue;
558  if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
559  if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
560  continue;
561 
562  if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
563  return cloneFullComment(FC, D);
564  }
565  }
566  // Check virtual bases.
567  for (const auto &I : RD->vbases()) {
568  if (I.getAccessSpecifier() != AS_public)
569  continue;
570  QualType Ty = I.getType();
571  if (Ty.isNull())
572  continue;
573  if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
574  if (!(VirtualBase= VirtualBase->getDefinition()))
575  continue;
576  if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
577  return cloneFullComment(FC, D);
578  }
579  }
580  }
581  return nullptr;
582  }
583 
584  // If the RawComment was attached to other redeclaration of this Decl, we
585  // should parse the comment in context of that other Decl. This is important
586  // because comments can contain references to parameter names which can be
587  // different across redeclarations.
588  if (D != OriginalDecl)
589  return getCommentForDecl(OriginalDecl, PP);
590 
591  comments::FullComment *FC = RC->parse(*this, PP, D);
592  ParsedComments[Canonical] = FC;
593  return FC;
594 }
595 
596 void
597 ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
598  TemplateTemplateParmDecl *Parm) {
599  ID.AddInteger(Parm->getDepth());
600  ID.AddInteger(Parm->getPosition());
601  ID.AddBoolean(Parm->isParameterPack());
602 
603  TemplateParameterList *Params = Parm->getTemplateParameters();
604  ID.AddInteger(Params->size());
605  for (TemplateParameterList::const_iterator P = Params->begin(),
606  PEnd = Params->end();
607  P != PEnd; ++P) {
608  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
609  ID.AddInteger(0);
610  ID.AddBoolean(TTP->isParameterPack());
611  continue;
612  }
613 
614  if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
615  ID.AddInteger(1);
616  ID.AddBoolean(NTTP->isParameterPack());
617  ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
618  if (NTTP->isExpandedParameterPack()) {
619  ID.AddBoolean(true);
620  ID.AddInteger(NTTP->getNumExpansionTypes());
621  for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
622  QualType T = NTTP->getExpansionType(I);
623  ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
624  }
625  } else
626  ID.AddBoolean(false);
627  continue;
628  }
629 
630  auto *TTP = cast<TemplateTemplateParmDecl>(*P);
631  ID.AddInteger(2);
632  Profile(ID, TTP);
633  }
634 }
635 
636 TemplateTemplateParmDecl *
637 ASTContext::getCanonicalTemplateTemplateParmDecl(
638  TemplateTemplateParmDecl *TTP) const {
639  // Check if we already have a canonical template template parameter.
640  llvm::FoldingSetNodeID ID;
641  CanonicalTemplateTemplateParm::Profile(ID, TTP);
642  void *InsertPos = nullptr;
643  CanonicalTemplateTemplateParm *Canonical
644  = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
645  if (Canonical)
646  return Canonical->getParam();
647 
648  // Build a canonical template parameter list.
649  TemplateParameterList *Params = TTP->getTemplateParameters();
650  SmallVector<NamedDecl *, 4> CanonParams;
651  CanonParams.reserve(Params->size());
652  for (TemplateParameterList::const_iterator P = Params->begin(),
653  PEnd = Params->end();
654  P != PEnd; ++P) {
655  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
656  CanonParams.push_back(
657  TemplateTypeParmDecl::Create(*this, getTranslationUnitDecl(),
658  SourceLocation(),
659  SourceLocation(),
660  TTP->getDepth(),
661  TTP->getIndex(), nullptr, false,
662  TTP->isParameterPack()));
663  else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
664  QualType T = getCanonicalType(NTTP->getType());
665  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
666  NonTypeTemplateParmDecl *Param;
667  if (NTTP->isExpandedParameterPack()) {
668  SmallVector<QualType, 2> ExpandedTypes;
669  SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
670  for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
671  ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
672  ExpandedTInfos.push_back(
673  getTrivialTypeSourceInfo(ExpandedTypes.back()));
674  }
675 
676  Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
677  SourceLocation(),
678  SourceLocation(),
679  NTTP->getDepth(),
680  NTTP->getPosition(), nullptr,
681  T,
682  TInfo,
683  ExpandedTypes,
684  ExpandedTInfos);
685  } else {
686  Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
687  SourceLocation(),
688  SourceLocation(),
689  NTTP->getDepth(),
690  NTTP->getPosition(), nullptr,
691  T,
692  NTTP->isParameterPack(),
693  TInfo);
694  }
695  CanonParams.push_back(Param);
696 
697  } else
698  CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
699  cast<TemplateTemplateParmDecl>(*P)));
700  }
701 
702  assert(!TTP->getRequiresClause() &&
703  "Unexpected requires-clause on template template-parameter");
704  Expr *const CanonRequiresClause = nullptr;
705 
706  TemplateTemplateParmDecl *CanonTTP
707  = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
708  SourceLocation(), TTP->getDepth(),
709  TTP->getPosition(),
710  TTP->isParameterPack(),
711  nullptr,
712  TemplateParameterList::Create(*this, SourceLocation(),
713  SourceLocation(),
714  CanonParams,
715  SourceLocation(),
716  CanonRequiresClause));
717 
718  // Get the new insert position for the node we care about.
719  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
720  assert(!Canonical && "Shouldn't be in the map!");
721  (void)Canonical;
722 
723  // Create the canonical template template parameter entry.
724  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
725  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
726  return CanonTTP;
727 }
728 
729 CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
730  if (!LangOpts.CPlusPlus) return nullptr;
731 
732  switch (T.getCXXABI().getKind()) {
733  case TargetCXXABI::GenericARM: // Same as Itanium at this level
734  case TargetCXXABI::iOS:
735  case TargetCXXABI::iOS64:
736  case TargetCXXABI::WatchOS:
737  case TargetCXXABI::GenericAArch64:
738  case TargetCXXABI::GenericMIPS:
739  case TargetCXXABI::GenericItanium:
740  case TargetCXXABI::WebAssembly:
741  return CreateItaniumCXXABI(*this);
742  case TargetCXXABI::Microsoft:
743  return CreateMicrosoftCXXABI(*this);
744  }
745  llvm_unreachable("Invalid CXXABI type!");
746 }
747 
748 static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
749  const LangOptions &LOpts) {
750  if (LOpts.FakeAddressSpaceMap) {
751  // The fake address space map must have a distinct entry for each
752  // language-specific address space.
753  static const unsigned FakeAddrSpaceMap[] = {
754  0, // Default
755  1, // opencl_global
756  3, // opencl_local
757  2, // opencl_constant
758  0, // opencl_private
759  4, // opencl_generic
760  5, // cuda_device
761  6, // cuda_constant
762  7 // cuda_shared
763  };
764  return &FakeAddrSpaceMap;
765  } else {
766  return &T.getAddressSpaceMap();
767  }
768 }
769 
770 static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
771  const LangOptions &LangOpts) {
772  switch (LangOpts.getAddressSpaceMapMangling()) {
773  case LangOptions::ASMM_Target:
774  return TI.useAddressSpaceMapMangling();
775  case LangOptions::ASMM_On:
776  return true;
777  case LangOptions::ASMM_Off:
778  return false;
779  }
780  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
781 }
782 
783 ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
784  IdentifierTable &idents, SelectorTable &sels,
785  Builtin::Context &builtins)
786  : FunctionProtoTypes(this_()), TemplateSpecializationTypes(this_()),
787  DependentTemplateSpecializationTypes(this_()),
788  SubstTemplateTemplateParmPacks(this_()), SourceMgr(SM), LangOpts(LOpts),
789  SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFiles, SM)),
790  XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
791  LangOpts.XRayNeverInstrumentFiles,
792  LangOpts.XRayAttrListFiles, SM)),
793  PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
794  BuiltinInfo(builtins), DeclarationNames(*this), Comments(SM),
795  CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
796  CompCategories(this_()), LastSDM(nullptr, 0) {
797  TUDecl = TranslationUnitDecl::Create(*this);
798  TraversalScope = {TUDecl};
799 }
800 
801 ASTContext::~ASTContext() {
802  // Release the DenseMaps associated with DeclContext objects.
803  // FIXME: Is this the ideal solution?
804  ReleaseDeclContextMaps();
805 
806  // Call all of the deallocation functions on all of their targets.
807  for (auto &Pair : Deallocations)
808  (Pair.first)(Pair.second);
809 
810  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
811  // because they can contain DenseMaps.
812  for (llvm::DenseMap<const ObjCContainerDecl*,
813  const ASTRecordLayout*>::iterator
814  I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
815  // Increment in loop to prevent using deallocated memory.
816  if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
817  R->Destroy(*this);
818 
819  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
820  I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
821  // Increment in loop to prevent using deallocated memory.
822  if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
823  R->Destroy(*this);
824  }
825 
826  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
827  AEnd = DeclAttrs.end();
828  A != AEnd; ++A)
829  A->second->~AttrVec();
830 
831  for (std::pair<const MaterializeTemporaryExpr *, APValue *> &MTVPair :
832  MaterializedTemporaryValues)
833  MTVPair.second->~APValue();
834 
835  for (const auto &Value : ModuleInitializers)
836  Value.second->~PerModuleInitializers();
837 }
838 
839 class ASTContext::ParentMap {
840  /// Contains parents of a node.
841  using ParentVector = llvm::SmallVector<ast_type_traits::DynTypedNode, 2>;
842 
843  /// Maps from a node to its parents. This is used for nodes that have
844  /// pointer identity only, which are more common and we can save space by
845  /// only storing a unique pointer to them.
846  using ParentMapPointers = llvm::DenseMap<
847  const void *,
848  llvm::PointerUnion4<const Decl *, const Stmt *,
849  ast_type_traits::DynTypedNode *, ParentVector *>>;
850 
851  /// Parent map for nodes without pointer identity. We store a full
852  /// DynTypedNode for all keys.
853  using ParentMapOtherNodes = llvm::DenseMap<
854  ast_type_traits::DynTypedNode,
855  llvm::PointerUnion4<const Decl *, const Stmt *,
856  ast_type_traits::DynTypedNode *, ParentVector *>>;
857 
858  ParentMapPointers PointerParents;
859  ParentMapOtherNodes OtherParents;
860  class ASTVisitor;
861 
862  static ast_type_traits::DynTypedNode
863  getSingleDynTypedNodeFromParentMap(ParentMapPointers::mapped_type U) {
864  if (const auto *D = U.dyn_cast<const Decl *>())
865  return ast_type_traits::DynTypedNode::create(*D);
866  if (const auto *S = U.dyn_cast<const Stmt *>())
867  return ast_type_traits::DynTypedNode::create(*S);
868  return *U.get<ast_type_traits::DynTypedNode *>();
869  }
870 
871  template <typename NodeTy, typename MapTy>
872  static ASTContext::DynTypedNodeList getDynNodeFromMap(const NodeTy &Node,
873  const MapTy &Map) {
874  auto I = Map.find(Node);
875  if (I == Map.end()) {
876  return llvm::ArrayRef<ast_type_traits::DynTypedNode>();
877  }
878  if (const auto *V = I->second.template dyn_cast<ParentVector *>()) {
879  return llvm::makeArrayRef(*V);
880  }
881  return getSingleDynTypedNodeFromParentMap(I->second);
882  }
883 
884 public:
885  ParentMap(ASTContext &Ctx);
886  ~ParentMap() {
887  for (const auto &Entry : PointerParents) {
888  if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
889  delete Entry.second.get<ast_type_traits::DynTypedNode *>();
890  } else if (Entry.second.is<ParentVector *>()) {
891  delete Entry.second.get<ParentVector *>();
892  }
893  }
894  for (const auto &Entry : OtherParents) {
895  if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
896  delete Entry.second.get<ast_type_traits::DynTypedNode *>();
897  } else if (Entry.second.is<ParentVector *>()) {
898  delete Entry.second.get<ParentVector *>();
899  }
900  }
901  }
902 
903  DynTypedNodeList getParents(const ast_type_traits::DynTypedNode &Node) {
904  if (Node.getNodeKind().hasPointerIdentity())
905  return getDynNodeFromMap(Node.getMemoizationData(), PointerParents);
906  return getDynNodeFromMap(Node, OtherParents);
907  }
908 };
909 
910 void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
911  TraversalScope = TopLevelDecls;
912  Parents.reset();
913 }
914 
915 void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
916  Deallocations.push_back({Callback, Data});
917 }
918 
919 void
920 ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
921  ExternalSource = std::move(Source);
922 }
923 
924 void ASTContext::PrintStats() const {
925  llvm::errs() << "\n*** AST Context Stats:\n";
926  llvm::errs() << " " << Types.size() << " types total.\n";
927 
928  unsigned counts[] = {
929 #define TYPE(Name, Parent) 0,
930 #define ABSTRACT_TYPE(Name, Parent)
931 #include "clang/AST/TypeNodes.def"
932  0 // Extra
933  };
934 
935  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
936  Type *T = Types[i];
937  counts[(unsigned)T->getTypeClass()]++;
938  }
939 
940  unsigned Idx = 0;
941  unsigned TotalBytes = 0;
942 #define TYPE(Name, Parent) \
943  if (counts[Idx]) \
944  llvm::errs() << " " << counts[Idx] << " " << #Name \
945  << " types, " << sizeof(Name##Type) << " each " \
946  << "(" << counts[Idx] * sizeof(Name##Type) \
947  << " bytes)\n"; \
948  TotalBytes += counts[Idx] * sizeof(Name##Type); \
949  ++Idx;
950 #define ABSTRACT_TYPE(Name, Parent)
951 #include "clang/AST/TypeNodes.def"
952 
953  llvm::errs() << "Total bytes = " << TotalBytes << "\n";
954 
955  // Implicit special member functions.
956  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
957  << NumImplicitDefaultConstructors
958  << " implicit default constructors created\n";
959  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
960  << NumImplicitCopyConstructors
961  << " implicit copy constructors created\n";
962  if (getLangOpts().CPlusPlus)
963  llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
964  << NumImplicitMoveConstructors
965  << " implicit move constructors created\n";
966  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
967  << NumImplicitCopyAssignmentOperators
968  << " implicit copy assignment operators created\n";
969  if (getLangOpts().CPlusPlus)
970  llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
971  << NumImplicitMoveAssignmentOperators
972  << " implicit move assignment operators created\n";
973  llvm::errs() << NumImplicitDestructorsDeclared << "/"
974  << NumImplicitDestructors
975  << " implicit destructors created\n";
976 
977  if (ExternalSource) {
978  llvm::errs() << "\n";
979  ExternalSource->PrintStats();
980  }
981 
982  BumpAlloc.PrintStats();
983 }
984 
985 void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
986  bool NotifyListeners) {
987  if (NotifyListeners)
988  if (auto *Listener = getASTMutationListener())
989  Listener->RedefinedHiddenDefinition(ND, M);
990 
991  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
992 }
993 
994 void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
995  auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
996  if (It == MergedDefModules.end())
997  return;
998 
999  auto &Merged = It->second;
1000  llvm::SmallPtrSet<Module*, 8> Found;
1001  for (Module *&M : Merged)
1002  if (!Found.insert(M).second)
1003  M = nullptr;
1004  Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end());
1005 }
1006 
1007 void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
1008  if (LazyInitializers.empty())
1009  return;
1010 
1011  auto *Source = Ctx.getExternalSource();
1012  assert(Source && "lazy initializers but no external source");
1013 
1014  auto LazyInits = std::move(LazyInitializers);
1015  LazyInitializers.clear();
1016 
1017  for (auto ID : LazyInits)
1018  Initializers.push_back(Source->GetExternalDecl(ID));
1019 
1020  assert(LazyInitializers.empty() &&
1021  "GetExternalDecl for lazy module initializer added more inits");
1022 }
1023 
1024 void ASTContext::addModuleInitializer(Module *M, Decl *D) {
1025  // One special case: if we add a module initializer that imports another
1026  // module, and that module's only initializer is an ImportDecl, simplify.
1027  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
1028  auto It = ModuleInitializers.find(ID->getImportedModule());
1029 
1030  // Maybe the ImportDecl does nothing at all. (Common case.)
1031  if (It == ModuleInitializers.end())
1032  return;
1033 
1034  // Maybe the ImportDecl only imports another ImportDecl.
1035  auto &Imported = *It->second;
1036  if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
1037  Imported.resolve(*this);
1038  auto *OnlyDecl = Imported.Initializers.front();
1039  if (isa<ImportDecl>(OnlyDecl))
1040  D = OnlyDecl;
1041  }
1042  }
1043 
1044  auto *&Inits = ModuleInitializers[M];
1045  if (!Inits)
1046  Inits = new (*this) PerModuleInitializers;
1047  Inits->Initializers.push_back(D);
1048 }
1049 
1050 void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
1051  auto *&Inits = ModuleInitializers[M];
1052  if (!Inits)
1053  Inits = new (*this) PerModuleInitializers;
1054  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
1055  IDs.begin(), IDs.end());
1056 }
1057 
1058 ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
1059  auto It = ModuleInitializers.find(M);
1060  if (It == ModuleInitializers.end())
1061  return None;
1062 
1063  auto *Inits = It->second;
1064  Inits->resolve(*this);
1065  return Inits->Initializers;
1066 }
1067 
1068 ExternCContextDecl *ASTContext::getExternCContextDecl() const {
1069  if (!ExternCContext)
1070  ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());
1071 
1072  return ExternCContext;
1073 }
1074 
1075 BuiltinTemplateDecl *
1076 ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
1077  const IdentifierInfo *II) const {
1078  auto *BuiltinTemplate = BuiltinTemplateDecl::Create(*this, TUDecl, II, BTK);
1079  BuiltinTemplate->setImplicit();
1080  TUDecl->addDecl(BuiltinTemplate);
1081 
1082  return BuiltinTemplate;
1083 }
1084 
1085 BuiltinTemplateDecl *
1086 ASTContext::getMakeIntegerSeqDecl() const {
1087  if (!MakeIntegerSeqDecl)
1088  MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
1089  getMakeIntegerSeqName());
1090  return MakeIntegerSeqDecl;
1091 }
1092 
1093 BuiltinTemplateDecl *
1094 ASTContext::getTypePackElementDecl() const {
1095  if (!TypePackElementDecl)
1096  TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
1097  getTypePackElementName());
1098  return TypePackElementDecl;
1099 }
1100 
1101 RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
1102  RecordDecl::TagKind TK) const {
1103  SourceLocation Loc;
1104  RecordDecl *NewDecl;
1105  if (getLangOpts().CPlusPlus)
1106  NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
1107  Loc, &Idents.get(Name));
1108  else
1109  NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
1110  &Idents.get(Name));
1111  NewDecl->setImplicit();
1112  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
1113  const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
1114  return NewDecl;
1115 }
1116 
1117 TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
1118  StringRef Name) const {
1119  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
1120  TypedefDecl *NewDecl = TypedefDecl::Create(
1121  const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
1122  SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
1123  NewDecl->setImplicit();
1124  return NewDecl;
1125 }
1126 
1127 TypedefDecl *ASTContext::getInt128Decl() const {
1128  if (!Int128Decl)
1129  Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
1130  return Int128Decl;
1131 }
1132 
1133 TypedefDecl *ASTContext::getUInt128Decl() const {
1134  if (!UInt128Decl)
1135  UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
1136  return UInt128Decl;
1137 }
1138 
1139 void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
1140  auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
1141  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
1142  Types.push_back(Ty);
1143 }
1144 
1145 void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
1146  const TargetInfo *AuxTarget) {
1147  assert((!this->Target || this->Target == &Target) &&
1148  "Incorrect target reinitialization");
1149  assert(VoidTy.isNull() && "Context reinitialized?");
1150 
1151  this->Target = &Target;
1152  this->AuxTarget = AuxTarget;
1153 
1154  ABI.reset(createCXXABI(Target));
1155  AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
1156  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);
1157 
1158  // C99 6.2.5p19.
1159  InitBuiltinType(VoidTy, BuiltinType::Void);
1160 
1161  // C99 6.2.5p2.
1162  InitBuiltinType(BoolTy, BuiltinType::Bool);
1163  // C99 6.2.5p3.
1164  if (LangOpts.CharIsSigned)
1165  InitBuiltinType(CharTy, BuiltinType::Char_S);
1166  else
1167  InitBuiltinType(CharTy, BuiltinType::Char_U);
1168  // C99 6.2.5p4.
1169  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
1170  InitBuiltinType(ShortTy, BuiltinType::Short);
1171  InitBuiltinType(IntTy, BuiltinType::Int);
1172  InitBuiltinType(LongTy, BuiltinType::Long);
1173  InitBuiltinType(LongLongTy, BuiltinType::LongLong);
1174 
1175  // C99 6.2.5p6.
1176  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
1177  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
1178  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
1179  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
1180  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);
1181 
1182  // C99 6.2.5p10.
1183  InitBuiltinType(FloatTy, BuiltinType::Float);
1184  InitBuiltinType(DoubleTy, BuiltinType::Double);
1185  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);
1186 
1187  // GNU extension, __float128 for IEEE quadruple precision
1188  InitBuiltinType(Float128Ty, BuiltinType::Float128);
1189 
1190  // C11 extension ISO/IEC TS 18661-3
1191  InitBuiltinType(Float16Ty, BuiltinType::Float16);
1192 
1193  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
1194  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
1195  InitBuiltinType(AccumTy, BuiltinType::Accum);
1196  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
1197  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
1198  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
1199  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
1200  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
1201  InitBuiltinType(FractTy, BuiltinType::Fract);
1202  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
1203  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
1204  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
1205  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
1206  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
1207  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
1208  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
1209  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
1210  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
1211  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
1212  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
1213  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
1214  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
1215  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
1216  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
1217  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);
1218 
1219  // GNU extension, 128-bit integers.
1220  InitBuiltinType(Int128Ty, BuiltinType::Int128);
1221  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
1222 
1223  // C++ 3.9.1p5
1224  if (TargetInfo::isTypeSigned(Target.getWCharType()))
1225  InitBuiltinType(WCharTy, BuiltinType::WChar_S);
1226  else // -fshort-wchar makes wchar_t be unsigned.
1227  InitBuiltinType(WCharTy, BuiltinType::WChar_U);
1228  if (LangOpts.CPlusPlus && LangOpts.WChar)
1229  WideCharTy = WCharTy;
1230  else {
1231  // C99 (or C++ using -fno-wchar).
1232  WideCharTy = getFromTargetType(Target.getWCharType());
1233  }
1234 
1235  WIntTy = getFromTargetType(Target.getWIntType());
1236 
1237  // C++20 (proposed)
1238  InitBuiltinType(Char8Ty, BuiltinType::Char8);
1239 
1240  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1241  InitBuiltinType(Char16Ty, BuiltinType::Char16);
1242  else // C99
1243  Char16Ty = getFromTargetType(Target.getChar16Type());
1244 
1245  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1246  InitBuiltinType(Char32Ty, BuiltinType::Char32);
1247  else // C99
1248  Char32Ty = getFromTargetType(Target.getChar32Type());
1249 
1250  // Placeholder type for type-dependent expressions whose type is
1251  // completely unknown. No code should ever check a type against
1252  // DependentTy and users should never see it; however, it is here to
1253  // help diagnose failures to properly check for type-dependent
1254  // expressions.
1255  InitBuiltinType(DependentTy, BuiltinType::Dependent);
1256 
1257  // Placeholder type for functions.
1258  InitBuiltinType(OverloadTy, BuiltinType::Overload);
1259 
1260  // Placeholder type for bound members.
1261  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);
1262 
1263  // Placeholder type for pseudo-objects.
1264  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);
1265 
1266  // "any" type; useful for debugger-like clients.
1267  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);
1268 
1269  // Placeholder type for unbridged ARC casts.
1270  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);
1271 
1272  // Placeholder type for builtin functions.
1273  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
1274 
1275  // Placeholder type for OMP array sections.
1276  if (LangOpts.OpenMP)
1277  InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
1278 
1279  // C99 6.2.5p11.
1280  FloatComplexTy      = getComplexType(FloatTy);
1281  DoubleComplexTy     = getComplexType(DoubleTy);
1282  LongDoubleComplexTy = getComplexType(LongDoubleTy);
1283  Float128ComplexTy   = getComplexType(Float128Ty);
1284 
1285  // Builtin types for 'id', 'Class', and 'SEL'.
1286  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
1287  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
1288  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);
1289 
1290  if (LangOpts.OpenCL) {
1291 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
1292  InitBuiltinType(SingletonId, BuiltinType::Id);
1293 #include "clang/Basic/OpenCLImageTypes.def"
1294 
1295  InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
1296  InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
1297  InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
1298  InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
1299  InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);
1300 
1301 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
1302  InitBuiltinType(Id##Ty, BuiltinType::Id);
1303 #include "clang/Basic/OpenCLExtensionTypes.def"
1304  }
1305 
1306  // Builtin type for __objc_yes and __objc_no
1307  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
1308  SignedCharTy : BoolTy);
1309 
1310  ObjCConstantStringType = QualType();
1311 
1312  ObjCSuperType = QualType();
1313 
1314  // void * type
1315  if (LangOpts.OpenCLVersion >= 200) {
1316  auto Q = VoidTy.getQualifiers();
1317  Q.setAddressSpace(LangAS::opencl_generic);
1318  VoidPtrTy = getPointerType(getCanonicalType(
1319  getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
1320  } else {
1321  VoidPtrTy = getPointerType(VoidTy);
1322  }
1323 
1324  // nullptr type (C++0x 2.14.7)
1325  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
1326 
1327  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
1328  InitBuiltinType(HalfTy, BuiltinType::Half);
1329 
1330  // Builtin type used to help define __builtin_va_list.
1331  VaListTagDecl = nullptr;
1332 }
1333 
1334 DiagnosticsEngine &ASTContext::getDiagnostics() const {
1335  return SourceMgr.getDiagnostics();
1336 }
1337 
1338 AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
1339  AttrVec *&Result = DeclAttrs[D];
1340  if (!Result) {
1341  void *Mem = Allocate(sizeof(AttrVec));
1342  Result = new (Mem) AttrVec;
1343  }
1344 
1345  return *Result;
1346 }
1347 
1348 /// Erase the attributes corresponding to the given declaration.
1349 void ASTContext::eraseDeclAttrs(const Decl *D) {
1350  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
1351  if (Pos != DeclAttrs.end()) {
1352  Pos->second->~AttrVec();
1353  DeclAttrs.erase(Pos);
1354  }
1355 }
1356 
1357 // FIXME: Remove ?
1358 MemberSpecializationInfo *
1359 ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
1360  assert(Var->isStaticDataMember() && "Not a static data member");
1361  return getTemplateOrSpecializationInfo(Var)
1362  .dyn_cast<MemberSpecializationInfo *>();
1363 }
1364 
1365 ASTContext::TemplateOrSpecializationInfo
1366 ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
1367  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
1368  TemplateOrInstantiation.find(Var);
1369  if (Pos == TemplateOrInstantiation.end())
1370  return {};
1371 
1372  return Pos->second;
1373 }
1374 
1375 void
1376 ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
1377  TemplateSpecializationKind TSK,
1378  SourceLocation PointOfInstantiation) {
1379  assert(Inst->isStaticDataMember() && "Not a static data member");
1380  assert(Tmpl->isStaticDataMember() && "Not a static data member");
1381  setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
1382  Tmpl, TSK, PointOfInstantiation));
1383 }
1384 
1385 void
1386 ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
1387  TemplateOrSpecializationInfo TSI) {
1388  assert(!TemplateOrInstantiation[Inst] &&
1389  "Already noted what the variable was instantiated from");
1390  TemplateOrInstantiation[Inst] = TSI;
1391 }
1392 
1393 FunctionDecl *ASTContext::getClassScopeSpecializationPattern(
1394  const FunctionDecl *FD){
1395  assert(FD && "Specialization is 0");
1396  llvm::DenseMap<const FunctionDecl*, FunctionDecl *>::const_iterator Pos
1397  = ClassScopeSpecializationPattern.find(FD);
1398  if (Pos == ClassScopeSpecializationPattern.end())
1399  return nullptr;
1400 
1401  return Pos->second;
1402 }
1403 
1404 void ASTContext::setClassScopeSpecializationPattern(FunctionDecl *FD,
1405  FunctionDecl *Pattern) {
1406  assert(FD && "Specialization is 0");
1407  assert(Pattern && "Class scope specialization pattern is 0");
1408  ClassScopeSpecializationPattern[FD] = Pattern;
1409 }
1410 
1411 NamedDecl *
1412 ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
1413  auto Pos = InstantiatedFromUsingDecl.find(UUD);
1414  if (Pos == InstantiatedFromUsingDecl.end())
1415  return nullptr;
1416 
1417  return Pos->second;
1418 }
1419 
1420 void
1421 ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
1422  assert((isa<UsingDecl>(Pattern) ||
1423  isa<UnresolvedUsingValueDecl>(Pattern) ||
1424  isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
1425  "pattern decl is not a using decl");
1426  assert((isa<UsingDecl>(Inst) ||
1427  isa<UnresolvedUsingValueDecl>(Inst) ||
1428  isa<UnresolvedUsingTypenameDecl>(Inst)) &&
1429  "instantiation did not produce a using decl");
1430  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
1431  InstantiatedFromUsingDecl[Inst] = Pattern;
1432 }
1433 
1434 UsingShadowDecl *
1435 ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
1436  llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
1437  = InstantiatedFromUsingShadowDecl.find(Inst);
1438  if (Pos == InstantiatedFromUsingShadowDecl.end())
1439  return nullptr;
1440 
1441  return Pos->second;
1442 }
1443 
1444 void
1445 ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
1446  UsingShadowDecl *Pattern) {
1447  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
1448  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
1449 }
1450 
1451 FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
1452  llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
1453  = InstantiatedFromUnnamedFieldDecl.find(Field);
1454  if (Pos == InstantiatedFromUnnamedFieldDecl.end())
1455  return nullptr;
1456 
1457  return Pos->second;
1458 }
1459 
1460 void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
1461  FieldDecl *Tmpl) {
1462  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
1463  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
1464  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1465  "Already noted what unnamed field was instantiated from");
1466 
1467  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1468 }
1469 
1470 ASTContext::overridden_cxx_method_iterator
1471 ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
1472  return overridden_methods(Method).begin();
1473 }
1474 
1475 ASTContext::overridden_cxx_method_iterator
1476 ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
1477  return overridden_methods(Method).end();
1478 }
1479 
1480 unsigned
1481 ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
1482  auto Range = overridden_methods(Method);
1483  return Range.end() - Range.begin();
1484 }
1485 
1486 ASTContext::overridden_method_range
1487 ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
1488  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
1489  OverriddenMethods.find(Method->getCanonicalDecl());
1490  if (Pos == OverriddenMethods.end())
1491  return overridden_method_range(nullptr, nullptr);
1492  return overridden_method_range(Pos->second.begin(), Pos->second.end());
1493 }
1494 
1495 void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
1496  const CXXMethodDecl *Overridden) {
1497  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
1498  OverriddenMethods[Method].push_back(Overridden);
1499 }
1500 
1501 void ASTContext::getOverriddenMethods(
1502  const NamedDecl *D,
1503  SmallVectorImpl<const NamedDecl *> &Overridden) const {
1504  assert(D);
1505 
1506  if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
1507  Overridden.append(overridden_methods_begin(CXXMethod),
1508  overridden_methods_end(CXXMethod));
1509  return;
1510  }
1511 
1512  const auto *Method = dyn_cast<ObjCMethodDecl>(D);
1513  if (!Method)
1514  return;
1515 
1516  SmallVector<const ObjCMethodDecl *, 8> OverDecls;
1517  Method->getOverriddenMethods(OverDecls);
1518  Overridden.append(OverDecls.begin(), OverDecls.end());
1519 }
1520 
1521 void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
1522  assert(!Import->NextLocalImport && "Import declaration already in the chain");
1523  assert(!Import->isFromASTFile() && "Non-local import declaration");
1524  if (!FirstLocalImport) {
1525  FirstLocalImport = Import;
1526  LastLocalImport = Import;
1527  return;
1528  }
1529 
1530  LastLocalImport->NextLocalImport = Import;
1531  LastLocalImport = Import;
1532 }
1533 
1534 //===----------------------------------------------------------------------===//
1535 // Type Sizing and Analysis
1536 //===----------------------------------------------------------------------===//
1537 
1538 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1539 /// scalar floating point type.
1540 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1541  const auto *BT = T->getAs<BuiltinType>();
1542  assert(BT && "Not a floating point type!");
1543  switch (BT->getKind()) {
1544  default: llvm_unreachable("Not a floating point type!");
1545  case BuiltinType::Float16:
1546  case BuiltinType::Half:
1547  return Target->getHalfFormat();
1548  case BuiltinType::Float: return Target->getFloatFormat();
1549  case BuiltinType::Double: return Target->getDoubleFormat();
1550  case BuiltinType::LongDouble: return Target->getLongDoubleFormat();
1551  case BuiltinType::Float128: return Target->getFloat128Format();
1552  }
1553 }
1554 
1555 CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
1556  unsigned Align = Target->getCharWidth();
1557 
1558  bool UseAlignAttrOnly = false;
1559  if (unsigned AlignFromAttr = D->getMaxAlignment()) {
1560  Align = AlignFromAttr;
1561 
1562  // __attribute__((aligned)) can increase or decrease alignment
1563  // *except* on a struct or struct member, where it only increases
1564  // alignment unless 'packed' is also specified.
1565  //
1566  // It is an error for alignas to decrease alignment, so we can
1567  // ignore that possibility; Sema should diagnose it.
1568  if (isa<FieldDecl>(D)) {
1569  UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
1570  cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
1571  } else {
1572  UseAlignAttrOnly = true;
1573  }
1574  }
1575  else if (isa<FieldDecl>(D))
1576  UseAlignAttrOnly =
1577  D->hasAttr<PackedAttr>() ||
1578  cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
1579 
1580  // If we're using the align attribute only, just ignore everything
1581  // else about the declaration and its type.
1582  if (UseAlignAttrOnly) {
1583  // do nothing
1584  } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
1585  QualType T = VD->getType();
1586  if (const auto *RT = T->getAs<ReferenceType>()) {
1587  if (ForAlignof)
1588  T = RT->getPointeeType();
1589  else
1590  T = getPointerType(RT->getPointeeType());
1591  }
1592  QualType BaseT = getBaseElementType(T);
1593  if (T->isFunctionType())
1594  Align = getTypeInfoImpl(T.getTypePtr()).Align;
1595  else if (!BaseT->isIncompleteType()) {
1596  // Adjust alignments of declarations with array type by the
1597  // large-array alignment on the target.
1598  if (const ArrayType *arrayType = getAsArrayType(T)) {
1599  unsigned MinWidth = Target->getLargeArrayMinWidth();
1600  if (!ForAlignof && MinWidth) {
1601  if (isa<VariableArrayType>(arrayType))
1602  Align = std::max(Align, Target->getLargeArrayAlign());
1603  else if (isa<ConstantArrayType>(arrayType) &&
1604  MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
1605  Align = std::max(Align, Target->getLargeArrayAlign());
1606  }
1607  }
1608  Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
1609  if (BaseT.getQualifiers().hasUnaligned())
1610  Align = Target->getCharWidth();
1611  if (const auto *VD = dyn_cast<VarDecl>(D)) {
1612  if (VD->hasGlobalStorage() && !ForAlignof)
1613  Align = std::max(Align, getTargetInfo().getMinGlobalAlign());
1614  }
1615  }
1616 
1617  // Fields can be subject to extra alignment constraints, like if
1618  // the field is packed, the struct is packed, or the struct has a
1619  // max-field-alignment constraint (#pragma pack). So calculate
1620  // the actual alignment of the field within the struct, and then
1621  // (as we're expected to) constrain that by the alignment of the type.
1622  if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
1623  const RecordDecl *Parent = Field->getParent();
1624  // We can only produce a sensible answer if the record is valid.
1625  if (!Parent->isInvalidDecl()) {
1626  const ASTRecordLayout &Layout = getASTRecordLayout(Parent);
1627 
1628  // Start with the record's overall alignment.
1629  unsigned FieldAlign = toBits(Layout.getAlignment());
1630 
1631  // Use the GCD of that and the offset within the record.
1632  uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
1633  if (Offset > 0) {
1634  // Alignment is always a power of 2, so the GCD will be a power of 2,
1635  // which means we get to do this crazy thing instead of Euclid's.
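 // For example, an offset of 24 (0b11000): 24 & (~24 + 1) == 8, so a field
 // at offset 24 can only be assumed to be 8-aligned, whatever the record's
 // overall alignment is.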
1636  uint64_t LowBitOfOffset = Offset & (~Offset + 1);
1637  if (LowBitOfOffset < FieldAlign)
1638  FieldAlign = static_cast<unsigned>(LowBitOfOffset);
1639  }
1640 
1641  Align = std::min(Align, FieldAlign);
1642  }
1643  }
1644  }
1645 
1646  return toCharUnitsFromBits(Align);
1647 }
1648 
1649 // getTypeInfoDataSizeInChars - Return the size of a type, in
1650 // chars. If the type is a record, its data size is returned. This is
1651 // the size of the memcpy that's performed when assigning this type
1652 // using a trivial copy/move assignment operator.
1653 std::pair<CharUnits, CharUnits>
1654 ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
1655  std::pair<CharUnits, CharUnits> sizeAndAlign = getTypeInfoInChars(T);
1656 
1657  // In C++, objects can sometimes be allocated into the tail padding
1658  // of a base-class subobject. We decide whether that's possible
1659  // during class layout, so here we can just trust the layout results.
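 // For example, with a non-POD base `struct A { A(); int i; char c; };` and
 // `struct B : A { char d; };`, Itanium-ABI layouts typically place B::d in
 // A's tail padding, so copying sizeof(A) bytes would clobber it; the data
 // size excludes that padding.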
1660  if (getLangOpts().CPlusPlus) {
1661  if (const auto *RT = T->getAs<RecordType>()) {
1662  const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
1663  sizeAndAlign.first = layout.getDataSize();
1664  }
1665  }
1666 
1667  return sizeAndAlign;
1668 }
1669 
1670 /// getConstantArrayInfoInChars - Performing the computation in CharUnits
1671 /// instead of in bits prevents overflowing the uint64_t for some large arrays.
1672 std::pair<CharUnits, CharUnits>
1673 getConstantArrayInfoInChars(const ASTContext &Context,
1674  const ConstantArrayType *CAT) {
1675  std::pair<CharUnits, CharUnits> EltInfo =
1676  Context.getTypeInfoInChars(CAT->getElementType());
1677  uint64_t Size = CAT->getSize().getZExtValue();
1678  assert((Size == 0 || static_cast<uint64_t>(EltInfo.first.getQuantity()) <=
1679  (uint64_t)(-1)/Size) &&
1680  "Overflow in array type char size evaluation");
1681  uint64_t Width = EltInfo.first.getQuantity() * Size;
1682  unsigned Align = EltInfo.second.getQuantity();
1683  if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
1684  Context.getTargetInfo().getPointerWidth(0) == 64)
1685  Width = llvm::alignTo(Width, Align);
1686  return std::make_pair(CharUnits::fromQuantity(Width),
1687  CharUnits::fromQuantity(Align));
1688 }
1689 
1690 std::pair<CharUnits, CharUnits>
1691 ASTContext::getTypeInfoInChars(const Type *T) const {
1692  if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
1693  return getConstantArrayInfoInChars(*this, CAT);
1694  TypeInfo Info = getTypeInfo(T);
1695  return std::make_pair(toCharUnitsFromBits(Info.Width),
1696  toCharUnitsFromBits(Info.Align));
1697 }
1698 
1699 std::pair<CharUnits, CharUnits>
1700 ASTContext::getTypeInfoInChars(QualType T) const {
1701  return getTypeInfoInChars(T.getTypePtr());
1702 }
1703 
1704 bool ASTContext::isAlignmentRequired(const Type *T) const {
1705  return getTypeInfo(T).AlignIsRequired;
1706 }
1707 
1708 bool ASTContext::isAlignmentRequired(QualType T) const {
1709  return isAlignmentRequired(T.getTypePtr());
1710 }
1711 
1712 unsigned ASTContext::getTypeAlignIfKnown(QualType T) const {
1713  // An alignment on a typedef overrides anything else.
1714  if (const auto *TT = T->getAs<TypedefType>())
1715  if (unsigned Align = TT->getDecl()->getMaxAlignment())
1716  return Align;
1717 
1718  // If we have an (array of) complete type, we're done.
1719  T = getBaseElementType(T);
1720  if (!T->isIncompleteType())
1721  return getTypeAlign(T);
1722 
1723  // If we had an array type, its element type might be a typedef
1724  // type with an alignment attribute.
1725  if (const auto *TT = T->getAs<TypedefType>())
1726  if (unsigned Align = TT->getDecl()->getMaxAlignment())
1727  return Align;
1728 
1729  // Otherwise, see if the declaration of the type had an attribute.
1730  if (const auto *TT = T->getAs<TagType>())
1731  return TT->getDecl()->getMaxAlignment();
1732 
1733  return 0;
1734 }
1735 
1736 TypeInfo ASTContext::getTypeInfo(const Type *T) const {
1737  TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
1738  if (I != MemoizedTypeInfo.end())
1739  return I->second;
1740 
1741  // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
1742  TypeInfo TI = getTypeInfoImpl(T);
1743  MemoizedTypeInfo[T] = TI;
1744  return TI;
1745 }
1746 
1747 /// getTypeInfoImpl - Return the size of the specified type, in bits. This
1748 /// method does not work on incomplete types.
1749 ///
1750 /// FIXME: Pointers into different addr spaces could have different sizes and
1751 /// alignment requirements: getPointerInfo should take an AddrSpace, this
1752 /// should take a QualType, &c.
1753 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
1754  uint64_t Width = 0;
1755  unsigned Align = 8;
1756  bool AlignIsRequired = false;
1757  unsigned AS = 0;
1758  switch (T->getTypeClass()) {
1759 #define TYPE(Class, Base)
1760 #define ABSTRACT_TYPE(Class, Base)
1761 #define NON_CANONICAL_TYPE(Class, Base)
1762 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
1763 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
1764  case Type::Class: \
1765  assert(!T->isDependentType() && "should not see dependent types here"); \
1766  return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
1767 #include "clang/AST/TypeNodes.def"
1768  llvm_unreachable("Should not see dependent types");
1769 
1770  case Type::FunctionNoProto:
1771  case Type::FunctionProto:
1772  // GCC extension: alignof(function) = 32 bits
1773  Width = 0;
1774  Align = 32;
1775  break;
1776 
1777  case Type::IncompleteArray:
1778  case Type::VariableArray:
1779  Width = 0;
1780  Align = getTypeAlign(cast<ArrayType>(T)->getElementType());
1781  break;
1782 
1783  case Type::ConstantArray: {
1784  const auto *CAT = cast<ConstantArrayType>(T);
1785 
1786  TypeInfo EltInfo = getTypeInfo(CAT->getElementType());
1787  uint64_t Size = CAT->getSize().getZExtValue();
1788  assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
1789  "Overflow in array type bit size evaluation");
1790  Width = EltInfo.Width * Size;
1791  Align = EltInfo.Align;
1792  if (!getTargetInfo().getCXXABI().isMicrosoft() ||
1793  getTargetInfo().getPointerWidth(0) == 64)
1794  Width = llvm::alignTo(Width, Align);
1795  break;
1796  }
1797  case Type::ExtVector:
1798  case Type::Vector: {
1799  const auto *VT = cast<VectorType>(T);
1800  TypeInfo EltInfo = getTypeInfo(VT->getElementType());
1801  Width = EltInfo.Width * VT->getNumElements();
1802  Align = Width;
1803  // If the alignment is not a power of 2, round up to the next power of 2.
1804  // This happens for non-power-of-2 length vectors.
1805  if (Align & (Align-1)) {
1806  Align = llvm::NextPowerOf2(Align);
1807  Width = llvm::alignTo(Width, Align);
1808  }
1809  // Adjust the alignment based on the target max.
1810  uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
1811  if (TargetVectorAlign && TargetVectorAlign < Align)
1812  Align = TargetVectorAlign;
1813  break;
1814  }
1815 
1816  case Type::Builtin:
1817  switch (cast<BuiltinType>(T)->getKind()) {
1818  default: llvm_unreachable("Unknown builtin type!");
1819  case BuiltinType::Void:
1820  // GCC extension: alignof(void) = 8 bits.
1821  Width = 0;
1822  Align = 8;
1823  break;
1824  case BuiltinType::Bool:
1825  Width = Target->getBoolWidth();
1826  Align = Target->getBoolAlign();
1827  break;
1828  case BuiltinType::Char_S:
1829  case BuiltinType::Char_U:
1830  case BuiltinType::UChar:
1831  case BuiltinType::SChar:
1832  case BuiltinType::Char8:
1833  Width = Target->getCharWidth();
1834  Align = Target->getCharAlign();
1835  break;
1836  case BuiltinType::WChar_S:
1837  case BuiltinType::WChar_U:
1838  Width = Target->getWCharWidth();
1839  Align = Target->getWCharAlign();
1840  break;
1841  case BuiltinType::Char16:
1842  Width = Target->getChar16Width();
1843  Align = Target->getChar16Align();
1844  break;
1845  case BuiltinType::Char32:
1846  Width = Target->getChar32Width();
1847  Align = Target->getChar32Align();
1848  break;
1849  case BuiltinType::UShort:
1850  case BuiltinType::Short:
1851  Width = Target->getShortWidth();
1852  Align = Target->getShortAlign();
1853  break;
1854  case BuiltinType::UInt:
1855  case BuiltinType::Int:
1856  Width = Target->getIntWidth();
1857  Align = Target->getIntAlign();
1858  break;
1859  case BuiltinType::ULong:
1860  case BuiltinType::Long:
1861  Width = Target->getLongWidth();
1862  Align = Target->getLongAlign();
1863  break;
1864  case BuiltinType::ULongLong:
1865  case BuiltinType::LongLong:
1866  Width = Target->getLongLongWidth();
1867  Align = Target->getLongLongAlign();
1868  break;
1869  case BuiltinType::Int128:
1870  case BuiltinType::UInt128:
1871  Width = 128;
1872  Align = 128; // int128_t is 128-bit aligned on all targets.
1873  break;
1874  case BuiltinType::ShortAccum:
1875  case BuiltinType::UShortAccum:
1876  case BuiltinType::SatShortAccum:
1877  case BuiltinType::SatUShortAccum:
1878  Width = Target->getShortAccumWidth();
1879  Align = Target->getShortAccumAlign();
1880  break;
1881  case BuiltinType::Accum:
1882  case BuiltinType::UAccum:
1883  case BuiltinType::SatAccum:
1884  case BuiltinType::SatUAccum:
1885  Width = Target->getAccumWidth();
1886  Align = Target->getAccumAlign();
1887  break;
1888  case BuiltinType::LongAccum:
1889  case BuiltinType::ULongAccum:
1890  case BuiltinType::SatLongAccum:
1891  case BuiltinType::SatULongAccum:
1892  Width = Target->getLongAccumWidth();
1893  Align = Target->getLongAccumAlign();
1894  break;
1895  case BuiltinType::ShortFract:
1896  case BuiltinType::UShortFract:
1897  case BuiltinType::SatShortFract:
1898  case BuiltinType::SatUShortFract:
1899  Width = Target->getShortFractWidth();
1900  Align = Target->getShortFractAlign();
1901  break;
1902  case BuiltinType::Fract:
1903  case BuiltinType::UFract:
1904  case BuiltinType::SatFract:
1905  case BuiltinType::SatUFract:
1906  Width = Target->getFractWidth();
1907  Align = Target->getFractAlign();
1908  break;
1909  case BuiltinType::LongFract:
1910  case BuiltinType::ULongFract:
1911  case BuiltinType::SatLongFract:
1912  case BuiltinType::SatULongFract:
1913  Width = Target->getLongFractWidth();
1914  Align = Target->getLongFractAlign();
1915  break;
1916  case BuiltinType::Float16:
1917  case BuiltinType::Half:
1918  Width = Target->getHalfWidth();
1919  Align = Target->getHalfAlign();
1920  break;
1921  case BuiltinType::Float:
1922  Width = Target->getFloatWidth();
1923  Align = Target->getFloatAlign();
1924  break;
1925  case BuiltinType::Double:
1926  Width = Target->getDoubleWidth();
1927  Align = Target->getDoubleAlign();
1928  break;
1929  case BuiltinType::LongDouble:
1930  Width = Target->getLongDoubleWidth();
1931  Align = Target->getLongDoubleAlign();
1932  break;
1933  case BuiltinType::Float128:
1934  Width = Target->getFloat128Width();
1935  Align = Target->getFloat128Align();
1936  break;
1937  case BuiltinType::NullPtr:
1938  Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
1939  Align = Target->getPointerAlign(0); // == sizeof(void*)
1940  break;
1941  case BuiltinType::ObjCId:
1942  case BuiltinType::ObjCClass:
1943  case BuiltinType::ObjCSel:
1944  Width = Target->getPointerWidth(0);
1945  Align = Target->getPointerAlign(0);
1946  break;
1947  case BuiltinType::OCLSampler:
1948  case BuiltinType::OCLEvent:
1949  case BuiltinType::OCLClkEvent:
1950  case BuiltinType::OCLQueue:
1951  case BuiltinType::OCLReserveID:
1952 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
1953  case BuiltinType::Id:
1954 #include "clang/Basic/OpenCLImageTypes.def"
1955 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
1956  case BuiltinType::Id:
1957 #include "clang/Basic/OpenCLExtensionTypes.def"
1958  AS = getTargetAddressSpace(
1959  Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)));
1960  Width = Target->getPointerWidth(AS);
1961  Align = Target->getPointerAlign(AS);
1962  break;
1963  }
1964  break;
1965  case Type::ObjCObjectPointer:
1966  Width = Target->getPointerWidth(0);
1967  Align = Target->getPointerAlign(0);
1968  break;
1969  case Type::BlockPointer:
1970  AS = getTargetAddressSpace(cast<BlockPointerType>(T)->getPointeeType());
1971  Width = Target->getPointerWidth(AS);
1972  Align = Target->getPointerAlign(AS);
1973  break;
1974  case Type::LValueReference:
1975  case Type::RValueReference:
1976  // alignof and sizeof should never enter this code path here, so we go
1977  // the pointer route.
1978  AS = getTargetAddressSpace(cast<ReferenceType>(T)->getPointeeType());
1979  Width = Target->getPointerWidth(AS);
1980  Align = Target->getPointerAlign(AS);
1981  break;
1982  case Type::Pointer:
1983  AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType());
1984  Width = Target->getPointerWidth(AS);
1985  Align = Target->getPointerAlign(AS);
1986  break;
1987  case Type::MemberPointer: {
1988  const auto *MPT = cast<MemberPointerType>(T);
1989  CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
1990  Width = MPI.Width;
1991  Align = MPI.Align;
1992  break;
1993  }
1994  case Type::Complex: {
1995  // Complex types have the same alignment as their elements, but twice the
1996  // size.
1997  TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
1998  Width = EltInfo.Width * 2;
1999  Align = EltInfo.Align;
2000  break;
2001  }
2002  case Type::ObjCObject:
2003  return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
2004  case Type::Adjusted:
2005  case Type::Decayed:
2006  return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
2007  case Type::ObjCInterface: {
2008  const auto *ObjCI = cast<ObjCInterfaceType>(T);
2009  const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2010  Width = toBits(Layout.getSize());
2011  Align = toBits(Layout.getAlignment());
2012  break;
2013  }
2014  case Type::Record:
2015  case Type::Enum: {
2016  const auto *TT = cast<TagType>(T);
2017 
2018  if (TT->getDecl()->isInvalidDecl()) {
2019  Width = 8;
2020  Align = 8;
2021  break;
2022  }
2023 
2024  if (const auto *ET = dyn_cast<EnumType>(TT)) {
2025  const EnumDecl *ED = ET->getDecl();
2026  TypeInfo Info =
2027  getTypeInfo(ED->getIntegerType()->getCanonicalTypeInternal().getTypePtr());
2028  if (unsigned AttrAlign = ED->getMaxAlignment()) {
2029  Info.Align = AttrAlign;
2030  Info.AlignIsRequired = true;
2031  }
2032  return Info;
2033  }
2034 
2035  const auto *RT = cast<RecordType>(TT);
2036  const RecordDecl *RD = RT->getDecl();
2037  const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2038  Width = toBits(Layout.getSize());
2039  Align = toBits(Layout.getAlignment());
2040  AlignIsRequired = RD->hasAttr<AlignedAttr>();
2041  break;
2042  }
2043 
2044  case Type::SubstTemplateTypeParm:
2045  return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
2046  getReplacementType().getTypePtr());
2047 
2048  case Type::Auto:
2049  case Type::DeducedTemplateSpecialization: {
2050  const auto *A = cast<DeducedType>(T);
2051  assert(!A->getDeducedType().isNull() &&
2052  "cannot request the size of an undeduced or dependent auto type");
2053  return getTypeInfo(A->getDeducedType().getTypePtr());
2054  }
2055 
2056  case Type::Paren:
2057  return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
2058 
2059  case Type::ObjCTypeParam:
2060  return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());
2061 
2062  case Type::Typedef: {
2063  const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl();
2064  TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
2065  // If the typedef has an aligned attribute on it, it overrides any computed
2066  // alignment we have. This violates the GCC documentation (which says that
2067  // attribute(aligned) can only round up) but matches its implementation.
2068  if (unsigned AttrAlign = Typedef->getMaxAlignment()) {
2069  Align = AttrAlign;
2070  AlignIsRequired = true;
2071  } else {
2072  Align = Info.Align;
2073  AlignIsRequired = Info.AlignIsRequired;
2074  }
2075  Width = Info.Width;
2076  break;
2077  }
2078 
2079  case Type::Elaborated:
2080  return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
2081 
2082  case Type::Attributed:
2083  return getTypeInfo(
2084  cast<AttributedType>(T)->getEquivalentType().getTypePtr());
2085 
2086  case Type::Atomic: {
2087  // Start with the base type information.
2088  TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
2089  Width = Info.Width;
2090  Align = Info.Align;
2091 
2092  if (!Width) {
2093  // An otherwise zero-sized type should still generate an
2094  // atomic operation.
2095  Width = Target->getCharWidth();
2096  assert(Align);
2097  } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2098  // If the size of the type doesn't exceed the platform's max
2099  // atomic promotion width, make the size and alignment more
2100  // favorable to atomic operations:
2101 
2102  // Round the size up to a power of 2.
2103  if (!llvm::isPowerOf2_64(Width))
2104  Width = llvm::NextPowerOf2(Width);
2105 
2106  // Set the alignment equal to the size.
2107  Align = static_cast<unsigned>(Width);
2108  }
2109  }
2110  break;
2111 
2112  case Type::Pipe:
2113  Width = Target->getPointerWidth(getTargetAddressSpace(LangAS::opencl_global));
2114  Align = Target->getPointerAlign(getTargetAddressSpace(LangAS::opencl_global));
2115  break;
2116  }
2117 
2118  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2119  return TypeInfo(Width, Align, AlignIsRequired);
2120 }
2121 
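// Worked example (illustrative): for a vector of 3 floats declared as
//   typedef float float3 __attribute__((ext_vector_type(3)));
// the element width is 32 bits, so Width and Align start at 96; since 96 is
// not a power of two, both are rounded up, giving sizeof(float3) == 16 bytes
// and alignof(float3) == 16 bytes (subject to the target's maximum vector
// alignment).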
2122 unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2123  UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
2124  if (I != MemoizedUnadjustedAlign.end())
2125  return I->second;
2126 
2127  unsigned UnadjustedAlign;
2128  if (const auto *RT = T->getAs<RecordType>()) {
2129  const RecordDecl *RD = RT->getDecl();
2130  const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2131  UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2132  } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
2133  const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2134  UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2135  } else {
2136  UnadjustedAlign = getTypeAlign(T);
2137  }
2138 
2139  MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2140  return UnadjustedAlign;
2141 }
2142 
2143 unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
2144  unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
2145  // Target ppc64 with QPX: simd default alignment for pointer to double is 32.
2146  if ((getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64 ||
2147  getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64le) &&
2148  getTargetInfo().getABI() == "elfv1-qpx" &&
2149  T->isSpecificBuiltinType(BuiltinType::Double))
2150  SimdAlign = 256;
2151  return SimdAlign;
2152 }
2153 
2154 /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2155 CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
2156  return CharUnits::fromQuantity(BitSize / getCharWidth());
2157 }
2158 
2159 /// toBits - Convert a size in characters to a size in bits.
2160 int64_t ASTContext::toBits(CharUnits CharSize) const {
2161  return CharSize.getQuantity() * getCharWidth();
2162 }
2163 
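// Worked example (illustrative): with getCharWidth() == 8,
//   toCharUnitsFromBits(96) == CharUnits::fromQuantity(12)
//   toBits(CharUnits::fromQuantity(12)) == 96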
2164 /// getTypeSizeInChars - Return the size of the specified type, in characters.
2165 /// This method does not work on incomplete types.
2166 CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
2167  return getTypeInfoInChars(T).first;
2168 }
2169 CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
2170  return getTypeInfoInChars(T).first;
2171 }
2172 
2173 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2174 /// characters. This method does not work on incomplete types.
2175 CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
2176  return toCharUnitsFromBits(getTypeAlign(T));
2177 }
2178 CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
2179  return toCharUnitsFromBits(getTypeAlign(T));
2180 }
2181 
2182 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
2183 /// type, in characters, before alignment adjustments. This method does
2184 /// not work on incomplete types.
2185 CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
2186  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
2187 }
2188 CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
2189  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
2190 }
2191 
2192 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified
2193 /// type for the current target in bits. This can be different than the ABI
2194 /// alignment in cases where it is beneficial for performance to overalign
2195 /// a data type.
2196 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
2197  TypeInfo TI = getTypeInfo(T);
2198  unsigned ABIAlign = TI.Align;
2199 
2200  T = T->getBaseElementTypeUnsafe();
2201 
2202  // The preferred alignment of member pointers is that of a pointer.
2203  if (T->isMemberPointerType())
2204  return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
2205 
2206  if (!Target->allowsLargerPreferedTypeAlignment())
2207  return ABIAlign;
2208 
2209  // Double and long long should be naturally aligned if possible.
2210  if (const auto *CT = T->getAs<ComplexType>())
2211  T = CT->getElementType().getTypePtr();
2212  if (const auto *ET = T->getAs<EnumType>())
2213  T = ET->getDecl()->getIntegerType().getTypePtr();
2214  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
2215  T->isSpecificBuiltinType(BuiltinType::LongLong) ||
2216  T->isSpecificBuiltinType(BuiltinType::ULongLong))
2217  // Don't increase the alignment if an alignment attribute was specified on a
2218  // typedef declaration.
2219  if (!TI.AlignIsRequired)
2220  return std::max(ABIAlign, (unsigned)getTypeSize(T));
2221 
2222  return ABIAlign;
2223 }
2224 
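// Usage sketch (illustrative): on a target such as 32-bit x86 Linux, `double`
// has an ABI alignment of 32 bits but is preferably naturally aligned, so
//   getTypeAlign(DoubleTy)          == 32
//   getPreferredTypeAlign(DoubleTy) == 64
// unless an alignment attribute on a typedef has pinned the alignment.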
2225 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2226 /// for __attribute__((aligned)) on this target, to be used if no alignment
2227 /// value is specified.
2228 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
2229  return getTargetInfo().getDefaultAlignForAttributeAligned();
2230 }
2231 
2232 /// getAlignOfGlobalVar - Return the alignment in bits that should be given
2233 /// to a global variable of the specified type.
2234 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
2235  return std::max(getTypeAlign(T), getTargetInfo().getMinGlobalAlign());
2236 }
2237 
2238 /// getAlignOfGlobalVarInChars - Return the alignment in characters that
2239 /// should be given to a global variable of the specified type.
2240 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const {
2241  return toCharUnitsFromBits(getAlignOfGlobalVar(T));
2242 }
2243 
2244 CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
2245  CharUnits Offset = CharUnits::Zero();
2246  const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
2247  while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2248  Offset += Layout->getBaseClassOffset(Base);
2249  Layout = &getASTRecordLayout(Base);
2250  }
2251  return Offset;
2252 }
2253 
2254 /// DeepCollectObjCIvars -
2255 /// This routine first collects all declared, but not synthesized, ivars in
2256 /// super class and then collects all ivars, including those synthesized for
2257 /// current class. This routine is used for implementation of current class
2258 /// when all ivars, declared and synthesized are known.
2259 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
2260  bool leafClass,
2261  SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
2262  if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2263  DeepCollectObjCIvars(SuperClass, false, Ivars);
2264  if (!leafClass) {
2265  for (const auto *I : OI->ivars())
2266  Ivars.push_back(I);
2267  } else {
2268  auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2269  for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2270  Iv= Iv->getNextIvar())
2271  Ivars.push_back(Iv);
2272  }
2273 }
2274 
2275 /// CollectInheritedProtocols - Collect all protocols in current class and
2276 /// those inherited by it.
2277 void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
2278  llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
2279  if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
2280  // We can use protocol_iterator here instead of
2281  // all_referenced_protocol_iterator since we are walking all categories.
2282  for (auto *Proto : OI->all_referenced_protocols()) {
2283  CollectInheritedProtocols(Proto, Protocols);
2284  }
2285 
2286  // Categories of this Interface.
2287  for (const auto *Cat : OI->visible_categories())
2288  CollectInheritedProtocols(Cat, Protocols);
2289 
2290  if (ObjCInterfaceDecl *SD = OI->getSuperClass())
2291  while (SD) {
2292  CollectInheritedProtocols(SD, Protocols);
2293  SD = SD->getSuperClass();
2294  }
2295  } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
2296  for (auto *Proto : OC->protocols()) {
2297  CollectInheritedProtocols(Proto, Protocols);
2298  }
2299  } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
2300  // Insert the protocol.
2301  if (!Protocols.insert(
2302  const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
2303  return;
2304 
2305  for (auto *Proto : OP->protocols())
2306  CollectInheritedProtocols(Proto, Protocols);
2307  }
2308 }
2309 
2310 static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
2311  const RecordDecl *RD) {
2312  assert(RD->isUnion() && "Must be union type");
2313  CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());
2314 
2315  for (const auto *Field : RD->fields()) {
2316  if (!Context.hasUniqueObjectRepresentations(Field->getType()))
2317  return false;
2318  CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType());
2319  if (FieldSize != UnionSize)
2320  return false;
2321  }
2322  return !RD->field_empty();
2323 }
2324 
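// Worked example (illustrative): in `union U { char c; int i; };` on a typical
// target, sizeof(U) is 4 but the `char` member covers only 1 byte, so the
// size check above fails and the union does not have unique object
// representations; a union whose members all span the full union size and are
// themselves unique passes.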
2325 static bool isStructEmpty(QualType Ty) {
2326  const RecordDecl *RD = Ty->castAs<RecordType>()->getDecl();
2327 
2328  if (!RD->field_empty())
2329  return false;
2330 
2331  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD))
2332  return ClassDecl->isEmpty();
2333 
2334  return true;
2335 }
2336 
2337 static llvm::Optional<int64_t>
2338 structHasUniqueObjectRepresentations(const ASTContext &Context,
2339  const RecordDecl *RD) {
2340  assert(!RD->isUnion() && "Must be struct/class type");
2341  const auto &Layout = Context.getASTRecordLayout(RD);
2342 
2343  int64_t CurOffsetInBits = 0;
2344  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) {
2345  if (ClassDecl->isDynamicClass())
2346  return llvm::None;
2347 
2348  llvm::SmallVector<std::pair<QualType, int64_t>, 4> Bases;
2349  for (const auto Base : ClassDecl->bases()) {
2350  // Empty types can be inherited from, and non-empty types can potentially
2351  // have tail padding, so just make sure there isn't an error.
2352  if (!isStructEmpty(Base.getType())) {
2353  llvm::Optional<int64_t> Size = structHasUniqueObjectRepresentations(
2354  Context, Base.getType()->getAs<RecordType>()->getDecl());
2355  if (!Size)
2356  return llvm::None;
2357  Bases.emplace_back(Base.getType(), Size.getValue());
2358  }
2359  }
2360 
2361  llvm::sort(Bases, [&](const std::pair<QualType, int64_t> &L,
2362  const std::pair<QualType, int64_t> &R) {
2363  return Layout.getBaseClassOffset(L.first->getAsCXXRecordDecl()) <
2364  Layout.getBaseClassOffset(R.first->getAsCXXRecordDecl());
2365  });
2366 
2367  for (const auto Base : Bases) {
2368  int64_t BaseOffset = Context.toBits(
2369  Layout.getBaseClassOffset(Base.first->getAsCXXRecordDecl()));
2370  int64_t BaseSize = Base.second;
2371  if (BaseOffset != CurOffsetInBits)
2372  return llvm::None;
2373  CurOffsetInBits = BaseOffset + BaseSize;
2374  }
2375  }
2376 
2377  for (const auto *Field : RD->fields()) {
2378  if (!Field->getType()->isReferenceType() &&
2379  !Context.hasUniqueObjectRepresentations(Field->getType()))
2380  return llvm::None;
2381 
2382  int64_t FieldSizeInBits =
2383  Context.toBits(Context.getTypeSizeInChars(Field->getType()));
2384  if (Field->isBitField()) {
2385  int64_t BitfieldSize = Field->getBitWidthValue(Context);
2386 
2387  if (BitfieldSize > FieldSizeInBits)
2388  return llvm::None;
2389  FieldSizeInBits = BitfieldSize;
2390  }
2391 
2392  int64_t FieldOffsetInBits = Context.getFieldOffset(Field);
2393 
2394  if (FieldOffsetInBits != CurOffsetInBits)
2395  return llvm::None;
2396 
2397  CurOffsetInBits = FieldSizeInBits + FieldOffsetInBits;
2398  }
2399 
2400  return CurOffsetInBits;
2401 }
2402 
2403 bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const {
2404  // C++17 [meta.unary.prop]:
2405  // The predicate condition for a template specialization
2406  // has_unique_object_representations<T> shall be
2407  // satisfied if and only if:
2408  // (9.1) - T is trivially copyable, and
2409  // (9.2) - any two objects of type T with the same value have the same
2410  // object representation, where two objects
2411  // of array or non-union class type are considered to have the same value
2412  // if their respective sequences of
2413  // direct subobjects have the same values, and two objects of union type
2414  // are considered to have the same
2415  // value if they have the same active member and the corresponding members
2416  // have the same value.
2417  // The set of scalar types for which this condition holds is
2418  // implementation-defined. [ Note: If a type has padding
2419  // bits, the condition does not hold; otherwise, the condition holds true
2420  // for unsigned integral types. -- end note ]
2421  assert(!Ty.isNull() && "Null QualType sent to unique object rep check");
2422 
2423  // Arrays are unique only if their element type is unique.
2424  if (Ty->isArrayType())
2425  return hasUniqueObjectRepresentations(getBaseElementType(Ty));
2426 
2427  // (9.1) - T is trivially copyable...
2428  if (!Ty.isTriviallyCopyableType(*this))
2429  return false;
2430 
2431  // All integrals and enums are unique.
2432  if (Ty->isIntegralOrEnumerationType())
2433  return true;
2434 
2435  // All other pointers are unique.
2436  if (Ty->isPointerType())
2437  return true;
2438 
2439  if (Ty->isMemberPointerType()) {
2440  const auto *MPT = Ty->getAs<MemberPointerType>();
2441  return !ABI->getMemberPointerInfo(MPT).HasPadding;
2442  }
2443 
2444  if (Ty->isRecordType()) {
2445  const RecordDecl *Record = Ty->getAs<RecordType>()->getDecl();
2446 
2447  if (Record->isInvalidDecl())
2448  return false;
2449 
2450  if (Record->isUnion())
2451  return unionHasUniqueObjectRepresentations(*this, Record);
2452 
2453  Optional<int64_t> StructSize =
2454  structHasUniqueObjectRepresentations(*this, Record);
2455 
2456  return StructSize &&
2457  StructSize.getValue() == static_cast<int64_t>(getTypeSize(Ty));
2458  }
2459 
2460  // FIXME: More cases to handle here (list by rsmith):
2461  // vectors (careful about, eg, vector of 3 foo)
2462  // _Complex int and friends
2463  // _Atomic T
2464  // Obj-C block pointers
2465  // Obj-C object pointers
2466  // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
2467  // clk_event_t, queue_t, reserve_id_t)
2468  // There're also Obj-C class types and the Obj-C selector type, but I think it
2469  // makes sense for those to return false here.
2470 
2471  return false;
2472 }
2473 
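// Usage sketch (illustrative): padding makes representations non-unique. On a
// typical target with 4-byte, 4-byte-aligned int,
//   struct A { char c; int i; };   // 3 padding bytes after `c`
//   struct B { int i; int j; };    // contiguous fields, no padding
// hasUniqueObjectRepresentations is false for A (the field offsets leave a
// gap) and true for B (field sizes sum exactly to sizeof(B)).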
2474 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
2475  unsigned count = 0;
2476  // Count ivars declared in class extension.
2477  for (const auto *Ext : OI->known_extensions())
2478  count += Ext->ivar_size();
2479 
2480  // Count ivar defined in this class's implementation. This
2481  // includes synthesized ivars.
2482  if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
2483  count += ImplDecl->ivar_size();
2484 
2485  return count;
2486 }
2487 
2488 bool ASTContext::isSentinelNullExpr(const Expr *E) {
2489  if (!E)
2490  return false;
2491 
2492  // nullptr_t is always treated as null.
2493  if (E->getType()->isNullPtrType()) return true;
2494 
2495  if (E->getType()->isAnyPointerType() &&
2496  E->IgnoreParenCasts()->isNullPointerConstant(*this,
2497  Expr::NPC_ValueDependentIsNull))
2498  return true;
2499 
2500  // Unfortunately, __null has type 'int'.
2501  if (isa<GNUNullExpr>(E)) return true;
2502 
2503  return false;
2504 }
2505 
2506 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none
2507 /// exists.
2508 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
2509  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2510  I = ObjCImpls.find(D);
2511  if (I != ObjCImpls.end())
2512  return cast<ObjCImplementationDecl>(I->second);
2513  return nullptr;
2514 }
2515 
2516 /// Get the implementation of ObjCCategoryDecl, or nullptr if none
2517 /// exists.
2518 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
2519  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2520  I = ObjCImpls.find(D);
2521  if (I != ObjCImpls.end())
2522  return cast<ObjCCategoryImplDecl>(I->second);
2523  return nullptr;
2524 }
2525 
2526 /// Set the implementation of ObjCInterfaceDecl.
2527 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
2528  ObjCImplementationDecl *ImplD) {
2529  assert(IFaceD && ImplD && "Passed null params");
2530  ObjCImpls[IFaceD] = ImplD;
2531 }
2532 
2533 /// Set the implementation of ObjCCategoryDecl.
2534 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
2535  ObjCCategoryImplDecl *ImplD) {
2536  assert(CatD && ImplD && "Passed null params");
2537  ObjCImpls[CatD] = ImplD;
2538 }
2539 
2540 const ObjCMethodDecl *
2541 ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
2542  return ObjCMethodRedecls.lookup(MD);
2543 }
2544 
2545 void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
2546  const ObjCMethodDecl *Redecl) {
2547  assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
2548  ObjCMethodRedecls[MD] = Redecl;
2549 }
2550 
2551 const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
2552  const NamedDecl *ND) const {
2553  if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
2554  return ID;
2555  if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
2556  return CD->getClassInterface();
2557  if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
2558  return IMD->getClassInterface();
2559 
2560  return nullptr;
2561 }
2562 
2563 /// Get the copy initialization expression of VarDecl, or nullptr if
2564 /// none exists.
2567  assert(VD && "Passed null params");
2568  assert(VD->hasAttr<BlocksAttr>() &&
2569  "getBlockVarCopyInits - not __block var");
2570  auto I = BlockVarCopyInits.find(VD);
2571  if (I != BlockVarCopyInits.end())
2572  return I->second;
2573  return {nullptr, false};
2574 }
2575 
2576 /// Set the copy initialization expression of a block var decl.
2578  bool CanThrow) {
2579  assert(VD && CopyExpr && "Passed null params");
2580  assert(VD->hasAttr<BlocksAttr>() &&
2581  "setBlockVarCopyInits - not __block var");
2582  BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
2583 }
2584 
2585 TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
2586  unsigned DataSize) const {
2587  if (!DataSize)
2588  DataSize = TypeLoc::getFullDataSizeForType(T);
2589  else
2590  assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
2591  "incorrect data size provided to CreateTypeSourceInfo!");
2592 
2593  auto *TInfo =
2594  (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
2595  new (TInfo) TypeSourceInfo(T);
2596  return TInfo;
2597 }
2598 
2599 TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
2600  SourceLocation L) const {
2601  TypeSourceInfo *DI = CreateTypeSourceInfo(T);
2602  DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
2603  return DI;
2604 }
2605 
2606 const ASTRecordLayout &
2607 ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
2608  return getObjCLayout(D, nullptr);
2609 }
2610 
2611 const ASTRecordLayout &
2612 ASTContext::getASTObjCImplementationLayout(
2613  const ObjCImplementationDecl *D) const {
2614  return getObjCLayout(D->getClassInterface(), D);
2615 }
2616 
2617 //===----------------------------------------------------------------------===//
2618 // Type creation/memoization methods
2619 //===----------------------------------------------------------------------===//
2620 
2621 QualType
2622 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
2623  unsigned fastQuals = quals.getFastQualifiers();
2624  quals.removeFastQualifiers();
2625 
2626  // Check if we've already instantiated this type.
2627  llvm::FoldingSetNodeID ID;
2628  ExtQuals::Profile(ID, baseType, quals);
2629  void *insertPos = nullptr;
2630  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
2631  assert(eq->getQualifiers() == quals);
2632  return QualType(eq, fastQuals);
2633  }
2634 
2635  // If the base type is not canonical, make the appropriate canonical type.
2636  QualType canon;
2637  if (!baseType->isCanonicalUnqualified()) {
2638  SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
2639  canonSplit.Quals.addConsistentQualifiers(quals);
2640  canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);
2641 
2642  // Re-find the insert position.
2643  (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
2644  }
2645 
2646  auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
2647  ExtQualNodes.InsertNode(eq, insertPos);
2648  return QualType(eq, fastQuals);
2649 }
2650 
2651 QualType ASTContext::getAddrSpaceQualType(QualType T,
2652  LangAS AddressSpace) const {
2653  QualType CanT = getCanonicalType(T);
2654  if (CanT.getAddressSpace() == AddressSpace)
2655  return T;
2656 
2657  // If we are composing extended qualifiers together, merge together
2658  // into one ExtQuals node.
2659  QualifierCollector Quals;
2660  const Type *TypeNode = Quals.strip(T);
2661 
2662  // If this type already has an address space specified, it cannot get
2663  // another one.
2664  assert(!Quals.hasAddressSpace() &&
2665  "Type cannot be in multiple addr spaces!");
2666  Quals.addAddressSpace(AddressSpace);
2667 
2668  return getExtQualType(TypeNode, Quals);
2669 }
2670 
2671 QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
2672  // If we are composing extended qualifiers together, merge together
2673  // into one ExtQuals node.
2674  QualifierCollector Quals;
2675  const Type *TypeNode = Quals.strip(T);
2676 
2677  // If the qualifier doesn't have an address space just return it.
2678  if (!Quals.hasAddressSpace())
2679  return T;
2680 
2681  Quals.removeAddressSpace();
2682 
2683  // Removal of the address space can mean there are no longer any
2684  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
2685  // or required.
2686  if (Quals.hasNonFastQualifiers())
2687  return getExtQualType(TypeNode, Quals);
2688  else
2689  return QualType(TypeNode, Quals.getFastQualifiers());
2690 }
2691 
2692 QualType ASTContext::getObjCGCQualType(QualType T,
2693  Qualifiers::GC GCAttr) const {
2694  QualType CanT = getCanonicalType(T);
2695  if (CanT.getObjCGCAttr() == GCAttr)
2696  return T;
2697 
2698  if (const auto *ptr = T->getAs<PointerType>()) {
2699  QualType Pointee = ptr->getPointeeType();
2700  if (Pointee->isAnyPointerType()) {
2701  QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
2702  return getPointerType(ResultType);
2703  }
2704  }
2705 
2706  // If we are composing extended qualifiers together, merge together
2707  // into one ExtQuals node.
2708  QualifierCollector Quals;
2709  const Type *TypeNode = Quals.strip(T);
2710 
2711  // If this type already has an ObjCGC specified, it cannot get
2712  // another one.
2713  assert(!Quals.hasObjCGCAttr() &&
2714  "Type cannot have multiple ObjCGCs!");
2715  Quals.addObjCGCAttr(GCAttr);
2716 
2717  return getExtQualType(TypeNode, Quals);
2718 }
2719 
2720 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
2721  FunctionType::ExtInfo Info) {
2722  if (T->getExtInfo() == Info)
2723  return T;
2724 
2725  QualType Result;
2726  if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
2727  Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
2728  } else {
2729  const auto *FPT = cast<FunctionProtoType>(T);
2730  FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
2731  EPI.ExtInfo = Info;
2732  Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
2733  }
2734 
2735  return cast<FunctionType>(Result.getTypePtr());
2736 }
2737 
2738 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
2739  QualType ResultType) {
2740  FD = FD->getMostRecentDecl();
2741  while (true) {
2742  const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
2743  FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
2744  FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
2745  if (FunctionDecl *Next = FD->getPreviousDecl())
2746  FD = Next;
2747  else
2748  break;
2749  }
2750  if (ASTMutationListener *L = getASTMutationListener())
2751  L->DeducedReturnType(FD, ResultType);
2752 }
2753 
2754 /// Get a function type and produce the equivalent function type with the
2755 /// specified exception specification. Type sugar that can be present on a
2756 /// declaration of a function with an exception specification is permitted
2757 /// and preserved. Other type sugar (for instance, typedefs) is not.
2758 QualType ASTContext::getFunctionTypeWithExceptionSpec(
2759  QualType Orig, FunctionProtoType::ExceptionSpecInfo ESI) {
2760  // Might have some parens.
2761  if (const auto *PT = dyn_cast<ParenType>(Orig))
2762  return getParenType(
2763  getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI));
2764 
2765  // Might have a calling-convention attribute.
2766  if (const auto *AT = dyn_cast<AttributedType>(Orig))
2767  return getAttributedType(
2768  AT->getAttrKind(),
2769  getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI),
2770  getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI));
2771 
2772  // Anything else must be a function type. Rebuild it with the new exception
2773  // specification.
2774  const auto *Proto = Orig->getAs<FunctionProtoType>();
2775  return getFunctionType(
2776  Proto->getReturnType(), Proto->getParamTypes(),
2777  Proto->getExtProtoInfo().withExceptionSpec(ESI));
2778 }
2779 
2780 bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
2781  QualType U) {
2782  return hasSameType(T, U) ||
2783  (getLangOpts().CPlusPlus17 &&
2784  hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None),
2785  getFunctionTypeWithExceptionSpec(U, EST_None)));
2786 }
2787 
2790  bool AsWritten) {
2791  // Update the type.
2792  QualType Updated =
2793  getFunctionTypeWithExceptionSpec(FD->getType(), ESI);
2794  FD->setType(Updated);
2795 
2796  if (!AsWritten)
2797  return;
2798 
2799  // Update the type in the type source information too.
2800  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
2801  // If the type and the type-as-written differ, we may need to update
2802  // the type-as-written too.
2803  if (TSInfo->getType() != FD->getType())
2804  Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);
2805 
2806  // FIXME: When we get proper type location information for exceptions,
2807  // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
2808  // up the TypeSourceInfo;
2809  assert(TypeLoc::getFullDataSizeForType(Updated) ==
2810  TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
2811  "TypeLoc size mismatch from updating exception specification");
2812  TSInfo->overrideType(Updated);
2813  }
2814 }
2815 
2816 /// getComplexType - Return the uniqued reference to the type for a complex
2817 /// number with the specified element type.
2818 QualType ASTContext::getComplexType(QualType T) const {
2819  // Unique pointers, to guarantee there is only one pointer of a particular
2820  // structure.
2821  llvm::FoldingSetNodeID ID;
2822  ComplexType::Profile(ID, T);
2823 
2824  void *InsertPos = nullptr;
2825  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
2826  return QualType(CT, 0);
2827 
2828  // If the pointee type isn't canonical, this won't be a canonical type either,
2829  // so fill in the canonical type field.
2830  QualType Canonical;
2831  if (!T.isCanonical()) {
2832  Canonical = getComplexType(getCanonicalType(T));
2833 
2834  // Get the new insert position for the node we care about.
2835  ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
2836  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2837  }
2838  auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
2839  Types.push_back(New);
2840  ComplexTypes.InsertNode(New, InsertPos);
2841  return QualType(New, 0);
2842 }
2843 
2844 /// getPointerType - Return the uniqued reference to the type for a pointer to
2845 /// the specified type.
2846 QualType ASTContext::getPointerType(QualType T) const {
2847  // Unique pointers, to guarantee there is only one pointer of a particular
2848  // structure.
2849  llvm::FoldingSetNodeID ID;
2850  PointerType::Profile(ID, T);
2851 
2852  void *InsertPos = nullptr;
2853  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
2854  return QualType(PT, 0);
2855 
2856  // If the pointee type isn't canonical, this won't be a canonical type either,
2857  // so fill in the canonical type field.
2858  QualType Canonical;
2859  if (!T.isCanonical()) {
2860  Canonical = getPointerType(getCanonicalType(T));
2861 
2862  // Get the new insert position for the node we care about.
2863  PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
2864  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2865  }
2866  auto *New = new (*this, TypeAlignment) PointerType(T, Canonical);
2867  Types.push_back(New);
2868  PointerTypes.InsertNode(New, InsertPos);
2869  return QualType(New, 0);
2870 }
2871 
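// Usage sketch (illustrative): because pointer types are uniqued, requesting
// the same pointee twice yields the same Type node, so canonical types can be
// compared by pointer. Assuming an ASTContext `Ctx` is in scope:
//
//   QualType P1 = Ctx.getPointerType(Ctx.IntTy);
//   QualType P2 = Ctx.getPointerType(Ctx.IntTy);
//   assert(P1.getTypePtr() == P2.getTypePtr());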
2872 QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
2873  llvm::FoldingSetNodeID ID;
2874  AdjustedType::Profile(ID, Orig, New);
2875  void *InsertPos = nullptr;
2876  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2877  if (AT)
2878  return QualType(AT, 0);
2879 
2880  QualType Canonical = getCanonicalType(New);
2881 
2882  // Get the new insert position for the node we care about.
2883  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2884  assert(!AT && "Shouldn't be in the map!");
2885 
2886  AT = new (*this, TypeAlignment)
2887  AdjustedType(Type::Adjusted, Orig, New, Canonical);
2888  Types.push_back(AT);
2889  AdjustedTypes.InsertNode(AT, InsertPos);
2890  return QualType(AT, 0);
2891 }
2892 
2893 QualType ASTContext::getDecayedType(QualType T) const {
2894  assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
2895 
2896  QualType Decayed;
2897 
2898  // C99 6.7.5.3p7:
2899  // A declaration of a parameter as "array of type" shall be
2900  // adjusted to "qualified pointer to type", where the type
2901  // qualifiers (if any) are those specified within the [ and ] of
2902  // the array type derivation.
2903  if (T->isArrayType())
2904  Decayed = getArrayDecayedType(T);
2905 
2906  // C99 6.7.5.3p8:
2907  // A declaration of a parameter as "function returning type"
2908  // shall be adjusted to "pointer to function returning type", as
2909  // in 6.3.2.1.
2910  if (T->isFunctionType())
2911  Decayed = getPointerType(T);
2912 
2913  llvm::FoldingSetNodeID ID;
2914  AdjustedType::Profile(ID, T, Decayed);
2915  void *InsertPos = nullptr;
2916  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2917  if (AT)
2918  return QualType(AT, 0);
2919 
2920  QualType Canonical = getCanonicalType(Decayed);
2921 
2922  // Get the new insert position for the node we care about.
2923  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2924  assert(!AT && "Shouldn't be in the map!");
2925 
2926  AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical);
2927  Types.push_back(AT);
2928  AdjustedTypes.InsertNode(AT, InsertPos);
2929  return QualType(AT, 0);
2930 }
2931 
2932 /// getBlockPointerType - Return the uniqued reference to the type for
2933 /// a pointer to the specified block.
2934 QualType ASTContext::getBlockPointerType(QualType T) const {
2935  assert(T->isFunctionType() && "block of function types only");
2936  // Unique pointers, to guarantee there is only one block of a particular
2937  // structure.
2938  llvm::FoldingSetNodeID ID;
2939  BlockPointerType::Profile(ID, T);
2940 
2941  void *InsertPos = nullptr;
2942  if (BlockPointerType *PT =
2943  BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
2944  return QualType(PT, 0);
2945 
2946  // If the block pointee type isn't canonical, this won't be a canonical
2947  // type either so fill in the canonical type field.
2948  QualType Canonical;
2949  if (!T.isCanonical()) {
2950  Canonical = getBlockPointerType(getCanonicalType(T));
2951 
2952  // Get the new insert position for the node we care about.
2953  BlockPointerType *NewIP =
2954  BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
2955  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2956  }
2957  auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
2958  Types.push_back(New);
2959  BlockPointerTypes.InsertNode(New, InsertPos);
2960  return QualType(New, 0);
2961 }
2962 
2963 /// getLValueReferenceType - Return the uniqued reference to the type for an
2964 /// lvalue reference to the specified type.
2965 QualType
2966 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
2967  assert(getCanonicalType(T) != OverloadTy &&
2968  "Unresolved overloaded function type");
2969 
2970  // Unique pointers, to guarantee there is only one pointer of a particular
2971  // structure.
2972  llvm::FoldingSetNodeID ID;
2973  ReferenceType::Profile(ID, T, SpelledAsLValue);
2974 
2975  void *InsertPos = nullptr;
2976  if (LValueReferenceType *RT =
2977  LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
2978  return QualType(RT, 0);
2979 
2980  const auto *InnerRef = T->getAs<ReferenceType>();
2981 
2982  // If the referencee type isn't canonical, this won't be a canonical type
2983  // either, so fill in the canonical type field.
2984  QualType Canonical;
2985  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
2986  QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
2987  Canonical = getLValueReferenceType(getCanonicalType(PointeeType));
2988 
2989  // Get the new insert position for the node we care about.
2990  LValueReferenceType *NewIP =
2991  LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
2992  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2993  }
2994 
2995  auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
2996  SpelledAsLValue);
2997  Types.push_back(New);
2998  LValueReferenceTypes.InsertNode(New, InsertPos);
2999 
3000  return QualType(New, 0);
3001 }
3002 
3003 /// getRValueReferenceType - Return the uniqued reference to the type for an
3004 /// rvalue reference to the specified type.
3005 QualType ASTContext::getRValueReferenceType(QualType T) const {
3006  // Unique pointers, to guarantee there is only one pointer of a particular
3007  // structure.
3008  llvm::FoldingSetNodeID ID;
3009  ReferenceType::Profile(ID, T, false);
3010 
3011  void *InsertPos = nullptr;
3012  if (RValueReferenceType *RT =
3013  RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
3014  return QualType(RT, 0);
3015 
3016  const auto *InnerRef = T->getAs<ReferenceType>();
3017 
3018  // If the referencee type isn't canonical, this won't be a canonical type
3019  // either, so fill in the canonical type field.
3020  QualType Canonical;
3021  if (InnerRef || !T.isCanonical()) {
3022  QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
3023  Canonical = getRValueReferenceType(getCanonicalType(PointeeType));
3024 
3025  // Get the new insert position for the node we care about.
3026  RValueReferenceType *NewIP =
3027  RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
3028  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3029  }
3030 
3031  auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
3032  Types.push_back(New);
3033  RValueReferenceTypes.InsertNode(New, InsertPos);
3034  return QualType(New, 0);
3035 }
3036 
3037 /// getMemberPointerType - Return the uniqued reference to the type for a
3038 /// member pointer to the specified type, in the specified class.
3039 QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
3040  // Unique pointers, to guarantee there is only one pointer of a particular
3041  // structure.
3042  llvm::FoldingSetNodeID ID;
3043  MemberPointerType::Profile(ID, T, Cls);
3044 
3045  void *InsertPos = nullptr;
3046  if (MemberPointerType *PT =
3047  MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3048  return QualType(PT, 0);
3049 
3050  // If the pointee or class type isn't canonical, this won't be a canonical
3051  // type either, so fill in the canonical type field.
3052  QualType Canonical;
3053  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
3054  Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls));
3055 
3056  // Get the new insert position for the node we care about.
3057  MemberPointerType *NewIP =
3058  MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3059  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3060  }
3061  auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
3062  Types.push_back(New);
3063  MemberPointerTypes.InsertNode(New, InsertPos);
3064  return QualType(New, 0);
3065 }
3066 
3067 /// getConstantArrayType - Return the unique reference to the type for an
3068 /// array of the specified element type.
3069 QualType ASTContext::getConstantArrayType(QualType EltTy,
3070  const llvm::APInt &ArySizeIn,
3071  ArrayType::ArraySizeModifier ASM,
3072  unsigned IndexTypeQuals) const {
3073  assert((EltTy->isDependentType() ||
3074  EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
3075  "Constant array of VLAs is illegal!");
3076 
3077  // Convert the array size into a canonical width matching the pointer size for
3078  // the target.
3079  llvm::APInt ArySize(ArySizeIn);
3080  ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());
3081 
3082  llvm::FoldingSetNodeID ID;
3083  ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, IndexTypeQuals);
3084 
3085  void *InsertPos = nullptr;
3086  if (ConstantArrayType *ATP =
3087  ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
3088  return QualType(ATP, 0);
3089 
3090  // If the element type isn't canonical or has qualifiers, this won't
3091  // be a canonical type either, so fill in the canonical type field.
3092  QualType Canon;
3093  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
3094  SplitQualType canonSplit = getCanonicalType(EltTy).split();
3095  Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize,
3096  ASM, IndexTypeQuals);
3097  Canon = getQualifiedType(Canon, canonSplit.Quals);
3098 
3099  // Get the new insert position for the node we care about.
3100  ConstantArrayType *NewIP =
3101  ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
3102  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3103  }
3104 
3105  auto *New = new (*this,TypeAlignment)
3106  ConstantArrayType(EltTy, Canon, ArySize, ASM, IndexTypeQuals);
3107  ConstantArrayTypes.InsertNode(New, InsertPos);
3108  Types.push_back(New);
3109  return QualType(New, 0);
3110 }
3111 
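// Usage sketch (illustrative): assuming an ASTContext `Ctx` is in scope, the
// type `int[10]` can be built and is uniqued like the other types above:
//
//   llvm::APInt Ten(/*numBits=*/32, /*val=*/10);
//   QualType Arr = Ctx.getConstantArrayType(Ctx.IntTy, Ten, ArrayType::Normal,
//                                           /*IndexTypeQuals=*/0);
//
// Requesting the same array type again returns the same uniqued node.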
3112 /// getVariableArrayDecayedType - Turns the given type, which may be
3113 /// variably-modified, into the corresponding type with all the known
3114 /// sizes replaced with [*].
3115 QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
3116  // Vastly most common case.
3117  if (!type->isVariablyModifiedType()) return type;
3118 
3119  QualType result;
3120 
3121  SplitQualType split = type.getSplitDesugaredType();
3122  const Type *ty = split.Ty;
3123  switch (ty->getTypeClass()) {
3124 #define TYPE(Class, Base)
3125 #define ABSTRACT_TYPE(Class, Base)
3126 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3127 #include "clang/AST/TypeNodes.def"
3128  llvm_unreachable("didn't desugar past all non-canonical types?");
3129 
3130  // These types should never be variably-modified.
3131  case Type::Builtin:
3132  case Type::Complex:
3133  case Type::Vector:
3134  case Type::DependentVector:
3135  case Type::ExtVector:
3136  case Type::DependentSizedExtVector:
3137  case Type::DependentAddressSpace:
3138  case Type::ObjCObject:
3139  case Type::ObjCInterface:
3140  case Type::ObjCObjectPointer:
3141  case Type::Record:
3142  case Type::Enum:
3143  case Type::UnresolvedUsing:
3144  case Type::TypeOfExpr:
3145  case Type::TypeOf:
3146  case Type::Decltype:
3147  case Type::UnaryTransform:
3148  case Type::DependentName:
3149  case Type::InjectedClassName:
3150  case Type::TemplateSpecialization:
3151  case Type::DependentTemplateSpecialization:
3152  case Type::TemplateTypeParm:
3153  case Type::SubstTemplateTypeParmPack:
3154  case Type::Auto:
3155  case Type::DeducedTemplateSpecialization:
3156  case Type::PackExpansion:
3157  llvm_unreachable("type should never be variably-modified");
3158 
3159  // These types can be variably-modified but should never need to
3160  // further decay.
3161  case Type::FunctionNoProto:
3162  case Type::FunctionProto:
3163  case Type::BlockPointer:
3164  case Type::MemberPointer:
3165  case Type::Pipe:
3166  return type;
3167 
3168  // These types can be variably-modified. All these modifications
3169  // preserve structure except as noted by comments.
3170  // TODO: if we ever care about optimizing VLAs, there are no-op
3171  // optimizations available here.
3172  case Type::Pointer:
3173  result = getPointerType(getVariableArrayDecayedType(
3174  cast<PointerType>(ty)->getPointeeType()));
3175  break;
3176 
3177  case Type::LValueReference: {
3178  const auto *lv = cast<LValueReferenceType>(ty);
3179  result = getLValueReferenceType(
3180  getVariableArrayDecayedType(lv->getPointeeType()),
3181  lv->isSpelledAsLValue());
3182  break;
3183  }
3184 
3185  case Type::RValueReference: {
3186  const auto *lv = cast<RValueReferenceType>(ty);
3187  result = getRValueReferenceType(
3188  getVariableArrayDecayedType(lv->getPointeeType()));
3189  break;
3190  }
3191 
3192  case Type::Atomic: {
3193  const auto *at = cast<AtomicType>(ty);
3194  result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
3195  break;
3196  }
3197 
3198  case Type::ConstantArray: {
3199  const auto *cat = cast<ConstantArrayType>(ty);
3200  result = getConstantArrayType(
3201  getVariableArrayDecayedType(cat->getElementType()),
3202  cat->getSize(),
3203  cat->getSizeModifier(),
3204  cat->getIndexTypeCVRQualifiers());
3205  break;
3206  }
3207 
3208  case Type::DependentSizedArray: {
3209  const auto *dat = cast<DependentSizedArrayType>(ty);
3210  result = getDependentSizedArrayType(
3211  getVariableArrayDecayedType(dat->getElementType()),
3212  dat->getSizeExpr(),
3213  dat->getSizeModifier(),
3214  dat->getIndexTypeCVRQualifiers(),
3215  dat->getBracketsRange());
3216  break;
3217  }
3218 
3219  // Turn incomplete types into [*] types.
3220  case Type::IncompleteArray: {
3221  const auto *iat = cast<IncompleteArrayType>(ty);
3222  result = getVariableArrayType(
3223  getVariableArrayDecayedType(iat->getElementType()),
3224  /*size*/ nullptr,
3225  ArrayType::Normal,
3226  iat->getIndexTypeCVRQualifiers(),
3227  SourceRange());
3228  break;
3229  }
3230 
3231  // Turn VLA types into [*] types.
3232  case Type::VariableArray: {
3233  const auto *vat = cast<VariableArrayType>(ty);
3234  result = getVariableArrayType(
3235  getVariableArrayDecayedType(vat->getElementType()),
3236  /*size*/ nullptr,
3237  ArrayType::Star,
3238  vat->getIndexTypeCVRQualifiers(),
3239  vat->getBracketsRange());
3240  break;
3241  }
3242  }
3243 
3244  // Apply the top-level qualifiers from the original.
3245  return getQualifiedType(result, split.Quals);
3246 }
3247 
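// Worked example (illustrative): inside `void f(int n) { int a[n][10]; }`,
// decaying the type of `a` replaces the variable bound with a star while
// keeping the constant inner bound, i.e. `int [n][10]` becomes `int [*][10]`.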
3248 /// getVariableArrayType - Returns a non-unique reference to the type for a
3249 /// variable array of the specified element type.
3250 QualType ASTContext::getVariableArrayType(QualType EltTy,
3251  Expr *NumElts,
3252  ArrayType::ArraySizeModifier ASM,
3253  unsigned IndexTypeQuals,
3254  SourceRange Brackets) const {
3255  // Since we don't unique expressions, it isn't possible to unique VLA's
3256  // that have an expression provided for their size.
3257  QualType Canon;
3258 
3259  // Be sure to pull qualifiers off the element type.
3260  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
3261  SplitQualType canonSplit = getCanonicalType(EltTy).split();
3262  Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
3263  IndexTypeQuals, Brackets);
3264  Canon = getQualifiedType(Canon, canonSplit.Quals);
3265  }
3266 
3267  auto *New = new (*this, TypeAlignment)
3268  VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
3269 
3270  VariableArrayTypes.push_back(New);
3271  Types.push_back(New);
3272  return QualType(New, 0);
3273 }
3274 
3275 /// getDependentSizedArrayType - Returns a non-unique reference to
3276 /// the type for a dependently-sized array of the specified element
3277 /// type.
3278 QualType ASTContext::getDependentSizedArrayType(QualType elementType,
3279  Expr *numElements,
3280  ArrayType::ArraySizeModifier ASM,
3281  unsigned elementTypeQuals,
3282  SourceRange brackets) const {
3283  assert((!numElements || numElements->isTypeDependent() ||
3284  numElements->isValueDependent()) &&
3285  "Size must be type- or value-dependent!");
3286 
3287  // Dependently-sized array types that do not have a specified number
3288  // of elements will have their sizes deduced from a dependent
3289  // initializer. We do no canonicalization here at all, which is okay
3290  // because they can't be used in most locations.
3291  if (!numElements) {
3292  auto *newType
3293  = new (*this, TypeAlignment)
3294  DependentSizedArrayType(*this, elementType, QualType(),
3295  numElements, ASM, elementTypeQuals,
3296  brackets);
3297  Types.push_back(newType);
3298  return QualType(newType, 0);
3299  }
3300 
3301  // Otherwise, we actually build a new type every time, but we
3302  // also build a canonical type.
3303 
3304  SplitQualType canonElementType = getCanonicalType(elementType).split();
3305 
3306  void *insertPos = nullptr;
3307  llvm::FoldingSetNodeID ID;
3308  DependentSizedArrayType::Profile(ID, *this,
3309  QualType(canonElementType.Ty, 0),
3310  ASM, elementTypeQuals, numElements);
3311 
3312  // Look for an existing type with these properties.
3313  DependentSizedArrayType *canonTy =
3314  DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);
3315 
3316  // If we don't have one, build one.
3317  if (!canonTy) {
3318  canonTy = new (*this, TypeAlignment)
3319  DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
3320  QualType(), numElements, ASM, elementTypeQuals,
3321  brackets);
3322  DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
3323  Types.push_back(canonTy);
3324  }
3325 
3326  // Apply qualifiers from the element type to the array.
3327  QualType canon = getQualifiedType(QualType(canonTy,0),
3328  canonElementType.Quals);
3329 
3330  // If we didn't need extra canonicalization for the element type or the size
3331  // expression, then just use that as our result.
3332  if (QualType(canonElementType.Ty, 0) == elementType &&
3333  canonTy->getSizeExpr() == numElements)
3334  return canon;
3335 
3336  // Otherwise, we need to build a type which follows the spelling
3337  // of the element type.
3338  auto *sugaredType
3339  = new (*this, TypeAlignment)
3340  DependentSizedArrayType(*this, elementType, canon, numElements,
3341  ASM, elementTypeQuals, brackets);
3342  Types.push_back(sugaredType);
3343  return QualType(sugaredType, 0);
3344 }
3345 
3346 QualType ASTContext::getIncompleteArrayType(QualType elementType,
3347  ArrayType::ArraySizeModifier ASM,
3348  unsigned elementTypeQuals) const {
3349  llvm::FoldingSetNodeID ID;
3350  IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);
3351 
3352  void *insertPos = nullptr;
3353  if (IncompleteArrayType *iat =
3354  IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
3355  return QualType(iat, 0);
3356 
3357  // If the element type isn't canonical, this won't be a canonical type
3358  // either, so fill in the canonical type field. We also have to pull
3359  // qualifiers off the element type.
3360  QualType canon;
3361 
3362  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
3363  SplitQualType canonSplit = getCanonicalType(elementType).split();
3364  canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
3365  ASM, elementTypeQuals);
3366  canon = getQualifiedType(canon, canonSplit.Quals);
3367 
3368  // Get the new insert position for the node we care about.
3369  IncompleteArrayType *existing =
3370  IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
3371  assert(!existing && "Shouldn't be in the map!"); (void) existing;
3372  }
3373 
3374  auto *newType = new (*this, TypeAlignment)
3375  IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
3376 
3377  IncompleteArrayTypes.InsertNode(newType, insertPos);
3378  Types.push_back(newType);
3379  return QualType(newType, 0);
3380 }
3381 
3382 /// getVectorType - Return the unique reference to a vector type of
3383 /// the specified element type and size. VectorType must be a built-in type.
3384 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
3385  VectorType::VectorKind VecKind) const {
3386  assert(vecType->isBuiltinType());
3387 
3388  // Check if we've already instantiated a vector of this type.
3389  llvm::FoldingSetNodeID ID;
3390  VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
3391 
3392  void *InsertPos = nullptr;
3393  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
3394  return QualType(VTP, 0);
3395 
3396  // If the element type isn't canonical, this won't be a canonical type either,
3397  // so fill in the canonical type field.
3398  QualType Canonical;
3399  if (!vecType.isCanonical()) {
3400  Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);
3401 
3402  // Get the new insert position for the node we care about.
3403  VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3404  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3405  }
3406  auto *New = new (*this, TypeAlignment)
3407  VectorType(vecType, NumElts, Canonical, VecKind);
3408  VectorTypes.InsertNode(New, InsertPos);
3409  Types.push_back(New);
3410  return QualType(New, 0);
3411 }
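// ---------------------------------------------------------------------------
// Editorial illustration (not part of ASTContext.cpp): a minimal sketch of how
// getVectorType above is typically called. makeFloat4Vector is a hypothetical
// helper name; it relies on the headers and 'using namespace clang' already at
// the top of this file.
// ---------------------------------------------------------------------------
// Roughly the type written as 'float __attribute__((vector_size(16)))':
// a GCC-style (generic) vector of four floats.
static QualType makeFloat4Vector(ASTContext &Ctx) {
  return Ctx.getVectorType(Ctx.FloatTy, /*NumElts=*/4,
                           VectorType::GenericVector);
}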
3412 
 3413 QualType
 3414 ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
 3415                                    SourceLocation AttrLoc,
 3416                                    VectorType::VectorKind VecKind) const {
3417  llvm::FoldingSetNodeID ID;
3418  DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr,
3419  VecKind);
3420  void *InsertPos = nullptr;
3421  DependentVectorType *Canon =
3422  DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3423  DependentVectorType *New;
3424 
3425  if (Canon) {
3426  New = new (*this, TypeAlignment) DependentVectorType(
3427  *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
3428  } else {
3429  QualType CanonVecTy = getCanonicalType(VecType);
3430  if (CanonVecTy == VecType) {
3431  New = new (*this, TypeAlignment) DependentVectorType(
3432  *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind);
3433 
3434  DependentVectorType *CanonCheck =
3435  DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3436  assert(!CanonCheck &&
3437  "Dependent-sized vector_size canonical type broken");
3438  (void)CanonCheck;
3439  DependentVectorTypes.InsertNode(New, InsertPos);
3440  } else {
3441  QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
3442  SourceLocation());
3443  New = new (*this, TypeAlignment) DependentVectorType(
3444  *this, VecType, Canon, SizeExpr, AttrLoc, VecKind);
3445  }
3446  }
3447 
3448  Types.push_back(New);
3449  return QualType(New, 0);
3450 }
3451 
3452 /// getExtVectorType - Return the unique reference to an extended vector type of
3453 /// the specified element type and size. VectorType must be a built-in type.
3454 QualType
3455 ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
3456  assert(vecType->isBuiltinType() || vecType->isDependentType());
3457 
3458  // Check if we've already instantiated a vector of this type.
3459  llvm::FoldingSetNodeID ID;
3460  VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
 3461                      VectorType::GenericVector);
 3462  void *InsertPos = nullptr;
3463  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
3464  return QualType(VTP, 0);
3465 
3466  // If the element type isn't canonical, this won't be a canonical type either,
3467  // so fill in the canonical type field.
3468  QualType Canonical;
3469  if (!vecType.isCanonical()) {
3470  Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);
3471 
3472  // Get the new insert position for the node we care about.
3473  VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3474  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3475  }
3476  auto *New = new (*this, TypeAlignment)
3477  ExtVectorType(vecType, NumElts, Canonical);
3478  VectorTypes.InsertNode(New, InsertPos);
3479  Types.push_back(New);
3480  return QualType(New, 0);
3481 }
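// Editorial illustration (not part of ASTContext.cpp): the ext_vector analogue
// of the sketch above. makeOpenCLFloat4 is a hypothetical helper; it assumes
// the file's existing includes.
// Roughly 'typedef float float4 __attribute__((ext_vector_type(4)));'.
static QualType makeOpenCLFloat4(ASTContext &Ctx) {
  return Ctx.getExtVectorType(Ctx.FloatTy, /*NumElts=*/4);
}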
3482 
3483 QualType
 3484 ASTContext::getDependentSizedExtVectorType(QualType vecType,
 3485                                            Expr *SizeExpr,
3486  SourceLocation AttrLoc) const {
3487  llvm::FoldingSetNodeID ID;
 3488  DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType),
 3489                                       SizeExpr);
3490 
3491  void *InsertPos = nullptr;
 3492  DependentSizedExtVectorType *Canon
 3493    = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
 3494  DependentSizedExtVectorType *New;
 3495  if (Canon) {
3496  // We already have a canonical version of this array type; use it as
3497  // the canonical type for a newly-built type.
3498  New = new (*this, TypeAlignment)
3499  DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0),
3500  SizeExpr, AttrLoc);
3501  } else {
3502  QualType CanonVecTy = getCanonicalType(vecType);
3503  if (CanonVecTy == vecType) {
3504  New = new (*this, TypeAlignment)
3505  DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr,
3506  AttrLoc);
3507 
3508  DependentSizedExtVectorType *CanonCheck
3509  = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3510  assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
3511  (void)CanonCheck;
3512  DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
3513  } else {
3514  QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
3515  SourceLocation());
3516  New = new (*this, TypeAlignment)
3517  DependentSizedExtVectorType(*this, vecType, Canon, SizeExpr, AttrLoc);
3518  }
3519  }
3520 
3521  Types.push_back(New);
3522  return QualType(New, 0);
3523 }
3524 
 3525 QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
 3526                                                   Expr *AddrSpaceExpr,
3527  SourceLocation AttrLoc) const {
3528  assert(AddrSpaceExpr->isInstantiationDependent());
3529 
3530  QualType canonPointeeType = getCanonicalType(PointeeType);
3531 
3532  void *insertPos = nullptr;
3533  llvm::FoldingSetNodeID ID;
3534  DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
3535  AddrSpaceExpr);
3536 
3537  DependentAddressSpaceType *canonTy =
3538  DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);
3539 
3540  if (!canonTy) {
3541  canonTy = new (*this, TypeAlignment)
3542  DependentAddressSpaceType(*this, canonPointeeType,
3543  QualType(), AddrSpaceExpr, AttrLoc);
3544  DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
3545  Types.push_back(canonTy);
3546  }
3547 
3548  if (canonPointeeType == PointeeType &&
3549  canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
3550  return QualType(canonTy, 0);
3551 
3552  auto *sugaredType
3553  = new (*this, TypeAlignment)
3554  DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0),
3555  AddrSpaceExpr, AttrLoc);
3556  Types.push_back(sugaredType);
3557  return QualType(sugaredType, 0);
3558 }
3559 
3560 /// Determine whether \p T is canonical as the result type of a function.
 3561 static bool isCanonicalResultType(QualType T) {
 3562  return T.isCanonical() &&
 3563         (T.getObjCLifetime() == Qualifiers::OCL_None ||
 3564          T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
 3565 }
3566 
3567 /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
3568 QualType
 3569 ASTContext::getFunctionNoProtoType(QualType ResultTy,
 3570                                    const FunctionType::ExtInfo &Info) const {
3571  // Unique functions, to guarantee there is only one function of a particular
3572  // structure.
3573  llvm::FoldingSetNodeID ID;
3574  FunctionNoProtoType::Profile(ID, ResultTy, Info);
3575 
3576  void *InsertPos = nullptr;
3577  if (FunctionNoProtoType *FT =
3578  FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
3579  return QualType(FT, 0);
3580 
3581  QualType Canonical;
3582  if (!isCanonicalResultType(ResultTy)) {
3583  Canonical =
 3584      getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info);
 3585 
3586  // Get the new insert position for the node we care about.
3587  FunctionNoProtoType *NewIP =
3588  FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
3589  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3590  }
3591 
3592  auto *New = new (*this, TypeAlignment)
3593  FunctionNoProtoType(ResultTy, Canonical, Info);
3594  Types.push_back(New);
3595  FunctionNoProtoTypes.InsertNode(New, InsertPos);
3596  return QualType(New, 0);
3597 }
3598 
 3599 CanQualType
 3600 ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
 3601  CanQualType CanResultType = getCanonicalType(ResultType);
3602 
3603  // Canonical result types do not have ARC lifetime qualifiers.
3604  if (CanResultType.getQualifiers().hasObjCLifetime()) {
3605  Qualifiers Qs = CanResultType.getQualifiers();
3606  Qs.removeObjCLifetime();
 3607  return CanQualType::CreateUnsafe(
 3608      getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
3609  }
3610 
3611  return CanResultType;
3612 }
3613 
 3614 static bool isCanonicalExceptionSpecification(
 3615     const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
3616  if (ESI.Type == EST_None)
3617  return true;
3618  if (!NoexceptInType)
3619  return false;
3620 
3621  // C++17 onwards: exception specification is part of the type, as a simple
3622  // boolean "can this function type throw".
3623  if (ESI.Type == EST_BasicNoexcept)
3624  return true;
3625 
3626  // A noexcept(expr) specification is (possibly) canonical if expr is
3627  // value-dependent.
3628  if (ESI.Type == EST_DependentNoexcept)
3629  return true;
3630 
3631  // A dynamic exception specification is canonical if it only contains pack
3632  // expansions (so we can't tell whether it's non-throwing) and all its
3633  // contained types are canonical.
3634  if (ESI.Type == EST_Dynamic) {
3635  bool AnyPackExpansions = false;
3636  for (QualType ET : ESI.Exceptions) {
3637  if (!ET.isCanonical())
3638  return false;
3639  if (ET->getAs<PackExpansionType>())
3640  AnyPackExpansions = true;
3641  }
3642  return AnyPackExpansions;
3643  }
3644 
3645  return false;
3646 }
3647 
3648 QualType ASTContext::getFunctionTypeInternal(
3649  QualType ResultTy, ArrayRef<QualType> ArgArray,
3650  const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
3651  size_t NumArgs = ArgArray.size();
3652 
3653  // Unique functions, to guarantee there is only one function of a particular
3654  // structure.
3655  llvm::FoldingSetNodeID ID;
3656  FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
3657  *this, true);
3658 
3659  QualType Canonical;
3660  bool Unique = false;
3661 
3662  void *InsertPos = nullptr;
3663  if (FunctionProtoType *FPT =
3664  FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
3665  QualType Existing = QualType(FPT, 0);
3666 
3667  // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
3668  // it so long as our exception specification doesn't contain a dependent
3669  // noexcept expression, or we're just looking for a canonical type.
3670  // Otherwise, we're going to need to create a type
3671  // sugar node to hold the concrete expression.
3672  if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
3673  EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
3674  return Existing;
3675 
3676  // We need a new type sugar node for this one, to hold the new noexcept
3677  // expression. We do no canonicalization here, but that's OK since we don't
3678  // expect to see the same noexcept expression much more than once.
3679  Canonical = getCanonicalType(Existing);
3680  Unique = true;
3681  }
3682 
3683  bool NoexceptInType = getLangOpts().CPlusPlus17;
3684  bool IsCanonicalExceptionSpec =
3685  isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);
3686 
3687  // Determine whether the type being created is already canonical or not.
3688  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
3689  isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
3690  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
3691  if (!ArgArray[i].isCanonicalAsParam())
3692  isCanonical = false;
3693 
3694  if (OnlyWantCanonical)
3695  assert(isCanonical &&
3696  "given non-canonical parameters constructing canonical type");
3697 
3698  // If this type isn't canonical, get the canonical version of it if we don't
3699  // already have it. The exception spec is only partially part of the
3700  // canonical type, and only in C++17 onwards.
3701  if (!isCanonical && Canonical.isNull()) {
3702  SmallVector<QualType, 16> CanonicalArgs;
3703  CanonicalArgs.reserve(NumArgs);
3704  for (unsigned i = 0; i != NumArgs; ++i)
3705  CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
3706 
3707  llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
3708  FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
3709  CanonicalEPI.HasTrailingReturn = false;
3710 
3711  if (IsCanonicalExceptionSpec) {
3712  // Exception spec is already OK.
3713  } else if (NoexceptInType) {
3714  switch (EPI.ExceptionSpec.Type) {
 3715     case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
 3716       // We don't know yet. It shouldn't matter what we pick here; no-one
3717  // should ever look at this.
3718  LLVM_FALLTHROUGH;
3719  case EST_None: case EST_MSAny: case EST_NoexceptFalse:
3720  CanonicalEPI.ExceptionSpec.Type = EST_None;
3721  break;
3722 
3723  // A dynamic exception specification is almost always "not noexcept",
3724  // with the exception that a pack expansion might expand to no types.
3725  case EST_Dynamic: {
3726  bool AnyPacks = false;
3727  for (QualType ET : EPI.ExceptionSpec.Exceptions) {
3728  if (ET->getAs<PackExpansionType>())
3729  AnyPacks = true;
3730  ExceptionTypeStorage.push_back(getCanonicalType(ET));
3731  }
3732  if (!AnyPacks)
3733  CanonicalEPI.ExceptionSpec.Type = EST_None;
3734  else {
3735  CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
3736  CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
3737  }
3738  break;
3739  }
3740 
3742  CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
3743  break;
3744 
3745  case EST_DependentNoexcept:
3746  llvm_unreachable("dependent noexcept is already canonical");
3747  }
3748  } else {
 3749       CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
 3750     }
3751 
3752  // Adjust the canonical function result type.
3753  CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
3754  Canonical =
3755  getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);
3756 
3757  // Get the new insert position for the node we care about.
3758  FunctionProtoType *NewIP =
3759  FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
3760  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3761  }
3762 
3763  // Compute the needed size to hold this FunctionProtoType and the
3764  // various trailing objects.
3765  auto ESH = FunctionProtoType::getExceptionSpecSize(
3766  EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
3767  size_t Size = FunctionProtoType::totalSizeToAlloc<
 3768      QualType, FunctionType::FunctionTypeExtraBitfields,
 3769      FunctionType::ExceptionType, Expr *, FunctionDecl *,
 3770      FunctionProtoType::ExtParameterInfo, Qualifiers>(
3771  NumArgs, FunctionProtoType::hasExtraBitfields(EPI.ExceptionSpec.Type),
3772  ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
3773  EPI.ExtParameterInfos ? NumArgs : 0,
3774  EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0);
3775 
3776  auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment);
3777  FunctionProtoType::ExtProtoInfo newEPI = EPI;
3778  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
3779  Types.push_back(FTP);
3780  if (!Unique)
3781  FunctionProtoTypes.InsertNode(FTP, InsertPos);
3782  return QualType(FTP, 0);
3783 }
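// Editorial illustration (not part of ASTContext.cpp): a minimal sketch of
// building a prototyped function type. makeBinaryIntFunction is a hypothetical
// helper; it goes through the public ASTContext::getFunctionType wrapper
// (declared in ASTContext.h), which forwards to getFunctionTypeInternal above.
static QualType makeBinaryIntFunction(ASTContext &Ctx) {
  // The canonical prototype 'int (int, int)' with default ExtProtoInfo:
  // not variadic, no exception specification, no trailing return type.
  QualType Params[] = {Ctx.IntTy, Ctx.IntTy};
  FunctionProtoType::ExtProtoInfo EPI;
  return Ctx.getFunctionType(Ctx.IntTy, Params, EPI);
}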
3784 
3785 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
3786  llvm::FoldingSetNodeID ID;
3787  PipeType::Profile(ID, T, ReadOnly);
3788 
3789  void *InsertPos = nullptr;
3790  if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
3791  return QualType(PT, 0);
3792 
3793  // If the pipe element type isn't canonical, this won't be a canonical type
3794  // either, so fill in the canonical type field.
3795  QualType Canonical;
3796  if (!T.isCanonical()) {
3797  Canonical = getPipeType(getCanonicalType(T), ReadOnly);
3798 
3799  // Get the new insert position for the node we care about.
3800  PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
3801  assert(!NewIP && "Shouldn't be in the map!");
3802  (void)NewIP;
3803  }
3804  auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly);
3805  Types.push_back(New);
3806  PipeTypes.InsertNode(New, InsertPos);
3807  return QualType(New, 0);
3808 }
3809 
 3810 QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const {
 3811  // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
3812  return LangOpts.OpenCL ? getAddrSpaceQualType(Ty, LangAS::opencl_constant)
3813  : Ty;
3814 }
3815 
 3816 QualType ASTContext::getReadPipeType(QualType T) const {
 3817  return getPipeType(T, true);
3818 }
3819 
 3820 QualType ASTContext::getWritePipeType(QualType T) const {
 3821  return getPipeType(T, false);
3822 }
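// Editorial illustration (not part of ASTContext.cpp): makeIntPipes is a
// hypothetical helper exercising the read/write convenience wrappers above.
static std::pair<QualType, QualType> makeIntPipes(ASTContext &Ctx) {
  // OpenCL 'read_only pipe int' and 'write_only pipe int'; both share the
  // same uniqued PipeType machinery and differ only in the ReadOnly flag.
  return {Ctx.getReadPipeType(Ctx.IntTy), Ctx.getWritePipeType(Ctx.IntTy)};
}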
3823 
3824 #ifndef NDEBUG
 3825 static bool NeedsInjectedClassNameType(const RecordDecl *D) {
 3826  if (!isa<CXXRecordDecl>(D)) return false;
3827  const auto *RD = cast<CXXRecordDecl>(D);
3828  if (isa<ClassTemplatePartialSpecializationDecl>(RD))
3829  return true;
3830  if (RD->getDescribedClassTemplate() &&
3831  !isa<ClassTemplateSpecializationDecl>(RD))
3832  return true;
3833  return false;
3834 }
3835 #endif
3836 
3837 /// getInjectedClassNameType - Return the unique reference to the
3838 /// injected class name type for the specified templated declaration.
 3839 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
 3840                                               QualType TST) const {
3841  assert(NeedsInjectedClassNameType(Decl));
3842  if (Decl->TypeForDecl) {
3843  assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
3844  } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
3845  assert(PrevDecl->TypeForDecl && "previous declaration has no type");
3846  Decl->TypeForDecl = PrevDecl->TypeForDecl;
3847  assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
3848  } else {
3849  Type *newType =
3850  new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
3851  Decl->TypeForDecl = newType;
3852  Types.push_back(newType);
3853  }
3854  return QualType(Decl->TypeForDecl, 0);
3855 }
3856 
3857 /// getTypeDeclType - Return the unique reference to the type for the
3858 /// specified type declaration.
3859 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
3860  assert(Decl && "Passed null for Decl param");
3861  assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
3862 
3863  if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
3864  return getTypedefType(Typedef);
3865 
3866  assert(!isa<TemplateTypeParmDecl>(Decl) &&
3867  "Template type parameter types are always available.");
3868 
3869  if (const auto *Record = dyn_cast<RecordDecl>(Decl)) {
3870  assert(Record->isFirstDecl() && "struct/union has previous declaration");
3871  assert(!NeedsInjectedClassNameType(Record));
3872  return getRecordType(Record);
3873  } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) {
3874  assert(Enum->isFirstDecl() && "enum has previous declaration");
3875  return getEnumType(Enum);
3876  } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
3877  Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
3878  Decl->TypeForDecl = newType;
3879  Types.push_back(newType);
3880  } else
3881  llvm_unreachable("TypeDecl without a type?");
3882 
3883  return QualType(Decl->TypeForDecl, 0);
3884 }
3885 
3886 /// getTypedefType - Return the unique reference to the type for the
3887 /// specified typedef name decl.
3888 QualType
 3889 ASTContext::getTypedefType(const TypedefNameDecl *Decl,
 3890                            QualType Canonical) const {
3891  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
3892 
3893  if (Canonical.isNull())
3894  Canonical = getCanonicalType(Decl->getUnderlyingType());
3895  auto *newType = new (*this, TypeAlignment)
3896  TypedefType(Type::Typedef, Decl, Canonical);
3897  Decl->TypeForDecl = newType;
3898  Types.push_back(newType);
3899  return QualType(newType, 0);
3900 }
3901 
 3902 QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
 3903  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
3904 
3905  if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
3906  if (PrevDecl->TypeForDecl)
3907  return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
3908 
3909  auto *newType = new (*this, TypeAlignment) RecordType(Decl);
3910  Decl->TypeForDecl = newType;
3911  Types.push_back(newType);
3912  return QualType(newType, 0);
3913 }
3914 
 3915 QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
 3916  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
3917 
3918  if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
3919  if (PrevDecl->TypeForDecl)
3920  return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
3921 
3922  auto *newType = new (*this, TypeAlignment) EnumType(Decl);
3923  Decl->TypeForDecl = newType;
3924  Types.push_back(newType);
3925  return QualType(newType, 0);
3926 }
3927 
 3928 QualType ASTContext::getAttributedType(attr::Kind attrKind,
 3929                                        QualType modifiedType,
3930  QualType equivalentType) {
3931  llvm::FoldingSetNodeID id;
3932  AttributedType::Profile(id, attrKind, modifiedType, equivalentType);
3933 
3934  void *insertPos = nullptr;
3935  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
3936  if (type) return QualType(type, 0);
3937 
3938  QualType canon = getCanonicalType(equivalentType);
3939  type = new (*this, TypeAlignment)
3940  AttributedType(canon, attrKind, modifiedType, equivalentType);
3941 
3942  Types.push_back(type);
3943  AttributedTypes.InsertNode(type, insertPos);
3944 
3945  return QualType(type, 0);
3946 }
3947 
3948 /// Retrieve a substitution-result type.
3949 QualType
 3950 ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
 3951                                          QualType Replacement) const {
3952  assert(Replacement.isCanonical()
3953  && "replacement types must always be canonical");
3954 
3955  llvm::FoldingSetNodeID ID;
3956  SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
3957  void *InsertPos = nullptr;
3958  SubstTemplateTypeParmType *SubstParm
3959  = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
3960 
3961  if (!SubstParm) {
3962  SubstParm = new (*this, TypeAlignment)
3963  SubstTemplateTypeParmType(Parm, Replacement);
3964  Types.push_back(SubstParm);
3965  SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
3966  }
3967 
3968  return QualType(SubstParm, 0);
3969 }
3970 
 3971 /// Retrieve a substitution-result type for a substituted template parameter pack.
 3972 QualType ASTContext::getSubstTemplateTypeParmPackType(
 3973                                           const TemplateTypeParmType *Parm,
3974  const TemplateArgument &ArgPack) {
3975 #ifndef NDEBUG
3976  for (const auto &P : ArgPack.pack_elements()) {
3977  assert(P.getKind() == TemplateArgument::Type &&"Pack contains a non-type");
3978  assert(P.getAsType().isCanonical() && "Pack contains non-canonical type");
3979  }
3980 #endif
3981 
3982  llvm::FoldingSetNodeID ID;
3983  SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
3984  void *InsertPos = nullptr;
3985  if (SubstTemplateTypeParmPackType *SubstParm
3986  = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
3987  return QualType(SubstParm, 0);
3988 
3989  QualType Canon;
3990  if (!Parm->isCanonicalUnqualified()) {
3991  Canon = getCanonicalType(QualType(Parm, 0));
3992  Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
3993  ArgPack);
3994  SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
3995  }
3996 
3997  auto *SubstParm
3998  = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
3999  ArgPack);
4000  Types.push_back(SubstParm);
4001  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
4002  return QualType(SubstParm, 0);
4003 }
4004 
4005 /// Retrieve the template type parameter type for a template
4006 /// parameter or parameter pack with the given depth, index, and (optionally)
4007 /// name.
 4008 QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
 4009                                              bool ParameterPack,
4010  TemplateTypeParmDecl *TTPDecl) const {
4011  llvm::FoldingSetNodeID ID;
4012  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
4013  void *InsertPos = nullptr;
4014  TemplateTypeParmType *TypeParm
4015  = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4016 
4017  if (TypeParm)
4018  return QualType(TypeParm, 0);
4019 
4020  if (TTPDecl) {
4021  QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
4022  TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);
4023 
4024  TemplateTypeParmType *TypeCheck
4025  = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4026  assert(!TypeCheck && "Template type parameter canonical type broken");
4027  (void)TypeCheck;
4028  } else
4029  TypeParm = new (*this, TypeAlignment)
4030  TemplateTypeParmType(Depth, Index, ParameterPack);
4031 
4032  Types.push_back(TypeParm);
4033  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
4034 
4035  return QualType(TypeParm, 0);
4036 }
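// Editorial illustration (not part of ASTContext.cpp): makeCanonicalT is a
// hypothetical helper. With no TemplateTypeParmDecl supplied, the result is
// the canonical parameter type identified purely by depth and index, which is
// what canonical template signatures are built from.
static QualType makeCanonicalT(ASTContext &Ctx) {
  // The canonical type of the first type parameter of the innermost template
  // (depth 0, index 0, not a parameter pack).
  return Ctx.getTemplateTypeParmType(/*Depth=*/0, /*Index=*/0,
                                     /*ParameterPack=*/false);
}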
4037 
 4038 TypeSourceInfo *
 4039 ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
 4040                                               SourceLocation NameLoc,
4041  const TemplateArgumentListInfo &Args,
4042  QualType Underlying) const {
4043  assert(!Name.getAsDependentTemplateName() &&
4044  "No dependent template names here!");
4045  QualType TST = getTemplateSpecializationType(Name, Args, Underlying);
4046 
 4047  TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
 4048  TemplateSpecializationTypeLoc TL =
 4049      DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
 4050  TL.setTemplateKeywordLoc(SourceLocation());
 4051  TL.setTemplateNameLoc(NameLoc);
4052  TL.setLAngleLoc(Args.getLAngleLoc());
4053  TL.setRAngleLoc(Args.getRAngleLoc());
4054  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
4055  TL.setArgLocInfo(i, Args[i].getLocInfo());
4056  return DI;
4057 }
4058 
4059 QualType
 4060 ASTContext::getTemplateSpecializationType(TemplateName Template,
 4061                                           const TemplateArgumentListInfo &Args,
4062  QualType Underlying) const {
4063  assert(!Template.getAsDependentTemplateName() &&
4064  "No dependent template names here!");
4065 
 4066  SmallVector<TemplateArgument, 4> ArgVec;
 4067  ArgVec.reserve(Args.size());
4068  for (const TemplateArgumentLoc &Arg : Args.arguments())
4069  ArgVec.push_back(Arg.getArgument());
4070 
4071  return getTemplateSpecializationType(Template, ArgVec, Underlying);
4072 }
4073 
4074 #ifndef NDEBUG
 4075 static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
 4076  for (const TemplateArgument &Arg : Args)
4077  if (Arg.isPackExpansion())
4078  return true;
4079 
4080  return true;
4081 }
4082 #endif
4083 
4084 QualType
 4085 ASTContext::getTemplateSpecializationType(TemplateName Template,
 4086                                           ArrayRef<TemplateArgument> Args,
 4087                                           QualType Underlying) const {
4088  assert(!Template.getAsDependentTemplateName() &&
4089  "No dependent template names here!");
4090  // Look through qualified template names.
4091  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4092  Template = TemplateName(QTN->getTemplateDecl());
4093 
4094  bool IsTypeAlias =
4095  Template.getAsTemplateDecl() &&
4096  isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
4097  QualType CanonType;
4098  if (!Underlying.isNull())
4099  CanonType = getCanonicalType(Underlying);
4100  else {
4101  // We can get here with an alias template when the specialization contains
4102  // a pack expansion that does not match up with a parameter pack.
4103  assert((!IsTypeAlias || hasAnyPackExpansions(Args)) &&
4104  "Caller must compute aliased type");
4105  IsTypeAlias = false;
4106  CanonType = getCanonicalTemplateSpecializationType(Template, Args);
4107  }
4108 
4109  // Allocate the (non-canonical) template specialization type, but don't
4110  // try to unique it: these types typically have location information that
4111  // we don't unique and don't want to lose.
4112  void *Mem = Allocate(sizeof(TemplateSpecializationType) +
4113  sizeof(TemplateArgument) * Args.size() +
4114  (IsTypeAlias? sizeof(QualType) : 0),
4115  TypeAlignment);
4116  auto *Spec
4117  = new (Mem) TemplateSpecializationType(Template, Args, CanonType,
4118  IsTypeAlias ? Underlying : QualType());
4119 
4120  Types.push_back(Spec);
4121  return QualType(Spec, 0);
4122 }
4123 
 4124 QualType ASTContext::getCanonicalTemplateSpecializationType(
 4125     TemplateName Template, ArrayRef<TemplateArgument> Args) const {
4126  assert(!Template.getAsDependentTemplateName() &&
4127  "No dependent template names here!");
4128 
4129  // Look through qualified template names.
4130  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4131  Template = TemplateName(QTN->getTemplateDecl());
4132 
4133  // Build the canonical template specialization type.
4134  TemplateName CanonTemplate = getCanonicalTemplateName(Template);
 4135  SmallVector<TemplateArgument, 4> CanonArgs;
 4136  unsigned NumArgs = Args.size();
4137  CanonArgs.reserve(NumArgs);
4138  for (const TemplateArgument &Arg : Args)
4139  CanonArgs.push_back(getCanonicalTemplateArgument(Arg));
4140 
4141  // Determine whether this canonical template specialization type already
4142  // exists.
4143  llvm::FoldingSetNodeID ID;
4144  TemplateSpecializationType::Profile(ID, CanonTemplate,
4145  CanonArgs, *this);
4146 
4147  void *InsertPos = nullptr;
 4148  TemplateSpecializationType *Spec
 4149    = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4150 
4151  if (!Spec) {
4152  // Allocate a new canonical template specialization type.
4153  void *Mem = Allocate((sizeof(TemplateSpecializationType) +
4154  sizeof(TemplateArgument) * NumArgs),
4155  TypeAlignment);
4156  Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
4157  CanonArgs,
4158  QualType(), QualType());
4159  Types.push_back(Spec);
4160  TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
4161  }
4162 
4163  assert(Spec->isDependentType() &&
4164  "Non-dependent template-id type must have a canonical type");
4165  return QualType(Spec, 0);
4166 }
4167 
 4168 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
 4169                                        NestedNameSpecifier *NNS,
4170  QualType NamedType,
4171  TagDecl *OwnedTagDecl) const {
4172  llvm::FoldingSetNodeID ID;
4173  ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);
4174 
4175  void *InsertPos = nullptr;
4176  ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
4177  if (T)
4178  return QualType(T, 0);
4179 
4180  QualType Canon = NamedType;
4181  if (!Canon.isCanonical()) {
4182  Canon = getCanonicalType(NamedType);
4183  ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
4184  assert(!CheckT && "Elaborated canonical type broken");
4185  (void)CheckT;
4186  }
4187 
4188  void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl),
4189  TypeAlignment);
4190  T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);
4191 
4192  Types.push_back(T);
4193  ElaboratedTypes.InsertNode(T, InsertPos);
4194  return QualType(T, 0);
4195 }
4196 
4197 QualType
 4198 ASTContext::getParenType(QualType InnerType) const {
 4199  llvm::FoldingSetNodeID ID;
4200  ParenType::Profile(ID, InnerType);
4201 
4202  void *InsertPos = nullptr;
4203  ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
4204  if (T)
4205  return QualType(T, 0);
4206 
4207  QualType Canon = InnerType;
4208  if (!Canon.isCanonical()) {
4209  Canon = getCanonicalType(InnerType);
4210  ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
4211  assert(!CheckT && "Paren canonical type broken");
4212  (void)CheckT;
4213  }
4214 
4215  T = new (*this, TypeAlignment) ParenType(InnerType, Canon);
4216  Types.push_back(T);
4217  ParenTypes.InsertNode(T, InsertPos);
4218  return QualType(T, 0);
4219 }
4220 
 4221 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
 4222                                           NestedNameSpecifier *NNS,
4223  const IdentifierInfo *Name,
4224  QualType Canon) const {
4225  if (Canon.isNull()) {
 4226  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
 4227  if (CanonNNS != NNS)
4228  Canon = getDependentNameType(Keyword, CanonNNS, Name);
4229  }
4230 
4231  llvm::FoldingSetNodeID ID;
4232  DependentNameType::Profile(ID, Keyword, NNS, Name);
4233 
4234  void *InsertPos = nullptr;
 4235  DependentNameType *T
 4236    = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
4237  if (T)
4238  return QualType(T, 0);
4239 
4240  T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon);
4241  Types.push_back(T);
4242  DependentNameTypes.InsertNode(T, InsertPos);
4243  return QualType(T, 0);
4244 }
4245 
4246 QualType
 4247 ASTContext::getDependentTemplateSpecializationType(
 4248                                  ElaboratedTypeKeyword Keyword,
4249  NestedNameSpecifier *NNS,
4250  const IdentifierInfo *Name,
4251  const TemplateArgumentListInfo &Args) const {
4252  // TODO: avoid this copy
 4253  SmallVector<TemplateArgument, 16> ArgCopy;
 4254  for (unsigned I = 0, E = Args.size(); I != E; ++I)
4255  ArgCopy.push_back(Args[I].getArgument());
4256  return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy);
4257 }
4258 
4259 QualType
 4260 ASTContext::getDependentTemplateSpecializationType(
 4261                                  ElaboratedTypeKeyword Keyword,
4262  NestedNameSpecifier *NNS,
4263  const IdentifierInfo *Name,
4264  ArrayRef<TemplateArgument> Args) const {
4265  assert((!NNS || NNS->isDependent()) &&
4266  "nested-name-specifier must be dependent");
4267 
4268  llvm::FoldingSetNodeID ID;
4269  DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
4270  Name, Args);
4271 
4272  void *InsertPos = nullptr;
 4273  DependentTemplateSpecializationType *T
 4274    = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4275  if (T)
4276  return QualType(T, 0);
4277 
 4278  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
 4279 
4280  ElaboratedTypeKeyword CanonKeyword = Keyword;
4281  if (Keyword == ETK_None) CanonKeyword = ETK_Typename;
4282 
4283  bool AnyNonCanonArgs = false;
4284  unsigned NumArgs = Args.size();
4285  SmallVector<TemplateArgument, 16> CanonArgs(NumArgs);
4286  for (unsigned I = 0; I != NumArgs; ++I) {
4287  CanonArgs[I] = getCanonicalTemplateArgument(Args[I]);
4288  if (!CanonArgs[I].structurallyEquals(Args[I]))
4289  AnyNonCanonArgs = true;
4290  }
4291 
4292  QualType Canon;
4293  if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
4294  Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
4295  Name,
4296  CanonArgs);
4297 
4298  // Find the insert position again.
4299  DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4300  }
4301 
4302  void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
4303  sizeof(TemplateArgument) * NumArgs),
4304  TypeAlignment);
4305  T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
4306  Name, Args, Canon);
4307  Types.push_back(T);
4308  DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
4309  return QualType(T, 0);
4310 }
4311 
 4312 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
 4313  TemplateArgument Arg;
4314  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
4315  QualType ArgType = getTypeDeclType(TTP);
4316  if (TTP->isParameterPack())
4317  ArgType = getPackExpansionType(ArgType, None);
4318 
4319  Arg = TemplateArgument(ArgType);
4320  } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
4321  Expr *E = new (*this) DeclRefExpr(
4322  *this, NTTP, /*enclosing*/ false,
4323  NTTP->getType().getNonLValueExprType(*this),
4324  Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());
4325 
4326  if (NTTP->isParameterPack())
4327  E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(),
4328  None);
4329  Arg = TemplateArgument(E);
4330  } else {
4331  auto *TTP = cast<TemplateTemplateParmDecl>(Param);
4332  if (TTP->isParameterPack())
 4333      Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>());
 4334    else
4335  Arg = TemplateArgument(TemplateName(TTP));
4336  }
4337 
4338  if (Param->isTemplateParameterPack())
4339  Arg = TemplateArgument::CreatePackCopy(*this, Arg);
4340 
4341  return Arg;
4342 }
4343 
4344 void
 4345 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params,
 4346                                     SmallVectorImpl<TemplateArgument> &Args) {
 4347  Args.reserve(Args.size() + Params->size());
4348 
4349  for (NamedDecl *Param : *Params)
4350  Args.push_back(getInjectedTemplateArg(Param));
4351 }
4352 
 4353 QualType ASTContext::getPackExpansionType(QualType Pattern,
 4354                                           Optional<unsigned> NumExpansions) {
4355  llvm::FoldingSetNodeID ID;
4356  PackExpansionType::Profile(ID, Pattern, NumExpansions);
4357 
4358  assert(Pattern->containsUnexpandedParameterPack() &&
4359  "Pack expansions must expand one or more parameter packs");
4360  void *InsertPos = nullptr;
 4361  PackExpansionType *T
 4362    = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
4363  if (T)
4364  return QualType(T, 0);
4365 
4366  QualType Canon;
4367  if (!Pattern.isCanonical()) {
4368  Canon = getCanonicalType(Pattern);
4369  // The canonical type might not contain an unexpanded parameter pack, if it
4370  // contains an alias template specialization which ignores one of its
4371  // parameters.
4372  if (Canon->containsUnexpandedParameterPack()) {
4373  Canon = getPackExpansionType(Canon, NumExpansions);
4374 
4375  // Find the insert position again, in case we inserted an element into
4376  // PackExpansionTypes and invalidated our insert position.
4377  PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
4378  }
4379  }
4380 
4381  T = new (*this, TypeAlignment)
4382  PackExpansionType(Pattern, Canon, NumExpansions);
4383  Types.push_back(T);
4384  PackExpansionTypes.InsertNode(T, InsertPos);
4385  return QualType(T, 0);
4386 }
4387 
4388 /// CmpProtocolNames - Comparison predicate for sorting protocols
4389 /// alphabetically.
4390 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
4391  ObjCProtocolDecl *const *RHS) {
4392  return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName());
4393 }
4394 
 4395 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
 4396  if (Protocols.empty()) return true;
4397 
4398  if (Protocols[0]->getCanonicalDecl() != Protocols[0])
4399  return false;
4400 
4401  for (unsigned i = 1; i != Protocols.size(); ++i)
4402  if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
4403  Protocols[i]->getCanonicalDecl() != Protocols[i])
4404  return false;
4405  return true;
4406 }
4407 
4408 static void
 4409 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
 4410  // Sort protocols, keyed by name.
4411  llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames);
4412 
4413  // Canonicalize.
4414  for (ObjCProtocolDecl *&P : Protocols)
4415  P = P->getCanonicalDecl();
4416 
4417  // Remove duplicates.
4418  auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end());
4419  Protocols.erase(ProtocolsEnd, Protocols.end());
4420 }
4421 
 4422 QualType ASTContext::getObjCObjectType(QualType BaseType,
 4423                                        ObjCProtocolDecl * const *Protocols,
4424  unsigned NumProtocols) const {
4425  return getObjCObjectType(BaseType, {},
4426  llvm::makeArrayRef(Protocols, NumProtocols),
4427  /*isKindOf=*/false);
4428 }
4429 
 4430 QualType ASTContext::getObjCObjectType(
 4431            QualType baseType,
4432  ArrayRef<QualType> typeArgs,
4433  ArrayRef<ObjCProtocolDecl *> protocols,
4434  bool isKindOf) const {
4435  // If the base type is an interface and there aren't any protocols or
4436  // type arguments to add, then the interface type will do just fine.
4437  if (typeArgs.empty() && protocols.empty() && !isKindOf &&
4438  isa<ObjCInterfaceType>(baseType))
4439  return baseType;
4440 
4441  // Look in the folding set for an existing type.
4442  llvm::FoldingSetNodeID ID;
4443  ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf);
4444  void *InsertPos = nullptr;
4445  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
4446  return QualType(QT, 0);
4447 
4448  // Determine the type arguments to be used for canonicalization,
4449  // which may be explicitly specified here or written on the base
4450  // type.
4451  ArrayRef<QualType> effectiveTypeArgs = typeArgs;
4452  if (effectiveTypeArgs.empty()) {
4453  if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
4454  effectiveTypeArgs = baseObject->getTypeArgs();
4455  }
4456 
4457  // Build the canonical type, which has the canonical base type and a
4458  // sorted-and-uniqued list of protocols and the type arguments
4459  // canonicalized.
4460  QualType canonical;
4461  bool typeArgsAreCanonical = std::all_of(effectiveTypeArgs.begin(),
4462  effectiveTypeArgs.end(),
4463  [&](QualType type) {
4464  return type.isCanonical();
4465  });
4466  bool protocolsSorted = areSortedAndUniqued(protocols);
4467  if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
4468  // Determine the canonical type arguments.
4469  ArrayRef<QualType> canonTypeArgs;
4470  SmallVector<QualType, 4> canonTypeArgsVec;
4471  if (!typeArgsAreCanonical) {
4472  canonTypeArgsVec.reserve(effectiveTypeArgs.size());
4473  for (auto typeArg : effectiveTypeArgs)
4474  canonTypeArgsVec.push_back(getCanonicalType(typeArg));
4475  canonTypeArgs = canonTypeArgsVec;
4476  } else {
4477  canonTypeArgs = effectiveTypeArgs;
4478  }
4479 
4480  ArrayRef<ObjCProtocolDecl *> canonProtocols;
4481  SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
4482  if (!protocolsSorted) {
4483  canonProtocolsVec.append(protocols.begin(), protocols.end());
4484  SortAndUniqueProtocols(canonProtocolsVec);
4485  canonProtocols = canonProtocolsVec;
4486  } else {
4487  canonProtocols = protocols;
4488  }
4489 
4490  canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs,
4491  canonProtocols, isKindOf);
4492 
4493  // Regenerate InsertPos.
4494  ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
4495  }
4496 
4497  unsigned size = sizeof(ObjCObjectTypeImpl);
4498  size += typeArgs.size() * sizeof(QualType);
4499  size += protocols.size() * sizeof(ObjCProtocolDecl *);
4500  void *mem = Allocate(size, TypeAlignment);
4501  auto *T =
4502  new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
4503  isKindOf);
4504 
4505  Types.push_back(T);
4506  ObjCObjectTypes.InsertNode(T, InsertPos);
4507  return QualType(T, 0);
4508 }
4509 
4510 /// Apply Objective-C protocol qualifiers to the given type.
4511 /// If this is for the canonical type of a type parameter, we can apply
4512 /// protocol qualifiers on the ObjCObjectPointerType.
4513 QualType
 4514 ASTContext::applyObjCProtocolQualifiers(QualType type,
 4515                   ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
4516  bool allowOnPointerType) const {
4517  hasError = false;
4518 
4519  if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) {
4520  return getObjCTypeParamType(objT->getDecl(), protocols);
4521  }
4522 
4523  // Apply protocol qualifiers to ObjCObjectPointerType.
4524  if (allowOnPointerType) {
4525  if (const auto *objPtr =
4526  dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) {
4527  const ObjCObjectType *objT = objPtr->getObjectType();
4528  // Merge protocol lists and construct ObjCObjectType.
4529  SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
4530  protocolsVec.append(objT->qual_begin(),
4531  objT->qual_end());
4532  protocolsVec.append(protocols.begin(), protocols.end());
4533  ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
4534  type = getObjCObjectType(
4535  objT->getBaseType(),
4536  objT->getTypeArgsAsWritten(),
4537  protocols,
4538  objT->isKindOfTypeAsWritten());
4539  return getObjCObjectPointerType(type);
4540  }
4541  }
4542 
4543  // Apply protocol qualifiers to ObjCObjectType.
4544  if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){
4545  // FIXME: Check for protocols to which the class type is already
4546  // known to conform.
4547 
4548  return getObjCObjectType(objT->getBaseType(),
4549  objT->getTypeArgsAsWritten(),
4550  protocols,
4551  objT->isKindOfTypeAsWritten());
4552  }
4553 
4554  // If the canonical type is ObjCObjectType, ...
4555  if (type->isObjCObjectType()) {
4556  // Silently overwrite any existing protocol qualifiers.
4557  // TODO: determine whether that's the right thing to do.
4558 
4559  // FIXME: Check for protocols to which the class type is already
4560  // known to conform.
4561  return getObjCObjectType(type, {}, protocols, false);
4562  }
4563 
4564  // id<protocol-list>
4565  if (type->isObjCIdType()) {
4566  const auto *objPtr = type->castAs<ObjCObjectPointerType>();
4567  type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols,
4568  objPtr->isKindOfType());
4569  return getObjCObjectPointerType(type);
4570  }
4571 
4572  // Class<protocol-list>
4573  if (type->isObjCClassType()) {
4574  const auto *objPtr = type->castAs<ObjCObjectPointerType>();
4575  type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols,
4576  objPtr->isKindOfType());
4577  return getObjCObjectPointerType(type);
4578  }
4579 
4580  hasError = true;
4581  return type;
4582 }
4583 
4584 QualType
 4585 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
 4586                                  ArrayRef<ObjCProtocolDecl *> protocols,
4587  QualType Canonical) const {
4588  // Look in the folding set for an existing type.
4589  llvm::FoldingSetNodeID ID;
4590  ObjCTypeParamType::Profile(ID, Decl, protocols);
4591  void *InsertPos = nullptr;
4592  if (ObjCTypeParamType *TypeParam =
4593  ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
4594  return QualType(TypeParam, 0);
4595 
4596  if (Canonical.isNull()) {
4597  // We canonicalize to the underlying type.
4598  Canonical = getCanonicalType(Decl->getUnderlyingType());
4599  if (!protocols.empty()) {
 4600      // Apply the protocol qualifiers.
 4601      bool hasError;
 4602      Canonical = getCanonicalType(applyObjCProtocolQualifiers(
 4603          Canonical, protocols, hasError, true /*allowOnPointerType*/));
4604  assert(!hasError && "Error when apply protocol qualifier to bound type");
4605  }
4606  }
4607 
4608  unsigned size = sizeof(ObjCTypeParamType);
4609  size += protocols.size() * sizeof(ObjCProtocolDecl *);
4610  void *mem = Allocate(size, TypeAlignment);
4611  auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);
4612 
4613  Types.push_back(newType);
4614  ObjCTypeParamTypes.InsertNode(newType, InsertPos);
4615  return QualType(newType, 0);
4616 }
4617 
4618 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
4619 /// protocol list adopt all protocols in QT's qualified-id protocol
4620 /// list.
 4621 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
 4622                                                 ObjCInterfaceDecl *IC) {
4623  if (!QT->isObjCQualifiedIdType())
4624  return false;
4625 
4626  if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) {
4627  // If both the right and left sides have qualifiers.
4628  for (auto *Proto : OPT->quals()) {
4629  if (!IC->ClassImplementsProtocol(Proto, false))
4630  return false;
4631  }
4632  return true;
4633  }
4634  return false;
4635 }
4636 
4637 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
4638 /// QT's qualified-id protocol list adopt all protocols in IDecl's list
4639 /// of protocols.
 4640 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
 4641                                                 ObjCInterfaceDecl *IDecl) {
4642  if (!QT->isObjCQualifiedIdType())
4643  return false;
4644  const auto *OPT = QT->getAs<ObjCObjectPointerType>();
4645  if (!OPT)
4646  return false;
4647  if (!IDecl->hasDefinition())
4648  return false;
4649  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
4650  CollectInheritedProtocols(IDecl, InheritedProtocols);
4651  if (InheritedProtocols.empty())
4652  return false;
4653  // Check that if every protocol in list of id<plist> conforms to a protocol
4654  // of IDecl's, then bridge casting is ok.
4655  bool Conforms = false;
4656  for (auto *Proto : OPT->quals()) {
4657  Conforms = false;
4658  for (auto *PI : InheritedProtocols) {
4659  if (ProtocolCompatibleWithProtocol(Proto, PI)) {
4660  Conforms = true;
4661  break;
4662  }
4663  }
4664  if (!Conforms)
4665  break;
4666  }
4667  if (Conforms)
4668  return true;
4669 
4670  for (auto *PI : InheritedProtocols) {
4671  // If both the right and left sides have qualifiers.
4672  bool Adopts = false;
4673  for (auto *Proto : OPT->quals()) {
4674  // return 'true' if 'PI' is in the inheritance hierarchy of Proto
4675  if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto)))
4676  break;
4677  }
4678  if (!Adopts)
4679  return false;
4680  }
4681  return true;
4682 }
4683 
4684 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
4685 /// the given object type.
 4686 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
 4687  llvm::FoldingSetNodeID ID;
4688  ObjCObjectPointerType::Profile(ID, ObjectT);
4689 
4690  void *InsertPos = nullptr;
4691  if (ObjCObjectPointerType *QT =
4692  ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
4693  return QualType(QT, 0);
4694 
4695  // Find the canonical object type.
4696  QualType Canonical;
4697  if (!ObjectT.isCanonical()) {
4698  Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));
4699 
4700  // Regenerate InsertPos.
4701  ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
4702  }
4703 
4704  // No match.
4705  void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
4706  auto *QType =
4707  new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
4708 
4709  Types.push_back(QType);
4710  ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
4711  return QualType(QType, 0);
4712 }
4713 
4714 /// getObjCInterfaceType - Return the unique reference to the type for the
4715 /// specified ObjC interface decl. The list of protocols is optional.
 4716 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
 4717                                           ObjCInterfaceDecl *PrevDecl) const {
4718  if (Decl->TypeForDecl)
4719  return QualType(Decl->TypeForDecl, 0);
4720 
4721  if (PrevDecl) {
4722  assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
4723  Decl->TypeForDecl = PrevDecl->TypeForDecl;
4724  return QualType(PrevDecl->TypeForDecl, 0);
4725  }
4726 
4727  // Prefer the definition, if there is one.
4728  if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
4729  Decl = Def;
4730 
4731  void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
4732  auto *T = new (Mem) ObjCInterfaceType(Decl);
4733  Decl->TypeForDecl = T;
4734  Types.push_back(T);
4735  return QualType(T, 0);
4736 }
4737 
4738 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
 4739 /// TypeOfExprType ASTs (since expressions are never shared). For example,
 4740 /// multiple declarations that refer to "typeof(x)" all contain different
 4741 /// DeclRefExprs. This doesn't affect the type checker, since it operates
 4742 /// on canonical types (which are always unique).
 4743 QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
 4744  TypeOfExprType *toe;
4745  if (tofExpr->isTypeDependent()) {
4746  llvm::FoldingSetNodeID ID;
4747  DependentTypeOfExprType::Profile(ID, *this, tofExpr);
4748 
4749  void *InsertPos = nullptr;
 4750  DependentTypeOfExprType *Canon
 4751    = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
4752  if (Canon) {
4753  // We already have a "canonical" version of an identical, dependent
4754  // typeof(expr) type. Use that as our canonical type.
4755  toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr,
4756  QualType((TypeOfExprType*)Canon, 0));
4757  } else {
4758  // Build a new, canonical typeof(expr) type.
4759  Canon
4760  = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr);
4761  DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
4762  toe = Canon;
4763  }
4764  } else {
4765  QualType Canonical = getCanonicalType(tofExpr->getType());
4766  toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical);
4767  }
4768  Types.push_back(toe);
4769  return QualType(toe, 0);
4770 }
4771 
4772 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
4773 /// TypeOfType nodes. The only motivation to unique these nodes would be
4774 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
4775 /// an issue. This doesn't affect the type checker, since it operates
4776 /// on canonical types (which are always unique).
 4777 QualType ASTContext::getTypeOfType(QualType tofType) const {
 4778  QualType Canonical = getCanonicalType(tofType);
4779  auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical);
4780  Types.push_back(tot);
4781  return QualType(tot, 0);
4782 }
4783 
4784 /// Unlike many "get<Type>" functions, we don't unique DecltypeType
4785 /// nodes. This would never be helpful, since each such type has its own
4786 /// expression, and would not give a significant memory saving, since there
4787 /// is an Expr tree under each such type.
 4788 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
 4789  DecltypeType *dt;
4790 
4791  // C++11 [temp.type]p2:
4792  // If an expression e involves a template parameter, decltype(e) denotes a
4793  // unique dependent type. Two such decltype-specifiers refer to the same
4794  // type only if their expressions are equivalent (14.5.6.1).
4795  if (e->isInstantiationDependent()) {
4796  llvm::FoldingSetNodeID ID;
4797  DependentDecltypeType::Profile(ID, *this, e);
4798 
4799  void *InsertPos = nullptr;
4800  DependentDecltypeType *Canon
4801  = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
4802  if (!Canon) {
4803  // Build a new, canonical decltype(expr) type.
4804  Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e);
4805  DependentDecltypeTypes.InsertNode(Canon, InsertPos);
4806  }
4807  dt = new (*this, TypeAlignment)
4808  DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0));
4809  } else {
4810  dt = new (*this, TypeAlignment)
4811  DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType));
4812  }
4813  Types.push_back(dt);
4814  return QualType(dt, 0);
4815 }
4816 
 4817 /// getUnaryTransformType - We don't unique these, since the memory
 4818 /// savings are minimal and these are rare.
 4819 QualType ASTContext::getUnaryTransformType(QualType BaseType,
 4820                                            QualType UnderlyingType,
 4821                                            UnaryTransformType::UTTKind Kind)
 4822                                            const {
4823  UnaryTransformType *ut = nullptr;
4824 
4825  if (BaseType->isDependentType()) {
4826  // Look in the folding set for an existing type.
4827  llvm::FoldingSetNodeID ID;
 4828    DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind);
 4829 
4830  void *InsertPos = nullptr;
 4831    DependentUnaryTransformType *Canon
 4832      = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);
4833 
4834  if (!Canon) {
4835  // Build a new, canonical __underlying_type(type) type.
4836  Canon = new (*this, TypeAlignment)
 4837             DependentUnaryTransformType(*this, getCanonicalType(BaseType),
 4838                                         Kind);
4839  DependentUnaryTransformTypes.InsertNode(Canon, InsertPos);
4840  }
4841  ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
4842  QualType(), Kind,
4843  QualType(Canon, 0));
4844  } else {
4845  QualType CanonType = getCanonicalType(UnderlyingType);
4846  ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
4847  UnderlyingType, Kind,
4848  CanonType);
4849  }
4850  Types.push_back(ut);
4851  return QualType(ut, 0);
4852 }
4853 
4854 /// getAutoType - Return the uniqued reference to the 'auto' type which has been
4855 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the
4856 /// canonical deduced-but-dependent 'auto' type.
 4857 QualType ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
 4858                                  bool IsDependent) const {
4859  if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && !IsDependent)
4860  return getAutoDeductType();
4861 
4862  // Look in the folding set for an existing type.
4863  void *InsertPos = nullptr;
4864  llvm::FoldingSetNodeID ID;
4865  AutoType::Profile(ID, DeducedType, Keyword, IsDependent);
4866  if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
4867  return QualType(AT, 0);
4868 
4869  auto *AT = new (*this, TypeAlignment)
4870  AutoType(DeducedType, Keyword, IsDependent);
4871  Types.push_back(AT);
4872  if (InsertPos)
4873  AutoTypes.InsertNode(AT, InsertPos);
4874  return QualType(AT, 0);
4875 }
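// Editorial illustration (not part of ASTContext.cpp): deducedAutoFor is a
// hypothetical helper. An undeduced, non-dependent 'auto' is instead uniqued
// through getAutoDeductType() below, which the function above short-circuits to.
static QualType deducedAutoFor(ASTContext &Ctx, QualType Deduced) {
  // 'auto' after deduction has succeeded, e.g. deduced to 'int'.
  return Ctx.getAutoType(Deduced, AutoTypeKeyword::Auto,
                         /*IsDependent=*/false);
}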
4876 
4877 /// Return the uniqued reference to the deduced template specialization type
4878 /// which has been deduced to the given type, or to the canonical undeduced
4879 /// such type, or the canonical deduced-but-dependent such type.
 4880 QualType ASTContext::getDeducedTemplateSpecializationType(
 4881     TemplateName Template, QualType DeducedType, bool IsDependent) const {
4882  // Look in the folding set for an existing type.
4883  void *InsertPos = nullptr;
4884  llvm::FoldingSetNodeID ID;
4885  DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType,
4886  IsDependent);
 4887  if (DeducedTemplateSpecializationType *DTST =
 4888          DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
4889  return QualType(DTST, 0);
4890 
4891  auto *DTST = new (*this, TypeAlignment)
4892  DeducedTemplateSpecializationType(Template, DeducedType, IsDependent);
4893  Types.push_back(DTST);
4894  if (InsertPos)
4895  DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos);
4896  return QualType(DTST, 0);
4897 }
4898 
4899 /// getAtomicType - Return the uniqued reference to the atomic type for
4900 /// the given value type.
 4901 QualType ASTContext::getAtomicType(QualType T) const {
 4902  // Unique pointers, to guarantee there is only one pointer of a particular
4903  // structure.
4904  llvm::FoldingSetNodeID ID;
4905  AtomicType::Profile(ID, T);
4906 
4907  void *InsertPos = nullptr;
4908  if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
4909  return QualType(AT, 0);
4910 
4911  // If the atomic value type isn't canonical, this won't be a canonical type
4912  // either, so fill in the canonical type field.
4913  QualType Canonical;
4914  if (!T.isCanonical()) {
4915  Canonical = getAtomicType(getCanonicalType(T));
4916 
4917  // Get the new insert position for the node we care about.
4918  AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
4919  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4920  }
4921  auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical);
4922  Types.push_back(New);
4923  AtomicTypes.InsertNode(New, InsertPos);
4924  return QualType(New, 0);
4925 }
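// Editorial illustration (not part of ASTContext.cpp): makeAtomicInt is a
// hypothetical helper for the uniqued atomic type built above.
static QualType makeAtomicInt(ASTContext &Ctx) {
  // The C11 type '_Atomic(int)'.
  return Ctx.getAtomicType(Ctx.IntTy);
}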
4926 
4927 /// getAutoDeductType - Get type pattern for deducing against 'auto'.
 4928 QualType ASTContext::getAutoDeductType() const {
 4929  if (AutoDeductTy.isNull())
 4930    AutoDeductTy = QualType(
 4931      new (*this, TypeAlignment) AutoType(QualType(), AutoTypeKeyword::Auto,
 4932                                          /*dependent*/false),
 4933      0);
4934  return AutoDeductTy;
4935 }
4936 
4937 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
 4938 QualType ASTContext::getAutoRRefDeductType() const {
 4939  if (AutoRRefDeductTy.isNull())
 4940    AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType());
 4941  assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
4942  return AutoRRefDeductTy;
4943 }
4944 
4945 /// getTagDeclType - Return the unique reference to the type for the
4946 /// specified TagDecl (struct/union/class/enum) decl.
 4947 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
 4948  assert(Decl);
4949  // FIXME: What is the design on getTagDeclType when it requires casting
4950  // away const? mutable?
4951  return getTypeDeclType(const_cast<TagDecl*>(Decl));
4952 }
4953 
4954 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
4955 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
4956 /// needs to agree with the definition in <stddef.h>.
 4957 CanQualType ASTContext::getSizeType() const {
 4958  return getFromTargetType(Target->getSizeType());
4959 }
4960 
4961 /// Return the unique signed counterpart of the integer type
4962 /// corresponding to size_t.
 4963 CanQualType ASTContext::getSignedSizeType() const {
 4964  return getFromTargetType(Target->getSignedSizeType());
4965 }
4966 
4967 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
 4968 CanQualType ASTContext::getIntMaxType() const {
 4969  return getFromTargetType(Target->getIntMaxType());
4970 }
4971 
4972 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
 4973 CanQualType ASTContext::getUIntMaxType() const {
 4974  return getFromTargetType(Target->getUIntMaxType());
4975 }
4976 
4977 /// getSignedWCharType - Return the type of "signed wchar_t".
4978 /// Used when in C++, as a GCC extension.
 4979 QualType ASTContext::getSignedWCharType() const {
 4980  // FIXME: derive from "Target" ?
4981  return WCharTy;
4982 }
4983 
4984 /// getUnsignedWCharType - Return the type of "unsigned wchar_t".
4985 /// Used when in C++, as a GCC extension.
 4986 QualType ASTContext::getUnsignedWCharType() const {
 4987  // FIXME: derive from "Target" ?
4988  return UnsignedIntTy;
4989 }
4990 
 4991 QualType ASTContext::getIntPtrType() const {
 4992  return getFromTargetType(Target->getIntPtrType());
4993 }
4994 
 4995 QualType ASTContext::getUIntPtrType() const {
 4996  return getCorrespondingUnsignedType(getIntPtrType());
 4997 }
4998 
4999 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
5000 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
 5001 QualType ASTContext::getPointerDiffType() const {
 5002  return getFromTargetType(Target->getPtrDiffType(0));
5003 }
5004 
5005 /// Return the unique unsigned counterpart of "ptrdiff_t"
5006 /// integer type. The standard (C11 7.21.6.1p7) refers to this type
5007 /// in the definition of %tu format specifier.
5008 QualType ASTContext::getUnsignedPointerDiffType() const {
5009  return getFromTargetType(Target->getUnsignedPtrDiffType(0));
5010 }
5011 
5012 /// Return the unique type for "pid_t" defined in
5013 /// <sys/types.h>. We need this to compute the correct type for vfork().
5014 QualType ASTContext::getProcessIDType() const {
5015  return getFromTargetType(Target->getProcessIDType());
5016 }
5017 
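// [Editor's note] Illustrative sketch, not part of the original source: the
// getters above simply translate TargetInfo's choice of underlying integer
// type into the corresponding builtin type. 'Ctx' is an assumed ASTContext.
//
//   QualType SizeT    = Ctx.getSizeType();         // e.g. 'unsigned long' on LP64
//   QualType PtrDiffT = Ctx.getPointerDiffType();  // e.g. 'long' on LP64
//   assert(SizeT->isUnsignedIntegerType() && PtrDiffT->isSignedIntegerType());
//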
5018 //===----------------------------------------------------------------------===//
5019 // Type Operators
5020 //===----------------------------------------------------------------------===//
5021 
5022 CanQualType ASTContext::getCanonicalParamType(QualType T) const {
5023  // Push qualifiers into arrays, and then discard any remaining
5024  // qualifiers.
5025  T = getCanonicalType(T);
5026  T = getVariableArrayDecayedType(T);
5027  const Type *Ty = T.getTypePtr();
5028  QualType Result;
5029  if (isa<ArrayType>(Ty)) {
5030  Result = getArrayDecayedType(QualType(Ty,0));
5031  } else if (isa<FunctionType>(Ty)) {
5032  Result = getPointerType(QualType(Ty, 0));
5033  } else {
5034  Result = QualType(Ty, 0);
5035  }
5036 
5037  return CanQualType::CreateUnsafe(Result);
5038 }
5039 
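// [Editor's note] Illustrative sketch, not part of the original source: for a
// parameter written as an array or function type, getCanonicalParamType()
// produces the canonical adjusted (decayed) type. 'Ctx' is an assumed
// ASTContext reference.
//
//   // 'int [4]' used as a parameter type becomes canonical 'int *'.
//   QualType ArrTy = Ctx.getConstantArrayType(Ctx.IntTy, llvm::APInt(32, 4),
//                                             ArrayType::Normal, 0);
//   CanQualType Canon = Ctx.getCanonicalParamType(ArrTy);
//   assert(Canon->isPointerType());
//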
5040 QualType ASTContext::getUnqualifiedArrayType(QualType type,
5041  Qualifiers &quals) {
5042  SplitQualType splitType = type.getSplitUnqualifiedType();
5043 
5044  // FIXME: getSplitUnqualifiedType() actually walks all the way to
5045  // the unqualified desugared type and then drops it on the floor.
5046  // We then have to strip that sugar back off with
5047  // getUnqualifiedDesugaredType(), which is silly.
5048  const auto *AT =
5049  dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());
5050 
5051  // If we don't have an array, just use the results in splitType.
5052  if (!AT) {
5053  quals = splitType.Quals;
5054  return QualType(splitType.Ty, 0);
5055  }
5056 
5057  // Otherwise, recurse on the array's element type.
5058  QualType elementType = AT->getElementType();
5059  QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);
5060 
5061  // If that didn't change the element type, AT has no qualifiers, so we
5062  // can just use the results in splitType.
5063  if (elementType == unqualElementType) {
5064  assert(quals.empty()); // from the recursive call
5065  quals = splitType.Quals;
5066  return QualType(splitType.Ty, 0);
5067  }
5068 
5069  // Otherwise, add in the qualifiers from the outermost type, then
5070  // build the type back up.
5071  quals.addConsistentQualifiers(splitType.Quals);
5072 
5073  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
5074  return getConstantArrayType(unqualElementType, CAT->getSize(),
5075  CAT->getSizeModifier(), 0);
5076  }
5077 
5078  if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) {
5079  return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
5080  }
5081 
5082  if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) {
5083  return getVariableArrayType(unqualElementType,
5084  VAT->getSizeExpr(),
5085  VAT->getSizeModifier(),
5086  VAT->getIndexTypeCVRQualifiers(),
5087  VAT->getBracketsRange());
5088  }
5089 
5090  const auto *DSAT = cast<DependentSizedArrayType>(AT);
5091  return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
5092  DSAT->getSizeModifier(), 0,
5093  SourceRange());
5094 }
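// [Editor's note] Illustrative sketch, not part of the original source: for an
// array whose element type carries qualifiers, getUnqualifiedArrayType()
// returns the array of unqualified elements and reports the stripped
// qualifiers through 'quals'. 'Ctx' is an assumed ASTContext and
// 'ConstIntArrayTy' an assumed QualType naming a 'const int[4]' built
// elsewhere.
//
//   Qualifiers Quals;
//   QualType Unqual = Ctx.getUnqualifiedArrayType(ConstIntArrayTy, Quals);
//   // Unqual is 'int[4]'; Quals now holds 'const'.
//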
5095 
5096 /// Attempt to unwrap two types that may both be array types with the same bound
5097 /// (or both be array types of unknown bound) for the purpose of comparing the
5098 /// cv-decomposition of two types per C++ [conv.qual].
5099 bool ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) {
5100  bool UnwrappedAny = false;
5101  while (true) {
5102  auto *AT1 = getAsArrayType(T1);
5103  if (!AT1) return UnwrappedAny;
5104 
5105  auto *AT2 = getAsArrayType(T2);
5106  if (!AT2) return UnwrappedAny;
5107 
5108  // If we don't have two array types with the same constant bound nor two
5109  // incomplete array types, we've unwrapped everything we can.
5110  if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) {
5111  auto *CAT2 = dyn_cast<ConstantArrayType>(AT2);
5112  if (!CAT2 || CAT1->getSize() != CAT2->getSize())
5113  return UnwrappedAny;
5114  } else if (!isa<IncompleteArrayType>(AT1) ||
5115  !isa<IncompleteArrayType>(AT2)) {
5116  return UnwrappedAny;
5117  }
5118 
5119  T1 = AT1->getElementType();
5120  T2 = AT2->getElementType();
5121  UnwrappedAny = true;
5122  }
5123 }
5124 
5125 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
5126 ///
5127 /// If T1 and T2 are both pointer types of the same kind, or both array types
5128 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is
5129 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
5130 ///
5131 /// This function will typically be called in a loop that successively
5132 /// "unwraps" pointer and pointer-to-member types to compare them at each
5133 /// level.
5134 ///
5135 /// \return \c true if a pointer type was unwrapped, \c false if we reached a
5136 /// pair of types that can't be unwrapped further.
5137 bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2) {
5138  UnwrapSimilarArrayTypes(T1, T2);
5139 
5140  const auto *T1PtrType = T1->getAs<PointerType>();
5141  const auto *T2PtrType = T2->getAs<PointerType>();
5142  if (T1PtrType && T2PtrType) {
5143  T1 = T1PtrType->getPointeeType();
5144  T2 = T2PtrType->getPointeeType();
5145  return true;
5146  }
5147 
5148  const auto *T1MPType = T1->getAs<MemberPointerType>();
5149  const auto *T2MPType = T2->getAs<MemberPointerType>();
5150  if (T1MPType && T2MPType &&
5151  hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
5152  QualType(T2MPType->getClass(), 0))) {
5153  T1 = T1MPType->getPointeeType();
5154  T2 = T2MPType->getPointeeType();
5155  return true;
5156  }
5157 
5158  if (getLangOpts().ObjC) {
5159  const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
5160  const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
5161  if (T1OPType && T2OPType) {
5162  T1 = T1OPType->getPointeeType();
5163  T2 = T2OPType->getPointeeType();
5164  return true;
5165  }
5166  }
5167 
5168  // FIXME: Block pointers, too?
5169 
5170  return false;
5171 }
5172 
5173 bool ASTContext::hasSimilarType(QualType T1, QualType T2) {
5174  while (true) {
5175  Qualifiers Quals;
5176  T1 = getUnqualifiedArrayType(T1, Quals);
5177  T2 = getUnqualifiedArrayType(T2, Quals);
5178  if (hasSameType(T1, T2))
5179  return true;
5180  if (!UnwrapSimilarTypes(T1, T2))
5181  return false;
5182  }
5183 }
5184 
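// [Editor's note] Illustrative sketch, not part of the original source:
// similarity per C++ [conv.qual] ignores cv-qualifiers at every level of a
// matching pointer structure. 'Ctx' is an assumed ASTContext reference.
//
//   // 'const int **' and 'int *const *' are similar: both decompose to
//   // pointer -> pointer -> int once qualifiers are stripped level by level.
//   QualType T1 = Ctx.getPointerType(Ctx.getPointerType(Ctx.IntTy.withConst()));
//   QualType T2 = Ctx.getPointerType(
//       Ctx.getPointerType(Ctx.IntTy).withConst());
//   assert(Ctx.hasSimilarType(T1, T2));
//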
5185 bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {
5186  while (true) {
5187  Qualifiers Quals1, Quals2;
5188  T1 = getUnqualifiedArrayType(T1, Quals1);
5189  T2 = getUnqualifiedArrayType(T2, Quals2);
5190 
5191  Quals1.removeCVRQualifiers();
5192  Quals2.removeCVRQualifiers();
5193  if (Quals1 != Quals2)
5194  return false;
5195 
5196  if (hasSameType(T1, T2))
5197  return true;
5198 
5199  if (!UnwrapSimilarTypes(T1, T2))
5200  return false;
5201  }
5202 }
5203 
5204 DeclarationNameInfo
5205 ASTContext::getNameForTemplate(TemplateName Name,
5206  SourceLocation NameLoc) const {
5207  switch (Name.getKind()) {
5208  case TemplateName::QualifiedTemplate:
5209  case TemplateName::Template:
5210  // DNInfo work in progress: CHECKME: what about DNLoc?
5211  return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
5212  NameLoc);
5213 
5214  case TemplateName::OverloadedTemplate: {
5215  OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
5216  // DNInfo work in progress: CHECKME: what about DNLoc?
5217  return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
5218  }
5219 
5220  case TemplateName::DependentTemplate: {
5221  DependentTemplateName *DTN = Name.getAsDependentTemplateName();
5222  DeclarationName DName;
5223  if (DTN->isIdentifier()) {
5224  DName = DeclarationNames.getIdentifier(DTN->getIdentifier());
5225  return DeclarationNameInfo(DName, NameLoc);
5226  } else {
5227  DName = DeclarationNames.getCXXOperatorName(DTN->getOperator());
5228  // DNInfo work in progress: FIXME: source locations?
5229  DeclarationNameLoc DNLoc;
5230  DNLoc.CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding();
5231  DNLoc.CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding();
5232  return DeclarationNameInfo(DName, NameLoc, DNLoc);
5233  }
5234  }
5235 
5236  case TemplateName::SubstTemplateTemplateParm: {
5237  SubstTemplateTemplateParmStorage *subst
5238  = Name.getAsSubstTemplateTemplateParm();
5239  return DeclarationNameInfo(subst->getParameter()->getDeclName(),
5240  NameLoc);
5241  }
5242 
5243  case TemplateName::SubstTemplateTemplateParmPack: {
5244  SubstTemplateTemplateParmPackStorage *subst
5245  = Name.getAsSubstTemplateTemplateParmPack();
5246  return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
5247  NameLoc);
5248  }
5249  }
5250 
5251  llvm_unreachable("bad template name kind!");
5252 }
5253 
5254 TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
5255  switch (Name.getKind()) {
5256  case TemplateName::QualifiedTemplate:
5257  case TemplateName::Template: {
5258  TemplateDecl *Template = Name.getAsTemplateDecl();
5259  if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template))
5260  Template = getCanonicalTemplateTemplateParmDecl(TTP);
5261 
5262  // The canonical template name is the canonical template declaration.
5263  return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
5264  }
5265 
5266  case TemplateName::OverloadedTemplate:
5267  llvm_unreachable("cannot canonicalize overloaded template");
5268 
5269  case TemplateName::DependentTemplate: {
5270  DependentTemplateName *DTN = Name.getAsDependentTemplateName();
5271  assert(DTN && "Non-dependent template names must refer to template decls.");
5272  return DTN->CanonicalTemplateName;
5273  }
5274 
5275  case TemplateName::SubstTemplateTemplateParm: {
5276  SubstTemplateTemplateParmStorage *subst
5277  = Name.getAsSubstTemplateTemplateParm();
5278  return getCanonicalTemplateName(subst->getReplacement());
5279  }
5280 
5281  case TemplateName::SubstTemplateTemplateParmPack: {
5282  SubstTemplateTemplateParmPackStorage *subst
5283  = Name.getAsSubstTemplateTemplateParmPack();
5284  TemplateTemplateParmDecl *canonParameter
5285  = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack());
5286  TemplateArgument canonArgPack
5287  = getCanonicalTemplateArgument(subst->getArgumentPack());
5288  return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack);
5289  }
5290  }
5291 
5292  llvm_unreachable("bad template name!");
5293 }
5294 
5295 bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) {
5296  X = getCanonicalTemplateName(X);
5297  Y = getCanonicalTemplateName(Y);
5298  return X.getAsVoidPointer() == Y.getAsVoidPointer();
5299 }
5300 
5301 TemplateArgument
5302 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
5303  switch (Arg.getKind()) {
5304  case TemplateArgument::Null:
5305  return Arg;
5306 
5307  case TemplateArgument::Expression:
5308  return Arg;
5309 
5310  case TemplateArgument::Declaration: {
5311  auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
5312  return TemplateArgument(D, Arg.getParamTypeForDecl());
5313  }
5314 
5315  case TemplateArgument::NullPtr:
5316  return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
5317  /*isNullPtr*/true);
5318 
5319  case TemplateArgument::Template:
5320  return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));
5321 
5322  case TemplateArgument::TemplateExpansion:
5323  return TemplateArgument(getCanonicalTemplateName(
5324  Arg.getAsTemplateOrTemplatePattern()),
5325  Arg.getNumTemplateExpansions());
5326 
5327  case TemplateArgument::Integral:
5328  return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));
5329 
5330  case TemplateArgument::Type:
5331  return TemplateArgument(getCanonicalType(Arg.getAsType()));
5332 
5333  case TemplateArgument::Pack: {
5334  if (Arg.pack_size() == 0)
5335  return Arg;
5336 
5337  auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()];
5338  unsigned Idx = 0;
5339  for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
5340  AEnd = Arg.pack_end();
5341  A != AEnd; (void)++A, ++Idx)
5342  CanonArgs[Idx] = getCanonicalTemplateArgument(*A);
5343 
5344  return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size()));
5345  }
5346  }
5347 
5348  // Silence GCC warning
5349  llvm_unreachable("Unhandled template argument kind");
5350 }
5351 
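// [Editor's note] Illustrative sketch, not part of the original source: a Type
// template argument is canonicalized by canonicalizing the type it wraps,
// which is why a specialization written with a typedef of T names the same
// specialization as one written with T itself. 'Ctx' is an assumed ASTContext
// and 'SugaredTy' an assumed typedef'd QualType.
//
//   TemplateArgument Arg(SugaredTy);
//   TemplateArgument Canon = Ctx.getCanonicalTemplateArgument(Arg);
//   assert(Ctx.hasSameType(Canon.getAsType(),
//                          Ctx.getCanonicalType(SugaredTy)));
//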
5352 NestedNameSpecifier *
5353 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
5354  if (!NNS)
5355  return nullptr;
5356 
5357  switch (NNS->getKind()) {
5358  case NestedNameSpecifier::Identifier:
5359  // Canonicalize the prefix but keep the identifier the same.
5360  return NestedNameSpecifier::Create(*this,
5361  getCanonicalNestedNameSpecifier(NNS->getPrefix()),
5362  NNS->getAsIdentifier());
5363 
5364  case NestedNameSpecifier::Namespace:
5365  // A namespace is canonical; build a nested-name-specifier with
5366  // this namespace and no prefix.
5367  return NestedNameSpecifier::Create(*this, nullptr,
5368  NNS->getAsNamespace()->getOriginalNamespace());
5369 
5370  case NestedNameSpecifier::NamespaceAlias:
5371  // A namespace is canonical; build a nested-name-specifier with
5372  // this namespace and no prefix.
5373  return NestedNameSpecifier::Create(*this, nullptr,
5374  NNS->getAsNamespaceAlias()->getNamespace()
5375  ->getOriginalNamespace());
5376 
5377  case NestedNameSpecifier::TypeSpec:
5378  case NestedNameSpecifier::TypeSpecWithTemplate: {
5379  QualType T = getCanonicalType(QualType(NNS->getAsType(), 0));
5380 
5381  // If we have some kind of dependent-named type (e.g., "typename T::type"),
5382  // break it apart into its prefix and identifier, then reconstitute those
5383  // as the canonical nested-name-specifier. This is required to canonicalize
5384  // a dependent nested-name-specifier involving typedefs of dependent-name
5385  // types, e.g.,
5386  // typedef typename T::type T1;
5387  // typedef typename T1::type T2;
5388  if (const auto *DNT = T->getAs<DependentNameType>())
5389  return NestedNameSpecifier::Create(*this, DNT->getQualifier(),
5390  const_cast<IdentifierInfo *>(DNT->getIdentifier()));
5391 
5392  // Otherwise, just canonicalize the type, and force it to be a TypeSpec.
5393  // FIXME: Why are TypeSpec and TypeSpecWithTemplate distinct in the
5394  // first place?
5395  return NestedNameSpecifier::Create(*this, nullptr, false,
5396  const_cast<Type *>(T.getTypePtr()));
5397  }
5398 
5399  case NestedNameSpecifier::Global:
5400  case NestedNameSpecifier::Super:
5401  // The global specifier and __super specifier are canonical and unique.
5402  return NNS;
5403  }
5404 
5405  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
5406 }
5407 
5408 const ArrayType *ASTContext::getAsArrayType(QualType T) const {
5409  // Handle the non-qualified case efficiently.
5410  if (!T.hasLocalQualifiers()) {
5411  // Handle the common positive case fast.
5412  if (const auto *AT = dyn_cast<ArrayType>(T))
5413  return AT;
5414  }
5415 
5416  // Handle the common negative case fast.
5417  if (!isa<ArrayType>(T.getCanonicalType()))
5418  return nullptr;
5419 
5420  // Apply any qualifiers from the array type to the element type. This
5421  // implements C99 6.7.3p8: "If the specification of an array type includes
5422  // any type qualifiers, the element type is so qualified, not the array type."
5423 
5424  // If we get here, we either have type qualifiers on the type, or we have
5425  // sugar such as a typedef in the way. If we have type qualifiers on the type
5426  // we must propagate them down into the element type.
5427 
5428  SplitQualType split = T.getSplitDesugaredType();
5429  Qualifiers qs = split.Quals;
5430 
5431  // If we have a simple case, just return now.
5432  const auto *ATy = dyn_cast<ArrayType>(split.Ty);
5433  if (!ATy || qs.empty())
5434  return ATy;
5435 
5436  // Otherwise, we have an array and we have qualifiers on it. Push the
5437  // qualifiers into the array element type and return a new array type.
5438  QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);
5439 
5440  if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy))
5441  return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
5442  CAT->getSizeModifier(),
5443  CAT->getIndexTypeCVRQualifiers()));
5444  if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy))
5445  return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
5446  IAT->getSizeModifier(),
5447  IAT->getIndexTypeCVRQualifiers()));
5448 
5449  if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy))
5450  return cast<ArrayType>(
5451  getDependentSizedArrayType(NewEltTy,
5452  DSAT->getSizeExpr(),
5453  DSAT->getSizeModifier(),
5454  DSAT->getIndexTypeCVRQualifiers(),
5455  DSAT->getBracketsRange()));
5456 
5457  const auto *VAT = cast<VariableArrayType>(ATy);
5458  return cast<ArrayType>(getVariableArrayType(NewEltTy,
5459  VAT->getSizeExpr(),
5460  VAT->getSizeModifier(),
5461  VAT->getIndexTypeCVRQualifiers(),
5462  VAT->getBracketsRange()));
5463 }
5464 
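// [Editor's note] Illustrative sketch, not part of the original source:
// because getAsArrayType() pushes qualifiers on the array into the element
// type (C99 6.7.3p8), a const-qualified array of int is reported as an array
// of 'const int'. 'Ctx' is an assumed ASTContext and 'ConstArrTy' an assumed
// QualType for a const-qualified 'int[4]'.
//
//   const ArrayType *AT = Ctx.getAsArrayType(ConstArrTy);
//   assert(AT && AT->getElementType().isConstQualified());
//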
5465 QualType ASTContext::getAdjustedParameterType(QualType T) const {
5466  if (T->isArrayType() || T->isFunctionType())
5467  return getDecayedType(T);
5468  return T;
5469 }
5470 
5471 QualType ASTContext::getSignatureParameterType(QualType T) const {
5472  T = getVariableArrayDecayedType(T);
5473  T = getAdjustedParameterType(T);
5474  return T.getUnqualifiedType();
5475 }
5476 
5477 QualType ASTContext::getExceptionObjectType(QualType T) const {
5478  // C++ [except.throw]p3:
5479  // A throw-expression initializes a temporary object, called the exception
5480  // object, the type of which is determined by removing any top-level
5481  // cv-qualifiers from the static type of the operand of throw and adjusting
5482  // the type from "array of T" or "function returning T" to "pointer to T"
5483  // or "pointer to function returning T", [...]
5484  T = getVariableArrayDecayedType(T);
5485  if (T->isArrayType() || T->isFunctionType())
5486  T = getDecayedType(T);
5487  return T.getUnqualifiedType();
5488 }
5489 
5490 /// getArrayDecayedType - Return the properly qualified result of decaying the
5491 /// specified array type to a pointer. This operation is non-trivial when
5492 /// handling typedefs etc. The canonical type of "T" must be an array type,
5493 /// this returns a pointer to a properly qualified element of the array.
5494 ///
5495 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
5496 QualType ASTContext::getArrayDecayedType(QualType Ty) const {
5497  // Get the element type with 'getAsArrayType' so that we don't lose any
5498  // typedefs in the element type of the array. This also handles propagation
5499  // of type qualifiers from the array type into the element type if present
5500  // (C99 6.7.3p8).
5501  const ArrayType *PrettyArrayType = getAsArrayType(Ty);
5502  assert(PrettyArrayType && "Not an array type!");
5503 
5504  QualType PtrTy = getPointerType(PrettyArrayType->getElementType());
5505 
5506  // int x[restrict 4] -> int *restrict
5507  QualType Result = getQualifiedType(PtrTy,
5508  PrettyArrayType->getIndexTypeQualifiers());
5509 
5510  // int x[_Nullable] -> int * _Nullable
5511  if (auto Nullability = Ty->getNullability(*this)) {
5512  Result = const_cast<ASTContext *>(this)->getAttributedType(
5513  AttributedType::getNullabilityAttrKind(*Nullability), Result, Result);
5514  }
5515  return Result;
5516 }
5517 
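// [Editor's note] Illustrative sketch, not part of the original source:
// decaying an array type moves its index-type qualifiers onto the resulting
// pointer, e.g. 'int[restrict 4]' decays to 'int *restrict'. 'Ctx' is an
// assumed ASTContext and 'RestrictArrTy' an assumed QualType for such an
// array.
//
//   QualType Decayed = Ctx.getArrayDecayedType(RestrictArrTy);
//   assert(Decayed->isPointerType() && Decayed.isRestrictQualified());
//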
5518 QualType ASTContext::getBaseElementType(const ArrayType *array) const {
5519  return getBaseElementType(array->getElementType());
5520 }
5521 
5522 QualType ASTContext::getBaseElementType(QualType type) const {
5523  Qualifiers qs;
5524  while (true) {
5525  SplitQualType