clang 9.0.0svn
ASTContext.cpp
1 //===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the ASTContext interface.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "clang/AST/ASTContext.h"
14 #include "CXXABI.h"
15 #include "clang/AST/APValue.h"
18 #include "clang/AST/Attr.h"
19 #include "clang/AST/AttrIterator.h"
20 #include "clang/AST/CharUnits.h"
21 #include "clang/AST/Comment.h"
22 #include "clang/AST/Decl.h"
23 #include "clang/AST/DeclBase.h"
24 #include "clang/AST/DeclCXX.h"
26 #include "clang/AST/DeclObjC.h"
27 #include "clang/AST/DeclOpenMP.h"
28 #include "clang/AST/DeclTemplate.h"
30 #include "clang/AST/Expr.h"
31 #include "clang/AST/ExprCXX.h"
33 #include "clang/AST/Mangle.h"
37 #include "clang/AST/RecordLayout.h"
39 #include "clang/AST/Stmt.h"
40 #include "clang/AST/TemplateBase.h"
41 #include "clang/AST/TemplateName.h"
42 #include "clang/AST/Type.h"
43 #include "clang/AST/TypeLoc.h"
47 #include "clang/Basic/Builtins.h"
50 #include "clang/Basic/FixedPoint.h"
52 #include "clang/Basic/LLVM.h"
54 #include "clang/Basic/Linkage.h"
59 #include "clang/Basic/Specifiers.h"
61 #include "clang/Basic/TargetInfo.h"
62 #include "clang/Basic/XRayLists.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/APSInt.h"
65 #include "llvm/ADT/ArrayRef.h"
66 #include "llvm/ADT/DenseMap.h"
67 #include "llvm/ADT/DenseSet.h"
68 #include "llvm/ADT/FoldingSet.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/PointerUnion.h"
72 #include "llvm/ADT/STLExtras.h"
73 #include "llvm/ADT/SmallPtrSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/StringExtras.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Triple.h"
78 #include "llvm/Support/Capacity.h"
79 #include "llvm/Support/Casting.h"
80 #include "llvm/Support/Compiler.h"
81 #include "llvm/Support/ErrorHandling.h"
82 #include "llvm/Support/MathExtras.h"
83 #include "llvm/Support/raw_ostream.h"
84 #include <algorithm>
85 #include <cassert>
86 #include <cstddef>
87 #include <cstdint>
88 #include <cstdlib>
89 #include <map>
90 #include <memory>
91 #include <string>
92 #include <tuple>
93 #include <utility>
94 
95 using namespace clang;
96 
97 enum FloatingRank {
98  Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank
99 };
100 
101 RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
102  assert(D);
103 
104  // If we already tried to load comments but there are none,
105  // we won't find anything.
106  if (CommentsLoaded && Comments.getComments().empty())
107  return nullptr;
108 
109  // Users cannot attach documentation to implicit declarations.
110  if (D->isImplicit())
111  return nullptr;
112 
113  // Users cannot attach documentation to implicit instantiations.
114  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
115  if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
116  return nullptr;
117  }
118 
119  if (const auto *VD = dyn_cast<VarDecl>(D)) {
120  if (VD->isStaticDataMember() &&
121  VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
122  return nullptr;
123  }
124 
125  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
126  if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
127  return nullptr;
128  }
129 
130  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
131  TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
132  if (TSK == TSK_ImplicitInstantiation ||
133  TSK == TSK_Undeclared)
134  return nullptr;
135  }
136 
137  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
138  if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
139  return nullptr;
140  }
141  if (const auto *TD = dyn_cast<TagDecl>(D)) {
142  // When a tag declaration (but not its definition!) is part of the
143  // decl-specifier-seq of some other declaration, it doesn't get a comment.
144  if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
145  return nullptr;
146  }
147  // TODO: handle comments for function parameters properly.
148  if (isa<ParmVarDecl>(D))
149  return nullptr;
150 
151  // TODO: we could look up template parameter documentation in the template
152  // documentation.
153  if (isa<TemplateTypeParmDecl>(D) ||
154  isa<NonTypeTemplateParmDecl>(D) ||
155  isa<TemplateTemplateParmDecl>(D))
156  return nullptr;
157 
158  // Find declaration location.
159  // For Objective-C declarations we generally don't expect to have multiple
160  // declarators, thus use declaration starting location as the "declaration
161  // location".
162  // For all other declarations multiple declarators are used quite frequently,
163  // so we use the location of the identifier as the "declaration location".
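 // E.g. in 'int *a, b;' both declarators share one starting location, but each
 // identifier has its own location.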
164  SourceLocation DeclLoc;
165  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
166  isa<ObjCPropertyDecl>(D) ||
167  isa<RedeclarableTemplateDecl>(D) ||
168  isa<ClassTemplateSpecializationDecl>(D))
169  DeclLoc = D->getBeginLoc();
170  else {
171  DeclLoc = D->getLocation();
172  if (DeclLoc.isMacroID()) {
173  if (isa<TypedefDecl>(D)) {
174  // If the location of the typedef name is in a macro, it is because it is
175  // being declared via a macro. Try using the declaration's starting location
176  // as the "declaration location".
177  DeclLoc = D->getBeginLoc();
178  } else if (const auto *TD = dyn_cast<TagDecl>(D)) {
179  // If the location of the tag decl is inside a macro, but the spelling of
180  // the tag name comes from a macro argument, it looks like a special
181  // macro like NS_ENUM is being used to define the tag decl. In that
182  // case, adjust the source location to the expansion loc so that we can
183  // attach the comment to the tag decl.
184  if (SourceMgr.isMacroArgExpansion(DeclLoc) &&
185  TD->isCompleteDefinition())
186  DeclLoc = SourceMgr.getExpansionLoc(DeclLoc);
187  }
188  }
189  }
190 
191  // If the declaration doesn't map directly to a location in a file, we
192  // can't find the comment.
193  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
194  return nullptr;
195 
196  if (!CommentsLoaded && ExternalSource) {
197  ExternalSource->ReadComments();
198 
199 #ifndef NDEBUG
200  ArrayRef<RawComment *> RawComments = Comments.getComments();
201  assert(std::is_sorted(RawComments.begin(), RawComments.end(),
202  BeforeThanCompare<RawComment>(SourceMgr)));
203 #endif
204 
205  CommentsLoaded = true;
206  }
207 
208  ArrayRef<RawComment *> RawComments = Comments.getComments();
209  // If there are no comments anywhere, we won't find anything.
210  if (RawComments.empty())
211  return nullptr;
212 
213  // Find the comment that occurs just after this declaration.
214  ArrayRef<RawComment *>::iterator Comment;
215  {
216  // When searching for comments during parsing, the comment we are looking
217  // for is usually among the last two comments we parsed -- check them
218  // first.
219  RawComment CommentAtDeclLoc(
220  SourceMgr, SourceRange(DeclLoc), LangOpts.CommentOpts, false);
221  BeforeThanCompare<RawComment> Compare(SourceMgr);
222  ArrayRef<RawComment *>::iterator MaybeBeforeDecl = RawComments.end() - 1;
223  bool Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
224  if (!Found && RawComments.size() >= 2) {
225  MaybeBeforeDecl--;
226  Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
227  }
228 
229  if (Found) {
230  Comment = MaybeBeforeDecl + 1;
231  assert(Comment == std::lower_bound(RawComments.begin(), RawComments.end(),
232  &CommentAtDeclLoc, Compare));
233  } else {
234  // Slow path.
235  Comment = std::lower_bound(RawComments.begin(), RawComments.end(),
236  &CommentAtDeclLoc, Compare);
237  }
238  }
239 
240  // Decompose the location for the declaration and find the beginning of the
241  // file buffer.
242  std::pair<FileID, unsigned> DeclLocDecomp = SourceMgr.getDecomposedLoc(DeclLoc);
243 
244  // First check whether we have a trailing comment.
245  if (Comment != RawComments.end() &&
246  ((*Comment)->isDocumentation() || LangOpts.CommentOpts.ParseAllComments)
247  && (*Comment)->isTrailingComment() &&
248  (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
249  isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {
250  std::pair<FileID, unsigned> CommentBeginDecomp
251  = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getBegin());
252  // Check that Doxygen trailing comment comes after the declaration, starts
253  // on the same line and in the same file as the declaration.
254  if (DeclLocDecomp.first == CommentBeginDecomp.first &&
255  SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second)
256  == SourceMgr.getLineNumber(CommentBeginDecomp.first,
257  CommentBeginDecomp.second)) {
258  return *Comment;
259  }
260  }
261 
262  // The comment just after the declaration was not a trailing comment.
263  // Let's look at the previous comment.
264  if (Comment == RawComments.begin())
265  return nullptr;
266  --Comment;
267 
268  // Check that we actually have a non-member Doxygen comment.
269  if (!((*Comment)->isDocumentation() ||
270  LangOpts.CommentOpts.ParseAllComments) ||
271  (*Comment)->isTrailingComment())
272  return nullptr;
273 
274  // Decompose the end of the comment.
275  std::pair<FileID, unsigned> CommentEndDecomp
276  = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getEnd());
277 
278  // If the comment and the declaration aren't in the same file, then they
279  // aren't related.
280  if (DeclLocDecomp.first != CommentEndDecomp.first)
281  return nullptr;
282 
283  // Get the corresponding buffer.
284  bool Invalid = false;
285  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
286  &Invalid).data();
287  if (Invalid)
288  return nullptr;
289 
290  // Extract text between the comment and declaration.
291  StringRef Text(Buffer + CommentEndDecomp.second,
292  DeclLocDecomp.second - CommentEndDecomp.second);
293 
294  // There should be no other declarations or preprocessor directives between
295  // comment and declaration.
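 // (A ';' or brace would mean another declaration or scope sits in between,
 // '#' a preprocessor directive, and '@' an Objective-C directive.)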
296  if (Text.find_first_of(";{}#@") != StringRef::npos)
297  return nullptr;
298 
299  return *Comment;
300 }
301 
302 /// If we have a 'templated' declaration for a template, adjust 'D' to
303 /// refer to the actual template.
304 /// If we have an implicit instantiation, adjust 'D' to refer to the template.
305 static const Decl *adjustDeclToTemplate(const Decl *D) {
306  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
307  // Is this function declaration part of a function template?
308  if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
309  return FTD;
310 
311  // Nothing to do if function is not an implicit instantiation.
312  if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
313  return D;
314 
315  // Function is an implicit instantiation of a function template?
316  if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
317  return FTD;
318 
319  // Function is instantiated from a member definition of a class template?
320  if (const FunctionDecl *MemberDecl =
321  FD->getInstantiatedFromMemberFunction())
322  return MemberDecl;
323 
324  return D;
325  }
326  if (const auto *VD = dyn_cast<VarDecl>(D)) {
327  // Static data member is instantiated from a member definition of a class
328  // template?
329  if (VD->isStaticDataMember())
330  if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
331  return MemberDecl;
332 
333  return D;
334  }
335  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
336  // Is this class declaration part of a class template?
337  if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
338  return CTD;
339 
340  // Class is an implicit instantiation of a class template or partial
341  // specialization?
342  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
343  if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
344  return D;
345  llvm::PointerUnion<ClassTemplateDecl *,
346  ClassTemplatePartialSpecializationDecl *>
347  PU = CTSD->getSpecializedTemplateOrPartial();
348  return PU.is<ClassTemplateDecl*>() ?
349  static_cast<const Decl*>(PU.get<ClassTemplateDecl *>()) :
350  static_cast<const Decl*>(
351  PU.get<ClassTemplatePartialSpecializationDecl*>());
352  }
353 
354  // Class is instantiated from a member definition of a class template?
355  if (const MemberSpecializationInfo *Info =
356  CRD->getMemberSpecializationInfo())
357  return Info->getInstantiatedFrom();
358 
359  return D;
360  }
361  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
362  // Enum is instantiated from a member definition of a class template?
363  if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
364  return MemberDecl;
365 
366  return D;
367  }
368  // FIXME: Adjust alias templates?
369  return D;
370 }
371 
372 const RawComment *ASTContext::getRawCommentForAnyRedecl(
373  const Decl *D,
374  const Decl **OriginalDecl) const {
375  D = adjustDeclToTemplate(D);
376 
377  // Check whether we have cached a comment for this declaration already.
378  {
379  llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
380  RedeclComments.find(D);
381  if (Pos != RedeclComments.end()) {
382  const RawCommentAndCacheFlags &Raw = Pos->second;
384  if (OriginalDecl)
385  *OriginalDecl = Raw.getOriginalDecl();
386  return Raw.getRaw();
387  }
388  }
389  }
390 
391  // Search for comments attached to declarations in the redeclaration chain.
392  const RawComment *RC = nullptr;
393  const Decl *OriginalDeclForRC = nullptr;
394  for (auto I : D->redecls()) {
395  llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
396  RedeclComments.find(I);
397  if (Pos != RedeclComments.end()) {
398  const RawCommentAndCacheFlags &Raw = Pos->second;
400  RC = Raw.getRaw();
401  OriginalDeclForRC = Raw.getOriginalDecl();
402  break;
403  }
404  } else {
406  OriginalDeclForRC = I;
408  if (RC) {
409  // Call order swapped to work around ICE in VS2015 RTM (Release Win32)
410  // https://connect.microsoft.com/VisualStudio/feedback/details/1741530
412  Raw.setRaw(RC);
413  } else
415  Raw.setOriginalDecl(I);
416  RedeclComments[I] = Raw;
417  if (RC)
418  break;
419  }
420  }
421 
422  // If we found a comment, it should be a documentation comment.
423  assert(!RC || RC->isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
424 
425  if (OriginalDecl)
426  *OriginalDecl = OriginalDeclForRC;
427 
428  // Update cache for every declaration in the redeclaration chain.
430  Raw.setRaw(RC);
432  Raw.setOriginalDecl(OriginalDeclForRC);
433 
434  for (auto I : D->redecls()) {
437  R = Raw;
438  }
439 
440  return RC;
441 }
442 
443 static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
444  SmallVectorImpl<const NamedDecl *> &Redeclared) {
445  const DeclContext *DC = ObjCMethod->getDeclContext();
446  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
447  const ObjCInterfaceDecl *ID = IMD->getClassInterface();
448  if (!ID)
449  return;
450  // Add redeclared method here.
451  for (const auto *Ext : ID->known_extensions()) {
452  if (ObjCMethodDecl *RedeclaredMethod =
453  Ext->getMethod(ObjCMethod->getSelector(),
454  ObjCMethod->isInstanceMethod()))
455  Redeclared.push_back(RedeclaredMethod);
456  }
457  }
458 }
459 
460 comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
461  const Decl *D) const {
462  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
463  ThisDeclInfo->CommentDecl = D;
464  ThisDeclInfo->IsFilled = false;
465  ThisDeclInfo->fill();
466  ThisDeclInfo->CommentDecl = FC->getDecl();
467  if (!ThisDeclInfo->TemplateParameters)
468  ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
469  comments::FullComment *CFC =
470  new (*this) comments::FullComment(FC->getBlocks(),
471  ThisDeclInfo);
472  return CFC;
473 }
474 
475 comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
476  const RawComment *RC = getRawCommentForDeclNoCache(D);
477  return RC ? RC->parse(*this, nullptr, D) : nullptr;
478 }
479 
480 comments::FullComment *ASTContext::getCommentForDecl(
481  const Decl *D,
482  const Preprocessor *PP) const {
483  if (D->isInvalidDecl())
484  return nullptr;
485  D = adjustDeclToTemplate(D);
486 
487  const Decl *Canonical = D->getCanonicalDecl();
488  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
489  ParsedComments.find(Canonical);
490 
491  if (Pos != ParsedComments.end()) {
492  if (Canonical != D) {
493  comments::FullComment *FC = Pos->second;
495  return CFC;
496  }
497  return Pos->second;
498  }
499 
500  const Decl *OriginalDecl;
501 
502  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
503  if (!RC) {
504  if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
506  const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
507  if (OMD && OMD->isPropertyAccessor())
508  if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
509  if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
510  return cloneFullComment(FC, D);
511  if (OMD)
512  addRedeclaredMethods(OMD, Overridden);
513  getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
514  for (unsigned i = 0, e = Overridden.size(); i < e; i++)
515  if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
516  return cloneFullComment(FC, D);
517  }
518  else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
519  // Attach any tag type's documentation to its typedef if the latter
520  // does not have one of its own.
521  QualType QT = TD->getUnderlyingType();
522  if (const auto *TT = QT->getAs<TagType>())
523  if (const Decl *TD = TT->getDecl())
524  if (comments::FullComment *FC = getCommentForDecl(TD, PP))
525  return cloneFullComment(FC, D);
526  }
527  else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
528  while (IC->getSuperClass()) {
529  IC = IC->getSuperClass();
530  if (comments::FullComment *FC = getCommentForDecl(IC, PP))
531  return cloneFullComment(FC, D);
532  }
533  }
534  else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
535  if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
536  if (comments::FullComment *FC = getCommentForDecl(IC, PP))
537  return cloneFullComment(FC, D);
538  }
539  else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
540  if (!(RD = RD->getDefinition()))
541  return nullptr;
542  // Check non-virtual bases.
543  for (const auto &I : RD->bases()) {
544  if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
545  continue;
546  QualType Ty = I.getType();
547  if (Ty.isNull())
548  continue;
549  if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
550  if (!(NonVirtualBase = NonVirtualBase->getDefinition()))
551  continue;
552 
553  if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
554  return cloneFullComment(FC, D);
555  }
556  }
557  // Check virtual bases.
558  for (const auto &I : RD->vbases()) {
559  if (I.getAccessSpecifier() != AS_public)
560  continue;
561  QualType Ty = I.getType();
562  if (Ty.isNull())
563  continue;
564  if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
565  if (!(VirtualBase = VirtualBase->getDefinition()))
566  continue;
567  if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
568  return cloneFullComment(FC, D);
569  }
570  }
571  }
572  return nullptr;
573  }
574 
575  // If the RawComment was attached to other redeclaration of this Decl, we
576  // should parse the comment in context of that other Decl. This is important
577  // because comments can contain references to parameter names which can be
578  // different across redeclarations.
579  if (D != OriginalDecl)
580  return getCommentForDecl(OriginalDecl, PP);
581 
582  comments::FullComment *FC = RC->parse(*this, PP, D);
583  ParsedComments[Canonical] = FC;
584  return FC;
585 }
586 
587 void
588 ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
589  TemplateTemplateParmDecl *Parm) {
590  ID.AddInteger(Parm->getDepth());
591  ID.AddInteger(Parm->getPosition());
592  ID.AddBoolean(Parm->isParameterPack());
593 
595  ID.AddInteger(Params->size());
597  PEnd = Params->end();
598  P != PEnd; ++P) {
599  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
600  ID.AddInteger(0);
601  ID.AddBoolean(TTP->isParameterPack());
602  continue;
603  }
604 
605  if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
606  ID.AddInteger(1);
607  ID.AddBoolean(NTTP->isParameterPack());
608  ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
609  if (NTTP->isExpandedParameterPack()) {
610  ID.AddBoolean(true);
611  ID.AddInteger(NTTP->getNumExpansionTypes());
612  for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
613  QualType T = NTTP->getExpansionType(I);
614  ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
615  }
616  } else
617  ID.AddBoolean(false);
618  continue;
619  }
620 
621  auto *TTP = cast<TemplateTemplateParmDecl>(*P);
622  ID.AddInteger(2);
623  Profile(ID, TTP);
624  }
625 }
626 
628 ASTContext::getCanonicalTemplateTemplateParmDecl(
629  TemplateTemplateParmDecl *TTP) const {
630  // Check if we already have a canonical template template parameter.
631  llvm::FoldingSetNodeID ID;
632  CanonicalTemplateTemplateParm::Profile(ID, TTP);
633  void *InsertPos = nullptr;
634  CanonicalTemplateTemplateParm *Canonical
635  = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
636  if (Canonical)
637  return Canonical->getParam();
638 
639  // Build a canonical template parameter list.
641  SmallVector<NamedDecl *, 4> CanonParams;
642  CanonParams.reserve(Params->size());
644  PEnd = Params->end();
645  P != PEnd; ++P) {
646  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
647  CanonParams.push_back(
649  SourceLocation(),
650  SourceLocation(),
651  TTP->getDepth(),
652  TTP->getIndex(), nullptr, false,
653  TTP->isParameterPack()));
654  else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
655  QualType T = getCanonicalType(NTTP->getType());
658  if (NTTP->isExpandedParameterPack()) {
659  SmallVector<QualType, 2> ExpandedTypes;
660  SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
661  for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
662  ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
663  ExpandedTInfos.push_back(
664  getTrivialTypeSourceInfo(ExpandedTypes.back()));
665  }
666 
668  SourceLocation(),
669  SourceLocation(),
670  NTTP->getDepth(),
671  NTTP->getPosition(), nullptr,
672  T,
673  TInfo,
674  ExpandedTypes,
675  ExpandedTInfos);
676  } else {
678  SourceLocation(),
679  SourceLocation(),
680  NTTP->getDepth(),
681  NTTP->getPosition(), nullptr,
682  T,
683  NTTP->isParameterPack(),
684  TInfo);
685  }
686  CanonParams.push_back(Param);
687 
688  } else
689  CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
690  cast<TemplateTemplateParmDecl>(*P)));
691  }
692 
693  assert(!TTP->getRequiresClause() &&
694  "Unexpected requires-clause on template template-parameter");
695  Expr *const CanonRequiresClause = nullptr;
696 
697  TemplateTemplateParmDecl *CanonTTP
699  SourceLocation(), TTP->getDepth(),
700  TTP->getPosition(),
701  TTP->isParameterPack(),
702  nullptr,
704  SourceLocation(),
705  CanonParams,
706  SourceLocation(),
707  CanonRequiresClause));
708 
709  // Get the new insert position for the node we care about.
710  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
711  assert(!Canonical && "Shouldn't be in the map!");
712  (void)Canonical;
713 
714  // Create the canonical template template parameter entry.
715  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
716  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
717  return CanonTTP;
718 }
719 
720 CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
721  if (!LangOpts.CPlusPlus) return nullptr;
722 
723  switch (T.getCXXABI().getKind()) {
724  case TargetCXXABI::GenericARM: // Same as Itanium at this level
725  case TargetCXXABI::iOS:
726  case TargetCXXABI::iOS64:
732  return CreateItaniumCXXABI(*this);
734  return CreateMicrosoftCXXABI(*this);
735  }
736  llvm_unreachable("Invalid CXXABI type!");
737 }
738 
739 static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
740  const LangOptions &LOpts) {
741  if (LOpts.FakeAddressSpaceMap) {
742  // The fake address space map must have a distinct entry for each
743  // language-specific address space.
744  static const unsigned FakeAddrSpaceMap[] = {
745  0, // Default
746  1, // opencl_global
747  3, // opencl_local
748  2, // opencl_constant
749  0, // opencl_private
750  4, // opencl_generic
751  5, // cuda_device
752  6, // cuda_constant
753  7 // cuda_shared
754  };
755  return &FakeAddrSpaceMap;
756  } else {
757  return &T.getAddressSpaceMap();
758  }
759 }
760 
762  const LangOptions &LangOpts) {
763  switch (LangOpts.getAddressSpaceMapMangling()) {
765  return TI.useAddressSpaceMapMangling();
767  return true;
769  return false;
770  }
771  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
772 }
773 
775  IdentifierTable &idents, SelectorTable &sels,
776  Builtin::Context &builtins)
777  : FunctionProtoTypes(this_()), TemplateSpecializationTypes(this_()),
778  DependentTemplateSpecializationTypes(this_()),
779  SubstTemplateTemplateParmPacks(this_()), SourceMgr(SM), LangOpts(LOpts),
780  SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFiles, SM)),
781  XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
782  LangOpts.XRayNeverInstrumentFiles,
783  LangOpts.XRayAttrListFiles, SM)),
784  PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
785  BuiltinInfo(builtins), DeclarationNames(*this), Comments(SM),
786  CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
787  CompCategories(this_()), LastSDM(nullptr, 0) {
788  TUDecl = TranslationUnitDecl::Create(*this);
789  TraversalScope = {TUDecl};
790 }
791 
793  // Release the DenseMaps associated with DeclContext objects.
794  // FIXME: Is this the ideal solution?
795  ReleaseDeclContextMaps();
796 
797  // Call all of the deallocation functions on all of their targets.
798  for (auto &Pair : Deallocations)
799  (Pair.first)(Pair.second);
800 
801  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
802  // because they can contain DenseMaps.
803  for (llvm::DenseMap<const ObjCContainerDecl*,
804  const ASTRecordLayout*>::iterator
805  I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
806  // Increment in loop to prevent using deallocated memory.
807  if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
808  R->Destroy(*this);
809 
810  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
811  I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
812  // Increment in loop to prevent using deallocated memory.
813  if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
814  R->Destroy(*this);
815  }
816 
817  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
818  AEnd = DeclAttrs.end();
819  A != AEnd; ++A)
820  A->second->~AttrVec();
821 
822  for (std::pair<const MaterializeTemporaryExpr *, APValue *> &MTVPair :
823  MaterializedTemporaryValues)
824  MTVPair.second->~APValue();
825 
826  for (const auto &Value : ModuleInitializers)
827  Value.second->~PerModuleInitializers();
828 }
829 
831  /// Contains parents of a node.
833 
834  /// Maps from a node to its parents. This is used for nodes that have
835  /// pointer identity only, which are more common and we can save space by
836  /// only storing a unique pointer to them.
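 // Decl and Stmt nodes have stable addresses that can serve as keys; value-like
 // nodes such as TypeLoc or NestedNameSpecifierLoc do not, so those are keyed
 // by full DynTypedNodes in OtherParents below.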
837  using ParentMapPointers = llvm::DenseMap<
838  const void *,
839  llvm::PointerUnion4<const Decl *, const Stmt *,
841 
842  /// Parent map for nodes without pointer identity. We store a full
843  /// DynTypedNode for all keys.
844  using ParentMapOtherNodes = llvm::DenseMap<
846  llvm::PointerUnion4<const Decl *, const Stmt *,
847  ast_type_traits::DynTypedNode *, ParentVector *>>;
848 
849  ParentMapPointers PointerParents;
850  ParentMapOtherNodes OtherParents;
851  class ASTVisitor;
852 
853  static ast_type_traits::DynTypedNode
854  getSingleDynTypedNodeFromParentMap(ParentMapPointers::mapped_type U) {
855  if (const auto *D = U.dyn_cast<const Decl *>())
857  if (const auto *S = U.dyn_cast<const Stmt *>())
859  return *U.get<ast_type_traits::DynTypedNode *>();
860  }
861 
862  template <typename NodeTy, typename MapTy>
863  static ASTContext::DynTypedNodeList getDynNodeFromMap(const NodeTy &Node,
864  const MapTy &Map) {
865  auto I = Map.find(Node);
866  if (I == Map.end()) {
868  }
869  if (const auto *V = I->second.template dyn_cast<ParentVector *>()) {
870  return llvm::makeArrayRef(*V);
871  }
872  return getSingleDynTypedNodeFromParentMap(I->second);
873  }
874 
875 public:
876  ParentMap(ASTContext &Ctx);
878  for (const auto &Entry : PointerParents) {
879  if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
880  delete Entry.second.get<ast_type_traits::DynTypedNode *>();
881  } else if (Entry.second.is<ParentVector *>()) {
882  delete Entry.second.get<ParentVector *>();
883  }
884  }
885  for (const auto &Entry : OtherParents) {
886  if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
887  delete Entry.second.get<ast_type_traits::DynTypedNode *>();
888  } else if (Entry.second.is<ParentVector *>()) {
889  delete Entry.second.get<ParentVector *>();
890  }
891  }
892  }
893 
894  DynTypedNodeList getParents(const ast_type_traits::DynTypedNode &Node) {
895  if (Node.getNodeKind().hasPointerIdentity())
896  return getDynNodeFromMap(Node.getMemoizationData(), PointerParents);
897  return getDynNodeFromMap(Node, OtherParents);
898  }
899 };
900 
901 void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
902  TraversalScope = TopLevelDecls;
903  Parents.reset();
904 }
905 
906 void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
907  Deallocations.push_back({Callback, Data});
908 }
909 
910 void
912  ExternalSource = std::move(Source);
913 }
914 
916  llvm::errs() << "\n*** AST Context Stats:\n";
917  llvm::errs() << " " << Types.size() << " types total.\n";
918 
919  unsigned counts[] = {
920 #define TYPE(Name, Parent) 0,
921 #define ABSTRACT_TYPE(Name, Parent)
922 #include "clang/AST/TypeNodes.def"
923  0 // Extra
924  };
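 // Including TypeNodes.def above expands TYPE(Name, Parent) once per concrete
 // type class, so counts[] gets one zero-initialized slot per class; the empty
 // ABSTRACT_TYPE definition skips the abstract classes.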
925 
926  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
927  Type *T = Types[i];
928  counts[(unsigned)T->getTypeClass()]++;
929  }
930 
931  unsigned Idx = 0;
932  unsigned TotalBytes = 0;
933 #define TYPE(Name, Parent) \
934  if (counts[Idx]) \
935  llvm::errs() << " " << counts[Idx] << " " << #Name \
936  << " types, " << sizeof(Name##Type) << " each " \
937  << "(" << counts[Idx] * sizeof(Name##Type) \
938  << " bytes)\n"; \
939  TotalBytes += counts[Idx] * sizeof(Name##Type); \
940  ++Idx;
941 #define ABSTRACT_TYPE(Name, Parent)
942 #include "clang/AST/TypeNodes.def"
943 
944  llvm::errs() << "Total bytes = " << TotalBytes << "\n";
945 
946  // Implicit special member functions.
947  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
949  << " implicit default constructors created\n";
950  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
952  << " implicit copy constructors created\n";
953  if (getLangOpts().CPlusPlus)
954  llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
956  << " implicit move constructors created\n";
957  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
959  << " implicit copy assignment operators created\n";
960  if (getLangOpts().CPlusPlus)
961  llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
963  << " implicit move assignment operators created\n";
964  llvm::errs() << NumImplicitDestructorsDeclared << "/"
966  << " implicit destructors created\n";
967 
968  if (ExternalSource) {
969  llvm::errs() << "\n";
970  ExternalSource->PrintStats();
971  }
972 
973  BumpAlloc.PrintStats();
974 }
975 
977  bool NotifyListeners) {
978  if (NotifyListeners)
979  if (auto *Listener = getASTMutationListener())
981 
982  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
983 }
984 
986  auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
987  if (It == MergedDefModules.end())
988  return;
989 
990  auto &Merged = It->second;
992  for (Module *&M : Merged)
993  if (!Found.insert(M).second)
994  M = nullptr;
995  Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end());
996 }
997 
998 void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
999  if (LazyInitializers.empty())
1000  return;
1001 
1002  auto *Source = Ctx.getExternalSource();
1003  assert(Source && "lazy initializers but no external source");
1004 
1005  auto LazyInits = std::move(LazyInitializers);
1006  LazyInitializers.clear();
1007 
1008  for (auto ID : LazyInits)
1009  Initializers.push_back(Source->GetExternalDecl(ID));
1010 
1011  assert(LazyInitializers.empty() &&
1012  "GetExternalDecl for lazy module initializer added more inits");
1013 }
1014 
1016  // One special case: if we add a module initializer that imports another
1017  // module, and that module's only initializer is an ImportDecl, simplify.
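 // E.g. adding an import of module A, where A's only initializer is itself an
 // import of module B, records that inner import of B directly.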
1018  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
1019  auto It = ModuleInitializers.find(ID->getImportedModule());
1020 
1021  // Maybe the ImportDecl does nothing at all. (Common case.)
1022  if (It == ModuleInitializers.end())
1023  return;
1024 
1025  // Maybe the ImportDecl only imports another ImportDecl.
1026  auto &Imported = *It->second;
1027  if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
1028  Imported.resolve(*this);
1029  auto *OnlyDecl = Imported.Initializers.front();
1030  if (isa<ImportDecl>(OnlyDecl))
1031  D = OnlyDecl;
1032  }
1033  }
1034 
1035  auto *&Inits = ModuleInitializers[M];
1036  if (!Inits)
1037  Inits = new (*this) PerModuleInitializers;
1038  Inits->Initializers.push_back(D);
1039 }
1040 
1042  auto *&Inits = ModuleInitializers[M];
1043  if (!Inits)
1044  Inits = new (*this) PerModuleInitializers;
1045  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
1046  IDs.begin(), IDs.end());
1047 }
1048 
1050  auto It = ModuleInitializers.find(M);
1051  if (It == ModuleInitializers.end())
1052  return None;
1053 
1054  auto *Inits = It->second;
1055  Inits->resolve(*this);
1056  return Inits->Initializers;
1057 }
1058 
1060  if (!ExternCContext)
1061  ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());
1062 
1063  return ExternCContext;
1064 }
1065 
1068  const IdentifierInfo *II) const {
1069  auto *BuiltinTemplate = BuiltinTemplateDecl::Create(*this, TUDecl, II, BTK);
1070  BuiltinTemplate->setImplicit();
1071  TUDecl->addDecl(BuiltinTemplate);
1072 
1073  return BuiltinTemplate;
1074 }
1075 
1078  if (!MakeIntegerSeqDecl)
1079  MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
1081  return MakeIntegerSeqDecl;
1082 }
1083 
1086  if (!TypePackElementDecl)
1087  TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
1089  return TypePackElementDecl;
1090 }
1091 
1093  RecordDecl::TagKind TK) const {
1094  SourceLocation Loc;
1095  RecordDecl *NewDecl;
1096  if (getLangOpts().CPlusPlus)
1097  NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
1098  Loc, &Idents.get(Name));
1099  else
1100  NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
1101  &Idents.get(Name));
1102  NewDecl->setImplicit();
1103  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
1104  const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
1105  return NewDecl;
1106 }
1107 
1109  StringRef Name) const {
1111  TypedefDecl *NewDecl = TypedefDecl::Create(
1112  const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
1113  SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
1114  NewDecl->setImplicit();
1115  return NewDecl;
1116 }
1117 
1119  if (!Int128Decl)
1120  Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
1121  return Int128Decl;
1122 }
1123 
1125  if (!UInt128Decl)
1126  UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
1127  return UInt128Decl;
1128 }
1129 
1130 void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
1131  auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
1132  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
1133  Types.push_back(Ty);
1134 }
1135 
1137  const TargetInfo *AuxTarget) {
1138  assert((!this->Target || this->Target == &Target) &&
1139  "Incorrect target reinitialization");
1140  assert(VoidTy.isNull() && "Context reinitialized?");
1141 
1142  this->Target = &Target;
1143  this->AuxTarget = AuxTarget;
1144 
1145  ABI.reset(createCXXABI(Target));
1146  AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
1147  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);
1148 
1149  // C99 6.2.5p19.
1150  InitBuiltinType(VoidTy, BuiltinType::Void);
1151 
1152  // C99 6.2.5p2.
1153  InitBuiltinType(BoolTy, BuiltinType::Bool);
1154  // C99 6.2.5p3.
1155  if (LangOpts.CharIsSigned)
1156  InitBuiltinType(CharTy, BuiltinType::Char_S);
1157  else
1158  InitBuiltinType(CharTy, BuiltinType::Char_U);
1159  // C99 6.2.5p4.
1160  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
1161  InitBuiltinType(ShortTy, BuiltinType::Short);
1162  InitBuiltinType(IntTy, BuiltinType::Int);
1163  InitBuiltinType(LongTy, BuiltinType::Long);
1164  InitBuiltinType(LongLongTy, BuiltinType::LongLong);
1165 
1166  // C99 6.2.5p6.
1167  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
1168  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
1169  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
1170  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
1171  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);
1172 
1173  // C99 6.2.5p10.
1174  InitBuiltinType(FloatTy, BuiltinType::Float);
1175  InitBuiltinType(DoubleTy, BuiltinType::Double);
1176  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);
1177 
1178  // GNU extension, __float128 for IEEE quadruple precision
1179  InitBuiltinType(Float128Ty, BuiltinType::Float128);
1180 
1181  // C11 extension ISO/IEC TS 18661-3
1182  InitBuiltinType(Float16Ty, BuiltinType::Float16);
1183 
1184  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
1185  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
1186  InitBuiltinType(AccumTy, BuiltinType::Accum);
1187  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
1188  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
1189  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
1190  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
1191  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
1192  InitBuiltinType(FractTy, BuiltinType::Fract);
1193  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
1194  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
1195  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
1196  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
1197  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
1198  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
1199  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
1200  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
1201  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
1202  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
1203  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
1204  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
1205  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
1206  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
1207  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
1208  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);
1209 
1210  // GNU extension, 128-bit integers.
1211  InitBuiltinType(Int128Ty, BuiltinType::Int128);
1212  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
1213 
1214  // C++ 3.9.1p5
1215  if (TargetInfo::isTypeSigned(Target.getWCharType()))
1216  InitBuiltinType(WCharTy, BuiltinType::WChar_S);
1217  else // -fshort-wchar makes wchar_t be unsigned.
1218  InitBuiltinType(WCharTy, BuiltinType::WChar_U);
1219  if (LangOpts.CPlusPlus && LangOpts.WChar)
1220  WideCharTy = WCharTy;
1221  else {
1222  // C99 (or C++ using -fno-wchar).
1223  WideCharTy = getFromTargetType(Target.getWCharType());
1224  }
1225 
1226  WIntTy = getFromTargetType(Target.getWIntType());
1227 
1228  // C++20 (proposed)
1229  InitBuiltinType(Char8Ty, BuiltinType::Char8);
1230 
1231  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1232  InitBuiltinType(Char16Ty, BuiltinType::Char16);
1233  else // C99
1234  Char16Ty = getFromTargetType(Target.getChar16Type());
1235 
1236  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1237  InitBuiltinType(Char32Ty, BuiltinType::Char32);
1238  else // C99
1239  Char32Ty = getFromTargetType(Target.getChar32Type());
1240 
1241  // Placeholder type for type-dependent expressions whose type is
1242  // completely unknown. No code should ever check a type against
1243  // DependentTy and users should never see it; however, it is here to
1244  // help diagnose failures to properly check for type-dependent
1245  // expressions.
1246  InitBuiltinType(DependentTy, BuiltinType::Dependent);
1247 
1248  // Placeholder type for functions.
1249  InitBuiltinType(OverloadTy, BuiltinType::Overload);
1250 
1251  // Placeholder type for bound members.
1252  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);
1253 
1254  // Placeholder type for pseudo-objects.
1255  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);
1256 
1257  // "any" type; useful for debugger-like clients.
1258  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);
1259 
1260  // Placeholder type for unbridged ARC casts.
1261  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);
1262 
1263  // Placeholder type for builtin functions.
1264  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
1265 
1266  // Placeholder type for OMP array sections.
1267  if (LangOpts.OpenMP)
1268  InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
1269 
1270  // C99 6.2.5p11.
1275 
1276  // Builtin types for 'id', 'Class', and 'SEL'.
1277  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
1278  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
1279  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);
1280 
1281  if (LangOpts.OpenCL) {
1282 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
1283  InitBuiltinType(SingletonId, BuiltinType::Id);
1284 #include "clang/Basic/OpenCLImageTypes.def"
1285 
1286  InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
1287  InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
1288  InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
1289  InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
1290  InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);
1291 
1292 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
1293  InitBuiltinType(Id##Ty, BuiltinType::Id);
1294 #include "clang/Basic/OpenCLExtensionTypes.def"
1295  }
1296 
1297  // Builtin type for __objc_yes and __objc_no
1298  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
1299  SignedCharTy : BoolTy);
1300 
1301  ObjCConstantStringType = QualType();
1302 
1303  ObjCSuperType = QualType();
1304 
1305  // void * type
1306  if (LangOpts.OpenCLVersion >= 200) {
1307  auto Q = VoidTy.getQualifiers();
1311  } else {
1313  }
1314 
1315  // nullptr type (C++0x 2.14.7)
1316  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
1317 
1318  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
1319  InitBuiltinType(HalfTy, BuiltinType::Half);
1320 
1321  // Builtin type used to help define __builtin_va_list.
1322  VaListTagDecl = nullptr;
1323 }
1324 
1326  return SourceMgr.getDiagnostics();
1327 }
1328 
1330  AttrVec *&Result = DeclAttrs[D];
1331  if (!Result) {
1332  void *Mem = Allocate(sizeof(AttrVec));
1333  Result = new (Mem) AttrVec;
1334  }
1335 
1336  return *Result;
1337 }
1338 
1339 /// Erase the attributes corresponding to the given declaration.
1341  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
1342  if (Pos != DeclAttrs.end()) {
1343  Pos->second->~AttrVec();
1344  DeclAttrs.erase(Pos);
1345  }
1346 }
1347 
1348 // FIXME: Remove ?
1351  assert(Var->isStaticDataMember() && "Not a static data member");
1353  .dyn_cast<MemberSpecializationInfo *>();
1354 }
1355 
1358  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
1359  TemplateOrInstantiation.find(Var);
1360  if (Pos == TemplateOrInstantiation.end())
1361  return {};
1362 
1363  return Pos->second;
1364 }
1365 
1366 void
1369  SourceLocation PointOfInstantiation) {
1370  assert(Inst->isStaticDataMember() && "Not a static data member");
1371  assert(Tmpl->isStaticDataMember() && "Not a static data member");
1373  Tmpl, TSK, PointOfInstantiation));
1374 }
1375 
1376 void
1379  assert(!TemplateOrInstantiation[Inst] &&
1380  "Already noted what the variable was instantiated from");
1381  TemplateOrInstantiation[Inst] = TSI;
1382 }
1383 
1385  const FunctionDecl *FD){
1386  assert(FD && "Specialization is 0");
1387  llvm::DenseMap<const FunctionDecl*, FunctionDecl *>::const_iterator Pos
1388  = ClassScopeSpecializationPattern.find(FD);
1389  if (Pos == ClassScopeSpecializationPattern.end())
1390  return nullptr;
1391 
1392  return Pos->second;
1393 }
1394 
1396  FunctionDecl *Pattern) {
1397  assert(FD && "Specialization is 0");
1398  assert(Pattern && "Class scope specialization pattern is 0");
1399  ClassScopeSpecializationPattern[FD] = Pattern;
1400 }
1401 
1402 NamedDecl *
1404  auto Pos = InstantiatedFromUsingDecl.find(UUD);
1405  if (Pos == InstantiatedFromUsingDecl.end())
1406  return nullptr;
1407 
1408  return Pos->second;
1409 }
1410 
1411 void
1413  assert((isa<UsingDecl>(Pattern) ||
1414  isa<UnresolvedUsingValueDecl>(Pattern) ||
1415  isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
1416  "pattern decl is not a using decl");
1417  assert((isa<UsingDecl>(Inst) ||
1418  isa<UnresolvedUsingValueDecl>(Inst) ||
1419  isa<UnresolvedUsingTypenameDecl>(Inst)) &&
1420  "instantiation did not produce a using decl");
1421  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
1422  InstantiatedFromUsingDecl[Inst] = Pattern;
1423 }
1424 
1427  llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
1428  = InstantiatedFromUsingShadowDecl.find(Inst);
1429  if (Pos == InstantiatedFromUsingShadowDecl.end())
1430  return nullptr;
1431 
1432  return Pos->second;
1433 }
1434 
1435 void
1437  UsingShadowDecl *Pattern) {
1438  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
1439  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
1440 }
1441 
1443  llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
1444  = InstantiatedFromUnnamedFieldDecl.find(Field);
1445  if (Pos == InstantiatedFromUnnamedFieldDecl.end())
1446  return nullptr;
1447 
1448  return Pos->second;
1449 }
1450 
1452  FieldDecl *Tmpl) {
1453  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
1454  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
1455  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1456  "Already noted what unnamed field was instantiated from");
1457 
1458  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1459 }
1460 
1463  return overridden_methods(Method).begin();
1464 }
1465 
1468  return overridden_methods(Method).end();
1469 }
1470 
1471 unsigned
1473  auto Range = overridden_methods(Method);
1474  return Range.end() - Range.begin();
1475 }
1476 
1479  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
1480  OverriddenMethods.find(Method->getCanonicalDecl());
1481  if (Pos == OverriddenMethods.end())
1482  return overridden_method_range(nullptr, nullptr);
1483  return overridden_method_range(Pos->second.begin(), Pos->second.end());
1484 }
1485 
1487  const CXXMethodDecl *Overridden) {
1488  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
1489  OverriddenMethods[Method].push_back(Overridden);
1490 }
1491 
1493  const NamedDecl *D,
1494  SmallVectorImpl<const NamedDecl *> &Overridden) const {
1495  assert(D);
1496 
1497  if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
1498  Overridden.append(overridden_methods_begin(CXXMethod),
1499  overridden_methods_end(CXXMethod));
1500  return;
1501  }
1502 
1503  const auto *Method = dyn_cast<ObjCMethodDecl>(D);
1504  if (!Method)
1505  return;
1506 
1508  Method->getOverriddenMethods(OverDecls);
1509  Overridden.append(OverDecls.begin(), OverDecls.end());
1510 }
1511 
1513  assert(!Import->NextLocalImport && "Import declaration already in the chain");
1514  assert(!Import->isFromASTFile() && "Non-local import declaration");
1515  if (!FirstLocalImport) {
1516  FirstLocalImport = Import;
1517  LastLocalImport = Import;
1518  return;
1519  }
1520 
1521  LastLocalImport->NextLocalImport = Import;
1522  LastLocalImport = Import;
1523 }
1524 
1525 //===----------------------------------------------------------------------===//
1526 // Type Sizing and Analysis
1527 //===----------------------------------------------------------------------===//
1528 
1529 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1530 /// scalar floating point type.
1531 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1532  const auto *BT = T->getAs<BuiltinType>();
1533  assert(BT && "Not a floating point type!");
1534  switch (BT->getKind()) {
1535  default: llvm_unreachable("Not a floating point type!");
1536  case BuiltinType::Float16:
1537  case BuiltinType::Half:
1538  return Target->getHalfFormat();
1539  case BuiltinType::Float: return Target->getFloatFormat();
1540  case BuiltinType::Double: return Target->getDoubleFormat();
1541  case BuiltinType::LongDouble: return Target->getLongDoubleFormat();
1542  case BuiltinType::Float128: return Target->getFloat128Format();
1543  }
1544 }
1545 
1546 CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
1547  unsigned Align = Target->getCharWidth();
1548 
1549  bool UseAlignAttrOnly = false;
1550  if (unsigned AlignFromAttr = D->getMaxAlignment()) {
1551  Align = AlignFromAttr;
1552 
1553  // __attribute__((aligned)) can increase or decrease alignment
1554  // *except* on a struct or struct member, where it only increases
1555  // alignment unless 'packed' is also specified.
1556  //
1557  // It is an error for alignas to decrease alignment, so we can
1558  // ignore that possibility; Sema should diagnose it.
1559  if (isa<FieldDecl>(D)) {
1560  UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
1561  cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
1562  } else {
1563  UseAlignAttrOnly = true;
1564  }
1565  }
1566  else if (isa<FieldDecl>(D))
1567  UseAlignAttrOnly =
1568  D->hasAttr<PackedAttr>() ||
1569  cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
1570 
1571  // If we're using the align attribute only, just ignore everything
1572  // else about the declaration and its type.
1573  if (UseAlignAttrOnly) {
1574  // do nothing
1575  } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
1576  QualType T = VD->getType();
1577  if (const auto *RT = T->getAs<ReferenceType>()) {
1578  if (ForAlignof)
1579  T = RT->getPointeeType();
1580  else
1581  T = getPointerType(RT->getPointeeType());
1582  }
1583  QualType BaseT = getBaseElementType(T);
1584  if (T->isFunctionType())
1585  Align = getTypeInfoImpl(T.getTypePtr()).Align;
1586  else if (!BaseT->isIncompleteType()) {
1587  // Adjust alignments of declarations with array type by the
1588  // large-array alignment on the target.
1589  if (const ArrayType *arrayType = getAsArrayType(T)) {
1590  unsigned MinWidth = Target->getLargeArrayMinWidth();
1591  if (!ForAlignof && MinWidth) {
1592  if (isa<VariableArrayType>(arrayType))
1593  Align = std::max(Align, Target->getLargeArrayAlign());
1594  else if (isa<ConstantArrayType>(arrayType) &&
1595  MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
1596  Align = std::max(Align, Target->getLargeArrayAlign());
1597  }
1598  }
1599  Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
1600  if (BaseT.getQualifiers().hasUnaligned())
1601  Align = Target->getCharWidth();
1602  if (const auto *VD = dyn_cast<VarDecl>(D)) {
1603  if (VD->hasGlobalStorage() && !ForAlignof)
1604  Align = std::max(Align, getTargetInfo().getMinGlobalAlign());
1605  }
1606  }
1607 
1608  // Fields can be subject to extra alignment constraints, like if
1609  // the field is packed, the struct is packed, or the struct has a
1610  // max-field-alignment constraint (#pragma pack). So calculate
1611  // the actual alignment of the field within the struct, and then
1612  // (as we're expected to) constrain that by the alignment of the type.
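 // E.g. under '#pragma pack(1)', an int field placed at offset 1 ends up with
 // 1-byte alignment here even though the type's own alignment is larger.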
1613  if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
1614  const RecordDecl *Parent = Field->getParent();
1615  // We can only produce a sensible answer if the record is valid.
1616  if (!Parent->isInvalidDecl()) {
1617  const ASTRecordLayout &Layout = getASTRecordLayout(Parent);
1618 
1619  // Start with the record's overall alignment.
1620  unsigned FieldAlign = toBits(Layout.getAlignment());
1621 
1622  // Use the GCD of that and the offset within the record.
1623  uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
1624  if (Offset > 0) {
1625  // Alignment is always a power of 2, so the GCD will be a power of 2,
1626  // which means we get to do this crazy thing instead of Euclid's.
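 // E.g. for Offset == 12 (binary 1100), Offset & (~Offset + 1) == 4 (binary
 // 0100), the largest power of 2 that divides 12.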
1627  uint64_t LowBitOfOffset = Offset & (~Offset + 1);
1628  if (LowBitOfOffset < FieldAlign)
1629  FieldAlign = static_cast<unsigned>(LowBitOfOffset);
1630  }
1631 
1632  Align = std::min(Align, FieldAlign);
1633  }
1634  }
1635  }
1636 
1637  return toCharUnitsFromBits(Align);
1638 }
1639 
1640 // getTypeInfoDataSizeInChars - Return the size of a type, in
1641 // chars. If the type is a record, its data size is returned. This is
1642 // the size of the memcpy that's performed when assigning this type
1643 // using a trivial copy/move assignment operator.
1644 std::pair<CharUnits, CharUnits>
1646  std::pair<CharUnits, CharUnits> sizeAndAlign = getTypeInfoInChars(T);
1647 
1648  // In C++, objects can sometimes be allocated into the tail padding
1649  // of a base-class subobject. We decide whether that's possible
1650  // during class layout, so here we can just trust the layout results.
1651  if (getLangOpts().CPlusPlus) {
1652  if (const auto *RT = T->getAs<RecordType>()) {
1653  const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
1654  sizeAndAlign.first = layout.getDataSize();
1655  }
1656  }
1657 
1658  return sizeAndAlign;
1659 }
1660 
1661 /// getConstantArrayInfoInChars - Performing the computation in CharUnits
1662 /// instead of in bits prevents overflowing the uint64_t for some large arrays.
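 /// For example, an array of 2^61 chars is 2^64 bits, which overflows a
 /// uint64_t bit count, while its size in CharUnits still fits.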
1663 std::pair<CharUnits, CharUnits>
1665  const ConstantArrayType *CAT) {
1666  std::pair<CharUnits, CharUnits> EltInfo =
1667  Context.getTypeInfoInChars(CAT->getElementType());
1668  uint64_t Size = CAT->getSize().getZExtValue();
1669  assert((Size == 0 || static_cast<uint64_t>(EltInfo.first.getQuantity()) <=
1670  (uint64_t)(-1)/Size) &&
1671  "Overflow in array type char size evaluation");
1672  uint64_t Width = EltInfo.first.getQuantity() * Size;
1673  unsigned Align = EltInfo.second.getQuantity();
1674  if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
1675  Context.getTargetInfo().getPointerWidth(0) == 64)
1676  Width = llvm::alignTo(Width, Align);
1677  return std::make_pair(CharUnits::fromQuantity(Width),
1678  CharUnits::fromQuantity(Align));
1679 }
1680 
1681 std::pair<CharUnits, CharUnits>
1683  if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
1684  return getConstantArrayInfoInChars(*this, CAT);
1685  TypeInfo Info = getTypeInfo(T);
1686  return std::make_pair(toCharUnitsFromBits(Info.Width),
1687  toCharUnitsFromBits(Info.Align));
1688 }
1689 
1690 std::pair<CharUnits, CharUnits>
1692  return getTypeInfoInChars(T.getTypePtr());
1693 }
1694 
1696  return getTypeInfo(T).AlignIsRequired;
1697 }
1698 
1700  return isAlignmentRequired(T.getTypePtr());
1701 }
1702 
1704  // An alignment on a typedef overrides anything else.
1705  if (const auto *TT = T->getAs<TypedefType>())
1706  if (unsigned Align = TT->getDecl()->getMaxAlignment())
1707  return Align;
1708 
1709  // If we have an (array of) complete type, we're done.
1710  T = getBaseElementType(T);
1711  if (!T->isIncompleteType())
1712  return getTypeAlign(T);
1713 
1714  // If we had an array type, its element type might be a typedef
1715  // type with an alignment attribute.
1716  if (const auto *TT = T->getAs<TypedefType>())
1717  if (unsigned Align = TT->getDecl()->getMaxAlignment())
1718  return Align;
1719 
1720  // Otherwise, see if the declaration of the type had an attribute.
1721  if (const auto *TT = T->getAs<TagType>())
1722  return TT->getDecl()->getMaxAlignment();
1723 
1724  return 0;
1725 }
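// Usage sketch (hypothetical declarations): given
//
//   struct __attribute__((aligned(16))) Opaque;   // still incomplete
//   extern struct Opaque table[];
//
// the array's element type is incomplete, so getTypeAlign cannot be used,
// but the aligned attribute on the tag declaration is visible and this
// returns 128 bits instead of 0.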
1726 
1727 TypeInfo ASTContext::getTypeInfo(const Type *T) const {
1728  TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
1729  if (I != MemoizedTypeInfo.end())
1730  return I->second;
1731 
1732  // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
1733  TypeInfo TI = getTypeInfoImpl(T);
1734  MemoizedTypeInfo[T] = TI;
1735  return TI;
1736 }
1737 
1738 /// getTypeInfoImpl - Return the size of the specified type, in bits. This
1739 /// method does not work on incomplete types.
1740 ///
1741 /// FIXME: Pointers into different addr spaces could have different sizes and
1742 /// alignment requirements: getPointerInfo should take an AddrSpace, this
1743 /// should take a QualType, &c.
1744 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
1745  uint64_t Width = 0;
1746  unsigned Align = 8;
1747  bool AlignIsRequired = false;
1748  unsigned AS = 0;
1749  switch (T->getTypeClass()) {
1750 #define TYPE(Class, Base)
1751 #define ABSTRACT_TYPE(Class, Base)
1752 #define NON_CANONICAL_TYPE(Class, Base)
1753 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
1754 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
1755  case Type::Class: \
1756  assert(!T->isDependentType() && "should not see dependent types here"); \
1757  return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
1758 #include "clang/AST/TypeNodes.def"
1759  llvm_unreachable("Should not see dependent types");
1760 
1761  case Type::FunctionNoProto:
1762  case Type::FunctionProto:
1763  // GCC extension: alignof(function) = 32 bits
1764  Width = 0;
1765  Align = 32;
1766  break;
1767 
1768  case Type::IncompleteArray:
1769  case Type::VariableArray:
1770  Width = 0;
1771  Align = getTypeAlign(cast<ArrayType>(T)->getElementType());
1772  break;
1773 
1774  case Type::ConstantArray: {
1775  const auto *CAT = cast<ConstantArrayType>(T);
1776 
1777  TypeInfo EltInfo = getTypeInfo(CAT->getElementType());
1778  uint64_t Size = CAT->getSize().getZExtValue();
1779  assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
1780  "Overflow in array type bit size evaluation");
1781  Width = EltInfo.Width * Size;
1782  Align = EltInfo.Align;
1783  if (!getTargetInfo().getCXXABI().isMicrosoft() ||
1784  getTargetInfo().getPointerWidth(0) == 64)
1785  Width = llvm::alignTo(Width, Align);
1786  break;
1787  }
1788  case Type::ExtVector:
1789  case Type::Vector: {
1790  const auto *VT = cast<VectorType>(T);
1791  TypeInfo EltInfo = getTypeInfo(VT->getElementType());
1792  Width = EltInfo.Width * VT->getNumElements();
1793  Align = Width;
1794  // If the alignment is not a power of 2, round up to the next power of 2.
1795  // This happens for non-power-of-2 length vectors.
1796  if (Align & (Align-1)) {
1797  Align = llvm::NextPowerOf2(Align);
1798  Width = llvm::alignTo(Width, Align);
1799  }
1800  // Adjust the alignment based on the target max.
1801  uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
1802  if (TargetVectorAlign && TargetVectorAlign < Align)
1803  Align = TargetVectorAlign;
1804  break;
1805  }
1806 
1807  case Type::Builtin:
1808  switch (cast<BuiltinType>(T)->getKind()) {
1809  default: llvm_unreachable("Unknown builtin type!");
1810  case BuiltinType::Void:
1811  // GCC extension: alignof(void) = 8 bits.
1812  Width = 0;
1813  Align = 8;
1814  break;
1815  case BuiltinType::Bool:
1816  Width = Target->getBoolWidth();
1817  Align = Target->getBoolAlign();
1818  break;
1819  case BuiltinType::Char_S:
1820  case BuiltinType::Char_U:
1821  case BuiltinType::UChar:
1822  case BuiltinType::SChar:
1823  case BuiltinType::Char8:
1824  Width = Target->getCharWidth();
1825  Align = Target->getCharAlign();
1826  break;
1827  case BuiltinType::WChar_S:
1828  case BuiltinType::WChar_U:
1829  Width = Target->getWCharWidth();
1830  Align = Target->getWCharAlign();
1831  break;
1832  case BuiltinType::Char16:
1833  Width = Target->getChar16Width();
1834  Align = Target->getChar16Align();
1835  break;
1836  case BuiltinType::Char32:
1837  Width = Target->getChar32Width();
1838  Align = Target->getChar32Align();
1839  break;
1840  case BuiltinType::UShort:
1841  case BuiltinType::Short:
1842  Width = Target->getShortWidth();
1843  Align = Target->getShortAlign();
1844  break;
1845  case BuiltinType::UInt:
1846  case BuiltinType::Int:
1847  Width = Target->getIntWidth();
1848  Align = Target->getIntAlign();
1849  break;
1850  case BuiltinType::ULong:
1851  case BuiltinType::Long:
1852  Width = Target->getLongWidth();
1853  Align = Target->getLongAlign();
1854  break;
1855  case BuiltinType::ULongLong:
1856  case BuiltinType::LongLong:
1857  Width = Target->getLongLongWidth();
1858  Align = Target->getLongLongAlign();
1859  break;
1860  case BuiltinType::Int128:
1861  case BuiltinType::UInt128:
1862  Width = 128;
1863  Align = 128; // int128_t is 128-bit aligned on all targets.
1864  break;
1865  case BuiltinType::ShortAccum:
1866  case BuiltinType::UShortAccum:
1867  case BuiltinType::SatShortAccum:
1868  case BuiltinType::SatUShortAccum:
1869  Width = Target->getShortAccumWidth();
1870  Align = Target->getShortAccumAlign();
1871  break;
1872  case BuiltinType::Accum:
1873  case BuiltinType::UAccum:
1874  case BuiltinType::SatAccum:
1875  case BuiltinType::SatUAccum:
1876  Width = Target->getAccumWidth();
1877  Align = Target->getAccumAlign();
1878  break;
1879  case BuiltinType::LongAccum:
1880  case BuiltinType::ULongAccum:
1881  case BuiltinType::SatLongAccum:
1882  case BuiltinType::SatULongAccum:
1883  Width = Target->getLongAccumWidth();
1884  Align = Target->getLongAccumAlign();
1885  break;
1886  case BuiltinType::ShortFract:
1887  case BuiltinType::UShortFract:
1888  case BuiltinType::SatShortFract:
1889  case BuiltinType::SatUShortFract:
1890  Width = Target->getShortFractWidth();
1891  Align = Target->getShortFractAlign();
1892  break;
1893  case BuiltinType::Fract:
1894  case BuiltinType::UFract:
1895  case BuiltinType::SatFract:
1896  case BuiltinType::SatUFract:
1897  Width = Target->getFractWidth();
1898  Align = Target->getFractAlign();
1899  break;
1900  case BuiltinType::LongFract:
1901  case BuiltinType::ULongFract:
1902  case BuiltinType::SatLongFract:
1903  case BuiltinType::SatULongFract:
1904  Width = Target->getLongFractWidth();
1905  Align = Target->getLongFractAlign();
1906  break;
1907  case BuiltinType::Float16:
1908  case BuiltinType::Half:
1909  if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
1910  !getLangOpts().OpenMPIsDevice) {
1911  Width = Target->getHalfWidth();
1912  Align = Target->getHalfAlign();
1913  } else {
1914  assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
1915  "Expected OpenMP device compilation.");
1916  Width = AuxTarget->getHalfWidth();
1917  Align = AuxTarget->getHalfAlign();
1918  }
1919  break;
1920  case BuiltinType::Float:
1921  Width = Target->getFloatWidth();
1922  Align = Target->getFloatAlign();
1923  break;
1924  case BuiltinType::Double:
1925  Width = Target->getDoubleWidth();
1926  Align = Target->getDoubleAlign();
1927  break;
1928  case BuiltinType::LongDouble:
1929  Width = Target->getLongDoubleWidth();
1930  Align = Target->getLongDoubleAlign();
1931  break;
1932  case BuiltinType::Float128:
1933  if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
1934  !getLangOpts().OpenMPIsDevice) {
1935  Width = Target->getFloat128Width();
1936  Align = Target->getFloat128Align();
1937  } else {
1938  assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
1939  "Expected OpenMP device compilation.");
1940  Width = AuxTarget->getFloat128Width();
1941  Align = AuxTarget->getFloat128Align();
1942  }
1943  break;
1944  case BuiltinType::NullPtr:
1945  Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
1946  Align = Target->getPointerAlign(0); // == sizeof(void*)
1947  break;
1948  case BuiltinType::ObjCId:
1949  case BuiltinType::ObjCClass:
1950  case BuiltinType::ObjCSel:
1951  Width = Target->getPointerWidth(0);
1952  Align = Target->getPointerAlign(0);
1953  break;
1954  case BuiltinType::OCLSampler:
1955  case BuiltinType::OCLEvent:
1956  case BuiltinType::OCLClkEvent:
1957  case BuiltinType::OCLQueue:
1958  case BuiltinType::OCLReserveID:
1959 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
1960  case BuiltinType::Id:
1961 #include "clang/Basic/OpenCLImageTypes.def"
1962 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
1963  case BuiltinType::Id:
1964 #include "clang/Basic/OpenCLExtensionTypes.def"
1965  AS = getTargetAddressSpace(
1966  Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)));
1967  Width = Target->getPointerWidth(AS);
1968  Align = Target->getPointerAlign(AS);
1969  break;
1970  }
1971  break;
1972  case Type::ObjCObjectPointer:
1973  Width = Target->getPointerWidth(0);
1974  Align = Target->getPointerAlign(0);
1975  break;
1976  case Type::BlockPointer:
1977  AS = getTargetAddressSpace(cast<BlockPointerType>(T)->getPointeeType());
1978  Width = Target->getPointerWidth(AS);
1979  Align = Target->getPointerAlign(AS);
1980  break;
1981  case Type::LValueReference:
1982  case Type::RValueReference:
1983  // alignof and sizeof should never enter this code path here, so we go
1984  // the pointer route.
1985  AS = getTargetAddressSpace(cast<ReferenceType>(T)->getPointeeType());
1986  Width = Target->getPointerWidth(AS);
1987  Align = Target->getPointerAlign(AS);
1988  break;
1989  case Type::Pointer:
1990  AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType());
1991  Width = Target->getPointerWidth(AS);
1992  Align = Target->getPointerAlign(AS);
1993  break;
1994  case Type::MemberPointer: {
1995  const auto *MPT = cast<MemberPointerType>(T);
1996  CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
1997  Width = MPI.Width;
1998  Align = MPI.Align;
1999  break;
2000  }
2001  case Type::Complex: {
2002  // Complex types have the same alignment as their elements, but twice the
2003  // size.
2004  TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
2005  Width = EltInfo.Width * 2;
2006  Align = EltInfo.Align;
2007  break;
2008  }
2009  case Type::ObjCObject:
2010  return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
2011  case Type::Adjusted:
2012  case Type::Decayed:
2013  return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
2014  case Type::ObjCInterface: {
2015  const auto *ObjCI = cast<ObjCInterfaceType>(T);
2016  const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2017  Width = toBits(Layout.getSize());
2018  Align = toBits(Layout.getAlignment());
2019  break;
2020  }
2021  case Type::Record:
2022  case Type::Enum: {
2023  const auto *TT = cast<TagType>(T);
2024 
2025  if (TT->getDecl()->isInvalidDecl()) {
2026  Width = 8;
2027  Align = 8;
2028  break;
2029  }
2030 
2031  if (const auto *ET = dyn_cast<EnumType>(TT)) {
2032  const EnumDecl *ED = ET->getDecl();
2033  TypeInfo Info =
2034  getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType());
2035  if (unsigned AttrAlign = ED->getMaxAlignment()) {
2036  Info.Align = AttrAlign;
2037  Info.AlignIsRequired = true;
2038  }
2039  return Info;
2040  }
2041 
2042  const auto *RT = cast<RecordType>(TT);
2043  const RecordDecl *RD = RT->getDecl();
2044  const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2045  Width = toBits(Layout.getSize());
2046  Align = toBits(Layout.getAlignment());
2047  AlignIsRequired = RD->hasAttr<AlignedAttr>();
2048  break;
2049  }
2050 
2051  case Type::SubstTemplateTypeParm:
2052  return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
2053  getReplacementType().getTypePtr());
2054 
2055  case Type::Auto:
2056  case Type::DeducedTemplateSpecialization: {
2057  const auto *A = cast<DeducedType>(T);
2058  assert(!A->getDeducedType().isNull() &&
2059  "cannot request the size of an undeduced or dependent auto type");
2060  return getTypeInfo(A->getDeducedType().getTypePtr());
2061  }
2062 
2063  case Type::Paren:
2064  return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
2065 
2066  case Type::ObjCTypeParam:
2067  return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());
2068 
2069  case Type::Typedef: {
2070  const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl();
2071  TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
2072  // If the typedef has an aligned attribute on it, it overrides any computed
2073  // alignment we have. This violates the GCC documentation (which says that
2074  // attribute(aligned) can only round up) but matches its implementation.
2075  if (unsigned AttrAlign = Typedef->getMaxAlignment()) {
2076  Align = AttrAlign;
2077  AlignIsRequired = true;
2078  } else {
2079  Align = Info.Align;
2080  AlignIsRequired = Info.AlignIsRequired;
2081  }
2082  Width = Info.Width;
2083  break;
2084  }
2085 
2086  case Type::Elaborated:
2087  return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
2088 
2089  case Type::Attributed:
2090  return getTypeInfo(
2091  cast<AttributedType>(T)->getEquivalentType().getTypePtr());
2092 
2093  case Type::Atomic: {
2094  // Start with the base type information.
2095  TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
2096  Width = Info.Width;
2097  Align = Info.Align;
2098 
2099  if (!Width) {
2100  // An otherwise zero-sized type should still generate an
2101  // atomic operation.
2102  Width = Target->getCharWidth();
2103  assert(Align);
2104  } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2105  // If the size of the type doesn't exceed the platform's max
2106  // atomic promotion width, make the size and alignment more
2107  // favorable to atomic operations:
2108 
2109  // Round the size up to a power of 2.
2110  if (!llvm::isPowerOf2_64(Width))
2111  Width = llvm::NextPowerOf2(Width);
2112 
2113  // Set the alignment equal to the size.
2114  Align = static_cast<unsigned>(Width);
2115  }
2116  }
2117  break;
2118 
2119  case Type::Pipe:
2120  Width = Target->getPointerWidth(getTargetAddressSpace(LangAS::opencl_global));
2121  Align = Target->getPointerAlign(getTargetAddressSpace(LangAS::opencl_global));
2122  break;
2123  }
2124 
2125  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2126  return TypeInfo(Width, Align, AlignIsRequired);
2127 }
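// Worked example for the Vector case above (hypothetical type, assuming
// 32-bit float elements and no target cap on vector alignment): a
// 'float __attribute__((ext_vector_type(3)))' starts with Width = 3 * 32
// = 96 bits; 96 is not a power of two, so Align is rounded up to 128 and
// Width is padded to 128 as well, i.e. the three-element vector is laid
// out like a four-element one.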
2128 
2129 unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2130  UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
2131  if (I != MemoizedUnadjustedAlign.end())
2132  return I->second;
2133 
2134  unsigned UnadjustedAlign;
2135  if (const auto *RT = T->getAs<RecordType>()) {
2136  const RecordDecl *RD = RT->getDecl();
2137  const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2138  UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2139  } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
2140  const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2141  UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2142  } else {
2143  UnadjustedAlign = getTypeAlign(T);
2144  }
2145 
2146  MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2147  return UnadjustedAlign;
2148 }
2149 
2150 unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
2151  unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
2152  // Target ppc64 with QPX: simd default alignment for pointer to double is 32.
2153  if ((getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64 ||
2154  getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64le) &&
2155  getTargetInfo().getABI() == "elfv1-qpx" &&
2156  T->isSpecificBuiltinType(BuiltinType::Double))
2157  SimdAlign = 256;
2158  return SimdAlign;
2159 }
2160 
2161 /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2162 CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
2163  return CharUnits::fromQuantity(BitSize / getCharWidth());
2164 }
2165 
2166 /// toBits - Convert a size in characters to a size in bits.
2167 int64_t ASTContext::toBits(CharUnits CharSize) const {
2168  return CharSize.getQuantity() * getCharWidth();
2169 }
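// For example, on a target where getCharWidth() returns 8,
// toBits(CharUnits::fromQuantity(4)) yields 32 and toCharUnitsFromBits(32)
// yields 4 characters; the two helpers are inverses for sizes that are a
// whole number of characters.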
2170 
2171 /// getTypeSizeInChars - Return the size of the specified type, in characters.
2172 /// This method does not work on incomplete types.
2173 CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
2174  return getTypeInfoInChars(T).first;
2175 }
2176 CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
2177  return getTypeInfoInChars(T).first;
2178 }
2179 
2180 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2181 /// characters. This method does not work on incomplete types.
2182 CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
2183  return toCharUnitsFromBits(getTypeAlign(T));
2184 }
2185 CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
2186  return toCharUnitsFromBits(getTypeAlign(T));
2187 }
2188 
2189 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
2190 /// type, in characters, before alignment adjustments. This method does
2191 /// not work on incomplete types.
2192 CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
2193  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
2194 }
2195 CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
2196  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
2197 }
2198 
2199 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified
2200 /// type for the current target in bits. This can be different than the ABI
2201 /// alignment in cases where it is beneficial for performance to overalign
2202 /// a data type.
2203 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
2204  TypeInfo TI = getTypeInfo(T);
2205  unsigned ABIAlign = TI.Align;
2206 
2207  T = T->getBaseElementTypeUnsafe();
2208 
2209  // The preferred alignment of member pointers is that of a pointer.
2210  if (T->isMemberPointerType())
2211  return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
2212 
2213  if (!Target->allowsLargerPreferedTypeAlignment())
2214  return ABIAlign;
2215 
2216  // Double and long long should be naturally aligned if possible.
2217  if (const auto *CT = T->getAs<ComplexType>())
2218  T = CT->getElementType().getTypePtr();
2219  if (const auto *ET = T->getAs<EnumType>())
2220  T = ET->getDecl()->getIntegerType().getTypePtr();
2221  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
2222  T->isSpecificBuiltinType(BuiltinType::LongLong) ||
2223  T->isSpecificBuiltinType(BuiltinType::ULongLong))
2224  // Don't increase the alignment if an alignment attribute was specified on a
2225  // typedef declaration.
2226  if (!TI.AlignIsRequired)
2227  return std::max(ABIAlign, (unsigned)getTypeSize(T));
2228 
2229  return ABIAlign;
2230 }
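// Example of the effect (assuming a typical i386 psABI, where 'double'
// has a 32-bit ABI alignment): getTypeAlign(double) is 32, but the type
// is naturally 64 bits wide and carries no alignment attribute, so this
// returns std::max(32u, 64u) == 64, allowing globals and locals to be
// overaligned for better codegen.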
2231 
2232 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2233 /// for __attribute__((aligned)) on this target, to be used if no alignment
2234 /// value is specified.
2235 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
2236  return getTargetInfo().getDefaultAlignForAttributeAligned();
2237 }
2238 
2239 /// getAlignOfGlobalVar - Return the alignment in bits that should be given
2240 /// to a global variable of the specified type.
2241 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
2242  return std::max(getTypeAlign(T), getTargetInfo().getMinGlobalAlign());
2243 }
2244 
2245 /// getAlignOfGlobalVarInChars - Return the alignment in characters that
2246 /// should be given to a global variable of the specified type.
2247 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const {
2248  return toCharUnitsFromBits(getAlignOfGlobalVar(T));
2249 }
2250 
2253  const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
2254  while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2255  Offset += Layout->getBaseClassOffset(Base);
2256  Layout = &getASTRecordLayout(Base);
2257  }
2258  return Offset;
2259 }
2260 
2261 /// DeepCollectObjCIvars -
2262 /// This routine first collects all declared, but not synthesized, ivars in
2263 /// the super class and then collects all ivars, including those
2264 /// synthesized, for the current class. It is used when implementing the
2265 /// current class, where all ivars, declared and synthesized, must be known.
2266 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
2267  bool leafClass,
2268  SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
2269  if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2270  DeepCollectObjCIvars(SuperClass, false, Ivars);
2271  if (!leafClass) {
2272  for (const auto *I : OI->ivars())
2273  Ivars.push_back(I);
2274  } else {
2275  auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2276  for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2277  Iv= Iv->getNextIvar())
2278  Ivars.push_back(Iv);
2279  }
2280 }
2281 
2282 /// CollectInheritedProtocols - Collect all protocols in the current class
2283 /// and those inherited by it.
2284 void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
2285  llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
2286  if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
2287  // We can use protocol_iterator here instead of
2288  // all_referenced_protocol_iterator since we are walking all categories.
2289  for (auto *Proto : OI->all_referenced_protocols()) {
2290  CollectInheritedProtocols(Proto, Protocols);
2291  }
2292 
2293  // Categories of this Interface.
2294  for (const auto *Cat : OI->visible_categories())
2295  CollectInheritedProtocols(Cat, Protocols);
2296 
2297  if (ObjCInterfaceDecl *SD = OI->getSuperClass())
2298  while (SD) {
2299  CollectInheritedProtocols(SD, Protocols);
2300  SD = SD->getSuperClass();
2301  }
2302  } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
2303  for (auto *Proto : OC->protocols()) {
2304  CollectInheritedProtocols(Proto, Protocols);
2305  }
2306  } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
2307  // Insert the protocol.
2308  if (!Protocols.insert(
2309  const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
2310  return;
2311 
2312  for (auto *Proto : OP->protocols())
2313  CollectInheritedProtocols(Proto, Protocols);
2314  }
2315 }
2316 
2317 static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
2318  const RecordDecl *RD) {
2319  assert(RD->isUnion() && "Must be union type");
2320  CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());
2321 
2322  for (const auto *Field : RD->fields()) {
2323  if (!Context.hasUniqueObjectRepresentations(Field->getType()))
2324  return false;
2325  CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType());
2326  if (FieldSize != UnionSize)
2327  return false;
2328  }
2329  return !RD->field_empty();
2330 }
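// Illustrative sketch (hypothetical unions, typical 64-bit layout): for
//
//   union Mixed { int i; char c; };       // sizeof == 4
//   union Same  { int i; unsigned u; };   // sizeof == 4
//
// 'Mixed' fails the check because the char member covers only one of the
// union's four bytes, while 'Same' passes: every member spans the whole
// object and each member type has unique object representations.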
2331 
2332 static bool isStructEmpty(QualType Ty) {
2333  const RecordDecl *RD = Ty->castAs<RecordType>()->getDecl();
2334 
2335  if (!RD->field_empty())
2336  return false;
2337 
2338  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD))
2339  return ClassDecl->isEmpty();
2340 
2341  return true;
2342 }
2343 
2344 static llvm::Optional<int64_t>
2345 structHasUniqueObjectRepresentations(const ASTContext &Context,
2346  const RecordDecl *RD) {
2347  assert(!RD->isUnion() && "Must be struct/class type");
2348  const auto &Layout = Context.getASTRecordLayout(RD);
2349 
2350  int64_t CurOffsetInBits = 0;
2351  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) {
2352  if (ClassDecl->isDynamicClass())
2353  return llvm::None;
2354 
2355  SmallVector<std::pair<QualType, int64_t>, 4> Bases;
2356  for (const auto Base : ClassDecl->bases()) {
2357  // Empty types can be inherited from, and non-empty types can potentially
2358  // have tail padding, so just make sure there isn't an error.
2359  if (!isStructEmpty(Base.getType())) {
2360  Optional<int64_t> Size = structHasUniqueObjectRepresentations(
2361  Context, Base.getType()->getAs<RecordType>()->getDecl());
2362  if (!Size)
2363  return llvm::None;
2364  Bases.emplace_back(Base.getType(), Size.getValue());
2365  }
2366  }
2367 
2368  llvm::sort(Bases, [&](const std::pair<QualType, int64_t> &L,
2369  const std::pair<QualType, int64_t> &R) {
2370  return Layout.getBaseClassOffset(L.first->getAsCXXRecordDecl()) <
2371  Layout.getBaseClassOffset(R.first->getAsCXXRecordDecl());
2372  });
2373 
2374  for (const auto Base : Bases) {
2375  int64_t BaseOffset = Context.toBits(
2376  Layout.getBaseClassOffset(Base.first->getAsCXXRecordDecl()));
2377  int64_t BaseSize = Base.second;
2378  if (BaseOffset != CurOffsetInBits)
2379  return llvm::None;
2380  CurOffsetInBits = BaseOffset + BaseSize;
2381  }
2382  }
2383 
2384  for (const auto *Field : RD->fields()) {
2385  if (!Field->getType()->isReferenceType() &&
2386  !Context.hasUniqueObjectRepresentations(Field->getType()))
2387  return llvm::None;
2388 
2389  int64_t FieldSizeInBits =
2390  Context.toBits(Context.getTypeSizeInChars(Field->getType()));
2391  if (Field->isBitField()) {
2392  int64_t BitfieldSize = Field->getBitWidthValue(Context);
2393 
2394  if (BitfieldSize > FieldSizeInBits)
2395  return llvm::None;
2396  FieldSizeInBits = BitfieldSize;
2397  }
2398 
2399  int64_t FieldOffsetInBits = Context.getFieldOffset(Field);
2400 
2401  if (FieldOffsetInBits != CurOffsetInBits)
2402  return llvm::None;
2403 
2404  CurOffsetInBits = FieldSizeInBits + FieldOffsetInBits;
2405  }
2406 
2407  return CurOffsetInBits;
2408 }
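// Illustrative cases for the running-offset check above (hypothetical
// types, typical 64-bit layout):
//
//   struct Dense  { int a; int b; };    // fields cover all 64 bits
//   struct Sparse { char c; int i; };   // 24 padding bits after 'c'
//
// For 'Dense' the offsets line up and 64 is returned; for 'Sparse' the
// second field starts at bit 32 while the running offset is only 8, so
// llvm::None is returned.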
2409 
2410 bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const {
2411  // C++17 [meta.unary.prop]:
2412  // The predicate condition for a template specialization
2413  // has_unique_object_representations<T> shall be
2414  // satisfied if and only if:
2415  // (9.1) - T is trivially copyable, and
2416  // (9.2) - any two objects of type T with the same value have the same
2417  // object representation, where two objects
2418  // of array or non-union class type are considered to have the same value
2419  // if their respective sequences of
2420  // direct subobjects have the same values, and two objects of union type
2421  // are considered to have the same
2422  // value if they have the same active member and the corresponding members
2423  // have the same value.
2424  // The set of scalar types for which this condition holds is
2425  // implementation-defined. [ Note: If a type has padding
2426  // bits, the condition does not hold; otherwise, the condition holds true
2427  // for unsigned integral types. -- end note ]
2428  assert(!Ty.isNull() && "Null QualType sent to unique object rep check");
2429 
2430  // Arrays are unique only if their element type is unique.
2431  if (Ty->isArrayType())
2432  return hasUniqueObjectRepresentations(getBaseElementType(Ty));
2433 
2434  // (9.1) - T is trivially copyable...
2435  if (!Ty.isTriviallyCopyableType(*this))
2436  return false;
2437 
2438  // All integrals and enums are unique.
2439  if (Ty->isIntegralOrEnumerationType())
2440  return true;
2441 
2442  // All other pointers are unique.
2443  if (Ty->isPointerType())
2444  return true;
2445 
2446  if (Ty->isMemberPointerType()) {
2447  const auto *MPT = Ty->getAs<MemberPointerType>();
2448  return !ABI->getMemberPointerInfo(MPT).HasPadding;
2449  }
2450 
2451  if (Ty->isRecordType()) {
2452  const RecordDecl *Record = Ty->getAs<RecordType>()->getDecl();
2453 
2454  if (Record->isInvalidDecl())
2455  return false;
2456 
2457  if (Record->isUnion())
2458  return unionHasUniqueObjectRepresentations(*this, Record);
2459 
2460  Optional<int64_t> StructSize =
2461  structHasUniqueObjectRepresentations(*this, Record);
2462 
2463  return StructSize &&
2464  StructSize.getValue() == static_cast<int64_t>(getTypeSize(Ty));
2465  }
2466 
2467  // FIXME: More cases to handle here (list by rsmith):
2468  // vectors (careful about, eg, vector of 3 foo)
2469  // _Complex int and friends
2470  // _Atomic T
2471  // Obj-C block pointers
2472  // Obj-C object pointers
2473  // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
2474  // clk_event_t, queue_t, reserve_id_t)
2475  // There're also Obj-C class types and the Obj-C selector type, but I think it
2476  // makes sense for those to return false here.
2477 
2478  return false;
2479 }
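// This is what the C++17 __has_unique_object_representations trait
// ultimately consults; a rough usage sketch (hypothetical types, typical
// 64-bit layout):
//
//   static_assert(__has_unique_object_representations(unsigned));
//   struct Padded { char c; int i; };
//   static_assert(!__has_unique_object_representations(Padded));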
2480 
2482  unsigned count = 0;
2483  // Count ivars declared in class extension.
2484  for (const auto *Ext : OI->known_extensions())
2485  count += Ext->ivar_size();
2486 
2487  // Count ivar defined in this class's implementation. This
2488  // includes synthesized ivars.
2489  if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
2490  count += ImplDecl->ivar_size();
2491 
2492  return count;
2493 }
2494 
2496  if (!E)
2497  return false;
2498 
2499  // nullptr_t is always treated as null.
2500  if (E->getType()->isNullPtrType()) return true;
2501 
2502  if (E->getType()->isAnyPointerType() &&
2505  return true;
2506 
2507  // Unfortunately, __null has type 'int'.
2508  if (isa<GNUNullExpr>(E)) return true;
2509 
2510  return false;
2511 }
2512 
2513 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none
2514 /// exists.
2516  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2517  I = ObjCImpls.find(D);
2518  if (I != ObjCImpls.end())
2519  return cast<ObjCImplementationDecl>(I->second);
2520  return nullptr;
2521 }
2522 
2523 /// Get the implementation of ObjCCategoryDecl, or nullptr if none
2524 /// exists.
2526  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2527  I = ObjCImpls.find(D);
2528  if (I != ObjCImpls.end())
2529  return cast<ObjCCategoryImplDecl>(I->second);
2530  return nullptr;
2531 }
2532 
2533 /// Set the implementation of ObjCInterfaceDecl.
2535  ObjCImplementationDecl *ImplD) {
2536  assert(IFaceD && ImplD && "Passed null params");
2537  ObjCImpls[IFaceD] = ImplD;
2538 }
2539 
2540 /// Set the implementation of ObjCCategoryDecl.
2542  ObjCCategoryImplDecl *ImplD) {
2543  assert(CatD && ImplD && "Passed null params");
2544  ObjCImpls[CatD] = ImplD;
2545 }
2546 
2547 const ObjCMethodDecl *
2549  return ObjCMethodRedecls.lookup(MD);
2550 }
2551 
2553  const ObjCMethodDecl *Redecl) {
2554  assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
2555  ObjCMethodRedecls[MD] = Redecl;
2556 }
2557 
2559  const NamedDecl *ND) const {
2560  if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
2561  return ID;
2562  if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
2563  return CD->getClassInterface();
2564  if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
2565  return IMD->getClassInterface();
2566 
2567  return nullptr;
2568 }
2569 
2570 /// Get the copy initialization expression of VarDecl, or nullptr if
2571 /// none exists.
2574  assert(VD && "Passed null params");
2575  assert(VD->hasAttr<BlocksAttr>() &&
2576  "getBlockVarCopyInits - not __block var");
2577  auto I = BlockVarCopyInits.find(VD);
2578  if (I != BlockVarCopyInits.end())
2579  return I->second;
2580  return {nullptr, false};
2581 }
2582 
2583 /// Set the copy initialization expression of a block var decl.
2585  bool CanThrow) {
2586  assert(VD && CopyExpr && "Passed null params");
2587  assert(VD->hasAttr<BlocksAttr>() &&
2588  "setBlockVarCopyInits - not __block var");
2589  BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
2590 }
2591 
2593  unsigned DataSize) const {
2594  if (!DataSize)
2595  DataSize = TypeLoc::getFullDataSizeForType(T);
2596  else
2597  assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
2598  "incorrect data size provided to CreateTypeSourceInfo!");
2599 
2600  auto *TInfo =
2601  (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
2602  new (TInfo) TypeSourceInfo(T);
2603  return TInfo;
2604 }
2605 
2607  SourceLocation L) const {
2609  DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
2610  return DI;
2611 }
2612 
2613 const ASTRecordLayout &
2615  return getObjCLayout(D, nullptr);
2616 }
2617 
2618 const ASTRecordLayout &
2620  const ObjCImplementationDecl *D) const {
2621  return getObjCLayout(D->getClassInterface(), D);
2622 }
2623 
2624 //===----------------------------------------------------------------------===//
2625 // Type creation/memoization methods
2626 //===----------------------------------------------------------------------===//
2627 
2628 QualType
2629 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
2630  unsigned fastQuals = quals.getFastQualifiers();
2631  quals.removeFastQualifiers();
2632 
2633  // Check if we've already instantiated this type.
2634  llvm::FoldingSetNodeID ID;
2635  ExtQuals::Profile(ID, baseType, quals);
2636  void *insertPos = nullptr;
2637  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
2638  assert(eq->getQualifiers() == quals);
2639  return QualType(eq, fastQuals);
2640  }
2641 
2642  // If the base type is not canonical, make the appropriate canonical type.
2643  QualType canon;
2644  if (!baseType->isCanonicalUnqualified()) {
2645  SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
2646  canonSplit.Quals.addConsistentQualifiers(quals);
2647  canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);
2648 
2649  // Re-find the insert position.
2650  (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
2651  }
2652 
2653  auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
2654  ExtQualNodes.InsertNode(eq, insertPos);
2655  return QualType(eq, fastQuals);
2656 }
2657 
2659  LangAS AddressSpace) const {
2660  QualType CanT = getCanonicalType(T);
2661  if (CanT.getAddressSpace() == AddressSpace)
2662  return T;
2663 
2664  // If we are composing extended qualifiers together, merge together
2665  // into one ExtQuals node.
2666  QualifierCollector Quals;
2667  const Type *TypeNode = Quals.strip(T);
2668 
2669  // If this type already has an address space specified, it cannot get
2670  // another one.
2671  assert(!Quals.hasAddressSpace() &&
2672  "Type cannot be in multiple addr spaces!");
2673  Quals.addAddressSpace(AddressSpace);
2674 
2675  return getExtQualType(TypeNode, Quals);
2676 }
2677 
2679  // If we are composing extended qualifiers together, merge together
2680  // into one ExtQuals node.
2681  QualifierCollector Quals;
2682  const Type *TypeNode = Quals.strip(T);
2683 
2684  // If the qualifier doesn't have an address space just return it.
2685  if (!Quals.hasAddressSpace())
2686  return T;
2687 
2688  Quals.removeAddressSpace();
2689 
2690  // Removal of the address space can mean there are no longer any
2691  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
2692  // or required.
2693  if (Quals.hasNonFastQualifiers())
2694  return getExtQualType(TypeNode, Quals);
2695  else
2696  return QualType(TypeNode, Quals.getFastQualifiers());
2697 }
2698 
2700  Qualifiers::GC GCAttr) const {
2701  QualType CanT = getCanonicalType(T);
2702  if (CanT.getObjCGCAttr() == GCAttr)
2703  return T;
2704 
2705  if (const auto *ptr = T->getAs<PointerType>()) {
2706  QualType Pointee = ptr->getPointeeType();
2707  if (Pointee->isAnyPointerType()) {
2708  QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
2709  return getPointerType(ResultType);
2710  }
2711  }
2712 
2713  // If we are composing extended qualifiers together, merge together
2714  // into one ExtQuals node.
2715  QualifierCollector Quals;
2716  const Type *TypeNode = Quals.strip(T);
2717 
2718  // If this type already has an ObjCGC specified, it cannot get
2719  // another one.
2720  assert(!Quals.hasObjCGCAttr() &&
2721  "Type cannot have multiple ObjCGCs!");
2722  Quals.addObjCGCAttr(GCAttr);
2723 
2724  return getExtQualType(TypeNode, Quals);
2725 }
2726 
2728  FunctionType::ExtInfo Info) {
2729  if (T->getExtInfo() == Info)
2730  return T;
2731 
2732  QualType Result;
2733  if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
2734  Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
2735  } else {
2736  const auto *FPT = cast<FunctionProtoType>(T);
2737  FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
2738  EPI.ExtInfo = Info;
2739  Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
2740  }
2741 
2742  return cast<FunctionType>(Result.getTypePtr());
2743 }
2744 
2746  QualType ResultType) {
2747  FD = FD->getMostRecentDecl();
2748  while (true) {
2749  const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
2750  FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
2751  FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
2752  if (FunctionDecl *Next = FD->getPreviousDecl())
2753  FD = Next;
2754  else
2755  break;
2756  }
2758  L->DeducedReturnType(FD, ResultType);
2759 }
2760 
2761 /// Get a function type and produce the equivalent function type with the
2762 /// specified exception specification. Type sugar that can be present on a
2763 /// declaration of a function with an exception specification is permitted
2764 /// and preserved. Other type sugar (for instance, typedefs) is not.
2767  // Might have some parens.
2768  if (const auto *PT = dyn_cast<ParenType>(Orig))
2769  return getParenType(
2770  getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI));
2771 
2772  // Might have a calling-convention attribute.
2773  if (const auto *AT = dyn_cast<AttributedType>(Orig))
2774  return getAttributedType(
2775  AT->getAttrKind(),
2776  getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI),
2777  getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI));
2778 
2779  // Anything else must be a function type. Rebuild it with the new exception
2780  // specification.
2781  const auto *Proto = Orig->getAs<FunctionProtoType>();
2782  return getFunctionType(
2783  Proto->getReturnType(), Proto->getParamTypes(),
2784  Proto->getExtProtoInfo().withExceptionSpec(ESI));
2785 }
2786 
2788  QualType U) {
2789  return hasSameType(T, U) ||
2790  (getLangOpts().CPlusPlus17 &&
2793 }
2794 
2797  bool AsWritten) {
2798  // Update the type.
2799  QualType Updated =
2801  FD->setType(Updated);
2802 
2803  if (!AsWritten)
2804  return;
2805 
2806  // Update the type in the type source information too.
2807  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
2808  // If the type and the type-as-written differ, we may need to update
2809  // the type-as-written too.
2810  if (TSInfo->getType() != FD->getType())
2811  Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);
2812 
2813  // FIXME: When we get proper type location information for exceptions,
2814  // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
2815  // up the TypeSourceInfo.
2816  assert(TypeLoc::getFullDataSizeForType(Updated) ==
2817  TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
2818  "TypeLoc size mismatch from updating exception specification");
2819  TSInfo->overrideType(Updated);
2820  }
2821 }
2822 
2823 /// getComplexType - Return the uniqued reference to the type for a complex
2824 /// number with the specified element type.
2826  // Unique pointers, to guarantee there is only one pointer of a particular
2827  // structure.
2828  llvm::FoldingSetNodeID ID;
2829  ComplexType::Profile(ID, T);
2830 
2831  void *InsertPos = nullptr;
2832  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
2833  return QualType(CT, 0);
2834 
2835  // If the pointee type isn't canonical, this won't be a canonical type either,
2836  // so fill in the canonical type field.
2837  QualType Canonical;
2838  if (!T.isCanonical()) {
2839  Canonical = getComplexType(getCanonicalType(T));
2840 
2841  // Get the new insert position for the node we care about.
2842  ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
2843  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2844  }
2845  auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
2846  Types.push_back(New);
2847  ComplexTypes.InsertNode(New, InsertPos);
2848  return QualType(New, 0);
2849 }
2850 
2851 /// getPointerType - Return the uniqued reference to the type for a pointer to
2852 /// the specified type.
2854  // Unique pointers, to guarantee there is only one pointer of a particular
2855  // structure.
2856  llvm::FoldingSetNodeID ID;
2857  PointerType::Profile(ID, T);
2858 
2859  void *InsertPos = nullptr;
2860  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
2861  return QualType(PT, 0);
2862 
2863  // If the pointee type isn't canonical, this won't be a canonical type either,
2864  // so fill in the canonical type field.
2865  QualType Canonical;
2866  if (!T.isCanonical()) {
2867  Canonical = getPointerType(getCanonicalType(T));
2868 
2869  // Get the new insert position for the node we care about.
2870  PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
2871  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2872  }
2873  auto *New = new (*this, TypeAlignment) PointerType(T, Canonical);
2874  Types.push_back(New);
2875  PointerTypes.InsertNode(New, InsertPos);
2876  return QualType(New, 0);
2877 }
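// The pattern above is shared by most type factories in this file:
// profile the inputs into a FoldingSetNodeID, return an existing node if
// one is found, otherwise build the canonical form first (which may move
// the insert position) and only then allocate and insert the new node.
// One practical consequence (illustrative, for any QualType T in a
// context Ctx): the canonical type of Ctx.getPointerType(T) is the same
// uniqued node as Ctx.getPointerType(Ctx.getCanonicalType(T)), so
// canonical QualTypes can be compared by pointer equality.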
2878 
2880  llvm::FoldingSetNodeID ID;
2881  AdjustedType::Profile(ID, Orig, New);
2882  void *InsertPos = nullptr;
2883  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2884  if (AT)
2885  return QualType(AT, 0);
2886 
2887  QualType Canonical = getCanonicalType(New);
2888 
2889  // Get the new insert position for the node we care about.
2890  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2891  assert(!AT && "Shouldn't be in the map!");
2892 
2893  AT = new (*this, TypeAlignment)
2894  AdjustedType(Type::Adjusted, Orig, New, Canonical);
2895  Types.push_back(AT);
2896  AdjustedTypes.InsertNode(AT, InsertPos);
2897  return QualType(AT, 0);
2898 }
2899 
2901  assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
2902 
2903  QualType Decayed;
2904 
2905  // C99 6.7.5.3p7:
2906  // A declaration of a parameter as "array of type" shall be
2907  // adjusted to "qualified pointer to type", where the type
2908  // qualifiers (if any) are those specified within the [ and ] of
2909  // the array type derivation.
2910  if (T->isArrayType())
2911  Decayed = getArrayDecayedType(T);
2912 
2913  // C99 6.7.5.3p8:
2914  // A declaration of a parameter as "function returning type"
2915  // shall be adjusted to "pointer to function returning type", as
2916  // in 6.3.2.1.
2917  if (T->isFunctionType())
2918  Decayed = getPointerType(T);
2919 
2920  llvm::FoldingSetNodeID ID;
2921  AdjustedType::Profile(ID, T, Decayed);
2922  void *InsertPos = nullptr;
2923  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2924  if (AT)
2925  return QualType(AT, 0);
2926 
2927  QualType Canonical = getCanonicalType(Decayed);
2928 
2929  // Get the new insert position for the node we care about.
2930  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2931  assert(!AT && "Shouldn't be in the map!");
2932 
2933  AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical);
2934  Types.push_back(AT);
2935  AdjustedTypes.InsertNode(AT, InsertPos);
2936  return QualType(AT, 0);
2937 }
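// Usage sketch (hypothetical parameter types): a parameter written as
// 'int arr[8]' has the ConstantArrayType 'int [8]' as its original type
// and 'int *' as the adjusted type built here, while a parameter written
// with the function type 'void (int)' is adjusted to the pointer type
// 'void (*)(int)'. Both results are wrapped in a DecayedType so the
// as-written form is still available as sugar.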
2938 
2939 /// getBlockPointerType - Return the uniqued reference to the type for
2940 /// a pointer to the specified block.
2942  assert(T->isFunctionType() && "block of function types only");
2943  // Unique pointers, to guarantee there is only one block of a particular
2944  // structure.
2945  llvm::FoldingSetNodeID ID;
2947 
2948  void *InsertPos = nullptr;
2949  if (BlockPointerType *PT =
2950  BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
2951  return QualType(PT, 0);
2952 
2953  // If the block pointee type isn't canonical, this won't be a canonical
2954  // type either so fill in the canonical type field.
2955  QualType Canonical;
2956  if (!T.isCanonical()) {
2957  Canonical = getBlockPointerType(getCanonicalType(T));
2958 
2959  // Get the new insert position for the node we care about.
2960  BlockPointerType *NewIP =
2961  BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
2962  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2963  }
2964  auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
2965  Types.push_back(New);
2966  BlockPointerTypes.InsertNode(New, InsertPos);
2967  return QualType(New, 0);
2968 }
2969 
2970 /// getLValueReferenceType - Return the uniqued reference to the type for an
2971 /// lvalue reference to the specified type.
2972 QualType
2973 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
2974  assert(getCanonicalType(T) != OverloadTy &&
2975  "Unresolved overloaded function type");
2976 
2977  // Unique pointers, to guarantee there is only one pointer of a particular
2978  // structure.
2979  llvm::FoldingSetNodeID ID;
2980  ReferenceType::Profile(ID, T, SpelledAsLValue);
2981 
2982  void *InsertPos = nullptr;
2983  if (LValueReferenceType *RT =
2984  LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
2985  return QualType(RT, 0);
2986 
2987  const auto *InnerRef = T->getAs<ReferenceType>();
2988 
2989  // If the referencee type isn't canonical, this won't be a canonical type
2990  // either, so fill in the canonical type field.
2991  QualType Canonical;
2992  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
2993  QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
2994  Canonical = getLValueReferenceType(getCanonicalType(PointeeType));
2995 
2996  // Get the new insert position for the node we care about.
2997  LValueReferenceType *NewIP =
2998  LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
2999  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3000  }
3001 
3002  auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
3003  SpelledAsLValue);
3004  Types.push_back(New);
3005  LValueReferenceTypes.InsertNode(New, InsertPos);
3006 
3007  return QualType(New, 0);
3008 }
3009 
3010 /// getRValueReferenceType - Return the uniqued reference to the type for an
3011 /// rvalue reference to the specified type.
3013  // Unique pointers, to guarantee there is only one pointer of a particular
3014  // structure.
3015  llvm::FoldingSetNodeID ID;
3016  ReferenceType::Profile(ID, T, false);
3017 
3018  void *InsertPos = nullptr;
3019  if (RValueReferenceType *RT =
3020  RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
3021  return QualType(RT, 0);
3022 
3023  const auto *InnerRef = T->getAs<ReferenceType>();
3024 
3025  // If the referencee type isn't canonical, this won't be a canonical type
3026  // either, so fill in the canonical type field.
3027  QualType Canonical;
3028  if (InnerRef || !T.isCanonical()) {
3029  QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
3030  Canonical = getRValueReferenceType(getCanonicalType(PointeeType));
3031 
3032  // Get the new insert position for the node we care about.
3033  RValueReferenceType *NewIP =
3034  RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
3035  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3036  }
3037 
3038  auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
3039  Types.push_back(New);
3040  RValueReferenceTypes.InsertNode(New, InsertPos);
3041  return QualType(New, 0);
3042 }
3043 
3044 /// getMemberPointerType - Return the uniqued reference to the type for a
3045 /// member pointer to the specified type, in the specified class.
3047  // Unique pointers, to guarantee there is only one pointer of a particular
3048  // structure.
3049  llvm::FoldingSetNodeID ID;
3050  MemberPointerType::Profile(ID, T, Cls);
3051 
3052  void *InsertPos = nullptr;
3053  if (MemberPointerType *PT =
3054  MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3055  return QualType(PT, 0);
3056 
3057  // If the pointee or class type isn't canonical, this won't be a canonical
3058  // type either, so fill in the canonical type field.
3059  QualType Canonical;
3060  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
3062 
3063  // Get the new insert position for the node we care about.
3064  MemberPointerType *NewIP =
3065  MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3066  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3067  }
3068  auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
3069  Types.push_back(New);
3070  MemberPointerTypes.InsertNode(New, InsertPos);
3071  return QualType(New, 0);
3072 }
3073 
3074 /// getConstantArrayType - Return the unique reference to the type for an
3075 /// array of the specified element type.
3077  const llvm::APInt &ArySizeIn,
3079  unsigned IndexTypeQuals) const {
3080  assert((EltTy->isDependentType() ||
3081  EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
3082  "Constant array of VLAs is illegal!");
3083 
3084  // Convert the array size into a canonical width matching the pointer size for
3085  // the target.
3086  llvm::APInt ArySize(ArySizeIn);
3087  ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());
3088 
3089  llvm::FoldingSetNodeID ID;
3090  ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, IndexTypeQuals);
3091 
3092  void *InsertPos = nullptr;
3093  if (ConstantArrayType *ATP =
3094  ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
3095  return QualType(ATP, 0);
3096 
3097  // If the element type isn't canonical or has qualifiers, this won't
3098  // be a canonical type either, so fill in the canonical type field.
3099  QualType Canon;
3100  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
3101  SplitQualType canonSplit = getCanonicalType(EltTy).split();
3102  Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize,
3103  ASM, IndexTypeQuals);
3104  Canon = getQualifiedType(Canon, canonSplit.Quals);
3105 
3106  // Get the new insert position for the node we care about.
3107  ConstantArrayType *NewIP =
3108  ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
3109  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3110  }
3111 
3112  auto *New = new (*this,TypeAlignment)
3113  ConstantArrayType(EltTy, Canon, ArySize, ASM, IndexTypeQuals);
3114  ConstantArrayTypes.InsertNode(New, InsertPos);
3115  Types.push_back(New);
3116  return QualType(New, 0);
3117 }
3118 
3119 /// getVariableArrayDecayedType - Turns the given type, which may be
3120 /// variably-modified, into the corresponding type with all the known
3121 /// sizes replaced with [*].
3122 QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
3123  // By far the most common case.
3124  if (!type->isVariablyModifiedType()) return type;
3125 
3126  QualType result;
3127 
3128  SplitQualType split = type.getSplitDesugaredType();
3129  const Type *ty = split.Ty;
3130  switch (ty->getTypeClass()) {
3131 #define TYPE(Class, Base)
3132 #define ABSTRACT_TYPE(Class, Base)
3133 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3134 #include "clang/AST/TypeNodes.def"
3135  llvm_unreachable("didn't desugar past all non-canonical types?");
3136 
3137  // These types should never be variably-modified.
3138  case Type::Builtin:
3139  case Type::Complex:
3140  case Type::Vector:
3141  case Type::DependentVector:
3142  case Type::ExtVector:
3143  case Type::DependentSizedExtVector:
3144  case Type::DependentAddressSpace:
3145  case Type::ObjCObject:
3146  case Type::ObjCInterface:
3147  case Type::ObjCObjectPointer:
3148  case Type::Record:
3149  case Type::Enum:
3150  case Type::UnresolvedUsing:
3151  case Type::TypeOfExpr:
3152  case Type::TypeOf:
3153  case Type::Decltype:
3154  case Type::UnaryTransform:
3155  case Type::DependentName:
3156  case Type::InjectedClassName:
3157  case Type::TemplateSpecialization:
3158  case Type::DependentTemplateSpecialization:
3159  case Type::TemplateTypeParm:
3160  case Type::SubstTemplateTypeParmPack:
3161  case Type::Auto:
3162  case Type::DeducedTemplateSpecialization:
3163  case Type::PackExpansion:
3164  llvm_unreachable("type should never be variably-modified");
3165 
3166  // These types can be variably-modified but should never need to
3167  // further decay.
3168  case Type::FunctionNoProto:
3169  case Type::FunctionProto:
3170  case Type::BlockPointer:
3171  case Type::MemberPointer:
3172  case Type::Pipe:
3173  return type;
3174 
3175  // These types can be variably-modified. All these modifications
3176  // preserve structure except as noted by comments.
3177  // TODO: if we ever care about optimizing VLAs, there are no-op
3178  // optimizations available here.
3179  case Type::Pointer:
3181  cast<PointerType>(ty)->getPointeeType()));
3182  break;
3183 
3184  case Type::LValueReference: {
3185  const auto *lv = cast<LValueReferenceType>(ty);
3186  result = getLValueReferenceType(
3187  getVariableArrayDecayedType(lv->getPointeeType()),
3188  lv->isSpelledAsLValue());
3189  break;
3190  }
3191 
3192  case Type::RValueReference: {
3193  const auto *lv = cast<RValueReferenceType>(ty);
3194  result = getRValueReferenceType(
3195  getVariableArrayDecayedType(lv->getPointeeType()));
3196  break;
3197  }
3198 
3199  case Type::Atomic: {
3200  const auto *at = cast<AtomicType>(ty);
3201  result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
3202  break;
3203  }
3204 
3205  case Type::ConstantArray: {
3206  const auto *cat = cast<ConstantArrayType>(ty);
3207  result = getConstantArrayType(
3208  getVariableArrayDecayedType(cat->getElementType()),
3209  cat->getSize(),
3210  cat->getSizeModifier(),
3211  cat->getIndexTypeCVRQualifiers());
3212  break;
3213  }
3214 
3215  case Type::DependentSizedArray: {
3216  const auto *dat = cast<DependentSizedArrayType>(ty);
3217  result = getDependentSizedArrayType(
3218  getVariableArrayDecayedType(dat->getElementType()),
3219  dat->getSizeExpr(),
3220  dat->getSizeModifier(),
3221  dat->getIndexTypeCVRQualifiers(),
3222  dat->getBracketsRange());
3223  break;
3224  }
3225 
3226  // Turn incomplete types into [*] types.
3227  case Type::IncompleteArray: {
3228  const auto *iat = cast<IncompleteArrayType>(ty);
3229  result = getVariableArrayType(
3230  getVariableArrayDecayedType(iat->getElementType()),
3231  /*size*/ nullptr,
3233  iat->getIndexTypeCVRQualifiers(),
3234  SourceRange());
3235  break;
3236  }
3237 
3238  // Turn VLA types into [*] types.
3239  case Type::VariableArray: {
3240  const auto *vat = cast<VariableArrayType>(ty);
3241  result = getVariableArrayType(
3242  getVariableArrayDecayedType(vat->getElementType()),
3243  /*size*/ nullptr,
3245  vat->getIndexTypeCVRQualifiers(),
3246  vat->getBracketsRange());
3247  break;
3248  }
3249  }
3250 
3251  // Apply the top-level qualifiers from the original.
3252  return getQualifiedType(result, split.Quals);
3253 }
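// Illustrative effect (hypothetical VLA type): for a parameter of type
// 'int (*)[n][4]', which is variably modified, this routine produces
// 'int (*)[*][4]': the variable bound 'n' becomes the star form while the
// constant dimension and the pointer structure are preserved.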
3254 
3255 /// getVariableArrayType - Returns a non-unique reference to the type for a
3256 /// variable array of the specified element type.
3258  Expr *NumElts,
3260  unsigned IndexTypeQuals,
3261  SourceRange Brackets) const {
3262  // Since we don't unique expressions, it isn't possible to unique VLA's
3263  // that have an expression provided for their size.
3264  QualType Canon;
3265 
3266  // Be sure to pull qualifiers off the element type.
3267  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
3268  SplitQualType canonSplit = getCanonicalType(EltTy).split();
3269  Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
3270  IndexTypeQuals, Brackets);
3271  Canon = getQualifiedType(Canon, canonSplit.Quals);
3272  }
3273 
3274  auto *New = new (*this, TypeAlignment)
3275  VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
3276 
3277  VariableArrayTypes.push_back(New);
3278  Types.push_back(New);
3279  return QualType(New, 0);
3280 }
3281 
3282 /// getDependentSizedArrayType - Returns a non-unique reference to
3283 /// the type for a dependently-sized array of the specified element
3284 /// type.
3286  Expr *numElements,
3288  unsigned elementTypeQuals,
3289  SourceRange brackets) const {
3290  assert((!numElements || numElements->isTypeDependent() ||
3291  numElements->isValueDependent()) &&
3292  "Size must be type- or value-dependent!");
3293 
3294  // Dependently-sized array types that do not have a specified number
3295  // of elements will have their sizes deduced from a dependent
3296  // initializer. We do no canonicalization here at all, which is okay
3297  // because they can't be used in most locations.
3298  if (!numElements) {
3299  auto *newType
3300  = new (*this, TypeAlignment)
3301  DependentSizedArrayType(*this, elementType, QualType(),
3302  numElements, ASM, elementTypeQuals,
3303  brackets);
3304  Types.push_back(newType);
3305  return QualType(newType, 0);
3306  }
3307 
3308  // Otherwise, we actually build a new type every time, but we
3309  // also build a canonical type.
3310 
3311  SplitQualType canonElementType = getCanonicalType(elementType).split();
3312 
3313  void *insertPos = nullptr;
3314  llvm::FoldingSetNodeID ID;
3316  QualType(canonElementType.Ty, 0),
3317  ASM, elementTypeQuals, numElements);
3318 
3319  // Look for an existing type with these properties.
3320  DependentSizedArrayType *canonTy =
3321  DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);
3322 
3323  // If we don't have one, build one.
3324  if (!canonTy) {
3325  canonTy = new (*this, TypeAlignment)
3326  DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
3327  QualType(), numElements, ASM, elementTypeQuals,
3328  brackets);
3329  DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
3330  Types.push_back(canonTy);
3331  }
3332 
3333  // Apply qualifiers from the element type to the array.
3334  QualType canon = getQualifiedType(QualType(canonTy,0),
3335  canonElementType.Quals);
3336 
3337  // If we didn't need extra canonicalization for the element type or the size
3338  // expression, then just use that as our result.
3339  if (QualType(canonElementType.Ty, 0) == elementType &&
3340  canonTy->getSizeExpr() == numElements)
3341  return canon;
3342 
3343  // Otherwise, we need to build a type which follows the spelling
3344  // of the element type.
3345  auto *sugaredType
3346  = new (*this, TypeAlignment)
3347  DependentSizedArrayType(*this, elementType, canon, numElements,
3348  ASM, elementTypeQuals, brackets);
3349  Types.push_back(sugaredType);
3350  return QualType(sugaredType, 0);
3351 }
3352 
3355  unsigned elementTypeQuals) const {
3356  llvm::FoldingSetNodeID ID;
3357  IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);
3358 
3359  void *insertPos = nullptr;
3360  if (IncompleteArrayType *iat =
3361  IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
3362  return QualType(iat, 0);
3363 
3364  // If the element type isn't canonical, this won't be a canonical type
3365  // either, so fill in the canonical type field. We also have to pull
3366  // qualifiers off the element type.
3367  QualType canon;
3368 
3369  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
3370  SplitQualType canonSplit = getCanonicalType(elementType).split();
3371  canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
3372  ASM, elementTypeQuals);
3373  canon = getQualifiedType(canon, canonSplit.Quals);
3374 
3375  // Get the new insert position for the node we care about.
3376  IncompleteArrayType *existing =
3377  IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
3378  assert(!existing && "Shouldn't be in the map!"); (void) existing;
3379  }
3380 
3381  auto *newType = new (*this, TypeAlignment)
3382  IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
3383 
3384  IncompleteArrayTypes.InsertNode(newType, insertPos);
3385  Types.push_back(newType);
3386  return QualType(newType, 0);
3387 }
3388 
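// Self-contained sketch of the FoldingSet uniquing pattern used by these
// getters: Profile() the distinguishing fields, FindNodeOrInsertPos() to look
// for an existing node, and InsertNode() to register a newly built one.
#include "llvm/ADT/FoldingSet.h"

struct UniquedPair : llvm::FoldingSetNode {
  int First, Second;
  UniquedPair(int F, int S) : First(F), Second(S) {}
  void Profile(llvm::FoldingSetNodeID &ID) {
    ID.AddInteger(First);
    ID.AddInteger(Second);
  }
};

static UniquedPair *getUniquedPair(llvm::FoldingSet<UniquedPair> &Set,
                                   int First, int Second) {
  llvm::FoldingSetNodeID ID;
  ID.AddInteger(First);
  ID.AddInteger(Second);

  void *InsertPos = nullptr;
  if (UniquedPair *Existing = Set.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;                 // already built once; reuse it

  auto *New = new UniquedPair(First, Second);
  Set.InsertNode(New, InsertPos);    // remember it at the insert position
  return New;
}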
3389 /// getVectorType - Return the unique reference to a vector type of
3390 /// the specified element type and size. VectorType must be a built-in type.
3391 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
3392  VectorType::VectorKind VecKind) const {
3393  assert(vecType->isBuiltinType());
3394 
3395  // Check if we've already instantiated a vector of this type.
3396  llvm::FoldingSetNodeID ID;
3397  VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
3398 
3399  void *InsertPos = nullptr;
3400  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
3401  return QualType(VTP, 0);
3402 
3403  // If the element type isn't canonical, this won't be a canonical type either,
3404  // so fill in the canonical type field.
3405  QualType Canonical;
3406  if (!vecType.isCanonical()) {
3407  Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);
3408 
3409  // Get the new insert position for the node we care about.
3410  VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3411  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3412  }
3413  auto *New = new (*this, TypeAlignment)
3414  VectorType(vecType, NumElts, Canonical, VecKind);
3415  VectorTypes.InsertNode(New, InsertPos);
3416  Types.push_back(New);
3417  return QualType(New, 0);
3418 }
3419 
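// Hypothetical caller of getVectorType (a sketch, with 'Ctx' assumed): builds
// the GCC-style 'int __attribute__((vector_size(16)))', i.e. four ints, and
// relies on the uniquing above to get the same node back on a second call.
#include "clang/AST/ASTContext.h"
#include <cassert>

clang::QualType buildInt4Vector(clang::ASTContext &Ctx) {
  using namespace clang;
  QualType V1 = Ctx.getVectorType(Ctx.IntTy, 4, VectorType::GenericVector);
  QualType V2 = Ctx.getVectorType(Ctx.IntTy, 4, VectorType::GenericVector);
  assert(V1 == V2 && "vector types are uniqued");
  return V1;
}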
3420 QualType
3421 ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
3422  SourceLocation AttrLoc,
3423  VectorType::VectorKind VecKind) const {
3424  llvm::FoldingSetNodeID ID;
3425  DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr,
3426  VecKind);
3427  void *InsertPos = nullptr;
3428  DependentVectorType *Canon =
3429  DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3430  DependentVectorType *New;
3431 
3432  if (Canon) {
3433  New = new (*this, TypeAlignment) DependentVectorType(
3434  *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
3435  } else {
3436  QualType CanonVecTy = getCanonicalType(VecType);
3437  if (CanonVecTy == VecType) {
3438  New = new (*this, TypeAlignment) DependentVectorType(
3439  *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind);
3440 
3441  DependentVectorType *CanonCheck =
3442  DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3443  assert(!CanonCheck &&
3444  "Dependent-sized vector_size canonical type broken");
3445  (void)CanonCheck;
3446  DependentVectorTypes.InsertNode(New, InsertPos);
3447  } else {
3448  QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
3449  SourceLocation());
3450  New = new (*this, TypeAlignment) DependentVectorType(
3451  *this, VecType, Canon, SizeExpr, AttrLoc, VecKind);
3452  }
3453  }
3454 
3455  Types.push_back(New);
3456  return QualType(New, 0);
3457 }
3458 
3459 /// getExtVectorType - Return the unique reference to an extended vector type of
3460 /// the specified element type and size. VectorType must be a built-in type.
3461 QualType
3462 ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
3463  assert(vecType->isBuiltinType() || vecType->isDependentType());
3464 
3465  // Check if we've already instantiated a vector of this type.
3466  llvm::FoldingSetNodeID ID;
3467  VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
3468  VectorType::GenericVector);
3469  void *InsertPos = nullptr;
3470  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
3471  return QualType(VTP, 0);
3472 
3473  // If the element type isn't canonical, this won't be a canonical type either,
3474  // so fill in the canonical type field.
3475  QualType Canonical;
3476  if (!vecType.isCanonical()) {
3477  Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);
3478 
3479  // Get the new insert position for the node we care about.
3480  VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3481  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3482  }
3483  auto *New = new (*this, TypeAlignment)
3484  ExtVectorType(vecType, NumElts, Canonical);
3485  VectorTypes.InsertNode(New, InsertPos);
3486  Types.push_back(New);
3487  return QualType(New, 0);
3488 }
3489 
3490 QualType
3491 ASTContext::getDependentSizedExtVectorType(QualType vecType,
3492  Expr *SizeExpr,
3493  SourceLocation AttrLoc) const {
3494  llvm::FoldingSetNodeID ID;
3495  DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType),
3496  SizeExpr);
3497 
3498  void *InsertPos = nullptr;
3499  DependentSizedExtVectorType *Canon
3500  = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3501  DependentSizedExtVectorType *New;
3502  if (Canon) {
3503  // We already have a canonical version of this array type; use it as
3504  // the canonical type for a newly-built type.
3505  New = new (*this, TypeAlignment)
3506  DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0),
3507  SizeExpr, AttrLoc);
3508  } else {
3509  QualType CanonVecTy = getCanonicalType(vecType);
3510  if (CanonVecTy == vecType) {
3511  New = new (*this, TypeAlignment)
3512  DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr,
3513  AttrLoc);
3514 
3515  DependentSizedExtVectorType *CanonCheck
3516  = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3517  assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
3518  (void)CanonCheck;
3519  DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
3520  } else {
3521  QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
3522  SourceLocation());
3523  New = new (*this, TypeAlignment)
3524  DependentSizedExtVectorType(*this, vecType, Canon, SizeExpr, AttrLoc);
3525  }
3526  }
3527 
3528  Types.push_back(New);
3529  return QualType(New, 0);
3530 }
3531 
3532 QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
3533  Expr *AddrSpaceExpr,
3534  SourceLocation AttrLoc) const {
3535  assert(AddrSpaceExpr->isInstantiationDependent());
3536 
3537  QualType canonPointeeType = getCanonicalType(PointeeType);
3538 
3539  void *insertPos = nullptr;
3540  llvm::FoldingSetNodeID ID;
3541  DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
3542  AddrSpaceExpr);
3543 
3544  DependentAddressSpaceType *canonTy =
3545  DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);
3546 
3547  if (!canonTy) {
3548  canonTy = new (*this, TypeAlignment)
3549  DependentAddressSpaceType(*this, canonPointeeType,
3550  QualType(), AddrSpaceExpr, AttrLoc);
3551  DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
3552  Types.push_back(canonTy);
3553  }
3554 
3555  if (canonPointeeType == PointeeType &&
3556  canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
3557  return QualType(canonTy, 0);
3558 
3559  auto *sugaredType
3560  = new (*this, TypeAlignment)
3561  DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0),
3562  AddrSpaceExpr, AttrLoc);
3563  Types.push_back(sugaredType);
3564  return QualType(sugaredType, 0);
3565 }
3566 
3567 /// Determine whether \p T is canonical as the result type of a function.
3568 static bool isCanonicalResultType(QualType T) {
3569  return T.isCanonical() &&
3570  (T.getObjCLifetime() == Qualifiers::OCL_None ||
3571  T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
3572 }
3573 
3574 /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
3575 QualType
3576 ASTContext::getFunctionNoProtoType(QualType ResultTy,
3577  const FunctionType::ExtInfo &Info) const {
3578  // Unique functions, to guarantee there is only one function of a particular
3579  // structure.
3580  llvm::FoldingSetNodeID ID;
3581  FunctionNoProtoType::Profile(ID, ResultTy, Info);
3582 
3583  void *InsertPos = nullptr;
3584  if (FunctionNoProtoType *FT =
3585  FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
3586  return QualType(FT, 0);
3587 
3588  QualType Canonical;
3589  if (!isCanonicalResultType(ResultTy)) {
3590  Canonical =
3591  getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info);
3592 
3593  // Get the new insert position for the node we care about.
3594  FunctionNoProtoType *NewIP =
3595  FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
3596  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3597  }
3598 
3599  auto *New = new (*this, TypeAlignment)
3600  FunctionNoProtoType(ResultTy, Canonical, Info);
3601  Types.push_back(New);
3602  FunctionNoProtoTypes.InsertNode(New, InsertPos);
3603  return QualType(New, 0);
3604 }
3605 
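// Sketch of a caller building the K&R-style type 'int()' described above;
// 'Ctx' is an assumed ASTContext reference.
#include "clang/AST/ASTContext.h"

clang::QualType buildKnRIntFunction(clang::ASTContext &Ctx) {
  clang::FunctionType::ExtInfo Info;   // default calling convention, no attributes
  return Ctx.getFunctionNoProtoType(Ctx.IntTy, Info);
}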
3606 CanQualType
3607 ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
3608  CanQualType CanResultType = getCanonicalType(ResultType);
3609 
3610  // Canonical result types do not have ARC lifetime qualifiers.
3611  if (CanResultType.getQualifiers().hasObjCLifetime()) {
3612  Qualifiers Qs = CanResultType.getQualifiers();
3613  Qs.removeObjCLifetime();
3614  return CanQualType::CreateUnsafe(
3615  getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
3616  }
3617 
3618  return CanResultType;
3619 }
3620 
3621 static bool isCanonicalExceptionSpecification(
3622  const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
3623  if (ESI.Type == EST_None)
3624  return true;
3625  if (!NoexceptInType)
3626  return false;
3627 
3628  // C++17 onwards: exception specification is part of the type, as a simple
3629  // boolean "can this function type throw".
3630  if (ESI.Type == EST_BasicNoexcept)
3631  return true;
3632 
3633  // A noexcept(expr) specification is (possibly) canonical if expr is
3634  // value-dependent.
3635  if (ESI.Type == EST_DependentNoexcept)
3636  return true;
3637 
3638  // A dynamic exception specification is canonical if it only contains pack
3639  // expansions (so we can't tell whether it's non-throwing) and all its
3640  // contained types are canonical.
3641  if (ESI.Type == EST_Dynamic) {
3642  bool AnyPackExpansions = false;
3643  for (QualType ET : ESI.Exceptions) {
3644  if (!ET.isCanonical())
3645  return false;
3646  if (ET->getAs<PackExpansionType>())
3647  AnyPackExpansions = true;
3648  }
3649  return AnyPackExpansions;
3650  }
3651 
3652  return false;
3653 }
3654 
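// What the C++17 rule above means at the source level (compile with
// -std=c++17): the exception specification participates in the function type,
// so these two pointers have different types.
#include <type_traits>

void (*MayThrow)();          // void (*)()
void (*NoThrow)() noexcept;  // void (*)() noexcept
static_assert(!std::is_same_v<decltype(MayThrow), decltype(NoThrow)>,
              "since C++17, noexcept is part of the function type");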
3655 QualType ASTContext::getFunctionTypeInternal(
3656  QualType ResultTy, ArrayRef<QualType> ArgArray,
3657  const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
3658  size_t NumArgs = ArgArray.size();
3659 
3660  // Unique functions, to guarantee there is only one function of a particular
3661  // structure.
3662  llvm::FoldingSetNodeID ID;
3663  FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
3664  *this, true);
3665 
3666  QualType Canonical;
3667  bool Unique = false;
3668 
3669  void *InsertPos = nullptr;
3670  if (FunctionProtoType *FPT =
3671  FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
3672  QualType Existing = QualType(FPT, 0);
3673 
3674  // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
3675  // it so long as our exception specification doesn't contain a dependent
3676  // noexcept expression, or we're just looking for a canonical type.
3677  // Otherwise, we're going to need to create a type
3678  // sugar node to hold the concrete expression.
3679  if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
3680  EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
3681  return Existing;
3682 
3683  // We need a new type sugar node for this one, to hold the new noexcept
3684  // expression. We do no canonicalization here, but that's OK since we don't
3685  // expect to see the same noexcept expression much more than once.
3686  Canonical = getCanonicalType(Existing);
3687  Unique = true;
3688  }
3689 
3690  bool NoexceptInType = getLangOpts().CPlusPlus17;
3691  bool IsCanonicalExceptionSpec =
3692  isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);
3693 
3694  // Determine whether the type being created is already canonical or not.
3695  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
3696  isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
3697  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
3698  if (!ArgArray[i].isCanonicalAsParam())
3699  isCanonical = false;
3700 
3701  if (OnlyWantCanonical)
3702  assert(isCanonical &&
3703  "given non-canonical parameters constructing canonical type");
3704 
3705  // If this type isn't canonical, get the canonical version of it if we don't
3706  // already have it. The exception spec is only partially part of the
3707  // canonical type, and only in C++17 onwards.
3708  if (!isCanonical && Canonical.isNull()) {
3709  SmallVector<QualType, 16> CanonicalArgs;
3710  CanonicalArgs.reserve(NumArgs);
3711  for (unsigned i = 0; i != NumArgs; ++i)
3712  CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
3713 
3714  llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
3715  FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
3716  CanonicalEPI.HasTrailingReturn = false;
3717 
3718  if (IsCanonicalExceptionSpec) {
3719  // Exception spec is already OK.
3720  } else if (NoexceptInType) {
3721  switch (EPI.ExceptionSpec.Type) {
3722  case EST_Unparsed: case EST_Uninstantiated: case EST_Unevaluated:
3723  // We don't know yet. It shouldn't matter what we pick here; no-one
3724  // should ever look at this.
3725  LLVM_FALLTHROUGH;
3726  case EST_None: case EST_MSAny: case EST_NoexceptFalse:
3727  CanonicalEPI.ExceptionSpec.Type = EST_None;
3728  break;
3729 
3730  // A dynamic exception specification is almost always "not noexcept",
3731  // with the exception that a pack expansion might expand to no types.
3732  case EST_Dynamic: {
3733  bool AnyPacks = false;
3734  for (QualType ET : EPI.ExceptionSpec.Exceptions) {
3735  if (ET->getAs<PackExpansionType>())
3736  AnyPacks = true;
3737  ExceptionTypeStorage.push_back(getCanonicalType(ET));
3738  }
3739  if (!AnyPacks)
3740  CanonicalEPI.ExceptionSpec.Type = EST_None;
3741  else {
3742  CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
3743  CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
3744  }
3745  break;
3746  }
3747 
3748  case EST_DynamicNone: case EST_BasicNoexcept: case EST_NoexceptTrue:
3749  CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
3750  break;
3751 
3752  case EST_DependentNoexcept:
3753  llvm_unreachable("dependent noexcept is already canonical");
3754  }
3755  } else {
3756  CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
3757  }
3758 
3759  // Adjust the canonical function result type.
3760  CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
3761  Canonical =
3762  getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);
3763 
3764  // Get the new insert position for the node we care about.
3765  FunctionProtoType *NewIP =
3766  FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
3767  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3768  }
3769 
3770  // Compute the needed size to hold this FunctionProtoType and the
3771  // various trailing objects.
3772  auto ESH = FunctionProtoType::getExceptionSpecSize(
3773  EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
3774  size_t Size = FunctionProtoType::totalSizeToAlloc<
3775  QualType, FunctionType::FunctionTypeExtraBitfields,
3776  FunctionType::ExceptionType, Expr *, FunctionDecl *,
3777  FunctionProtoType::ExtParameterInfo, Qualifiers>(
3778  NumArgs, FunctionProtoType::hasExtraBitfields(EPI.ExceptionSpec.Type),
3779  ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
3780  EPI.ExtParameterInfos ? NumArgs : 0,
3781  EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0);
3782 
3783  auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment);
3784  FunctionProtoType::ExtProtoInfo newEPI = EPI;
3785  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
3786  Types.push_back(FTP);
3787  if (!Unique)
3788  FunctionProtoTypes.InsertNode(FTP, InsertPos);
3789  return QualType(FTP, 0);
3790 }
3791 
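// Hedged sketch of the public entry point that funnels into
// getFunctionTypeInternal: building the prototype 'int (float, double)' with
// a default ExtProtoInfo (no noexcept, not variadic). 'Ctx' is assumed.
#include "clang/AST/ASTContext.h"

clang::QualType buildIntOfFloatDouble(clang::ASTContext &Ctx) {
  using namespace clang;
  QualType Params[] = {Ctx.FloatTy, Ctx.DoubleTy};
  FunctionProtoType::ExtProtoInfo EPI;
  return Ctx.getFunctionType(Ctx.IntTy, Params, EPI);
}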
3792 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
3793  llvm::FoldingSetNodeID ID;
3794  PipeType::Profile(ID, T, ReadOnly);
3795 
3796  void *InsertPos = nullptr;
3797  if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
3798  return QualType(PT, 0);
3799 
3800  // If the pipe element type isn't canonical, this won't be a canonical type
3801  // either, so fill in the canonical type field.
3802  QualType Canonical;
3803  if (!T.isCanonical()) {
3804  Canonical = getPipeType(getCanonicalType(T), ReadOnly);
3805 
3806  // Get the new insert position for the node we care about.
3807  PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
3808  assert(!NewIP && "Shouldn't be in the map!");
3809  (void)NewIP;
3810  }
3811  auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly);
3812  Types.push_back(New);
3813  PipeTypes.InsertNode(New, InsertPos);
3814  return QualType(New, 0);
3815 }
3816 
3817 QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const {
3818  // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
3819  return LangOpts.OpenCL ? getAddrSpaceQualType(Ty, LangAS::opencl_constant)
3820  : Ty;
3821 }
3822 
3823 QualType ASTContext::getReadPipeType(QualType T) const {
3824  return getPipeType(T, true);
3825 }
3826 
3827 QualType ASTContext::getWritePipeType(QualType T) const {
3828  return getPipeType(T, false);
3829 }
3830 
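// Sketch: the OpenCL 'read_only pipe int' and 'write_only pipe int' types are
// just getPipeType with the ReadOnly flag flipped, via the wrappers above.
#include "clang/AST/ASTContext.h"

void buildPipeTypes(clang::ASTContext &Ctx) {
  clang::QualType ReadPipe = Ctx.getReadPipeType(Ctx.IntTy);    // read_only pipe int
  clang::QualType WritePipe = Ctx.getWritePipeType(Ctx.IntTy);  // write_only pipe int
  (void)ReadPipe;
  (void)WritePipe;
}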
3831 #ifndef NDEBUG
3832 static bool NeedsInjectedClassNameType(const RecordDecl *D) {
3833  if (!isa<CXXRecordDecl>(D)) return false;
3834  const auto *RD = cast<CXXRecordDecl>(D);
3835  if (isa<ClassTemplatePartialSpecializationDecl>(RD))
3836  return true;
3837  if (RD->getDescribedClassTemplate() &&
3838  !isa<ClassTemplateSpecializationDecl>(RD))
3839  return true;
3840  return false;
3841 }
3842 #endif
3843 
3844 /// getInjectedClassNameType - Return the unique reference to the
3845 /// injected class name type for the specified templated declaration.
3846 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
3847  QualType TST) const {
3848  assert(NeedsInjectedClassNameType(Decl));
3849  if (Decl->TypeForDecl) {
3850  assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
3851  } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
3852  assert(PrevDecl->TypeForDecl && "previous declaration has no type");
3853  Decl->TypeForDecl = PrevDecl->TypeForDecl;
3854  assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
3855  } else {
3856  Type *newType =
3857  new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
3858  Decl->TypeForDecl = newType;
3859  Types.push_back(newType);
3860  }
3861  return QualType(Decl->TypeForDecl, 0);
3862 }
3863 
3864 /// getTypeDeclType - Return the unique reference to the type for the
3865 /// specified type declaration.
3866 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
3867  assert(Decl && "Passed null for Decl param");
3868  assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
3869 
3870  if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
3871  return getTypedefType(Typedef);
3872 
3873  assert(!isa<TemplateTypeParmDecl>(Decl) &&
3874  "Template type parameter types are always available.");
3875 
3876  if (const auto *Record = dyn_cast<RecordDecl>(Decl)) {
3877  assert(Record->isFirstDecl() && "struct/union has previous declaration");
3878  assert(!NeedsInjectedClassNameType(Record));
3879  return getRecordType(Record);
3880  } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) {
3881  assert(Enum->isFirstDecl() && "enum has previous declaration");
3882  return getEnumType(Enum);
3883  } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
3884  Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
3885  Decl->TypeForDecl = newType;
3886  Types.push_back(newType);
3887  } else
3888  llvm_unreachable("TypeDecl without a type?");
3889 
3890  return QualType(Decl->TypeForDecl, 0);
3891 }
3892 
3893 /// getTypedefType - Return the unique reference to the type for the
3894 /// specified typedef name decl.
3895 QualType
3896 ASTContext::getTypedefType(const TypedefNameDecl *Decl,
3897  QualType Canonical) const {
3898  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
3899 
3900  if (Canonical.isNull())
3901  Canonical = getCanonicalType(Decl->getUnderlyingType());
3902  auto *newType = new (*this, TypeAlignment)
3903  TypedefType(Type::Typedef, Decl, Canonical);
3904  Decl->TypeForDecl = newType;
3905  Types.push_back(newType);
3906  return QualType(newType, 0);
3907 }
3908 
3909 QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
3910  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
3911 
3912  if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
3913  if (PrevDecl->TypeForDecl)
3914  return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
3915 
3916  auto *newType = new (*this, TypeAlignment) RecordType(Decl);
3917  Decl->TypeForDecl = newType;
3918  Types.push_back(newType);
3919  return QualType(newType, 0);
3920 }
3921 
3922 QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
3923  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
3924 
3925  if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
3926  if (PrevDecl->TypeForDecl)
3927  return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
3928 
3929  auto *newType = new (*this, TypeAlignment) EnumType(Decl);
3930  Decl->TypeForDecl = newType;
3931  Types.push_back(newType);
3932  return QualType(newType, 0);
3933 }
3934 
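// Sketch of the TypeForDecl cache used by getRecordType/getEnumType above:
// asking for the type of the same declaration twice hands back the very same
// node. 'Ctx' and 'RD' are assumptions for the example.
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include <cassert>

void checkRecordTypeIsCached(clang::ASTContext &Ctx, clang::RecordDecl *RD) {
  clang::QualType First = Ctx.getRecordType(RD);
  clang::QualType Again = Ctx.getRecordType(RD);
  assert(First == Again && "the RecordType node is created once and cached");
}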
3935 QualType ASTContext::getAttributedType(attr::Kind attrKind,
3936  QualType modifiedType,
3937  QualType equivalentType) {
3938  llvm::FoldingSetNodeID id;
3939  AttributedType::Profile(id, attrKind, modifiedType, equivalentType);
3940 
3941  void *insertPos = nullptr;
3942  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
3943  if (type) return QualType(type, 0);
3944 
3945  QualType canon = getCanonicalType(equivalentType);
3946  type = new (*this, TypeAlignment)
3947  AttributedType(canon, attrKind, modifiedType, equivalentType);
3948 
3949  Types.push_back(type);
3950  AttributedTypes.InsertNode(type, insertPos);
3951 
3952  return QualType(type, 0);
3953 }
3954 
3955 /// Retrieve a substitution-result type.
3956 QualType
3957 ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
3958  QualType Replacement) const {
3959  assert(Replacement.isCanonical()
3960  && "replacement types must always be canonical");
3961 
3962  llvm::FoldingSetNodeID ID;
3963  SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
3964  void *InsertPos = nullptr;
3965  SubstTemplateTypeParmType *SubstParm
3966  = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
3967 
3968  if (!SubstParm) {
3969  SubstParm = new (*this, TypeAlignment)
3970  SubstTemplateTypeParmType(Parm, Replacement);
3971  Types.push_back(SubstParm);
3972  SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
3973  }
3974 
3975  return QualType(SubstParm, 0);
3976 }
3977 
3978 /// Retrieve a substitution-result type for a template parameter pack.
3979 QualType ASTContext::getSubstTemplateTypeParmPackType(
3980  const TemplateTypeParmType *Parm,
3981  const TemplateArgument &ArgPack) {
3982 #ifndef NDEBUG
3983  for (const auto &P : ArgPack.pack_elements()) {
3984  assert(P.getKind() == TemplateArgument::Type &&"Pack contains a non-type");
3985  assert(P.getAsType().isCanonical() && "Pack contains non-canonical type");
3986  }
3987 #endif
3988 
3989  llvm::FoldingSetNodeID ID;
3990  SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
3991  void *InsertPos = nullptr;
3992  if (SubstTemplateTypeParmPackType *SubstParm
3993  = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
3994  return QualType(SubstParm, 0);
3995 
3996  QualType Canon;
3997  if (!Parm->isCanonicalUnqualified()) {
3998  Canon = getCanonicalType(QualType(Parm, 0));
3999  Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
4000  ArgPack);
4001  SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
4002  }
4003 
4004  auto *SubstParm
4005  = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
4006  ArgPack);
4007  Types.push_back(SubstParm);
4008  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
4009  return QualType(SubstParm, 0);
4010 }
4011 
4012 /// Retrieve the template type parameter type for a template
4013 /// parameter or parameter pack with the given depth, index, and (optionally)
4014 /// name.
4015 QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
4016  bool ParameterPack,
4017  TemplateTypeParmDecl *TTPDecl) const {
4018  llvm::FoldingSetNodeID ID;
4019  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
4020  void *InsertPos = nullptr;
4021  TemplateTypeParmType *TypeParm
4022  = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4023 
4024  if (TypeParm)
4025  return QualType(TypeParm, 0);
4026 
4027  if (TTPDecl) {
4028  QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
4029  TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);
4030 
4031  TemplateTypeParmType *TypeCheck
4032  = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4033  assert(!TypeCheck && "Template type parameter canonical type broken");
4034  (void)TypeCheck;
4035  } else
4036  TypeParm = new (*this, TypeAlignment)
4037  TemplateTypeParmType(Depth, Index, ParameterPack);
4038 
4039  Types.push_back(TypeParm);
4040  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
4041 
4042  return QualType(TypeParm, 0);
4043 }
4044 
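// Sketch: the canonical type of 'T' in 'template <class T> ...' is identified
// purely by its depth and index (here 0, 0) and whether it is a pack; the
// declaration pointer only matters for the sugared form.
#include "clang/AST/ASTContext.h"

clang::QualType canonicalFirstTemplateParam(clang::ASTContext &Ctx) {
  return Ctx.getTemplateTypeParmType(/*Depth=*/0, /*Index=*/0,
                                     /*ParameterPack=*/false);
}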
4045 TypeSourceInfo *
4046 ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
4047  SourceLocation NameLoc,
4048  const TemplateArgumentListInfo &Args,
4049  QualType Underlying) const {
4050  assert(!Name.getAsDependentTemplateName() &&
4051  "No dependent template names here!");
4052  QualType TST = getTemplateSpecializationType(Name, Args, Underlying);
4053 
4054  TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
4055  TemplateSpecializationTypeLoc TL =
4056  DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
4057  TL.setTemplateKeywordLoc(SourceLocation());
4058  TL.setTemplateNameLoc(NameLoc);
4059  TL.setLAngleLoc(Args.getLAngleLoc());
4060  TL.setRAngleLoc(Args.getRAngleLoc());
4061  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
4062  TL.setArgLocInfo(i, Args[i].getLocInfo());
4063  return DI;
4064 }
4065 
4066 QualType
4067 ASTContext::getTemplateSpecializationType(TemplateName Template,
4068  const TemplateArgumentListInfo &Args,
4069  QualType Underlying) const {
4070  assert(!Template.getAsDependentTemplateName() &&
4071  "No dependent template names here!");
4072 
4073  SmallVector<TemplateArgument, 4> ArgVec;
4074  ArgVec.reserve(Args.size());
4075  for (const TemplateArgumentLoc &Arg : Args.arguments())
4076  ArgVec.push_back(Arg.getArgument());
4077 
4078  return getTemplateSpecializationType(Template, ArgVec, Underlying);
4079 }
4080 
4081 #ifndef NDEBUG
4082 static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
4083  for (const TemplateArgument &Arg : Args)
4084  if (Arg.isPackExpansion())
4085  return true;
4086 
4087  return false;
4088 }
4089 #endif
4090 
4091 QualType
4092 ASTContext::getTemplateSpecializationType(TemplateName Template,
4093  ArrayRef<TemplateArgument> Args,
4094  QualType Underlying) const {
4095  assert(!Template.getAsDependentTemplateName() &&
4096  "No dependent template names here!");
4097  // Look through qualified template names.
4098  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4099  Template = TemplateName(QTN->getTemplateDecl());
4100 
4101  bool IsTypeAlias =
4102  Template.getAsTemplateDecl() &&
4103  isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
4104  QualType CanonType;
4105  if (!Underlying.isNull())
4106  CanonType = getCanonicalType(Underlying);
4107  else {
4108  // We can get here with an alias template when the specialization contains
4109  // a pack expansion that does not match up with a parameter pack.
4110  assert((!IsTypeAlias || hasAnyPackExpansions(Args)) &&
4111  "Caller must compute aliased type");
4112  IsTypeAlias = false;
4113  CanonType = getCanonicalTemplateSpecializationType(Template, Args);
4114  }
4115 
4116  // Allocate the (non-canonical) template specialization type, but don't
4117  // try to unique it: these types typically have location information that
4118  // we don't unique and don't want to lose.
4119  void *Mem = Allocate(sizeof(TemplateSpecializationType) +
4120  sizeof(TemplateArgument) * Args.size() +
4121  (IsTypeAlias? sizeof(QualType) : 0),
4122  TypeAlignment);
4123  auto *Spec
4124  = new (Mem) TemplateSpecializationType(Template, Args, CanonType,
4125  IsTypeAlias ? Underlying : QualType());
4126 
4127  Types.push_back(Spec);
4128  return QualType(Spec, 0);
4129 }
4130 
4131 QualType ASTContext::getCanonicalTemplateSpecializationType(
4132  TemplateName Template, ArrayRef<TemplateArgument> Args) const {
4133  assert(!Template.getAsDependentTemplateName() &&
4134  "No dependent template names here!");
4135 
4136  // Look through qualified template names.
4137  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4138  Template = TemplateName(QTN->getTemplateDecl());
4139 
4140  // Build the canonical template specialization type.
4141  TemplateName CanonTemplate = getCanonicalTemplateName(Template);
4142  SmallVector<TemplateArgument, 4> CanonArgs;
4143  unsigned NumArgs = Args.size();
4144  CanonArgs.reserve(NumArgs);
4145  for (const TemplateArgument &Arg : Args)
4146  CanonArgs.push_back(getCanonicalTemplateArgument(Arg));
4147 
4148  // Determine whether this canonical template specialization type already
4149  // exists.
4150  llvm::FoldingSetNodeID ID;
4151  TemplateSpecializationType::Profile(ID, CanonTemplate,
4152  CanonArgs, *this);
4153 
4154  void *InsertPos = nullptr;
4155  TemplateSpecializationType *Spec
4156  = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4157 
4158  if (!Spec) {
4159  // Allocate a new canonical template specialization type.
4160  void *Mem = Allocate((sizeof(TemplateSpecializationType) +
4161  sizeof(TemplateArgument) * NumArgs),
4162  TypeAlignment);
4163  Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
4164  CanonArgs,
4165  QualType(), QualType());
4166  Types.push_back(Spec);
4167  TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
4168  }
4169 
4170  assert(Spec->isDependentType() &&
4171  "Non-dependent template-id type must have a canonical type");
4172  return QualType(Spec, 0);
4173 }
4174 
4175 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
4176  NestedNameSpecifier *NNS,
4177  QualType NamedType,
4178  TagDecl *OwnedTagDecl) const {
4179  llvm::FoldingSetNodeID ID;
4180  ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);
4181 
4182  void *InsertPos = nullptr;
4183  ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
4184  if (T)
4185  return QualType(T, 0);
4186 
4187  QualType Canon = NamedType;
4188  if (!Canon.isCanonical()) {
4189  Canon = getCanonicalType(NamedType);
4190  ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
4191  assert(!CheckT && "Elaborated canonical type broken");
4192  (void)CheckT;
4193  }
4194 
4195  void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl),
4196  TypeAlignment);
4197  T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);
4198 
4199  Types.push_back(T);
4200  ElaboratedTypes.InsertNode(T, InsertPos);
4201  return QualType(T, 0);
4202 }
4203 
4204 QualType
4205 ASTContext::getParenType(QualType InnerType) const {
4206  llvm::FoldingSetNodeID ID;
4207  ParenType::Profile(ID, InnerType);
4208 
4209  void *InsertPos = nullptr;
4210  ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
4211  if (T)
4212  return QualType(T, 0);
4213 
4214  QualType Canon = InnerType;
4215  if (!Canon.isCanonical()) {
4216  Canon = getCanonicalType(InnerType);
4217  ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
4218  assert(!CheckT && "Paren canonical type broken");
4219  (void)CheckT;
4220  }
4221 
4222  T = new (*this, TypeAlignment) ParenType(InnerType, Canon);
4223  Types.push_back(T);
4224  ParenTypes.InsertNode(T, InsertPos);
4225  return QualType(T, 0);
4226 }
4227 
4228 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
4229  NestedNameSpecifier *NNS,
4230  const IdentifierInfo *Name,
4231  QualType Canon) const {
4232  if (Canon.isNull()) {
4233  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
4234  if (CanonNNS != NNS)
4235  Canon = getDependentNameType(Keyword, CanonNNS, Name);
4236  }
4237 
4238  llvm::FoldingSetNodeID ID;
4239  DependentNameType::Profile(ID, Keyword, NNS, Name);
4240 
4241  void *InsertPos = nullptr;
4242  DependentNameType *T
4243  = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
4244  if (T)
4245  return QualType(T, 0);
4246 
4247  T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon);
4248  Types.push_back(T);
4249  DependentNameTypes.InsertNode(T, InsertPos);
4250  return QualType(T, 0);
4251 }
4252 
4253 QualType
4254 ASTContext::getDependentTemplateSpecializationType(
4255  ElaboratedTypeKeyword Keyword,
4256  NestedNameSpecifier *NNS,
4257  const IdentifierInfo *Name,
4258  const TemplateArgumentListInfo &Args) const {
4259  // TODO: avoid this copy
4260  SmallVector<TemplateArgument, 16> ArgCopy;
4261  for (unsigned I = 0, E = Args.size(); I != E; ++I)
4262  ArgCopy.push_back(Args[I].getArgument());
4263  return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy);
4264 }
4265 
4266 QualType
4267 ASTContext::getDependentTemplateSpecializationType(
4268  ElaboratedTypeKeyword Keyword,
4269  NestedNameSpecifier *NNS,
4270  const IdentifierInfo *Name,
4271  ArrayRef<TemplateArgument> Args) const {
4272  assert((!NNS || NNS->isDependent()) &&
4273  "nested-name-specifier must be dependent");
4274 
4275  llvm::FoldingSetNodeID ID;
4276  DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
4277  Name, Args);
4278 
4279  void *InsertPos = nullptr;
4280  DependentTemplateSpecializationType *T
4281  = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4282  if (T)
4283  return QualType(T, 0);
4284 
4285  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
4286 
4287  ElaboratedTypeKeyword CanonKeyword = Keyword;
4288  if (Keyword == ETK_None) CanonKeyword = ETK_Typename;
4289 
4290  bool AnyNonCanonArgs = false;
4291  unsigned NumArgs = Args.size();
4292  SmallVector<TemplateArgument, 16> CanonArgs(NumArgs);
4293  for (unsigned I = 0; I != NumArgs; ++I) {
4294  CanonArgs[I] = getCanonicalTemplateArgument(Args[I]);
4295  if (!CanonArgs[I].structurallyEquals(Args[I]))
4296  AnyNonCanonArgs = true;
4297  }
4298 
4299  QualType Canon;
4300  if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
4301  Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
4302  Name,
4303  CanonArgs);
4304 
4305  // Find the insert position again.
4306  DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4307  }
4308 
4309  void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
4310  sizeof(TemplateArgument) * NumArgs),
4311  TypeAlignment);
4312  T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
4313  Name, Args, Canon);
4314  Types.push_back(T);
4315  DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
4316  return QualType(T, 0);
4317 }
4318 
4319 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
4320  TemplateArgument Arg;
4321  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
4322  QualType ArgType = getTypeDeclType(TTP);
4323  if (TTP->isParameterPack())
4324  ArgType = getPackExpansionType(ArgType, None);
4325 
4326  Arg = TemplateArgument(ArgType);
4327  } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
4328  Expr *E = new (*this) DeclRefExpr(
4329  *this, NTTP, /*enclosing*/ false,
4330  NTTP->getType().getNonLValueExprType(*this),
4331  Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());
4332 
4333  if (NTTP->isParameterPack())
4334  E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(),
4335  None);
4336  Arg = TemplateArgument(E);
4337  } else {
4338  auto *TTP = cast<TemplateTemplateParmDecl>(Param);
4339  if (TTP->isParameterPack())
4340  Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>());
4341  else
4342  Arg = TemplateArgument(TemplateName(TTP));
4343  }
4344 
4345  if (Param->isTemplateParameterPack())
4346  Arg = TemplateArgument::CreatePackCopy(*this, Arg);
4347 
4348  return Arg;
4349 }
4350 
4351 void
4352 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params,
4353  SmallVectorImpl<TemplateArgument> &Args) {
4354  Args.reserve(Args.size() + Params->size());
4355 
4356  for (NamedDecl *Param : *Params)
4357  Args.push_back(getInjectedTemplateArg(Param));
4358 }
4359 
4360 QualType ASTContext::getPackExpansionType(QualType Pattern,
4361  Optional<unsigned> NumExpansions) {
4362  llvm::FoldingSetNodeID ID;
4363  PackExpansionType::Profile(ID, Pattern, NumExpansions);
4364 
4365  assert(Pattern->containsUnexpandedParameterPack() &&
4366  "Pack expansions must expand one or more parameter packs");
4367  void *InsertPos = nullptr;
4368  PackExpansionType *T
4369  = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
4370  if (T)
4371  return QualType(T, 0);
4372 
4373  QualType Canon;
4374  if (!Pattern.isCanonical()) {
4375  Canon = getCanonicalType(Pattern);
4376  // The canonical type might not contain an unexpanded parameter pack, if it
4377  // contains an alias template specialization which ignores one of its
4378  // parameters.
4379  if (Canon->containsUnexpandedParameterPack()) {
4380  Canon = getPackExpansionType(Canon, NumExpansions);
4381 
4382  // Find the insert position again, in case we inserted an element into
4383  // PackExpansionTypes and invalidated our insert position.
4384  PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
4385  }
4386  }
4387 
4388  T = new (*this, TypeAlignment)
4389  PackExpansionType(Pattern, Canon, NumExpansions);
4390  Types.push_back(T);
4391  PackExpansionTypes.InsertNode(T, InsertPos);
4392  return QualType(T, 0);
4393 }
4394 
4395 /// CmpProtocolNames - Comparison predicate for sorting protocols
4396 /// alphabetically.
4397 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
4398  ObjCProtocolDecl *const *RHS) {
4399  return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName());
4400 }
4401 
4402 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
4403  if (Protocols.empty()) return true;
4404 
4405  if (Protocols[0]->getCanonicalDecl() != Protocols[0])
4406  return false;
4407 
4408  for (unsigned i = 1; i != Protocols.size(); ++i)
4409  if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
4410  Protocols[i]->getCanonicalDecl() != Protocols[i])
4411  return false;
4412  return true;
4413 }
4414 
4415 static void
4416 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
4417  // Sort protocols, keyed by name.
4418  llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames);
4419 
4420  // Canonicalize.
4421  for (ObjCProtocolDecl *&P : Protocols)
4422  P = P->getCanonicalDecl();
4423 
4424  // Remove duplicates.
4425  auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end());
4426  Protocols.erase(ProtocolsEnd, Protocols.end());
4427 }
4428 
4429 QualType ASTContext::getObjCObjectType(QualType BaseType,
4430  ObjCProtocolDecl * const *Protocols,
4431  unsigned NumProtocols) const {
4432  return getObjCObjectType(BaseType, {},
4433  llvm::makeArrayRef(Protocols, NumProtocols),
4434  /*isKindOf=*/false);
4435 }
4436 
4437 QualType ASTContext::getObjCObjectType(
4438  QualType baseType,
4439  ArrayRef<QualType> typeArgs,
4440  ArrayRef<ObjCProtocolDecl *> protocols,
4441  bool isKindOf) const {
4442  // If the base type is an interface and there aren't any protocols or
4443  // type arguments to add, then the interface type will do just fine.
4444  if (typeArgs.empty() && protocols.empty() && !isKindOf &&
4445  isa<ObjCInterfaceType>(baseType))
4446  return baseType;
4447 
4448  // Look in the folding set for an existing type.
4449  llvm::FoldingSetNodeID ID;
4450  ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf);
4451  void *InsertPos = nullptr;
4452  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
4453  return QualType(QT, 0);
4454 
4455  // Determine the type arguments to be used for canonicalization,
4456  // which may be explicitly specified here or written on the base
4457  // type.
4458  ArrayRef<QualType> effectiveTypeArgs = typeArgs;
4459  if (effectiveTypeArgs.empty()) {
4460  if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
4461  effectiveTypeArgs = baseObject->getTypeArgs();
4462  }
4463 
4464  // Build the canonical type, which has the canonical base type and a
4465  // sorted-and-uniqued list of protocols and the type arguments
4466  // canonicalized.
4467  QualType canonical;
4468  bool typeArgsAreCanonical = std::all_of(effectiveTypeArgs.begin(),
4469  effectiveTypeArgs.end(),
4470  [&](QualType type) {
4471  return type.isCanonical();
4472  });
4473  bool protocolsSorted = areSortedAndUniqued(protocols);
4474  if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
4475  // Determine the canonical type arguments.
4476  ArrayRef<QualType> canonTypeArgs;
4477  SmallVector<QualType, 4> canonTypeArgsVec;
4478  if (!typeArgsAreCanonical) {
4479  canonTypeArgsVec.reserve(effectiveTypeArgs.size());
4480  for (auto typeArg : effectiveTypeArgs)
4481  canonTypeArgsVec.push_back(getCanonicalType(typeArg));
4482  canonTypeArgs = canonTypeArgsVec;
4483  } else {
4484  canonTypeArgs = effectiveTypeArgs;
4485  }
4486 
4487  ArrayRef<ObjCProtocolDecl *> canonProtocols;
4488  SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
4489  if (!protocolsSorted) {
4490  canonProtocolsVec.append(protocols.begin(), protocols.end());
4491  SortAndUniqueProtocols(canonProtocolsVec);
4492  canonProtocols = canonProtocolsVec;
4493  } else {
4494  canonProtocols = protocols;
4495  }
4496 
4497  canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs,
4498  canonProtocols, isKindOf);
4499 
4500  // Regenerate InsertPos.
4501  ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
4502  }
4503 
4504  unsigned size = sizeof(ObjCObjectTypeImpl);
4505  size += typeArgs.size() * sizeof(QualType);
4506  size += protocols.size() * sizeof(ObjCProtocolDecl *);
4507  void *mem = Allocate(size, TypeAlignment);
4508  auto *T =
4509  new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
4510  isKindOf);
4511 
4512  Types.push_back(T);
4513  ObjCObjectTypes.InsertNode(T, InsertPos);
4514  return QualType(T, 0);
4515 }
4516 
4517 /// Apply Objective-C protocol qualifiers to the given type.
4518 /// If this is for the canonical type of a type parameter, we can apply
4519 /// protocol qualifiers on the ObjCObjectPointerType.
4520 QualType
4521 ASTContext::applyObjCProtocolQualifiers(QualType type,
4522  ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
4523  bool allowOnPointerType) const {
4524  hasError = false;
4525 
4526  if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) {
4527  return getObjCTypeParamType(objT->getDecl(), protocols);
4528  }
4529 
4530  // Apply protocol qualifiers to ObjCObjectPointerType.
4531  if (allowOnPointerType) {
4532  if (const auto *objPtr =
4533  dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) {
4534  const ObjCObjectType *objT = objPtr->getObjectType();
4535  // Merge protocol lists and construct ObjCObjectType.
4536  SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
4537  protocolsVec.append(objT->qual_begin(),
4538  objT->qual_end());
4539  protocolsVec.append(protocols.begin(), protocols.end());
4540  ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
4541  type = getObjCObjectType(
4542  objT->getBaseType(),
4543  objT->getTypeArgsAsWritten(),
4544  protocols,
4545  objT->isKindOfTypeAsWritten());
4546  return getObjCObjectPointerType(type);
4547  }
4548  }
4549 
4550  // Apply protocol qualifiers to ObjCObjectType.
4551  if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){
4552  // FIXME: Check for protocols to which the class type is already
4553  // known to conform.
4554 
4555  return getObjCObjectType(objT->getBaseType(),
4556  objT->getTypeArgsAsWritten(),
4557  protocols,
4558  objT->isKindOfTypeAsWritten());
4559  }
4560 
4561  // If the canonical type is ObjCObjectType, ...
4562  if (type->isObjCObjectType()) {
4563  // Silently overwrite any existing protocol qualifiers.
4564  // TODO: determine whether that's the right thing to do.
4565 
4566  // FIXME: Check for protocols to which the class type is already
4567  // known to conform.
4568  return getObjCObjectType(type, {}, protocols, false);
4569  }
4570 
4571  // id<protocol-list>
4572  if (type->isObjCIdType()) {
4573  const auto *objPtr = type->castAs<ObjCObjectPointerType>();
4574  type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols,
4575  objPtr->isKindOfType());
4576  return getObjCObjectPointerType(type);
4577  }
4578 
4579  // Class<protocol-list>
4580  if (type->isObjCClassType()) {
4581  const auto *objPtr = type->castAs<ObjCObjectPointerType>();
4582  type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols,
4583  objPtr->isKindOfType());
4584  return getObjCObjectPointerType(type);
4585  }
4586 
4587  hasError = true;
4588  return type;
4589 }
4590 
4591 QualType
4592 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
4593  ArrayRef<ObjCProtocolDecl *> protocols,
4594  QualType Canonical) const {
4595  // Look in the folding set for an existing type.
4596  llvm::FoldingSetNodeID ID;
4597  ObjCTypeParamType::Profile(ID, Decl, protocols);
4598  void *InsertPos = nullptr;
4599  if (ObjCTypeParamType *TypeParam =
4600  ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
4601  return QualType(TypeParam, 0);
4602 
4603  if (Canonical.isNull()) {
4604  // We canonicalize to the underlying type.
4605  Canonical = getCanonicalType(Decl->getUnderlyingType());
4606  if (!protocols.empty()) {
4607  // Apply the protocol qualifiers.
4608  bool hasError;
4609  Canonical = getCanonicalType(applyObjCProtocolQualifiers(
4610  Canonical, protocols, hasError, true /*allowOnPointerType*/));
4611  assert(!hasError && "Error when applying protocol qualifier to bound type");
4612  }
4613  }
4614 
4615  unsigned size = sizeof(ObjCTypeParamType);
4616  size += protocols.size() * sizeof(ObjCProtocolDecl *);
4617  void *mem = Allocate(size, TypeAlignment);
4618  auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);
4619 
4620  Types.push_back(newType);
4621  ObjCTypeParamTypes.InsertNode(newType, InsertPos);
4622  return QualType(newType, 0);
4623 }
4624 
4625 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
4626 /// protocol list adopt all protocols in QT's qualified-id protocol
4627 /// list.
4628 static bool ObjCObjectAdoptsQTypeProtocols(QualType QT,
4629  ObjCInterfaceDecl *IC) {
4630  if (!QT->isObjCQualifiedIdType())
4631  return false;
4632 
4633  if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) {
4634  // If both the right and left sides have qualifiers.
4635  for (auto *Proto : OPT->quals()) {
4636  if (!IC->ClassImplementsProtocol(Proto, false))
4637  return false;
4638  }
4639  return true;
4640  }
4641  return false;
4642 }
4643 
4644 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
4645 /// QT's qualified-id protocol list adopt all protocols in IDecl's list
4646 /// of protocols.
4647 static bool QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
4648  ObjCInterfaceDecl *IDecl) {
4649  if (!QT->isObjCQualifiedIdType())
4650  return false;
4651  const auto *OPT = QT->getAs<ObjCObjectPointerType>();
4652  if (!OPT)
4653  return false;
4654  if (!IDecl->hasDefinition())
4655  return false;
4656  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
4657  CollectInheritedProtocols(IDecl, InheritedProtocols);
4658  if (InheritedProtocols.empty())
4659  return false;
4660  // If every protocol in the id<plist> list conforms to some protocol in
4661  // IDecl's list, then bridge casting is OK.
4662  bool Conforms = false;
4663  for (auto *Proto : OPT->quals()) {
4664  Conforms = false;
4665  for (auto *PI : InheritedProtocols) {
4666  if (ProtocolCompatibleWithProtocol(Proto, PI)) {
4667  Conforms = true;
4668  break;
4669  }
4670  }
4671  if (!Conforms)
4672  break;
4673  }
4674  if (Conforms)
4675  return true;
4676 
4677  for (auto *PI : InheritedProtocols) {
4678  // If both the right and left sides have qualifiers.
4679  bool Adopts = false;
4680  for (auto *Proto : OPT->quals()) {
4681  // return 'true' if 'PI' is in the inheritance hierarchy of Proto
4682  if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto)))
4683  break;
4684  }
4685  if (!Adopts)
4686  return false;
4687  }
4688  return true;
4689 }
4690 
4691 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
4692 /// the given object type.
4693 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
4694  llvm::FoldingSetNodeID ID;
4695  ObjCObjectPointerType::Profile(ID, ObjectT);
4696 
4697  void *InsertPos = nullptr;
4698  if (ObjCObjectPointerType *QT =
4699  ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
4700  return QualType(QT, 0);
4701 
4702  // Find the canonical object type.
4703  QualType Canonical;
4704  if (!ObjectT.isCanonical()) {
4705  Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));
4706 
4707  // Regenerate InsertPos.
4708  ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
4709  }
4710 
4711  // No match.
4712  void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
4713  auto *QType =
4714  new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
4715 
4716  Types.push_back(QType);
4717  ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
4718  return QualType(QType, 0);
4719 }
4720 
4721 /// getObjCInterfaceType - Return the unique reference to the type for the
4722 /// specified ObjC interface decl. The list of protocols is optional.
4723 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
4724  ObjCInterfaceDecl *PrevDecl) const {
4725  if (Decl->TypeForDecl)
4726  return QualType(Decl->TypeForDecl, 0);
4727 
4728  if (PrevDecl) {
4729  assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
4730  Decl->TypeForDecl = PrevDecl->TypeForDecl;
4731  return QualType(PrevDecl->TypeForDecl, 0);
4732  }
4733 
4734  // Prefer the definition, if there is one.
4735  if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
4736  Decl = Def;
4737 
4738  void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
4739  auto *T = new (Mem) ObjCInterfaceType(Decl);
4740  Decl->TypeForDecl = T;
4741  Types.push_back(T);
4742  return QualType(T, 0);
4743 }
4744 
4745 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
4746 /// TypeOfExprType ASTs (since expressions are never shared). For example,
4747 /// multiple declarations that refer to "typeof(x)" all contain different
4748 /// DeclRefExprs. This doesn't affect the type checker, since it operates
4749 /// on canonical types (which are always unique).
4750 QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
4751  TypeOfExprType *toe;
4752  if (tofExpr->isTypeDependent()) {
4753  llvm::FoldingSetNodeID ID;
4754  DependentTypeOfExprType::Profile(ID, *this, tofExpr);
4755 
4756  void *InsertPos = nullptr;
4757  DependentTypeOfExprType *Canon
4758  = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
4759  if (Canon) {
4760  // We already have a "canonical" version of an identical, dependent
4761  // typeof(expr) type. Use that as our canonical type.
4762  toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr,
4763  QualType((TypeOfExprType*)Canon, 0));
4764  } else {
4765  // Build a new, canonical typeof(expr) type.
4766  Canon
4767  = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr);
4768  DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
4769  toe = Canon;
4770  }
4771  } else {
4772  QualType Canonical = getCanonicalType(tofExpr->getType());
4773  toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical);
4774  }
4775  Types.push_back(toe);
4776  return QualType(toe, 0);
4777 }
4778 
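// Source-level view of the non-uniquing described above, using the GNU
// __typeof__ extension: each written '__typeof__(x)' becomes its own
// TypeOfExprType node, but every one of them canonicalizes to plain 'int'.
int x;
__typeof__(x) y;  // one TypeOfExprType node
__typeof__(x) z;  // a second, distinct node with the same canonical type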
4779 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
4780 /// TypeOfType nodes. The only motivation to unique these nodes would be
4781 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
4782 /// an issue. This doesn't affect the type checker, since it operates
4783 /// on canonical types (which are always unique).
4784 QualType ASTContext::getTypeOfType(QualType tofType) const {
4785  QualType Canonical = getCanonicalType(tofType);
4786  auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical);
4787  Types.push_back(tot);
4788  return QualType(tot, 0);
4789 }
4790 
4791 /// Unlike many "get<Type>" functions, we don't unique DecltypeType
4792 /// nodes. This would never be helpful, since each such type has its own
4793 /// expression, and would not give a significant memory saving, since there
4794 /// is an Expr tree under each such type.
4795 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
4796  DecltypeType *dt;
4797 
4798  // C++11 [temp.type]p2:
4799  // If an expression e involves a template parameter, decltype(e) denotes a
4800  // unique dependent type. Two such decltype-specifiers refer to the same
4801  // type only if their expressions are equivalent (14.5.6.1).
4802  if (e->isInstantiationDependent()) {
4803  llvm::FoldingSetNodeID ID;
4804  DependentDecltypeType::Profile(ID, *this, e);
4805 
4806  void *InsertPos = nullptr;
4807  DependentDecltypeType *Canon
4808  = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
4809  if (!Canon) {
4810  // Build a new, canonical decltype(expr) type.
4811  Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e);
4812  DependentDecltypeTypes.InsertNode(Canon, InsertPos);
4813  }
4814  dt = new (*this, TypeAlignment)
4815  DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0));
4816  } else {
4817  dt = new (*this, TypeAlignment)
4818  DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType));
4819  }
4820  Types.push_back(dt);
4821  return QualType(dt, 0);
4822 }
4823 
4824 /// getUnaryTransformationType - We don't unique these, since the memory
4825 /// savings are minimal and these are rare.
4826 QualType ASTContext::getUnaryTransformType(QualType BaseType,
4827  QualType UnderlyingType,
4828  UnaryTransformType::UTTKind Kind)
4829  const {
4830  UnaryTransformType *ut = nullptr;
4831 
4832  if (BaseType->isDependentType()) {
4833  // Look in the folding set for an existing type.
4834  llvm::FoldingSetNodeID ID;
4835  DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind);
4836 
4837  void *InsertPos = nullptr;
4838  DependentUnaryTransformType *Canon
4839  = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);
4840 
4841  if (!Canon) {
4842  // Build a new, canonical __underlying_type(type) type.
4843  Canon = new (*this, TypeAlignment)
4844  DependentUnaryTransformType(*this, getCanonicalType(BaseType),
4845  Kind);
4846  DependentUnaryTransformTypes.InsertNode(Canon, InsertPos);
4847  }
4848  ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
4849  QualType(), Kind,
4850  QualType(Canon, 0));
4851  } else {
4852  QualType CanonType = getCanonicalType(UnderlyingType);
4853  ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
4854  UnderlyingType, Kind,
4855  CanonType);
4856  }
4857  Types.push_back(ut);
4858  return QualType(ut, 0);
4859 }
4860 
4861 /// getAutoType - Return the uniqued reference to the 'auto' type which has been
4862 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the
4863 /// canonical deduced-but-dependent 'auto' type.
4864 QualType ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
4865  bool IsDependent) const {
4866  if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && !IsDependent)
4867  return getAutoDeductType();
4868 
4869  // Look in the folding set for an existing type.
4870  void *InsertPos = nullptr;
4871  llvm::FoldingSetNodeID ID;
4872  AutoType::Profile(ID, DeducedType, Keyword, IsDependent);
4873  if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
4874  return QualType(AT, 0);
4875 
4876  auto *AT = new (*this, TypeAlignment)
4877  AutoType(DeducedType, Keyword, IsDependent);
4878  Types.push_back(AT);
4879  if (InsertPos)
4880  AutoTypes.InsertNode(AT, InsertPos);
4881  return QualType(AT, 0);
4882 }
4883 
4884 /// Return the uniqued reference to the deduced template specialization type
4885 /// which has been deduced to the given type, or to the canonical undeduced
4886 /// such type, or the canonical deduced-but-dependent such type.
4887 QualType ASTContext::getDeducedTemplateSpecializationType(
4888  TemplateName Template, QualType DeducedType, bool IsDependent) const {
4889  // Look in the folding set for an existing type.
4890  void *InsertPos = nullptr;
4891  llvm::FoldingSetNodeID ID;
4892  DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType,
4893  IsDependent);
4894  if (DeducedTemplateSpecializationType *DTST =
4895  DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
4896  return QualType(DTST, 0);
4897 
4898  auto *DTST = new (*this, TypeAlignment)
4899  DeducedTemplateSpecializationType(Template, DeducedType, IsDependent);
4900  Types.push_back(DTST);
4901  if (InsertPos)
4902  DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos);
4903  return QualType(DTST, 0);
4904 }
4905 
4906 /// getAtomicType - Return the uniqued reference to the atomic type for
4907 /// the given value type.
4909  // Unique pointers, to guarantee there is only one pointer of a particular
4910  // structure.
4911  llvm::FoldingSetNodeID ID;
4912  AtomicType::Profile(ID, T);
4913 
4914  void *InsertPos = nullptr;
4915  if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
4916  return QualType(AT, 0);
4917 
4918  // If the atomic value type isn't canonical, this won't be a canonical type
4919  // either, so fill in the canonical type field.
4920  QualType Canonical;
4921  if (!T.isCanonical()) {
4922  Canonical = getAtomicType(getCanonicalType(T));
4923 
4924  // Get the new insert position for the node we care about.
4925  AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
4926  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4927  }
4928  auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical);
4929  Types.push_back(New);
4930  AtomicTypes.InsertNode(New, InsertPos);
4931  return QualType(New, 0);
4932 }
4933 
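// Sketch: the C11 type '_Atomic(int)' is simply getAtomicType applied to
// IntTy; repeated requests return the uniqued node. 'Ctx' is assumed.
#include "clang/AST/ASTContext.h"

clang::QualType buildAtomicInt(clang::ASTContext &Ctx) {
  return Ctx.getAtomicType(Ctx.IntTy);
}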
4934 /// getAutoDeductType - Get type pattern for deducing against 'auto'.
4935 QualType ASTContext::getAutoDeductType() const {
4936  if (AutoDeductTy.isNull())
4937  AutoDeductTy = QualType(
4938  new (*this, TypeAlignment) AutoType(QualType(), AutoTypeKeyword::Auto,
4939  /*dependent*/false),
4940  0);
4941  return AutoDeductTy;
4942 }
4943 
4944 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
4945 QualType ASTContext::getAutoRRefDeductType() const {
4946  if (AutoRRefDeductTy.isNull())
4947  AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType());
4948  assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
4949  return AutoRRefDeductTy;
4950 }
4951 
4952 /// getTagDeclType - Return the unique reference to the type for the
4953 /// specified TagDecl (struct/union/class/enum) decl.
4954 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
4955  assert(Decl);
4956  // FIXME: What is the design on getTagDeclType when it requires casting
4957  // away const? mutable?
4958  return getTypeDeclType(const_cast<TagDecl*>(Decl));
4959 }
4960 
4961 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
4962 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
4963 /// needs to agree with the definition in <stddef.h>.
4964 CanQualType ASTContext::getSizeType() const {
4965  return getFromTargetType(Target->getSizeType());
4966 }
4967 
4968 /// Return the unique signed counterpart of the integer type
4969 /// corresponding to size_t.
4970 CanQualType ASTContext::getSignedSizeType() const {
4971  return getFromTargetType(Target->getSignedSizeType());
4972 }
4973 
4974 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
4975 CanQualType ASTContext::getIntMaxType() const {
4976  return getFromTargetType(Target->getIntMaxType());
4977 }
4978 
4979 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
4980 CanQualType ASTContext::getUIntMaxType() const {
4981  return getFromTargetType(Target->getUIntMaxType());
4982 }
4983 
4984 /// getSignedWCharType - Return the type of "signed wchar_t".
4985 /// Used when in C++, as a GCC extension.
4986 QualType ASTContext::getSignedWCharType() const {
4987  // FIXME: derive from "Target" ?
4988  return WCharTy;
4989 }
4990 
4991 /// getUnsignedWCharType - Return the type of "unsigned wchar_t".
4992 /// Used when in C++, as a GCC extension.
4993 QualType ASTContext::getUnsignedWCharType() const {
4994  // FIXME: derive from "Target" ?
4995  return UnsignedIntTy;
4996 }
4997 
4998 QualType ASTContext::getIntPtrType() const {
4999  return getFromTargetType(Target->getIntPtrType());
5000 }
5001 
5002 QualType ASTContext::getUIntPtrType() const {
5003  return getCorrespondingUnsignedType(getIntPtrType());
5004 }
5005 
5006 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
5007 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
5008 QualType ASTContext::getPointerDiffType() const {
5009  return getFromTargetType(Target->getPtrDiffType(0));
5010 }
5011 
5012 /// Return the unique unsigned counterpart of "ptrdiff_t"
5013 /// integer type. The standard (C11 7.21.6.1p7) refers to this type
5014 /// in the definition of %tu format specifier.
5015 QualType ASTContext::getUnsignedPointerDiffType() const {
5016  return getFromTargetType(Target->getUnsignedPtrDiffType(0));
5017 }
5018 
5019 /// Return the unique type for "pid_t" defined in
5020 /// <sys/types.h>. We need this to compute the correct type for vfork().
5021 QualType ASTContext::getProcessIDType() const {
5022  return getFromTargetType(Target->getProcessIDType());
5023 }
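// ---- Editor's usage sketch (illustrative only, not part of ASTContext.cpp).
// The getters above just map the target's typedef choices (size_t, ptrdiff_t,
// pid_t, ...) back to builtin types; Ctx is assumed to exist for some target.
static void exampleTargetTypedefs(ASTContext &Ctx) {
  QualType SizeT = Ctx.getSizeType();           // e.g. 'unsigned long' on x86_64 Linux
  QualType PtrDiffT = Ctx.getPointerDiffType(); // e.g. 'long' on the same target
  // Widths come from TargetInfo, so they agree with the target's <stddef.h>.
  uint64_t SizeTBits = Ctx.getTypeSize(SizeT);  // 64 on a 64-bit target
  (void)PtrDiffT; (void)SizeTBits;
}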
5024 
5025 //===----------------------------------------------------------------------===//
5026 // Type Operators
5027 //===----------------------------------------------------------------------===//
5028 
5029 CanQualType ASTContext::getCanonicalParamType(QualType T) const {
5030  // Push qualifiers into arrays, and then discard any remaining
5031  // qualifiers.
5032  T = getCanonicalType(T);
5033  T = getVariableArrayDecayedType(T);
5034  const Type *Ty = T.getTypePtr();
5035  QualType Result;
5036  if (isa<ArrayType>(Ty)) {
5037  Result = getArrayDecayedType(QualType(Ty,0));
5038  } else if (isa<FunctionType>(Ty)) {
5039  Result = getPointerType(QualType(Ty, 0));
5040  } else {
5041  Result = QualType(Ty, 0);
5042  }
5043 
5044  return CanQualType::CreateUnsafe(Result);
5045 }
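// ---- Editor's usage sketch (illustrative only, not part of ASTContext.cpp).
// For a parameter written 'const char buf[4]', the canonical parameter type
// decays the array and keeps the qualifier on the element: 'const char *'.
// Assumes an ASTContext &Ctx; helper name hypothetical.
static void exampleCanonicalParamType(ASTContext &Ctx) {
  QualType ConstChar = Ctx.CharTy.withConst();
  QualType Arr = Ctx.getConstantArrayType(ConstChar, llvm::APInt(32, 4),
                                          ArrayType::Normal, /*IndexTypeQuals=*/0);
  CanQualType Param = Ctx.getCanonicalParamType(Arr);
  assert(Param == Ctx.getPointerType(ConstChar));
  (void)Param;
}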
5046 
5047 QualType ASTContext::getUnqualifiedArrayType(QualType type,
5048  Qualifiers &quals) {
5049  SplitQualType splitType = type.getSplitUnqualifiedType();
5050 
5051  // FIXME: getSplitUnqualifiedType() actually walks all the way to
5052  // the unqualified desugared type and then drops it on the floor.
5053  // We then have to strip that sugar back off with
5054  // getUnqualifiedDesugaredType(), which is silly.
5055  const auto *AT =
5056  dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());
5057 
5058  // If we don't have an array, just use the results in splitType.
5059  if (!AT) {
5060  quals = splitType.Quals;
5061  return QualType(splitType.Ty, 0);
5062  }
5063 
5064  // Otherwise, recurse on the array's element type.
5065  QualType elementType = AT->getElementType();
5066  QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);
5067 
5068  // If that didn't change the element type, AT has no qualifiers, so we
5069  // can just use the results in splitType.
5070  if (elementType == unqualElementType) {
5071  assert(quals.empty()); // from the recursive call
5072  quals = splitType.Quals;
5073  return QualType(splitType.Ty, 0);
5074  }
5075 
5076  // Otherwise, add in the qualifiers from the outermost type, then
5077  // build the type back up.
5078  quals.addConsistentQualifiers(splitType.Quals);
5079 
5080  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
5081  return getConstantArrayType(unqualElementType, CAT->getSize(),
5082  CAT->getSizeModifier(), 0);
5083  }
5084 
5085  if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) {
5086  return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
5087  }
5088 
5089  if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) {
5090  return getVariableArrayType(unqualElementType,
5091  VAT->getSizeExpr(),
5092  VAT->getSizeModifier(),
5093  VAT->getIndexTypeCVRQualifiers(),
5094  VAT->getBracketsRange());
5095  }
5096 
5097  const auto *DSAT = cast<DependentSizedArrayType>(AT);
5098  return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
5099  DSAT->getSizeModifier(), 0,
5100  SourceRange());
5101 }
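// ---- Editor's usage sketch (illustrative only, not part of ASTContext.cpp).
// Stripping 'const int[4]' reports the const through 'quals' and returns the
// unqualified array 'int[4]'. Assumes an ASTContext &Ctx; name hypothetical.
static void exampleUnqualifiedArrayType(ASTContext &Ctx) {
  QualType ConstIntArr = Ctx.getConstantArrayType(
      Ctx.IntTy.withConst(), llvm::APInt(32, 4), ArrayType::Normal, 0);
  Qualifiers Quals;
  QualType Unqual = Ctx.getUnqualifiedArrayType(ConstIntArr, Quals);
  assert(Quals.hasConst());                       // const was peeled off
  assert(Ctx.getAsArrayType(Unqual) != nullptr);  // still an array: int[4]
  (void)Unqual;
}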
5102 
5103 /// Attempt to unwrap two types that may both be array types with the same bound
5104 /// (or both be array types of unknown bound) for the purpose of comparing the
5105 /// cv-decomposition of two types per C++ [conv.qual].
5106 bool ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) {
5107  bool UnwrappedAny = false;
5108  while (true) {
5109  auto *AT1 = getAsArrayType(T1);
5110  if (!AT1) return UnwrappedAny;
5111 
5112  auto *AT2 = getAsArrayType(T2);
5113  if (!AT2) return UnwrappedAny;
5114 
5115  // If we don't have two array types with the same constant bound nor two
5116  // incomplete array types, we've unwrapped everything we can.
5117  if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) {
5118  auto *CAT2 = dyn_cast<ConstantArrayType>(AT2);
5119  if (!CAT2 || CAT1->getSize() != CAT2->getSize())
5120  return UnwrappedAny;
5121  } else if (!isa<IncompleteArrayType>(AT1) ||
5122  !isa<IncompleteArrayType>(AT2)) {
5123  return UnwrappedAny;
5124  }
5125 
5126  T1 = AT1->getElementType();
5127  T2 = AT2->getElementType();
5128  UnwrappedAny = true;
5129  }
5130 }
5131 
5132 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
5133 ///
5134 /// If T1 and T2 are both pointer types of the same kind, or both array types
5135 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is
5136 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
5137 ///
5138 /// This function will typically be called in a loop that successively
5139 /// "unwraps" pointer and pointer-to-member types to compare them at each
5140 /// level.
5141 ///
5142 /// \return \c true if a pointer type was unwrapped, \c false if we reached a
5143 /// pair of types that can't be unwrapped further.
5144 bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2) {
5145  UnwrapSimilarArrayTypes(T1, T2);
5146 
5147  const auto *T1PtrType = T1->getAs<PointerType>();
5148  const auto *T2PtrType = T2->getAs<PointerType>();
5149  if (T1PtrType && T2PtrType) {
5150  T1 = T1PtrType->getPointeeType();
5151  T2 = T2PtrType->getPointeeType();
5152  return true;
5153  }
5154 
5155  const auto *T1MPType = T1->getAs<MemberPointerType>();
5156  const auto *T2MPType = T2->getAs<MemberPointerType>();
5157  if (T1MPType && T2MPType &&
5158  hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
5159  QualType(T2MPType->getClass(), 0))) {
5160  T1 = T1MPType->getPointeeType();
5161  T2 = T2MPType->getPointeeType();
5162  return true;
5163  }
5164 
5165  if (getLangOpts().ObjC) {
5166  const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
5167  const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
5168  if (T1OPType && T2OPType) {
5169  T1 = T1OPType->getPointeeType();
5170  T2 = T2OPType->getPointeeType();
5171  return true;
5172  }
5173  }
5174 
5175  // FIXME: Block pointers, too?
5176 
5177  return false;
5178 }
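// ---- Editor's usage sketch (illustrative only, not part of ASTContext.cpp).
// One call peels a matching pointer level off both types, the way the
// qualification-conversion check walks levels. Assumes an ASTContext &Ctx;
// the helper name is hypothetical.
static void exampleUnwrapSimilarTypes(ASTContext &Ctx) {
  QualType T1 = Ctx.getPointerType(Ctx.getPointerType(Ctx.IntTy));             // int **
  QualType T2 = Ctx.getPointerType(Ctx.getPointerType(Ctx.IntTy.withConst())); // const int **
  bool Unwrapped = Ctx.UnwrapSimilarTypes(T1, T2);
  assert(Unwrapped && T1->isPointerType() && T2->isPointerType());
  (void)Unwrapped;
}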
5179 
5180 bool ASTContext::hasSimilarType(QualType T1, QualType T2) {
5181  while (true) {
5182  Qualifiers Quals;
5183  T1 = getUnqualifiedArrayType(T1, Quals);
5184  T2 = getUnqualifiedArrayType(T2, Quals);
5185  if (hasSameType(T1, T2))
5186  return true;
5187  if (!UnwrapSimilarTypes(T1, T2))
5188  return false;
5189  }
5190 }
5191 
5192 bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {
5193  while (true) {
5194  Qualifiers Quals1, Quals2;
5195  T1 = getUnqualifiedArrayType(T1, Quals1);
5196  T2 = getUnqualifiedArrayType(T2, Quals2);
5197 
5198  Quals1.removeCVRQualifiers();
5199  Quals2.removeCVRQualifiers();
5200  if (Quals1 != Quals2)
5201  return false;
5202 
5203  if (hasSameType(T1, T2))
5204  return true;
5205 
5206  if (!UnwrapSimilarTypes(T1, T2))
5207  return false;
5208  }
5209 }
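// ---- Editor's usage sketch (illustrative only, not part of ASTContext.cpp).
// 'int **' and 'const int *const *' differ only in cv-qualification at each
// level, so they are similar ([conv.qual]) without being the same type.
// Assumes an ASTContext &Ctx; helper name hypothetical.
static void exampleSimilarTypes(ASTContext &Ctx) {
  QualType IntPtrPtr = Ctx.getPointerType(Ctx.getPointerType(Ctx.IntTy));
  QualType ConstIntPtrConstPtr = Ctx.getPointerType(
      Ctx.getPointerType(Ctx.IntTy.withConst()).withConst());
  assert(Ctx.hasSimilarType(IntPtrPtr, ConstIntPtrConstPtr));
  assert(!Ctx.hasSameType(IntPtrPtr, ConstIntPtrConstPtr));
  (void)IntPtrPtr; (void)ConstIntPtrConstPtr;
}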
5210 
5211 DeclarationNameInfo
5212 ASTContext::getNameForTemplate(TemplateName Name,
5213  SourceLocation NameLoc) const {
5214  switch (Name.getKind()) {
5215  case TemplateName::QualifiedTemplate:
5216  case TemplateName::Template:
5217  // DNInfo work in progress: CHECKME: what about DNLoc?
5218  return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
5219  NameLoc);
5220 
5221  case TemplateName::OverloadedTemplate: {
5222  OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
5223  // DNInfo work in progress: CHECKME: what about DNLoc?
5224  return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
5225  }
5226 
5227  case TemplateName::DependentTemplate: {
5228  DependentTemplateName *DTN = Name.getAsDependentTemplateName();
5229  DeclarationName DName;
5230  if (DTN->isIdentifier()) {
5231  DName = DeclarationNames.getIdentifier(DTN->getIdentifier());
5232  return DeclarationNameInfo(DName, NameLoc);
5233  } else {
5234  DName = DeclarationNames.getCXXOperatorName(DTN->getOperator());
5235  // DNInfo work in progress: FIXME: source locations?
5236  DeclarationNameLoc DNLoc;
5237  DNLoc.CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding();
5238  DNLoc.CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding();
5239  return DeclarationNameInfo(DName, NameLoc, DNLoc);
5240  }
5241  }
5242 
5243  case TemplateName::SubstTemplateTemplateParm: {
5244  SubstTemplateTemplateParmStorage *subst
5245  = Name.getAsSubstTemplateTemplateParm();
5246  return DeclarationNameInfo(subst->getParameter()->getDeclName(),
5247  NameLoc);
5248  }
5249 
5250  case TemplateName::SubstTemplateTemplateParmPack: {
5251  SubstTemplateTemplateParmPackStorage *subst
5252  = Name.getAsSubstTemplateTemplateParmPack();
5253  return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
5254  NameLoc);
5255  }
5256  }
5257 
5258  llvm_unreachable("bad template name kind!");
5259 }
5260 
5261 TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
5262  switch (Name.getKind()) {
5263  case TemplateName::QualifiedTemplate:
5264  case TemplateName::Template: {
5265  TemplateDecl *Template = Name.getAsTemplateDecl();
5266  if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template))
5267  Template = getCanonicalTemplateTemplateParmDecl(TTP);
5268 
5269  // The canonical template name is the canonical template declaration.
5270  return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
5271  }
5272 
5273  case TemplateName::OverloadedTemplate:
5274  llvm_unreachable("cannot canonicalize overloaded template");
5275 
5276  case TemplateName::DependentTemplate: {
5277  DependentTemplateName *DTN = Name.getAsDependentTemplateName();
5278  assert(DTN && "Non-dependent template names must refer to template decls.");
5279  return DTN->CanonicalTemplateName;
5280  }
5281 
5282  case TemplateName::SubstTemplateTemplateParm: {
5283  SubstTemplateTemplateParmStorage *subst
5284  = Name.getAsSubstTemplateTemplateParm();
5285  return getCanonicalTemplateName(subst->getReplacement());
5286  }
5287 
5288  case TemplateName::SubstTemplateTemplateParmPack: {
5289  SubstTemplateTemplateParmPackStorage *subst
5290  = Name.getAsSubstTemplateTemplateParmPack();
5291  TemplateTemplateParmDecl *canonParameter
5292  = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack());
5293  TemplateArgument canonArgPack
5294  = getCanonicalTemplateArgument(subst->getArgumentPack());
5295  return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack);
5296  }
5297  }
5298 
5299  llvm_unreachable("bad template name!");
5300 }
5301 
5302 bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) {
5303  X = getCanonicalTemplateName(X);
5304  Y = getCanonicalTemplateName(Y);
5305  return X.getAsVoidPointer() == Y.getAsVoidPointer();
5306 }
5307 
5308 TemplateArgument
5309 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
5310  switch (Arg.getKind()) {
5311  case TemplateArgument::Null:
5312  return Arg;
5313 
5314  case TemplateArgument::Expression:
5315  return Arg;
5316 
5317  case TemplateArgument::Declaration: {
5318  auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
5319  return TemplateArgument(D, Arg.getParamTypeForDecl());
5320  }
5321 
5322  case TemplateArgument::NullPtr:
5323  return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
5324  /*isNullPtr*/true);
5325 
5326  case TemplateArgument::Template:
5327  return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));
5328 
5329  case TemplateArgument::TemplateExpansion:
5330  return TemplateArgument(getCanonicalTemplateName(
5331  Arg.getAsTemplateOrTemplatePattern()),
5332  Arg.getNumTemplateExpansions());
5333 
5334  case TemplateArgument::Integral:
5335  return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));
5336 
5337  case TemplateArgument::Type:
5338  return TemplateArgument(getCanonicalType(Arg.getAsType()));
5339 
5340  case TemplateArgument::Pack: {
5341  if (Arg.pack_size() == 0)
5342  return Arg;
5343 
5344  auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()];
5345  unsigned Idx = 0;
5346  for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
5347  AEnd = Arg.pack_end();
5348  A != AEnd; (void)++A, ++Idx)
5349  CanonArgs[Idx] = getCanonicalTemplateArgument(*A);
5350 
5351  return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size()));
5352  }
5353  }
5354 
5355  // Silence GCC warning
5356  llvm_unreachable("Unhandled template argument kind");
5357 }
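// ---- Editor's usage sketch (illustrative only, not part of ASTContext.cpp).
// A type template argument is canonicalized by canonicalizing the wrapped
// type: sugar such as a DecayedType disappears. Assumes an ASTContext &Ctx;
// the helper name is hypothetical.
static void exampleCanonicalTemplateArgument(ASTContext &Ctx) {
  QualType Arr = Ctx.getConstantArrayType(Ctx.IntTy, llvm::APInt(32, 4),
                                          ArrayType::Normal, 0);
  TemplateArgument Arg(Ctx.getDecayedType(Arr));  // 'int[4]' decayed to 'int *'
  TemplateArgument Canon = Ctx.getCanonicalTemplateArgument(Arg);
  assert(Canon.getAsType() == Ctx.getPointerType(Ctx.IntTy));
  (void)Canon;
}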
5358 
5359 NestedNameSpecifier *
5360 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
5361  if (!NNS)
5362  return nullptr;
5363 
5364  switch (NNS->getKind()) {
5365  case NestedNameSpecifier::Identifier:
5366  // Canonicalize the prefix but keep the identifier the same.
5367  return NestedNameSpecifier::Create(*this,
5368  getCanonicalNestedNameSpecifier(NNS->getPrefix()),
5369  NNS->getAsIdentifier());
5370 
5371  case NestedNameSpecifier::Namespace:
5372  // A namespace is canonical; build a nested-name-specifier with
5373  // this namespace and no prefix.
5374  return NestedNameSpecifier::Create(*this, nullptr,
5375  NNS->getAsNamespace()->getOriginalNamespace());
5376 
5377  case NestedNameSpecifier::NamespaceAlias:
5378  // A namespace is canonical; build a nested-name-specifier with
5379  // this namespace and no prefix.
5380  return NestedNameSpecifier::Create(*this, nullptr,
5381  NNS->getAsNamespaceAlias()->getNamespace()
5382  ->getOriginalNamespace());
5383 
5384  case NestedNameSpecifier::TypeSpec:
5385  case NestedNameSpecifier::TypeSpecWithTemplate: {
5386  QualType T = getCanonicalType(QualType(NNS->getAsType(), 0));
5387 
5388  // If we have some kind of dependent-named type (e.g., "typename T::type"),
5389  // break it apart into its prefix and identifier, then reconstitute those
5390  // as the canonical nested-name-specifier. This is required to canonicalize
5391  // a dependent nested-name-specifier involving typedefs of dependent-name
5392  // types, e.g.,
5393  // typedef typename T::type T1;
5394  // typedef typename T1::type T2;
5395  if (const auto *DNT = T->getAs<DependentNameType>())
5396  return NestedNameSpecifier::Create(*this, DNT->getQualifier(),
5397  const_cast<IdentifierInfo *>(DNT->getIdentifier()));
5398 
5399  // Otherwise, just canonicalize the type, and force it to be a TypeSpec.
5400  // FIXME: Why are TypeSpec and TypeSpecWithTemplate distinct in the
5401  // first place?
5402  return NestedNameSpecifier::Create(*this, nullptr, false,
5403  const_cast<Type *>(T.getTypePtr()));
5404  }
5405 
5406  case NestedNameSpecifier::Global:
5407  case NestedNameSpecifier::Super:
5408  // The global specifier and __super specifier are canonical and unique.
5409  return NNS;
5410  }
5411 
5412  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
5413 }
5414 
5415 const ArrayType *ASTContext::getAsArrayType(QualType T) const {
5416  // Handle the non-qualified case efficiently.
5417  if (!T.hasLocalQualifiers()) {
5418  // Handle the common positive case fast.
5419  if (const auto *AT = dyn_cast<ArrayType>(T))
5420  return AT;
5421  }
5422 
5423  // Handle the common negative case fast.
5424  if (!isa<ArrayType>(T.getCanonicalType()))
5425  return nullptr;
5426 
5427  // Apply any qualifiers from the array type to the element type. This
5428  // implements C99 6.7.3p8: "If the specification of an array type includes
5429  // any type qualifiers, the element type is so qualified, not the array type."
5430 
5431  // If we get here, we either have type qualifiers on the type, or we have
5432  // sugar such as a typedef in the way. If we have type qualifiers on the type
5433  // we must propagate them down into the element type.
5434 
5435  SplitQualType split = T.getSplitDesugaredType();
5436  Qualifiers qs = split.Quals;
5437 
5438  // If we have a simple case, just return now.
5439  const auto *ATy = dyn_cast<ArrayType>(split.Ty);
5440  if (!ATy || qs.empty())
5441  return ATy;
5442 
5443  // Otherwise, we have an array and we have qualifiers on it. Push the
5444  // qualifiers into the array element type and return a new array type.
5445  QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);
5446 
5447  if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy))
5448  return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
5449  CAT->getSizeModifier(),
5450  CAT->getIndexTypeCVRQualifiers()));
5451  if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy))
5452  return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
5453  IAT->getSizeModifier(),
5454  IAT->getIndexTypeCVRQualifiers()));
5455 
5456  if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy))
5457  return cast<ArrayType>(
5458  getDependentSizedArrayType(NewEltTy,
5459  DSAT->getSizeExpr(),
5460  DSAT->getSizeModifier(),
5461  DSAT->getIndexTypeCVRQualifiers(),
5462  DSAT->getBracketsRange()));
5463 
5464  const auto *VAT = cast<VariableArrayType>(ATy);
5465  return cast<ArrayType>(getVariableArrayType(NewEltTy,
5466  VAT->getSizeExpr(),
5467  VAT->getSizeModifier(),
5468  VAT->getIndexTypeCVRQualifiers(),
5469  VAT->getBracketsRange()));
5470 }
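// ---- Editor's usage sketch (illustrative only, not part of ASTContext.cpp).
// Qualifiers written on the array are pushed onto the element (C99 6.7.3p8):
// a locally-const 'int[4]' comes back as an array of 'const int'.
// Assumes an ASTContext &Ctx; helper name hypothetical.
static void exampleGetAsArrayType(ASTContext &Ctx) {
  QualType Arr = Ctx.getConstantArrayType(Ctx.IntTy, llvm::APInt(32, 4),
                                          ArrayType::Normal, 0);
  const ArrayType *AT = Ctx.getAsArrayType(Arr.withConst());
  assert(AT && AT->getElementType().isConstQualified());
  (void)AT;
}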
5471 
5472 QualType ASTContext::getAdjustedParameterType(QualType T) const {
5473  if (T->isArrayType() || T->isFunctionType())
5474  return getDecayedType(T);
5475  return T;
5476 }
5477 
5478 QualType ASTContext::getSignatureParameterType(QualType T) const {
5479  T = getVariableArrayDecayedType(T.getNonReferenceType().getUnqualifiedType());
5480  T = getAdjustedParameterType(T);
5481  return T.getUnqualifiedType();
5482 }
5483 
5484 QualType ASTContext::getExceptionObjectType(QualType T) const {
5485  // C++ [except.throw]p3:
5486  // A throw-expression initializes a temporary object, called the exception
5487  // object, the type of which is determined by removing any top-level
5488  // cv-qualifiers from the static type of the operand of throw and adjusting
5489  // the type from "array of T" or "function returning T" to "pointer to T"
5490  // or "pointer to function returning T", [...]
5491  T = getVariableArrayDecayedType(T.getNonReferenceType().getUnqualifiedType());
5492  if (T->isArrayType() || T->isFunctionType())
5493  T = getDecayedType(T);
5494  return T.getUnqualifiedType();
5495 }
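// ---- Editor's usage sketch (illustrative only, not part of ASTContext.cpp).
// For 'throw "hi"' the operand has type 'const char[3]'; the exception object
// type decays it to 'const char *', dropping only top-level qualifiers.
// Assumes an ASTContext &Ctx; helper name hypothetical.
static void exampleExceptionObjectType(ASTContext &Ctx) {
  QualType StrLit = Ctx.getConstantArrayType(Ctx.CharTy.withConst(),
                                             llvm::APInt(32, 3),
                                             ArrayType::Normal, 0);
  QualType ExcTy = Ctx.getExceptionObjectType(StrLit);
  assert(Ctx.hasSameType(ExcTy, Ctx.getPointerType(Ctx.CharTy.withConst())));
  (void)ExcTy;
}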
5496 
5497 /// getArrayDecayedType - Return the properly qualified result of decaying the
5498 /// specified array type to a pointer. This operation is non-trivial when
5499 /// handling typedefs etc. The canonical type of "T" must be an array type,
5500 /// this returns a pointer to a properly qualified element of the array.
5501 ///
5502 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
5503 QualType ASTContext::getArrayDecayedType(QualType Ty) const {
5504  // Get the element type with 'getAsArrayType' so that we don't lose any
5505  // typedefs in the element type of the array. This also handles propagation
5506  // of type qualifiers from the array type into the element type if present
5507  // (C99 6.7.3p8).
5508  const ArrayType *PrettyArrayType = getAsArrayType(Ty);
5509  assert(PrettyArrayType && "Not an array type!");
5510 
5511  QualType PtrTy = getPointerType(PrettyArrayType->getElementType());
5512 
5513  // int x[restrict 4] -> int *restrict
5514  QualType Result = getQualifiedType(PtrTy,
5515  PrettyArrayType->getIndexTypeQualifiers());
5516 
5517  // int x[_Nullable] -> int * _Nullable
5518  if (auto Nullability = Ty->getNullability(*this)) {
5519  Result = const_cast<ASTContext *>(this)->getAttributedType(
5520  AttributedType::getNullabilityAttrKind(*Nullability), Result, Result);
5521  }
5522  return Result;
5523 }
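// ---- Editor's usage sketch (illustrative only, not part of ASTContext.cpp).
// Decaying 'int x[restrict 4]' keeps the index-type qualifier on the pointer,
// giving 'int *restrict', as the comment above describes. Assumes an
// ASTContext &Ctx; helper name hypothetical.
static void exampleArrayDecay(ASTContext &Ctx) {
  QualType RestrictArr = Ctx.getConstantArrayType(
      Ctx.IntTy, llvm::APInt(32, 4), ArrayType::Normal, Qualifiers::Restrict);
  QualType Decayed = Ctx.getArrayDecayedType(RestrictArr);
  assert(Decayed->isPointerType() && Decayed.isRestrictQualified());
  (void)Decayed;
}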
5524