1 //===--- ASTContext.cpp - Context to hold long-lived AST nodes ------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the ASTContext interface.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "clang/AST/ASTContext.h"
15 #include "CXXABI.h"
17 #include "clang/AST/Attr.h"
18 #include "clang/AST/CharUnits.h"
19 #include "clang/AST/Comment.h"
21 #include "clang/AST/DeclCXX.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/AST/DeclTemplate.h"
25 #include "clang/AST/Expr.h"
26 #include "clang/AST/ExprCXX.h"
28 #include "clang/AST/Mangle.h"
30 #include "clang/AST/RecordLayout.h"
32 #include "clang/AST/TypeLoc.h"
34 #include "clang/Basic/Builtins.h"
36 #include "clang/Basic/TargetInfo.h"
37 #include "llvm/ADT/StringExtras.h"
38 #include "llvm/ADT/Triple.h"
39 #include "llvm/Support/Capacity.h"
40 #include "llvm/Support/MathExtras.h"
41 #include "llvm/Support/raw_ostream.h"
42 #include <map>
43 
44 using namespace clang;
45 
46 unsigned ASTContext::NumImplicitDefaultConstructors;
47 unsigned ASTContext::NumImplicitDefaultConstructorsDeclared;
48 unsigned ASTContext::NumImplicitCopyConstructors;
49 unsigned ASTContext::NumImplicitCopyConstructorsDeclared;
50 unsigned ASTContext::NumImplicitMoveConstructors;
51 unsigned ASTContext::NumImplicitMoveConstructorsDeclared;
52 unsigned ASTContext::NumImplicitCopyAssignmentOperators;
53 unsigned ASTContext::NumImplicitCopyAssignmentOperatorsDeclared;
54 unsigned ASTContext::NumImplicitMoveAssignmentOperators;
55 unsigned ASTContext::NumImplicitMoveAssignmentOperatorsDeclared;
56 unsigned ASTContext::NumImplicitDestructors;
57 unsigned ASTContext::NumImplicitDestructorsDeclared;
58 
59 enum FloatingRank {
60  Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank
61 };
62 
63 RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
64  if (!CommentsLoaded && ExternalSource) {
65  ExternalSource->ReadComments();
66 
67 #ifndef NDEBUG
68  ArrayRef<RawComment *> RawComments = Comments.getComments();
69  assert(std::is_sorted(RawComments.begin(), RawComments.end(),
70  BeforeThanCompare<RawComment>(SourceMgr)));
71 #endif
72 
73  CommentsLoaded = true;
74  }
75 
76  assert(D);
77 
78  // User can not attach documentation to implicit declarations.
79  if (D->isImplicit())
80  return nullptr;
81 
82  // User can not attach documentation to implicit instantiations.
83  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
84  if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
85  return nullptr;
86  }
87 
88  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
89  if (VD->isStaticDataMember() &&
90  VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
91  return nullptr;
92  }
93 
94  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(D)) {
95  if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
96  return nullptr;
97  }
98 
99  if (const ClassTemplateSpecializationDecl *CTSD =
100  dyn_cast<ClassTemplateSpecializationDecl>(D)) {
101  TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
102  if (TSK == TSK_ImplicitInstantiation ||
103  TSK == TSK_Undeclared)
104  return nullptr;
105  }
106 
107  if (const EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
108  if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
109  return nullptr;
110  }
111  if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
112  // When tag declaration (but not definition!) is part of the
113  // decl-specifier-seq of some other declaration, it doesn't get a comment.
114  if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
115  return nullptr;
116  }
117  // TODO: handle comments for function parameters properly.
118  if (isa<ParmVarDecl>(D))
119  return nullptr;
120 
121  // TODO: we could look up template parameter documentation in the template
122  // documentation.
123  if (isa<TemplateTypeParmDecl>(D) ||
124  isa<NonTypeTemplateParmDecl>(D) ||
125  isa<TemplateTemplateParmDecl>(D))
126  return nullptr;
127 
128  ArrayRef<RawComment *> RawComments = Comments.getComments();
129 
130  // If there are no comments anywhere, we won't find anything.
131  if (RawComments.empty())
132  return nullptr;
133 
134  // Find declaration location.
135  // For Objective-C declarations we generally don't expect to have multiple
136  // declarators, thus use declaration starting location as the "declaration
137  // location".
138  // For all other declarations multiple declarators are used quite frequently,
139  // so we use the location of the identifier as the "declaration location".
140  SourceLocation DeclLoc;
141  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
142  isa<ObjCPropertyDecl>(D) ||
143  isa<RedeclarableTemplateDecl>(D) ||
144  isa<ClassTemplateSpecializationDecl>(D))
145  DeclLoc = D->getLocStart();
146  else {
147  DeclLoc = D->getLocation();
148  if (DeclLoc.isMacroID()) {
149  if (isa<TypedefDecl>(D)) {
150  // If the location of the typedef name is in a macro, it is because it is
151  // declared via a macro. Try using the declaration's starting location as
152  // the "declaration location".
153  DeclLoc = D->getLocStart();
154  } else if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
155  // If location of the tag decl is inside a macro, but the spelling of
156  // the tag name comes from a macro argument, it looks like a special
157  // macro like NS_ENUM is being used to define the tag decl. In that
158  // case, adjust the source location to the expansion loc so that we can
159  // attach the comment to the tag decl.
160  if (SourceMgr.isMacroArgExpansion(DeclLoc) &&
161  TD->isCompleteDefinition())
162  DeclLoc = SourceMgr.getExpansionLoc(DeclLoc);
163  }
164  }
165  }
166 
167  // If the declaration doesn't map directly to a location in a file, we
168  // can't find the comment.
169  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
170  return nullptr;
171 
172  // Find the comment that occurs just after this declaration.
173  ArrayRef<RawComment *>::iterator Comment;
174  {
175  // When searching for comments during parsing, the comment we are looking
176  // for is usually among the last two comments we parsed -- check them
177  // first.
178  RawComment CommentAtDeclLoc(
179  SourceMgr, SourceRange(DeclLoc), false,
180  LangOpts.CommentOpts.ParseAllComments);
181  BeforeThanCompare<RawComment> Compare(SourceMgr);
182  ArrayRef<RawComment *>::iterator MaybeBeforeDecl = RawComments.end() - 1;
183  bool Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
184  if (!Found && RawComments.size() >= 2) {
185  MaybeBeforeDecl--;
186  Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
187  }
188 
189  if (Found) {
190  Comment = MaybeBeforeDecl + 1;
191  assert(Comment == std::lower_bound(RawComments.begin(), RawComments.end(),
192  &CommentAtDeclLoc, Compare));
193  } else {
194  // Slow path.
195  Comment = std::lower_bound(RawComments.begin(), RawComments.end(),
196  &CommentAtDeclLoc, Compare);
197  }
198  }
199 
200  // Decompose the location for the declaration and find the beginning of the
201  // file buffer.
202  std::pair<FileID, unsigned> DeclLocDecomp = SourceMgr.getDecomposedLoc(DeclLoc);
203 
204  // First check whether we have a trailing comment.
205  if (Comment != RawComments.end() &&
206  (*Comment)->isDocumentation() && (*Comment)->isTrailingComment() &&
207  (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
208  isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {
209  std::pair<FileID, unsigned> CommentBeginDecomp
210  = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getBegin());
211  // Check that Doxygen trailing comment comes after the declaration, starts
212  // on the same line and in the same file as the declaration.
213  if (DeclLocDecomp.first == CommentBeginDecomp.first &&
214  SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second)
215  == SourceMgr.getLineNumber(CommentBeginDecomp.first,
216  CommentBeginDecomp.second)) {
217  return *Comment;
218  }
219  }
220 
221  // The comment just after the declaration was not a trailing comment.
222  // Let's look at the previous comment.
223  if (Comment == RawComments.begin())
224  return nullptr;
225  --Comment;
226 
227  // Check that we actually have a non-member Doxygen comment.
228  if (!(*Comment)->isDocumentation() || (*Comment)->isTrailingComment())
229  return nullptr;
230 
231  // Decompose the end of the comment.
232  std::pair<FileID, unsigned> CommentEndDecomp
233  = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getEnd());
234 
235  // If the comment and the declaration aren't in the same file, then they
236  // aren't related.
237  if (DeclLocDecomp.first != CommentEndDecomp.first)
238  return nullptr;
239 
240  // Get the corresponding buffer.
241  bool Invalid = false;
242  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
243  &Invalid).data();
244  if (Invalid)
245  return nullptr;
246 
247  // Extract text between the comment and declaration.
248  StringRef Text(Buffer + CommentEndDecomp.second,
249  DeclLocDecomp.second - CommentEndDecomp.second);
250 
251  // There should be no other declarations or preprocessor directives between
252  // comment and declaration.
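 // (';', '{' and '}' indicate another declaration, '#' a preprocessor
 // directive, and '@' an Objective-C directive.)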
253  if (Text.find_first_of(";{}#@") != StringRef::npos)
254  return nullptr;
255 
256  return *Comment;
257 }
258 
259 namespace {
260 /// If we have a 'templated' declaration for a template, adjust 'D' to
261 /// refer to the actual template.
262 /// If we have an implicit instantiation, adjust 'D' to refer to template.
263 const Decl *adjustDeclToTemplate(const Decl *D) {
264  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
265  // Is this function declaration part of a function template?
266  if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
267  return FTD;
268 
269  // Nothing to do if function is not an implicit instantiation.
270  if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
271  return D;
272 
273  // Function is an implicit instantiation of a function template?
274  if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
275  return FTD;
276 
277  // Function is instantiated from a member definition of a class template?
278  if (const FunctionDecl *MemberDecl =
279  FD->getInstantiatedFromMemberFunction())
280  return MemberDecl;
281 
282  return D;
283  }
284  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
285  // Static data member is instantiated from a member definition of a class
286  // template?
287  if (VD->isStaticDataMember())
288  if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
289  return MemberDecl;
290 
291  return D;
292  }
293  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(D)) {
294  // Is this class declaration part of a class template?
295  if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
296  return CTD;
297 
298  // Class is an implicit instantiation of a class template or partial
299  // specialization?
300  if (const ClassTemplateSpecializationDecl *CTSD =
301  dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
302  if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
303  return D;
304  llvm::PointerUnion<ClassTemplateDecl *,
305  ClassTemplatePartialSpecializationDecl *>
306  PU = CTSD->getSpecializedTemplateOrPartial();
307  return PU.is<ClassTemplateDecl*>() ?
308  static_cast<const Decl*>(PU.get<ClassTemplateDecl *>()) :
309  static_cast<const Decl*>(
310  PU.get<ClassTemplatePartialSpecializationDecl *>());
311  }
312 
313  // Class is instantiated from a member definition of a class template?
314  if (const MemberSpecializationInfo *Info =
315  CRD->getMemberSpecializationInfo())
316  return Info->getInstantiatedFrom();
317 
318  return D;
319  }
320  if (const EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
321  // Enum is instantiated from a member definition of a class template?
322  if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
323  return MemberDecl;
324 
325  return D;
326  }
327  // FIXME: Adjust alias templates?
328  return D;
329 }
330 } // anonymous namespace
331 
332 const RawComment *ASTContext::getRawCommentForAnyRedecl(
333  const Decl *D,
334  const Decl **OriginalDecl) const {
335  D = adjustDeclToTemplate(D);
336 
337  // Check whether we have cached a comment for this declaration already.
338  {
339  llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
340  RedeclComments.find(D);
341  if (Pos != RedeclComments.end()) {
342  const RawCommentAndCacheFlags &Raw = Pos->second;
343  if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
344  if (OriginalDecl)
345  *OriginalDecl = Raw.getOriginalDecl();
346  return Raw.getRaw();
347  }
348  }
349  }
350 
351  // Search for comments attached to declarations in the redeclaration chain.
352  const RawComment *RC = nullptr;
353  const Decl *OriginalDeclForRC = nullptr;
354  for (auto I : D->redecls()) {
355  llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
356  RedeclComments.find(I);
357  if (Pos != RedeclComments.end()) {
358  const RawCommentAndCacheFlags &Raw = Pos->second;
359  if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
360  RC = Raw.getRaw();
361  OriginalDeclForRC = Raw.getOriginalDecl();
362  break;
363  }
364  } else {
365  RC = getRawCommentForDeclNoCache(I);
366  OriginalDeclForRC = I;
367  RawCommentAndCacheFlags Raw;
368  if (RC) {
369  // Call order swapped to work around ICE in VS2015 RTM (Release Win32)
370  // https://connect.microsoft.com/VisualStudio/feedback/details/1741530
371  Raw.setKind(RawCommentAndCacheFlags::FromDecl);
372  Raw.setRaw(RC);
373  } else
374  Raw.setKind(RawCommentAndCacheFlags::NoCommentInDecl);
375  Raw.setOriginalDecl(I);
376  RedeclComments[I] = Raw;
377  if (RC)
378  break;
379  }
380  }
381 
382  // If we found a comment, it should be a documentation comment.
383  assert(!RC || RC->isDocumentation());
384 
385  if (OriginalDecl)
386  *OriginalDecl = OriginalDeclForRC;
387 
388  // Update cache for every declaration in the redeclaration chain.
389  RawCommentAndCacheFlags Raw;
390  Raw.setRaw(RC);
391  Raw.setKind(RawCommentAndCacheFlags::FromRedecl);
392  Raw.setOriginalDecl(OriginalDeclForRC);
393 
394  for (auto I : D->redecls()) {
395  RawCommentAndCacheFlags &R = RedeclComments[I];
396  if (R.getKind() == RawCommentAndCacheFlags::NoCommentInDecl)
397  R = Raw;
398  }
399 
400  return RC;
401 }
402 
403 static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
404  SmallVectorImpl<const NamedDecl *> &Redeclared) {
405  const DeclContext *DC = ObjCMethod->getDeclContext();
406  if (const ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(DC)) {
407  const ObjCInterfaceDecl *ID = IMD->getClassInterface();
408  if (!ID)
409  return;
410  // Add redeclared method here.
411  for (const auto *Ext : ID->known_extensions()) {
412  if (ObjCMethodDecl *RedeclaredMethod =
413  Ext->getMethod(ObjCMethod->getSelector(),
414  ObjCMethod->isInstanceMethod()))
415  Redeclared.push_back(RedeclaredMethod);
416  }
417  }
418 }
419 
420 comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
421  const Decl *D) const {
422  comments::DeclInfo *ThisDeclInfo = new (*this) comments::DeclInfo;
423  ThisDeclInfo->CommentDecl = D;
424  ThisDeclInfo->IsFilled = false;
425  ThisDeclInfo->fill();
426  ThisDeclInfo->CommentDecl = FC->getDecl();
427  if (!ThisDeclInfo->TemplateParameters)
428  ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
429  comments::FullComment *CFC =
430  new (*this) comments::FullComment(FC->getBlocks(),
431  ThisDeclInfo);
432  return CFC;
433 }
434 
435 comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
436  const RawComment *RC = getRawCommentForDeclNoCache(D);
437  return RC ? RC->parse(*this, nullptr, D) : nullptr;
438 }
439 
440 comments::FullComment *ASTContext::getCommentForDecl(
441  const Decl *D,
442  const Preprocessor *PP) const {
443  if (D->isInvalidDecl())
444  return nullptr;
445  D = adjustDeclToTemplate(D);
446 
447  const Decl *Canonical = D->getCanonicalDecl();
448  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
449  ParsedComments.find(Canonical);
450 
451  if (Pos != ParsedComments.end()) {
452  if (Canonical != D) {
453  comments::FullComment *FC = Pos->second;
454  comments::FullComment *CFC = cloneFullComment(FC, D);
455  return CFC;
456  }
457  return Pos->second;
458  }
459 
460  const Decl *OriginalDecl;
461 
462  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
463  if (!RC) {
464  if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
465  SmallVector<const NamedDecl*, 8> Overridden;
466  const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D);
467  if (OMD && OMD->isPropertyAccessor())
468  if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
469  if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
470  return cloneFullComment(FC, D);
471  if (OMD)
472  addRedeclaredMethods(OMD, Overridden);
473  getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
474  for (unsigned i = 0, e = Overridden.size(); i < e; i++)
475  if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
476  return cloneFullComment(FC, D);
477  }
478  else if (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
479  // Attach any tag type's documentation to its typedef if the latter
480  // does not have one of its own.
481  QualType QT = TD->getUnderlyingType();
482  if (const TagType *TT = QT->getAs<TagType>())
483  if (const Decl *TD = TT->getDecl())
484  if (comments::FullComment *FC = getCommentForDecl(TD, PP))
485  return cloneFullComment(FC, D);
486  }
487  else if (const ObjCInterfaceDecl *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
488  while (IC->getSuperClass()) {
489  IC = IC->getSuperClass();
490  if (comments::FullComment *FC = getCommentForDecl(IC, PP))
491  return cloneFullComment(FC, D);
492  }
493  }
494  else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(D)) {
495  if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
496  if (comments::FullComment *FC = getCommentForDecl(IC, PP))
497  return cloneFullComment(FC, D);
498  }
499  else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
500  if (!(RD = RD->getDefinition()))
501  return nullptr;
502  // Check non-virtual bases.
503  for (const auto &I : RD->bases()) {
504  if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
505  continue;
506  QualType Ty = I.getType();
507  if (Ty.isNull())
508  continue;
509  if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
510  if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
511  continue;
512 
513  if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
514  return cloneFullComment(FC, D);
515  }
516  }
517  // Check virtual bases.
518  for (const auto &I : RD->vbases()) {
519  if (I.getAccessSpecifier() != AS_public)
520  continue;
521  QualType Ty = I.getType();
522  if (Ty.isNull())
523  continue;
524  if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
525  if (!(VirtualBase= VirtualBase->getDefinition()))
526  continue;
527  if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
528  return cloneFullComment(FC, D);
529  }
530  }
531  }
532  return nullptr;
533  }
534 
535  // If the RawComment was attached to other redeclaration of this Decl, we
536  // should parse the comment in context of that other Decl. This is important
537  // because comments can contain references to parameter names which can be
538  // different across redeclarations.
539  if (D != OriginalDecl)
540  return getCommentForDecl(OriginalDecl, PP);
541 
542  comments::FullComment *FC = RC->parse(*this, PP, D);
543  ParsedComments[Canonical] = FC;
544  return FC;
545 }
546 
547 void
548 ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
549  TemplateTemplateParmDecl *Parm) {
550  ID.AddInteger(Parm->getDepth());
551  ID.AddInteger(Parm->getPosition());
552  ID.AddBoolean(Parm->isParameterPack());
553 
554  TemplateParameterList *Params = Parm->getTemplateParameters();
555  ID.AddInteger(Params->size());
556  for (TemplateParameterList::const_iterator P = Params->begin(),
557  PEnd = Params->end();
558  P != PEnd; ++P) {
559  if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
560  ID.AddInteger(0);
561  ID.AddBoolean(TTP->isParameterPack());
562  continue;
563  }
564 
565  if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
566  ID.AddInteger(1);
567  ID.AddBoolean(NTTP->isParameterPack());
568  ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
569  if (NTTP->isExpandedParameterPack()) {
570  ID.AddBoolean(true);
571  ID.AddInteger(NTTP->getNumExpansionTypes());
572  for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
573  QualType T = NTTP->getExpansionType(I);
574  ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
575  }
576  } else
577  ID.AddBoolean(false);
578  continue;
579  }
580 
581  TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P);
582  ID.AddInteger(2);
583  Profile(ID, TTP);
584  }
585 }
586 
587 TemplateTemplateParmDecl *
588 ASTContext::getCanonicalTemplateTemplateParmDecl(
589  TemplateTemplateParmDecl *TTP) const {
590  // Check if we already have a canonical template template parameter.
591  llvm::FoldingSetNodeID ID;
592  CanonicalTemplateTemplateParm::Profile(ID, TTP);
593  void *InsertPos = nullptr;
594  CanonicalTemplateTemplateParm *Canonical
595  = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
596  if (Canonical)
597  return Canonical->getParam();
598 
599  // Build a canonical template parameter list.
600  TemplateParameterList *Params = TTP->getTemplateParameters();
601  SmallVector<NamedDecl *, 4> CanonParams;
602  CanonParams.reserve(Params->size());
603  for (TemplateParameterList::const_iterator P = Params->begin(),
604  PEnd = Params->end();
605  P != PEnd; ++P) {
606  if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
607  CanonParams.push_back(
608  TemplateTypeParmDecl::Create(*this, getTranslationUnitDecl(),
609  SourceLocation(),
610  SourceLocation(),
611  TTP->getDepth(),
612  TTP->getIndex(), nullptr, false,
613  TTP->isParameterPack()));
614  else if (NonTypeTemplateParmDecl *NTTP
615  = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
616  QualType T = getCanonicalType(NTTP->getType());
617  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
618  NonTypeTemplateParmDecl *Param;
619  if (NTTP->isExpandedParameterPack()) {
620  SmallVector<QualType, 2> ExpandedTypes;
621  SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
622  for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
623  ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
624  ExpandedTInfos.push_back(
625  getTrivialTypeSourceInfo(ExpandedTypes.back()));
626  }
627 
628  Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
629  SourceLocation(),
630  SourceLocation(),
631  NTTP->getDepth(),
632  NTTP->getPosition(), nullptr,
633  T,
634  TInfo,
635  ExpandedTypes,
636  ExpandedTInfos);
637  } else {
638  Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
639  SourceLocation(),
640  SourceLocation(),
641  NTTP->getDepth(),
642  NTTP->getPosition(), nullptr,
643  T,
644  NTTP->isParameterPack(),
645  TInfo);
646  }
647  CanonParams.push_back(Param);
648 
649  } else
650  CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
651  cast<TemplateTemplateParmDecl>(*P)));
652  }
653 
654  assert(!TTP->getRequiresClause() &&
655  "Unexpected requires-clause on template template-parameter");
656  Expr *const CanonRequiresClause = nullptr;
657 
658  TemplateTemplateParmDecl *CanonTTP
659  = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
660  SourceLocation(), TTP->getDepth(),
661  TTP->getPosition(),
662  TTP->isParameterPack(),
663  nullptr,
664  TemplateParameterList::Create(*this, SourceLocation(),
665  SourceLocation(),
666  CanonParams,
667  SourceLocation(),
668  CanonRequiresClause));
669 
670  // Get the new insert position for the node we care about.
671  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
672  assert(!Canonical && "Shouldn't be in the map!");
673  (void)Canonical;
674 
675  // Create the canonical template template parameter entry.
676  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
677  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
678  return CanonTTP;
679 }
680 
681 CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
682  if (!LangOpts.CPlusPlus) return nullptr;
683 
684  switch (T.getCXXABI().getKind()) {
685  case TargetCXXABI::GenericARM: // Same as Itanium at this level
686  case TargetCXXABI::iOS:
687  case TargetCXXABI::iOS64:
688  case TargetCXXABI::WatchOS:
689  case TargetCXXABI::GenericAArch64:
690  case TargetCXXABI::GenericMIPS:
691  case TargetCXXABI::GenericItanium:
692  case TargetCXXABI::WebAssembly:
693  return CreateItaniumCXXABI(*this);
694  case TargetCXXABI::Microsoft:
695  return CreateMicrosoftCXXABI(*this);
696  }
697  llvm_unreachable("Invalid CXXABI type!");
698 }
699 
700 static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
701  const LangOptions &LOpts) {
702  if (LOpts.FakeAddressSpaceMap) {
703  // The fake address space map must have a distinct entry for each
704  // language-specific address space.
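 // The array is indexed by the language address space (the LangAS value named
 // in each trailing comment); each entry is the target address space it maps to.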
705  static const unsigned FakeAddrSpaceMap[] = {
706  0, // Default
707  1, // opencl_global
708  3, // opencl_local
709  2, // opencl_constant
710  0, // opencl_private
711  4, // opencl_generic
712  5, // cuda_device
713  6, // cuda_constant
714  7 // cuda_shared
715  };
716  return &FakeAddrSpaceMap;
717  } else {
718  return &T.getAddressSpaceMap();
719  }
720 }
721 
722 static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
723  const LangOptions &LangOpts) {
724  switch (LangOpts.getAddressSpaceMapMangling()) {
725  case LangOptions::ASMM_Target:
726  return TI.useAddressSpaceMapMangling();
727  case LangOptions::ASMM_On:
728  return true;
729  case LangOptions::ASMM_Off:
730  return false;
731  }
732  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
733 }
734 
735 ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
736  IdentifierTable &idents, SelectorTable &sels,
737  Builtin::Context &builtins)
738  : FunctionProtoTypes(this_()), TemplateSpecializationTypes(this_()),
739  DependentTemplateSpecializationTypes(this_()),
740  SubstTemplateTemplateParmPacks(this_()),
741  GlobalNestedNameSpecifier(nullptr), Int128Decl(nullptr),
742  UInt128Decl(nullptr), BuiltinVaListDecl(nullptr),
743  BuiltinMSVaListDecl(nullptr), ObjCIdDecl(nullptr), ObjCSelDecl(nullptr),
744  ObjCClassDecl(nullptr), ObjCProtocolClassDecl(nullptr), BOOLDecl(nullptr),
745  CFConstantStringTagDecl(nullptr), CFConstantStringTypeDecl(nullptr),
746  ObjCInstanceTypeDecl(nullptr), FILEDecl(nullptr), jmp_bufDecl(nullptr),
747  sigjmp_bufDecl(nullptr), ucontext_tDecl(nullptr),
748  BlockDescriptorType(nullptr), BlockDescriptorExtendedType(nullptr),
749  cudaConfigureCallDecl(nullptr), FirstLocalImport(), LastLocalImport(),
750  ExternCContext(nullptr), MakeIntegerSeqDecl(nullptr),
751  TypePackElementDecl(nullptr), SourceMgr(SM), LangOpts(LOpts),
752  SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFiles, SM)),
753  XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
754  LangOpts.XRayNeverInstrumentFiles, SM)),
755  AddrSpaceMap(nullptr), Target(nullptr), AuxTarget(nullptr),
756  PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
757  BuiltinInfo(builtins), DeclarationNames(*this), ExternalSource(nullptr),
758  Listener(nullptr), Comments(SM), CommentsLoaded(false),
759  CommentCommandTraits(BumpAlloc, LOpts.CommentOpts), LastSDM(nullptr, 0) {
760  TUDecl = TranslationUnitDecl::Create(*this);
761 }
762 
763 ASTContext::~ASTContext() {
764  ReleaseParentMapEntries();
765 
766  // Release the DenseMaps associated with DeclContext objects.
767  // FIXME: Is this the ideal solution?
768  ReleaseDeclContextMaps();
769 
770  // Call all of the deallocation functions on all of their targets.
771  for (auto &Pair : Deallocations)
772  (Pair.first)(Pair.second);
773 
774  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
775  // because they can contain DenseMaps.
776  for (llvm::DenseMap<const ObjCContainerDecl*,
777  const ASTRecordLayout*>::iterator
778  I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
779  // Increment in loop to prevent using deallocated memory.
780  if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
781  R->Destroy(*this);
782 
783  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
784  I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
785  // Increment in loop to prevent using deallocated memory.
786  if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
787  R->Destroy(*this);
788  }
789 
790  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
791  AEnd = DeclAttrs.end();
792  A != AEnd; ++A)
793  A->second->~AttrVec();
794 
795  for (std::pair<const MaterializeTemporaryExpr *, APValue *> &MTVPair :
796  MaterializedTemporaryValues)
797  MTVPair.second->~APValue();
798 
799  for (const auto &Value : ModuleInitializers)
800  Value.second->~PerModuleInitializers();
801 }
802 
803 void ASTContext::ReleaseParentMapEntries() {
804  if (!PointerParents) return;
805  for (const auto &Entry : *PointerParents) {
806  if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
807  delete Entry.second.get<ast_type_traits::DynTypedNode *>();
808  } else if (Entry.second.is<ParentVector *>()) {
809  delete Entry.second.get<ParentVector *>();
810  }
811  }
812  for (const auto &Entry : *OtherParents) {
813  if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
814  delete Entry.second.get<ast_type_traits::DynTypedNode *>();
815  } else if (Entry.second.is<ParentVector *>()) {
816  delete Entry.second.get<ParentVector *>();
817  }
818  }
819 }
820 
821 void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
822  Deallocations.push_back({Callback, Data});
823 }
824 
825 void
826 ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
827  ExternalSource = std::move(Source);
828 }
829 
830 void ASTContext::PrintStats() const {
831  llvm::errs() << "\n*** AST Context Stats:\n";
832  llvm::errs() << " " << Types.size() << " types total.\n";
833 
834  unsigned counts[] = {
835 #define TYPE(Name, Parent) 0,
836 #define ABSTRACT_TYPE(Name, Parent)
837 #include "clang/AST/TypeNodes.def"
838  0 // Extra
839  };
840 
841  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
842  Type *T = Types[i];
843  counts[(unsigned)T->getTypeClass()]++;
844  }
845 
846  unsigned Idx = 0;
847  unsigned TotalBytes = 0;
848 #define TYPE(Name, Parent) \
849  if (counts[Idx]) \
850  llvm::errs() << " " << counts[Idx] << " " << #Name \
851  << " types\n"; \
852  TotalBytes += counts[Idx] * sizeof(Name##Type); \
853  ++Idx;
854 #define ABSTRACT_TYPE(Name, Parent)
855 #include "clang/AST/TypeNodes.def"
856 
857  llvm::errs() << "Total bytes = " << TotalBytes << "\n";
858 
859  // Implicit special member functions.
860  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
861  << NumImplicitDefaultConstructors
862  << " implicit default constructors created\n";
863  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
864  << NumImplicitCopyConstructors
865  << " implicit copy constructors created\n";
866  if (getLangOpts().CPlusPlus)
867  llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
868  << NumImplicitMoveConstructors
869  << " implicit move constructors created\n";
870  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
871  << NumImplicitCopyAssignmentOperators
872  << " implicit copy assignment operators created\n";
873  if (getLangOpts().CPlusPlus)
874  llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
875  << NumImplicitMoveAssignmentOperators
876  << " implicit move assignment operators created\n";
877  llvm::errs() << NumImplicitDestructorsDeclared << "/"
878  << NumImplicitDestructors
879  << " implicit destructors created\n";
880 
881  if (ExternalSource) {
882  llvm::errs() << "\n";
883  ExternalSource->PrintStats();
884  }
885 
886  BumpAlloc.PrintStats();
887 }
888 
889 void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
890  bool NotifyListeners) {
891  if (NotifyListeners)
892  if (auto *Listener = getASTMutationListener())
893  Listener->RedefinedHiddenDefinition(ND, M);
894 
895  if (getLangOpts().ModulesLocalVisibility)
896  MergedDefModules[ND].push_back(M);
897  else
899 }
900 
901 void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
902  auto It = MergedDefModules.find(ND);
903  if (It == MergedDefModules.end())
904  return;
905 
906  auto &Merged = It->second;
908  for (Module *&M : Merged)
909  if (!Found.insert(M).second)
910  M = nullptr;
911  Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end());
912 }
913 
914 void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
915  if (LazyInitializers.empty())
916  return;
917 
918  auto *Source = Ctx.getExternalSource();
919  assert(Source && "lazy initializers but no external source");
920 
921  auto LazyInits = std::move(LazyInitializers);
922  LazyInitializers.clear();
923 
924  for (auto ID : LazyInits)
925  Initializers.push_back(Source->GetExternalDecl(ID));
926 
927  assert(LazyInitializers.empty() &&
928  "GetExternalDecl for lazy module initializer added more inits");
929 }
930 
931 void ASTContext::addModuleInitializer(Module *M, Decl *D) {
932  // One special case: if we add a module initializer that imports another
933  // module, and that module's only initializer is an ImportDecl, simplify.
934  if (auto *ID = dyn_cast<ImportDecl>(D)) {
935  auto It = ModuleInitializers.find(ID->getImportedModule());
936 
937  // Maybe the ImportDecl does nothing at all. (Common case.)
938  if (It == ModuleInitializers.end())
939  return;
940 
941  // Maybe the ImportDecl only imports another ImportDecl.
942  auto &Imported = *It->second;
943  if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
944  Imported.resolve(*this);
945  auto *OnlyDecl = Imported.Initializers.front();
946  if (isa<ImportDecl>(OnlyDecl))
947  D = OnlyDecl;
948  }
949  }
950 
951  auto *&Inits = ModuleInitializers[M];
952  if (!Inits)
953  Inits = new (*this) PerModuleInitializers;
954  Inits->Initializers.push_back(D);
955 }
956 
957 void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
958  auto *&Inits = ModuleInitializers[M];
959  if (!Inits)
960  Inits = new (*this) PerModuleInitializers;
961  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
962  IDs.begin(), IDs.end());
963 }
964 
965 ArrayRef<Decl*> ASTContext::getModuleInitializers(Module *M) {
966  auto It = ModuleInitializers.find(M);
967  if (It == ModuleInitializers.end())
968  return None;
969 
970  auto *Inits = It->second;
971  Inits->resolve(*this);
972  return Inits->Initializers;
973 }
974 
975 ExternCContextDecl *ASTContext::getExternCContextDecl() const {
976  if (!ExternCContext)
977  ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());
978 
979  return ExternCContext;
980 }
981 
982 BuiltinTemplateDecl *
983 ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
984  const IdentifierInfo *II) const {
985  auto *BuiltinTemplate = BuiltinTemplateDecl::Create(*this, TUDecl, II, BTK);
986  BuiltinTemplate->setImplicit();
987  TUDecl->addDecl(BuiltinTemplate);
988 
989  return BuiltinTemplate;
990 }
991 
992 BuiltinTemplateDecl *
993 ASTContext::getMakeIntegerSeqDecl() const {
994  if (!MakeIntegerSeqDecl)
995  MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
996  getMakeIntegerSeqName());
997  return MakeIntegerSeqDecl;
998 }
999 
1000 BuiltinTemplateDecl *
1001 ASTContext::getTypePackElementDecl() const {
1002  if (!TypePackElementDecl)
1003  TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
1004  getTypePackElementName());
1005  return TypePackElementDecl;
1006 }
1007 
1008 RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
1009  RecordDecl::TagKind TK) const {
1010  SourceLocation Loc;
1011  RecordDecl *NewDecl;
1012  if (getLangOpts().CPlusPlus)
1013  NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
1014  Loc, &Idents.get(Name));
1015  else
1016  NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
1017  &Idents.get(Name));
1018  NewDecl->setImplicit();
1019  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
1020  const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
1021  return NewDecl;
1022 }
1023 
1024 TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
1025  StringRef Name) const {
1026  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
1027  TypedefDecl *NewDecl = TypedefDecl::Create(
1028  const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
1029  SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
1030  NewDecl->setImplicit();
1031  return NewDecl;
1032 }
1033 
1034 TypedefDecl *ASTContext::getInt128Decl() const {
1035  if (!Int128Decl)
1036  Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
1037  return Int128Decl;
1038 }
1039 
1040 TypedefDecl *ASTContext::getUInt128Decl() const {
1041  if (!UInt128Decl)
1042  UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
1043  return UInt128Decl;
1044 }
1045 
1046 void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
1047  BuiltinType *Ty = new (*this, TypeAlignment) BuiltinType(K);
1048  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
1049  Types.push_back(Ty);
1050 }
1051 
1052 void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
1053  const TargetInfo *AuxTarget) {
1054  assert((!this->Target || this->Target == &Target) &&
1055  "Incorrect target reinitialization");
1056  assert(VoidTy.isNull() && "Context reinitialized?");
1057 
1058  this->Target = &Target;
1059  this->AuxTarget = AuxTarget;
1060 
1061  ABI.reset(createCXXABI(Target));
1062  AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
1063  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);
1064 
1065  // C99 6.2.5p19.
1066  InitBuiltinType(VoidTy, BuiltinType::Void);
1067 
1068  // C99 6.2.5p2.
1069  InitBuiltinType(BoolTy, BuiltinType::Bool);
1070  // C99 6.2.5p3.
1071  if (LangOpts.CharIsSigned)
1072  InitBuiltinType(CharTy, BuiltinType::Char_S);
1073  else
1074  InitBuiltinType(CharTy, BuiltinType::Char_U);
1075  // C99 6.2.5p4.
1076  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
1077  InitBuiltinType(ShortTy, BuiltinType::Short);
1078  InitBuiltinType(IntTy, BuiltinType::Int);
1079  InitBuiltinType(LongTy, BuiltinType::Long);
1080  InitBuiltinType(LongLongTy, BuiltinType::LongLong);
1081 
1082  // C99 6.2.5p6.
1083  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
1084  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
1085  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
1086  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
1087  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);
1088 
1089  // C99 6.2.5p10.
1090  InitBuiltinType(FloatTy, BuiltinType::Float);
1091  InitBuiltinType(DoubleTy, BuiltinType::Double);
1092  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);
1093 
1094  // GNU extension, __float128 for IEEE quadruple precision
1095  InitBuiltinType(Float128Ty, BuiltinType::Float128);
1096 
1097  // C11 extension ISO/IEC TS 18661-3
1098  InitBuiltinType(Float16Ty, BuiltinType::Float16);
1099 
1100  // GNU extension, 128-bit integers.
1101  InitBuiltinType(Int128Ty, BuiltinType::Int128);
1102  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
1103 
1104  // C++ 3.9.1p5
1105  if (TargetInfo::isTypeSigned(Target.getWCharType()))
1106  InitBuiltinType(WCharTy, BuiltinType::WChar_S);
1107  else // -fshort-wchar makes wchar_t be unsigned.
1108  InitBuiltinType(WCharTy, BuiltinType::WChar_U);
1109  if (LangOpts.CPlusPlus && LangOpts.WChar)
1110  WideCharTy = WCharTy;
1111  else {
1112  // C99 (or C++ using -fno-wchar).
1113  WideCharTy = getFromTargetType(Target.getWCharType());
1114  }
1115 
1116  WIntTy = getFromTargetType(Target.getWIntType());
1117 
1118  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1119  InitBuiltinType(Char16Ty, BuiltinType::Char16);
1120  else // C99
1121  Char16Ty = getFromTargetType(Target.getChar16Type());
1122 
1123  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1124  InitBuiltinType(Char32Ty, BuiltinType::Char32);
1125  else // C99
1126  Char32Ty = getFromTargetType(Target.getChar32Type());
1127 
1128  // Placeholder type for type-dependent expressions whose type is
1129  // completely unknown. No code should ever check a type against
1130  // DependentTy and users should never see it; however, it is here to
1131  // help diagnose failures to properly check for type-dependent
1132  // expressions.
1133  InitBuiltinType(DependentTy, BuiltinType::Dependent);
1134 
1135  // Placeholder type for functions.
1136  InitBuiltinType(OverloadTy, BuiltinType::Overload);
1137 
1138  // Placeholder type for bound members.
1139  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);
1140 
1141  // Placeholder type for pseudo-objects.
1142  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);
1143 
1144  // "any" type; useful for debugger-like clients.
1145  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);
1146 
1147  // Placeholder type for unbridged ARC casts.
1148  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);
1149 
1150  // Placeholder type for builtin functions.
1151  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
1152 
1153  // Placeholder type for OMP array sections.
1154  if (LangOpts.OpenMP)
1155  InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
1156 
1157  // C99 6.2.5p11.
1158  FloatComplexTy = getComplexType(FloatTy);
1159  DoubleComplexTy = getComplexType(DoubleTy);
1160  LongDoubleComplexTy = getComplexType(LongDoubleTy);
1161  Float128ComplexTy = getComplexType(Float128Ty);
1162 
1163  // Builtin types for 'id', 'Class', and 'SEL'.
1164  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
1165  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
1166  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);
1167 
1168  if (LangOpts.OpenCL) {
1169 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
1170  InitBuiltinType(SingletonId, BuiltinType::Id);
1171 #include "clang/Basic/OpenCLImageTypes.def"
1172 
1173  InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
1174  InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
1175  InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
1176  InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
1177  InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);
1178  }
1179 
1180  // Builtin type for __objc_yes and __objc_no
1181  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
1182  SignedCharTy : BoolTy);
1183 
1184  ObjCConstantStringType = QualType();
1185 
1186  ObjCSuperType = QualType();
1187 
1188  // void * type
1189  if (LangOpts.OpenCLVersion >= 200) {
1190  auto Q = VoidTy.getQualifiers();
1191  Q.setAddressSpace(LangAS::opencl_generic);
1192  VoidPtrTy = getPointerType(getCanonicalType(
1193  getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
1194  } else {
1195  VoidPtrTy = getPointerType(VoidTy);
1196  }
1197 
1198  // nullptr type (C++0x 2.14.7)
1199  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
1200 
1201  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
1202  InitBuiltinType(HalfTy, BuiltinType::Half);
1203 
1204  // Builtin type used to help define __builtin_va_list.
1205  VaListTagDecl = nullptr;
1206 }
1207 
1208 DiagnosticsEngine &ASTContext::getDiagnostics() const {
1209  return SourceMgr.getDiagnostics();
1210 }
1211 
1212 AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
1213  AttrVec *&Result = DeclAttrs[D];
1214  if (!Result) {
1215  void *Mem = Allocate(sizeof(AttrVec));
1216  Result = new (Mem) AttrVec;
1217  }
1218 
1219  return *Result;
1220 }
1221 
1222 /// \brief Erase the attributes corresponding to the given declaration.
1223 void ASTContext::eraseDeclAttrs(const Decl *D) {
1224  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
1225  if (Pos != DeclAttrs.end()) {
1226  Pos->second->~AttrVec();
1227  DeclAttrs.erase(Pos);
1228  }
1229 }
1230 
1231 // FIXME: Remove ?
1232 MemberSpecializationInfo *
1233 ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
1234  assert(Var->isStaticDataMember() && "Not a static data member");
1235  return getTemplateOrSpecializationInfo(Var)
1236  .dyn_cast<MemberSpecializationInfo *>();
1237 }
1238 
1239 ASTContext::TemplateOrSpecializationInfo
1240 ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
1241  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
1242  TemplateOrInstantiation.find(Var);
1243  if (Pos == TemplateOrInstantiation.end())
1244  return TemplateOrSpecializationInfo();
1245 
1246  return Pos->second;
1247 }
1248 
1249 void
1250 ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
1251  TemplateSpecializationKind TSK,
1252  SourceLocation PointOfInstantiation) {
1253  assert(Inst->isStaticDataMember() && "Not a static data member");
1254  assert(Tmpl->isStaticDataMember() && "Not a static data member");
1255  setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
1256  Tmpl, TSK, PointOfInstantiation));
1257 }
1258 
1259 void
1260 ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
1261  TemplateOrSpecializationInfo TSI) {
1262  assert(!TemplateOrInstantiation[Inst] &&
1263  "Already noted what the variable was instantiated from");
1264  TemplateOrInstantiation[Inst] = TSI;
1265 }
1266 
1267 FunctionDecl *ASTContext::getClassScopeSpecializationPattern(
1268  const FunctionDecl *FD){
1269  assert(FD && "Specialization is 0");
1270  llvm::DenseMap<const FunctionDecl*, FunctionDecl *>::const_iterator Pos
1271  = ClassScopeSpecializationPattern.find(FD);
1272  if (Pos == ClassScopeSpecializationPattern.end())
1273  return nullptr;
1274 
1275  return Pos->second;
1276 }
1277 
1278 void ASTContext::setClassScopeSpecializationPattern(FunctionDecl *FD,
1279  FunctionDecl *Pattern) {
1280  assert(FD && "Specialization is 0");
1281  assert(Pattern && "Class scope specialization pattern is 0");
1282  ClassScopeSpecializationPattern[FD] = Pattern;
1283 }
1284 
1285 NamedDecl *
1286 ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
1287  auto Pos = InstantiatedFromUsingDecl.find(UUD);
1288  if (Pos == InstantiatedFromUsingDecl.end())
1289  return nullptr;
1290 
1291  return Pos->second;
1292 }
1293 
1294 void
1295 ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
1296  assert((isa<UsingDecl>(Pattern) ||
1297  isa<UnresolvedUsingValueDecl>(Pattern) ||
1298  isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
1299  "pattern decl is not a using decl");
1300  assert((isa<UsingDecl>(Inst) ||
1301  isa<UnresolvedUsingValueDecl>(Inst) ||
1302  isa<UnresolvedUsingTypenameDecl>(Inst)) &&
1303  "instantiation did not produce a using decl");
1304  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
1305  InstantiatedFromUsingDecl[Inst] = Pattern;
1306 }
1307 
1308 UsingShadowDecl *
1309 ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
1310  llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
1311  = InstantiatedFromUsingShadowDecl.find(Inst);
1312  if (Pos == InstantiatedFromUsingShadowDecl.end())
1313  return nullptr;
1314 
1315  return Pos->second;
1316 }
1317 
1318 void
1319 ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
1320  UsingShadowDecl *Pattern) {
1321  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
1322  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
1323 }
1324 
1325 FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
1326  llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
1327  = InstantiatedFromUnnamedFieldDecl.find(Field);
1328  if (Pos == InstantiatedFromUnnamedFieldDecl.end())
1329  return nullptr;
1330 
1331  return Pos->second;
1332 }
1333 
1334 void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
1335  FieldDecl *Tmpl) {
1336  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
1337  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
1338  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1339  "Already noted what unnamed field was instantiated from");
1340 
1341  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1342 }
1343 
1344 ASTContext::overridden_cxx_method_iterator
1345 ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
1346  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
1347  OverriddenMethods.find(Method->getCanonicalDecl());
1348  if (Pos == OverriddenMethods.end())
1349  return nullptr;
1350  return Pos->second.begin();
1351 }
1352 
1353 ASTContext::overridden_cxx_method_iterator
1354 ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
1355  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
1356  OverriddenMethods.find(Method->getCanonicalDecl());
1357  if (Pos == OverriddenMethods.end())
1358  return nullptr;
1359  return Pos->second.end();
1360 }
1361 
1362 unsigned
1363 ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
1364  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
1365  OverriddenMethods.find(Method->getCanonicalDecl());
1366  if (Pos == OverriddenMethods.end())
1367  return 0;
1368  return Pos->second.size();
1369 }
1370 
1371 ASTContext::overridden_method_range
1372 ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
1373  return overridden_method_range(overridden_methods_begin(Method),
1374  overridden_methods_end(Method));
1375 }
1376 
1377 void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
1378  const CXXMethodDecl *Overridden) {
1379  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
1380  OverriddenMethods[Method].push_back(Overridden);
1381 }
1382 
1383 void ASTContext::getOverriddenMethods(
1384  const NamedDecl *D,
1385  SmallVectorImpl<const NamedDecl *> &Overridden) const {
1386  assert(D);
1387 
1388  if (const CXXMethodDecl *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
1389  Overridden.append(overridden_methods_begin(CXXMethod),
1390  overridden_methods_end(CXXMethod));
1391  return;
1392  }
1393 
1394  const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(D);
1395  if (!Method)
1396  return;
1397 
1398  SmallVector<const ObjCMethodDecl *, 8> OverDecls;
1399  Method->getOverriddenMethods(OverDecls);
1400  Overridden.append(OverDecls.begin(), OverDecls.end());
1401 }
1402 
1403 void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
1404  assert(!Import->NextLocalImport && "Import declaration already in the chain");
1405  assert(!Import->isFromASTFile() && "Non-local import declaration");
1406  if (!FirstLocalImport) {
1407  FirstLocalImport = Import;
1408  LastLocalImport = Import;
1409  return;
1410  }
1411 
1412  LastLocalImport->NextLocalImport = Import;
1413  LastLocalImport = Import;
1414 }
1415 
1416 //===----------------------------------------------------------------------===//
1417 // Type Sizing and Analysis
1418 //===----------------------------------------------------------------------===//
1419 
1420 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1421 /// scalar floating point type.
1422 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1423  const BuiltinType *BT = T->getAs<BuiltinType>();
1424  assert(BT && "Not a floating point type!");
1425  switch (BT->getKind()) {
1426  default: llvm_unreachable("Not a floating point type!");
1427  case BuiltinType::Float16:
1428  case BuiltinType::Half:
1429  return Target->getHalfFormat();
1430  case BuiltinType::Float: return Target->getFloatFormat();
1431  case BuiltinType::Double: return Target->getDoubleFormat();
1432  case BuiltinType::LongDouble: return Target->getLongDoubleFormat();
1433  case BuiltinType::Float128: return Target->getFloat128Format();
1434  }
1435 }
1436 
1437 CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
1438  unsigned Align = Target->getCharWidth();
1439 
1440  bool UseAlignAttrOnly = false;
1441  if (unsigned AlignFromAttr = D->getMaxAlignment()) {
1442  Align = AlignFromAttr;
1443 
1444  // __attribute__((aligned)) can increase or decrease alignment
1445  // *except* on a struct or struct member, where it only increases
1446  // alignment unless 'packed' is also specified.
1447  //
1448  // It is an error for alignas to decrease alignment, so we can
1449  // ignore that possibility; Sema should diagnose it.
1450  if (isa<FieldDecl>(D)) {
1451  UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
1452  cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
1453  } else {
1454  UseAlignAttrOnly = true;
1455  }
1456  }
1457  else if (isa<FieldDecl>(D))
1458  UseAlignAttrOnly =
1459  D->hasAttr<PackedAttr>() ||
1460  cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
1461 
1462  // If we're using the align attribute only, just ignore everything
1463  // else about the declaration and its type.
1464  if (UseAlignAttrOnly) {
1465  // do nothing
1466 
1467  } else if (const ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
1468  QualType T = VD->getType();
1469  if (const ReferenceType *RT = T->getAs<ReferenceType>()) {
1470  if (ForAlignof)
1471  T = RT->getPointeeType();
1472  else
1473  T = getPointerType(RT->getPointeeType());
1474  }
1475  QualType BaseT = getBaseElementType(T);
1476  if (T->isFunctionType())
1477  Align = getTypeInfoImpl(T.getTypePtr()).Align;
1478  else if (!BaseT->isIncompleteType()) {
1479  // Adjust alignments of declarations with array type by the
1480  // large-array alignment on the target.
1481  if (const ArrayType *arrayType = getAsArrayType(T)) {
1482  unsigned MinWidth = Target->getLargeArrayMinWidth();
1483  if (!ForAlignof && MinWidth) {
1484  if (isa<VariableArrayType>(arrayType))
1485  Align = std::max(Align, Target->getLargeArrayAlign());
1486  else if (isa<ConstantArrayType>(arrayType) &&
1487  MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
1488  Align = std::max(Align, Target->getLargeArrayAlign());
1489  }
1490  }
1491  Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
1492  if (BaseT.getQualifiers().hasUnaligned())
1493  Align = Target->getCharWidth();
1494  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
1495  if (VD->hasGlobalStorage() && !ForAlignof)
1496  Align = std::max(Align, getTargetInfo().getMinGlobalAlign());
1497  }
1498  }
1499 
1500  // Fields can be subject to extra alignment constraints, like if
1501  // the field is packed, the struct is packed, or the struct has a
1502  // max-field-alignment constraint (#pragma pack). So calculate
1503  // the actual alignment of the field within the struct, and then
1504  // (as we're expected to) constrain that by the alignment of the type.
1505  if (const FieldDecl *Field = dyn_cast<FieldDecl>(VD)) {
1506  const RecordDecl *Parent = Field->getParent();
1507  // We can only produce a sensible answer if the record is valid.
1508  if (!Parent->isInvalidDecl()) {
1509  const ASTRecordLayout &Layout = getASTRecordLayout(Parent);
1510 
1511  // Start with the record's overall alignment.
1512  unsigned FieldAlign = toBits(Layout.getAlignment());
1513 
1514  // Use the GCD of that and the offset within the record.
1515  uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
1516  if (Offset > 0) {
1517  // Alignment is always a power of 2, so the GCD will be a power of 2,
1518  // which means we get to do this crazy thing instead of Euclid's.
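 // For example, a field at bit offset 40 (0b101000) gives LowBitOfOffset == 8,
 // so only 8-bit alignment can be assumed for that field.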
1519  uint64_t LowBitOfOffset = Offset & (~Offset + 1);
1520  if (LowBitOfOffset < FieldAlign)
1521  FieldAlign = static_cast<unsigned>(LowBitOfOffset);
1522  }
1523 
1524  Align = std::min(Align, FieldAlign);
1525  }
1526  }
1527  }
1528 
1529  return toCharUnitsFromBits(Align);
1530 }
1531 
1532 // getTypeInfoDataSizeInChars - Return the size of a type, in
1533 // chars. If the type is a record, its data size is returned. This is
1534 // the size of the memcpy that's performed when assigning this type
1535 // using a trivial copy/move assignment operator.
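// (In C++, a class whose tail padding may be reused by a derived class reports
// a data size smaller than its size.)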
1536 std::pair<CharUnits, CharUnits>
1537 ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
1538  std::pair<CharUnits, CharUnits> sizeAndAlign = getTypeInfoInChars(T);
1539 
1540  // In C++, objects can sometimes be allocated into the tail padding
1541  // of a base-class subobject. We decide whether that's possible
1542  // during class layout, so here we can just trust the layout results.
1543  if (getLangOpts().CPlusPlus) {
1544  if (const RecordType *RT = T->getAs<RecordType>()) {
1545  const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
1546  sizeAndAlign.first = layout.getDataSize();
1547  }
1548  }
1549 
1550  return sizeAndAlign;
1551 }
1552 
1553 /// getConstantArrayInfoInChars - Performing the computation in CharUnits
1554 /// instead of in bits prevents overflowing the uint64_t for some large arrays.
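/// For example, an array of (1ULL << 61) chars fits in CharUnits, but its size
/// in bits (1ULL << 64) would overflow a uint64_t.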
1555 std::pair<CharUnits, CharUnits>
1556 getConstantArrayInfoInChars(const ASTContext &Context,
1557  const ConstantArrayType *CAT) {
1558  std::pair<CharUnits, CharUnits> EltInfo =
1559  Context.getTypeInfoInChars(CAT->getElementType());
1560  uint64_t Size = CAT->getSize().getZExtValue();
1561  assert((Size == 0 || static_cast<uint64_t>(EltInfo.first.getQuantity()) <=
1562  (uint64_t)(-1)/Size) &&
1563  "Overflow in array type char size evaluation");
1564  uint64_t Width = EltInfo.first.getQuantity() * Size;
1565  unsigned Align = EltInfo.second.getQuantity();
1566  if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
1567  Context.getTargetInfo().getPointerWidth(0) == 64)
1568  Width = llvm::alignTo(Width, Align);
1569  return std::make_pair(CharUnits::fromQuantity(Width),
1570  CharUnits::fromQuantity(Align));
1571 }
1572 
1573 std::pair<CharUnits, CharUnits>
1574 ASTContext::getTypeInfoInChars(const Type *T) const {
1575  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(T))
1576  return getConstantArrayInfoInChars(*this, CAT);
1577  TypeInfo Info = getTypeInfo(T);
1578  return std::make_pair(toCharUnitsFromBits(Info.Width),
1579  toCharUnitsFromBits(Info.Align));
1580 }
1581 
1582 std::pair<CharUnits, CharUnits>
1583 ASTContext::getTypeInfoInChars(QualType T) const {
1584  return getTypeInfoInChars(T.getTypePtr());
1585 }
1586 
1587 bool ASTContext::isAlignmentRequired(const Type *T) const {
1588  return getTypeInfo(T).AlignIsRequired;
1589 }
1590 
1591 bool ASTContext::isAlignmentRequired(QualType T) const {
1592  return isAlignmentRequired(T.getTypePtr());
1593 }
1594 
1595 unsigned ASTContext::getTypeAlignIfKnown(QualType T) const {
1596  // An alignment on a typedef overrides anything else.
1597  if (auto *TT = T->getAs<TypedefType>())
1598  if (unsigned Align = TT->getDecl()->getMaxAlignment())
1599  return Align;
1600 
1601  // If we have an (array of) complete type, we're done.
1602  T = getBaseElementType(T);
1603  if (!T->isIncompleteType())
1604  return getTypeAlign(T);
1605 
1606  // If we had an array type, its element type might be a typedef
1607  // type with an alignment attribute.
1608  if (auto *TT = T->getAs<TypedefType>())
1609  if (unsigned Align = TT->getDecl()->getMaxAlignment())
1610  return Align;
1611 
1612  // Otherwise, see if the declaration of the type had an attribute.
1613  if (auto *TT = T->getAs<TagType>())
1614  return TT->getDecl()->getMaxAlignment();
1615 
1616  return 0;
1617 }
1618 
1619 TypeInfo ASTContext::getTypeInfo(const Type *T) const {
1620  TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
1621  if (I != MemoizedTypeInfo.end())
1622  return I->second;
1623 
1624  // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
1625  TypeInfo TI = getTypeInfoImpl(T);
1626  MemoizedTypeInfo[T] = TI;
1627  return TI;
1628 }
1629 
1630 /// getTypeInfoImpl - Return the size of the specified type, in bits. This
1631 /// method does not work on incomplete types.
1632 ///
1633 /// FIXME: Pointers into different addr spaces could have different sizes and
1634 /// alignment requirements: getPointerInfo should take an AddrSpace, this
1635 /// should take a QualType, &c.
1636 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
1637  uint64_t Width = 0;
1638  unsigned Align = 8;
1639  bool AlignIsRequired = false;
1640  unsigned AS = 0;
1641  switch (T->getTypeClass()) {
1642 #define TYPE(Class, Base)
1643 #define ABSTRACT_TYPE(Class, Base)
1644 #define NON_CANONICAL_TYPE(Class, Base)
1645 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
1646 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
1647  case Type::Class: \
1648  assert(!T->isDependentType() && "should not see dependent types here"); \
1649  return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
1650 #include "clang/AST/TypeNodes.def"
1651  llvm_unreachable("Should not see dependent types");
1652 
1653  case Type::FunctionNoProto:
1654  case Type::FunctionProto:
1655  // GCC extension: alignof(function) = 32 bits
1656  Width = 0;
1657  Align = 32;
1658  break;
1659 
1660  case Type::IncompleteArray:
1661  case Type::VariableArray:
1662  Width = 0;
1663  Align = getTypeAlign(cast<ArrayType>(T)->getElementType());
1664  break;
1665 
1666  case Type::ConstantArray: {
1667  const ConstantArrayType *CAT = cast<ConstantArrayType>(T);
1668 
1669  TypeInfo EltInfo = getTypeInfo(CAT->getElementType());
1670  uint64_t Size = CAT->getSize().getZExtValue();
1671  assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
1672  "Overflow in array type bit size evaluation");
1673  Width = EltInfo.Width * Size;
1674  Align = EltInfo.Align;
1675  if (!getTargetInfo().getCXXABI().isMicrosoft() ||
1676  getTargetInfo().getPointerWidth(0) == 64)
1677  Width = llvm::alignTo(Width, Align);
1678  break;
1679  }
1680  case Type::ExtVector:
1681  case Type::Vector: {
1682  const VectorType *VT = cast<VectorType>(T);
1683  TypeInfo EltInfo = getTypeInfo(VT->getElementType());
1684  Width = EltInfo.Width * VT->getNumElements();
1685  Align = Width;
1686  // If the alignment is not a power of 2, round up to the next power of 2.
1687  // This happens for non-power-of-2 length vectors.
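 // For example, a vector of three 32-bit floats is 96 bits wide: Align is
 // rounded up to 128 and Width is padded to 128.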
1688  if (Align & (Align-1)) {
1689  Align = llvm::NextPowerOf2(Align);
1690  Width = llvm::alignTo(Width, Align);
1691  }
1692  // Adjust the alignment based on the target max.
1693  uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
1694  if (TargetVectorAlign && TargetVectorAlign < Align)
1695  Align = TargetVectorAlign;
1696  break;
1697  }
1698 
1699  case Type::Builtin:
1700  switch (cast<BuiltinType>(T)->getKind()) {
1701  default: llvm_unreachable("Unknown builtin type!");
1702  case BuiltinType::Void:
1703  // GCC extension: alignof(void) = 8 bits.
1704  Width = 0;
1705  Align = 8;
1706  break;
1707 
1708  case BuiltinType::Bool:
1709  Width = Target->getBoolWidth();
1710  Align = Target->getBoolAlign();
1711  break;
1712  case BuiltinType::Char_S:
1713  case BuiltinType::Char_U:
1714  case BuiltinType::UChar:
1715  case BuiltinType::SChar:
1716  Width = Target->getCharWidth();
1717  Align = Target->getCharAlign();
1718  break;
1719  case BuiltinType::WChar_S:
1720  case BuiltinType::WChar_U:
1721  Width = Target->getWCharWidth();
1722  Align = Target->getWCharAlign();
1723  break;
1724  case BuiltinType::Char16:
1725  Width = Target->getChar16Width();
1726  Align = Target->getChar16Align();
1727  break;
1728  case BuiltinType::Char32:
1729  Width = Target->getChar32Width();
1730  Align = Target->getChar32Align();
1731  break;
1732  case BuiltinType::UShort:
1733  case BuiltinType::Short:
1734  Width = Target->getShortWidth();
1735  Align = Target->getShortAlign();
1736  break;
1737  case BuiltinType::UInt:
1738  case BuiltinType::Int:
1739  Width = Target->getIntWidth();
1740  Align = Target->getIntAlign();
1741  break;
1742  case BuiltinType::ULong:
1743  case BuiltinType::Long:
1744  Width = Target->getLongWidth();
1745  Align = Target->getLongAlign();
1746  break;
1747  case BuiltinType::ULongLong:
1748  case BuiltinType::LongLong:
1749  Width = Target->getLongLongWidth();
1750  Align = Target->getLongLongAlign();
1751  break;
1752  case BuiltinType::Int128:
1753  case BuiltinType::UInt128:
1754  Width = 128;
1755  Align = 128; // int128_t is 128-bit aligned on all targets.
1756  break;
1757  case BuiltinType::Float16:
1758  case BuiltinType::Half:
1759  Width = Target->getHalfWidth();
1760  Align = Target->getHalfAlign();
1761  break;
1762  case BuiltinType::Float:
1763  Width = Target->getFloatWidth();
1764  Align = Target->getFloatAlign();
1765  break;
1766  case BuiltinType::Double:
1767  Width = Target->getDoubleWidth();
1768  Align = Target->getDoubleAlign();
1769  break;
1770  case BuiltinType::LongDouble:
1771  Width = Target->getLongDoubleWidth();
1772  Align = Target->getLongDoubleAlign();
1773  break;
1774  case BuiltinType::Float128:
1775  Width = Target->getFloat128Width();
1776  Align = Target->getFloat128Align();
1777  break;
1778  case BuiltinType::NullPtr:
1779  Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
1780  Align = Target->getPointerAlign(0); // == sizeof(void*)
1781  break;
1782  case BuiltinType::ObjCId:
1783  case BuiltinType::ObjCClass:
1784  case BuiltinType::ObjCSel:
1785  Width = Target->getPointerWidth(0);
1786  Align = Target->getPointerAlign(0);
1787  break;
1788  case BuiltinType::OCLSampler:
1789  case BuiltinType::OCLEvent:
1790  case BuiltinType::OCLClkEvent:
1791  case BuiltinType::OCLQueue:
1792  case BuiltinType::OCLReserveID:
1793 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
1794  case BuiltinType::Id:
1795 #include "clang/Basic/OpenCLImageTypes.def"
1797  Width = Target->getPointerWidth(AS);
1798  Align = Target->getPointerAlign(AS);
1799  break;
1800  }
1801  break;
1802  case Type::ObjCObjectPointer:
1803  Width = Target->getPointerWidth(0);
1804  Align = Target->getPointerAlign(0);
1805  break;
1806  case Type::BlockPointer: {
1807  AS = getTargetAddressSpace(cast<BlockPointerType>(T)->getPointeeType());
1808  Width = Target->getPointerWidth(AS);
1809  Align = Target->getPointerAlign(AS);
1810  break;
1811  }
1812  case Type::LValueReference:
1813  case Type::RValueReference: {
1814  // alignof and sizeof should never reach this code path, so we go the
1815  // pointer route.
1816  AS = getTargetAddressSpace(cast<ReferenceType>(T)->getPointeeType());
1817  Width = Target->getPointerWidth(AS);
1818  Align = Target->getPointerAlign(AS);
1819  break;
1820  }
1821  case Type::Pointer: {
1822  AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType());
1823  Width = Target->getPointerWidth(AS);
1824  Align = Target->getPointerAlign(AS);
1825  break;
1826  }
1827  case Type::MemberPointer: {
1828  const MemberPointerType *MPT = cast<MemberPointerType>(T);
1829  std::tie(Width, Align) = ABI->getMemberPointerWidthAndAlign(MPT);
1830  break;
1831  }
1832  case Type::Complex: {
1833  // Complex types have the same alignment as their elements, but twice the
1834  // size.
1835  TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
1836  Width = EltInfo.Width * 2;
1837  Align = EltInfo.Align;
1838  break;
1839  }
1840  case Type::ObjCObject:
1841  return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
1842  case Type::Adjusted:
1843  case Type::Decayed:
1844  return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
1845  case Type::ObjCInterface: {
1846  const ObjCInterfaceType *ObjCI = cast<ObjCInterfaceType>(T);
1847  const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
1848  Width = toBits(Layout.getSize());
1849  Align = toBits(Layout.getAlignment());
1850  break;
1851  }
1852  case Type::Record:
1853  case Type::Enum: {
1854  const TagType *TT = cast<TagType>(T);
1855 
1856  if (TT->getDecl()->isInvalidDecl()) {
1857  Width = 8;
1858  Align = 8;
1859  break;
1860  }
1861 
1862  if (const EnumType *ET = dyn_cast<EnumType>(TT)) {
1863  const EnumDecl *ED = ET->getDecl();
1864  TypeInfo Info =
1865      getTypeInfo(ED->getIntegerType()->getCanonicalTypeUnqualified());
1866  if (unsigned AttrAlign = ED->getMaxAlignment()) {
1867  Info.Align = AttrAlign;
1868  Info.AlignIsRequired = true;
1869  }
1870  return Info;
1871  }
1872 
1873  const RecordType *RT = cast<RecordType>(TT);
1874  const RecordDecl *RD = RT->getDecl();
1875  const ASTRecordLayout &Layout = getASTRecordLayout(RD);
1876  Width = toBits(Layout.getSize());
1877  Align = toBits(Layout.getAlignment());
1878  AlignIsRequired = RD->hasAttr<AlignedAttr>();
1879  break;
1880  }
1881 
1882  case Type::SubstTemplateTypeParm:
1883  return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
1884  getReplacementType().getTypePtr());
1885 
1886  case Type::Auto:
1887  case Type::DeducedTemplateSpecialization: {
1888  const DeducedType *A = cast<DeducedType>(T);
1889  assert(!A->getDeducedType().isNull() &&
1890  "cannot request the size of an undeduced or dependent auto type");
1891  return getTypeInfo(A->getDeducedType().getTypePtr());
1892  }
1893 
1894  case Type::Paren:
1895  return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
1896 
1897  case Type::ObjCTypeParam:
1898  return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());
1899 
1900  case Type::Typedef: {
1901  const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl();
1902  TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
1903  // If the typedef has an aligned attribute on it, it overrides any computed
1904  // alignment we have. This violates the GCC documentation (which says that
1905  // attribute(aligned) can only round up) but matches its implementation.
1906  if (unsigned AttrAlign = Typedef->getMaxAlignment()) {
1907  Align = AttrAlign;
1908  AlignIsRequired = true;
1909  } else {
1910  Align = Info.Align;
1911  AlignIsRequired = Info.AlignIsRequired;
1912  }
1913  Width = Info.Width;
1914  break;
1915  }
1916 
1917  case Type::Elaborated:
1918  return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
1919 
1920  case Type::Attributed:
1921  return getTypeInfo(
1922  cast<AttributedType>(T)->getEquivalentType().getTypePtr());
1923 
1924  case Type::Atomic: {
1925  // Start with the base type information.
1926  TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
1927  Width = Info.Width;
1928  Align = Info.Align;
1929 
1930  // If the size of the type doesn't exceed the platform's max
1931  // atomic promotion width, make the size and alignment more
1932  // favorable to atomic operations:
1933  if (Width != 0 && Width <= Target->getMaxAtomicPromoteWidth()) {
1934  // Round the size up to a power of 2.
1935  if (!llvm::isPowerOf2_64(Width))
1936  Width = llvm::NextPowerOf2(Width);
1937 
1938  // Set the alignment equal to the size.
1939  Align = static_cast<unsigned>(Width);
1940  }
1941  }
1942  break;
1943 
1944  case Type::Pipe: {
1947  }
1948 
1949  }
1950 
1951  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
1952  return TypeInfo(Width, Align, AlignIsRequired);
1953 }
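// --- Illustrative sketch (editor's addition, not part of ASTContext.cpp) ---
// A minimal example of how a client might consume the layout information
// computed above, assuming it already holds a valid ASTContext &Ctx and a
// QualType T (e.g. from inside an AST visitor). getTypeInfo, getTypeSize and
// getTypeAlign are the public entry points that funnel into getTypeInfoImpl.
static void printLayout(const clang::ASTContext &Ctx, clang::QualType T) {
  clang::TypeInfo TI = Ctx.getTypeInfo(T);          // width/align in bits
  llvm::errs() << T.getAsString()
               << ": size=" << TI.Width << " bits"
               << ", align=" << TI.Align << " bits"
               << ", align required=" << TI.AlignIsRequired << "\n";
  (void)Ctx.getTypeSize(T);   // convenience accessor, == TI.Width
  (void)Ctx.getTypeAlign(T);  // convenience accessor, == TI.Align
}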
1954 
1955 unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
1956  unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
1957  // Target ppc64 with QPX: simd default alignment for pointer to double is 32 bytes (256 bits).
1958  if ((getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64 ||
1959  getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64le) &&
1960  getTargetInfo().getABI() == "elfv1-qpx" &&
1961  T->isSpecificBuiltinType(BuiltinType::Double))
1962  SimdAlign = 256;
1963  return SimdAlign;
1964 }
1965 
1966 /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
1967 CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
1968  return CharUnits::fromQuantity(BitSize / getCharWidth());
1969 }
1970 
1971 /// toBits - Convert a size in characters to a size in bits.
1972 int64_t ASTContext::toBits(CharUnits CharSize) const {
1973  return CharSize.getQuantity() * getCharWidth();
1974 }
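// --- Illustrative sketch (editor's addition, not part of ASTContext.cpp) ---
// toCharUnitsFromBits and toBits are plain unit conversions driven by the
// target's character width (8 bits on virtually every target). A round trip,
// assuming a valid ASTContext &Ctx:
static void demoBitCharConversion(const clang::ASTContext &Ctx) {
  clang::CharUnits FourChars = clang::CharUnits::fromQuantity(4);
  int64_t Bits = Ctx.toBits(FourChars);                   // 32 when a char is 8 bits wide
  clang::CharUnits Back = Ctx.toCharUnitsFromBits(Bits);  // back to 4 characters
  assert(Back == FourChars && "conversion is lossless for whole characters");
  (void)Back;
}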
1975 
1976 /// getTypeSizeInChars - Return the size of the specified type, in characters.
1977 /// This method does not work on incomplete types.
1978 CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
1979  return getTypeInfoInChars(T).first;
1980 }
1981 CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
1982  return getTypeInfoInChars(T).first;
1983 }
1984 
1985 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
1986 /// characters. This method does not work on incomplete types.
1987 CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
1988  return toCharUnitsFromBits(getTypeAlign(T));
1989 }
1990 CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
1991  return toCharUnitsFromBits(getTypeAlign(T));
1992 }
1993 
1994 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified
1995 /// type for the current target in bits. This can be different than the ABI
1996 /// alignment in cases where it is beneficial for performance to overalign
1997 /// a data type.
1998 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
1999  TypeInfo TI = getTypeInfo(T);
2000  unsigned ABIAlign = TI.Align;
2001 
2002  T = T->getBaseElementTypeUnsafe();
2003 
2004  // The preferred alignment of member pointers is that of a pointer.
2005  if (T->isMemberPointerType())
2006  return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
2007 
2008  if (!Target->allowsLargerPreferedTypeAlignment())
2009  return ABIAlign;
2010 
2011  // Double and long long should be naturally aligned if possible.
2012  if (const ComplexType *CT = T->getAs<ComplexType>())
2013  T = CT->getElementType().getTypePtr();
2014  if (const EnumType *ET = T->getAs<EnumType>())
2015  T = ET->getDecl()->getIntegerType().getTypePtr();
2016  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
2017  T->isSpecificBuiltinType(BuiltinType::LongLong) ||
2018  T->isSpecificBuiltinType(BuiltinType::ULongLong))
2019  // Don't increase the alignment if an alignment attribute was specified on a
2020  // typedef declaration.
2021  if (!TI.AlignIsRequired)
2022  return std::max(ABIAlign, (unsigned)getTypeSize(T));
2023 
2024  return ABIAlign;
2025 }
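// --- Illustrative sketch (editor's addition, not part of ASTContext.cpp) ---
// Example of the ABI vs. preferred distinction: on the i386 System V psABI,
// 'double' has a 32-bit ABI alignment but a 64-bit preferred alignment, which
// is exactly the over-alignment modeled above. Assuming ASTContext &Ctx:
static void demoPreferredAlign(const clang::ASTContext &Ctx) {
  const clang::Type *DoubleTy = Ctx.DoubleTy.getTypePtr();
  unsigned ABIAlignBits = Ctx.getTypeAlign(DoubleTy);           // e.g. 32 on i386
  unsigned PrefAlignBits = Ctx.getPreferredTypeAlign(DoubleTy); // e.g. 64 on i386
  assert(PrefAlignBits >= ABIAlignBits);
  (void)ABIAlignBits; (void)PrefAlignBits;
}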
2026 
2027 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2028 /// for __attribute__((aligned)) on this target, to be used if no alignment
2029 /// value is specified.
2030 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
2031  return getTargetInfo().getDefaultAlignForAttributeAligned();
2032 }
2033 
2034 /// getAlignOfGlobalVar - Return the alignment in bits that should be given
2035 /// to a global variable of the specified type.
2036 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
2037  return std::max(getTypeAlign(T), getTargetInfo().getMinGlobalAlign());
2038 }
2039 
2040 /// getAlignOfGlobalVarInChars - Return the alignment in characters that
2041 /// should be given to a global variable of the specified type.
2042 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const {
2043  return toCharUnitsFromBits(getAlignOfGlobalVar(T));
2044 }
2045 
2048  const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
2049  while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2050  Offset += Layout->getBaseClassOffset(Base);
2051  Layout = &getASTRecordLayout(Base);
2052  }
2053  return Offset;
2054 }
2055 
2056 /// DeepCollectObjCIvars -
2057 /// This routine first collects all declared, but not synthesized, ivars in
2058 /// the superclass chain and then collects all ivars, including those
2059 /// synthesized for the current class. It is used when implementing the
2060 /// current class, once all ivars, declared and synthesized, are known.
2061 ///
2062 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
2063  bool leafClass,
2064  SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
2065  if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2066  DeepCollectObjCIvars(SuperClass, false, Ivars);
2067  if (!leafClass) {
2068  for (const auto *I : OI->ivars())
2069  Ivars.push_back(I);
2070  } else {
2071  ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2072  for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2073  Iv= Iv->getNextIvar())
2074  Ivars.push_back(Iv);
2075  }
2076 }
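// --- Illustrative sketch (editor's addition, not part of ASTContext.cpp) ---
// Typical use when laying out an implementation: pass the interface being
// implemented with leafClass = true so that synthesized ivars of the current
// class are included, while superclasses contribute only declared ivars.
// Assuming a valid ObjCInterfaceDecl *OI:
static void demoCollectIvars(const clang::ASTContext &Ctx,
                             const clang::ObjCInterfaceDecl *OI) {
  llvm::SmallVector<const clang::ObjCIvarDecl *, 16> Ivars;
  Ctx.DeepCollectObjCIvars(OI, /*leafClass=*/true, Ivars);
  for (const clang::ObjCIvarDecl *Iv : Ivars)
    llvm::errs() << Iv->getName() << "\n";
}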
2077 
2078 /// CollectInheritedProtocols - Collect all protocols in current class and
2079 /// those inherited by it.
2080 void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
2081  llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
2082  if (const ObjCInterfaceDecl *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
2083  // We can use protocol_iterator here instead of
2084  // all_referenced_protocol_iterator since we are walking all categories.
2085  for (auto *Proto : OI->all_referenced_protocols()) {
2086  CollectInheritedProtocols(Proto, Protocols);
2087  }
2088 
2089  // Categories of this Interface.
2090  for (const auto *Cat : OI->visible_categories())
2091  CollectInheritedProtocols(Cat, Protocols);
2092 
2093  if (ObjCInterfaceDecl *SD = OI->getSuperClass())
2094  while (SD) {
2095  CollectInheritedProtocols(SD, Protocols);
2096  SD = SD->getSuperClass();
2097  }
2098  } else if (const ObjCCategoryDecl *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
2099  for (auto *Proto : OC->protocols()) {
2100  CollectInheritedProtocols(Proto, Protocols);
2101  }
2102  } else if (const ObjCProtocolDecl *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
2103  // Insert the protocol.
2104  if (!Protocols.insert(
2105  const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
2106  return;
2107 
2108  for (auto *Proto : OP->protocols())
2109  CollectInheritedProtocols(Proto, Protocols);
2110  }
2111 }
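// --- Illustrative sketch (editor's addition, not part of ASTContext.cpp) ---
// The set is deduplicated by canonical declaration, so checking whether a
// class (transitively) conforms to a protocol becomes a simple set lookup.
// Assuming valid ObjCInterfaceDecl/ObjCProtocolDecl pointers:
static bool conformsTo(clang::ASTContext &Ctx,
                       const clang::ObjCInterfaceDecl *OI,
                       clang::ObjCProtocolDecl *Proto) {
  llvm::SmallPtrSet<clang::ObjCProtocolDecl *, 8> Protocols;
  Ctx.CollectInheritedProtocols(OI, Protocols);
  return Protocols.count(Proto->getCanonicalDecl());
}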
2112 
2114  unsigned count = 0;
2115  // Count ivars declared in class extension.
2116  for (const auto *Ext : OI->known_extensions())
2117  count += Ext->ivar_size();
2118 
2119  // Count ivars defined in this class's implementation. This
2120  // includes synthesized ivars.
2121  if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
2122  count += ImplDecl->ivar_size();
2123 
2124  return count;
2125 }
2126 
2128  if (!E)
2129  return false;
2130 
2131  // nullptr_t is always treated as null.
2132  if (E->getType()->isNullPtrType()) return true;
2133 
2134  if (E->getType()->isAnyPointerType() &&
2137  return true;
2138 
2139  // Unfortunately, __null has type 'int'.
2140  if (isa<GNUNullExpr>(E)) return true;
2141 
2142  return false;
2143 }
2144 
2145 /// \brief Get the implementation of ObjCInterfaceDecl, or NULL if none exists.
2146 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
2147  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2148  I = ObjCImpls.find(D);
2149  if (I != ObjCImpls.end())
2150  return cast<ObjCImplementationDecl>(I->second);
2151  return nullptr;
2152 }
2153 /// \brief Get the implementation of ObjCCategoryDecl, or NULL if none exists.
2154 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
2155  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2156  I = ObjCImpls.find(D);
2157  if (I != ObjCImpls.end())
2158  return cast<ObjCCategoryImplDecl>(I->second);
2159  return nullptr;
2160 }
2161 
2162 /// \brief Set the implementation of ObjCInterfaceDecl.
2163 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
2164  ObjCImplementationDecl *ImplD) {
2165  assert(IFaceD && ImplD && "Passed null params");
2166  ObjCImpls[IFaceD] = ImplD;
2167 }
2168 /// \brief Set the implementation of ObjCCategoryDecl.
2169 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
2170  ObjCCategoryImplDecl *ImplD) {
2171  assert(CatD && ImplD && "Passed null params");
2172  ObjCImpls[CatD] = ImplD;
2173 }
2174 
2175 const ObjCMethodDecl *
2177  return ObjCMethodRedecls.lookup(MD);
2178 }
2179 
2181  const ObjCMethodDecl *Redecl) {
2182  assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
2183  ObjCMethodRedecls[MD] = Redecl;
2184 }
2185 
2187  const NamedDecl *ND) const {
2188  if (const ObjCInterfaceDecl *ID =
2189  dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
2190  return ID;
2191  if (const ObjCCategoryDecl *CD =
2192  dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
2193  return CD->getClassInterface();
2194  if (const ObjCImplDecl *IMD =
2195  dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
2196  return IMD->getClassInterface();
2197 
2198  return nullptr;
2199 }
2200 
2201 /// \brief Get the copy initialization expression of VarDecl, or NULL if
2202 /// none exists.
2204  assert(VD && "Passed null params");
2205  assert(VD->hasAttr<BlocksAttr>() &&
2206  "getBlockVarCopyInits - not __block var");
2207  llvm::DenseMap<const VarDecl*, Expr*>::iterator
2208  I = BlockVarCopyInits.find(VD);
2209  return (I != BlockVarCopyInits.end()) ? cast<Expr>(I->second) : nullptr;
2210 }
2211 
2212 /// \brief Set the copy initialization expression of a block var decl.
2214  assert(VD && Init && "Passed null params");
2215  assert(VD->hasAttr<BlocksAttr>() &&
2216  "setBlockVarCopyInits - not __block var");
2217  BlockVarCopyInits[VD] = Init;
2218 }
2219 
2221  unsigned DataSize) const {
2222  if (!DataSize)
2223  DataSize = TypeLoc::getFullDataSizeForType(T);
2224  else
2225  assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
2226  "incorrect data size provided to CreateTypeSourceInfo!");
2227 
2228  TypeSourceInfo *TInfo =
2229  (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
2230  new (TInfo) TypeSourceInfo(T);
2231  return TInfo;
2232 }
2233 
2235  SourceLocation L) const {
2237  DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
2238  return DI;
2239 }
2240 
2241 const ASTRecordLayout &
2243  return getObjCLayout(D, nullptr);
2244 }
2245 
2246 const ASTRecordLayout &
2248  const ObjCImplementationDecl *D) const {
2249  return getObjCLayout(D->getClassInterface(), D);
2250 }
2251 
2252 //===----------------------------------------------------------------------===//
2253 // Type creation/memoization methods
2254 //===----------------------------------------------------------------------===//
2255 
2256 QualType
2257 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
2258  unsigned fastQuals = quals.getFastQualifiers();
2259  quals.removeFastQualifiers();
2260 
2261  // Check if we've already instantiated this type.
2262  llvm::FoldingSetNodeID ID;
2263  ExtQuals::Profile(ID, baseType, quals);
2264  void *insertPos = nullptr;
2265  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
2266  assert(eq->getQualifiers() == quals);
2267  return QualType(eq, fastQuals);
2268  }
2269 
2270  // If the base type is not canonical, make the appropriate canonical type.
2271  QualType canon;
2272  if (!baseType->isCanonicalUnqualified()) {
2273  SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
2274  canonSplit.Quals.addConsistentQualifiers(quals);
2275  canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);
2276 
2277  // Re-find the insert position.
2278  (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
2279  }
2280 
2281  ExtQuals *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
2282  ExtQualNodes.InsertNode(eq, insertPos);
2283  return QualType(eq, fastQuals);
2284 }
2285 
2287  LangAS AddressSpace) const {
2288  QualType CanT = getCanonicalType(T);
2289  if (CanT.getAddressSpace() == AddressSpace)
2290  return T;
2291 
2292  // If we are composing extended qualifiers together, merge together
2293  // into one ExtQuals node.
2294  QualifierCollector Quals;
2295  const Type *TypeNode = Quals.strip(T);
2296 
2297  // If this type already has an address space specified, it cannot get
2298  // another one.
2299  assert(!Quals.hasAddressSpace() &&
2300  "Type cannot be in multiple addr spaces!");
2301  Quals.addAddressSpace(AddressSpace);
2302 
2303  return getExtQualType(TypeNode, Quals);
2304 }
2305 
2307  // If we are composing extended qualifiers together, merge together
2308  // into one ExtQuals node.
2309  QualifierCollector Quals;
2310  const Type *TypeNode = Quals.strip(T);
2311 
2312  // If the qualifier doesn't have an address space just return it.
2313  if (!Quals.hasAddressSpace())
2314  return T;
2315 
2316  Quals.removeAddressSpace();
2317 
2318  // Removal of the address space can mean there are no longer any
2319  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
2320  // or required.
2321  if (Quals.hasNonFastQualifiers())
2322  return getExtQualType(TypeNode, Quals);
2323  else
2324  return QualType(TypeNode, Quals.getFastQualifiers());
2325 }
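// --- Illustrative sketch (editor's addition, not part of ASTContext.cpp) ---
// Adding and removing an address space are inverse operations at the
// qualifier level. The helper defined just above is assumed to be the usual
// ASTContext::removeAddrSpaceQualType; LangAS::opencl_global is one of the
// language address spaces this version of clang defines. Assuming ASTContext &Ctx:
static void demoAddrSpace(const clang::ASTContext &Ctx) {
  clang::QualType GlobalInt =
      Ctx.getAddrSpaceQualType(Ctx.IntTy, clang::LangAS::opencl_global);
  assert(GlobalInt.getAddressSpace() == clang::LangAS::opencl_global);
  clang::QualType PlainInt = Ctx.removeAddrSpaceQualType(GlobalInt);
  assert(PlainInt.getAddressSpace() == clang::LangAS::Default);
  (void)PlainInt;
}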
2326 
2328  Qualifiers::GC GCAttr) const {
2329  QualType CanT = getCanonicalType(T);
2330  if (CanT.getObjCGCAttr() == GCAttr)
2331  return T;
2332 
2333  if (const PointerType *ptr = T->getAs<PointerType>()) {
2334  QualType Pointee = ptr->getPointeeType();
2335  if (Pointee->isAnyPointerType()) {
2336  QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
2337  return getPointerType(ResultType);
2338  }
2339  }
2340 
2341  // If we are composing extended qualifiers together, merge together
2342  // into one ExtQuals node.
2343  QualifierCollector Quals;
2344  const Type *TypeNode = Quals.strip(T);
2345 
2346  // If this type already has an ObjCGC specified, it cannot get
2347  // another one.
2348  assert(!Quals.hasObjCGCAttr() &&
2349  "Type cannot have multiple ObjCGCs!");
2350  Quals.addObjCGCAttr(GCAttr);
2351 
2352  return getExtQualType(TypeNode, Quals);
2353 }
2354 
2356  FunctionType::ExtInfo Info) {
2357  if (T->getExtInfo() == Info)
2358  return T;
2359 
2360  QualType Result;
2361  if (const FunctionNoProtoType *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
2362  Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
2363  } else {
2364  const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
2365  FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
2366  EPI.ExtInfo = Info;
2367  Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
2368  }
2369 
2370  return cast<FunctionType>(Result.getTypePtr());
2371 }
2372 
2374  QualType ResultType) {
2375  FD = FD->getMostRecentDecl();
2376  while (true) {
2377  const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
2378  FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
2379  FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
2380  if (FunctionDecl *Next = FD->getPreviousDecl())
2381  FD = Next;
2382  else
2383  break;
2384  }
2386  L->DeducedReturnType(FD, ResultType);
2387 }
2388 
2389 /// Get a function type and produce the equivalent function type with the
2390 /// specified exception specification. Type sugar that can be present on a
2391 /// declaration of a function with an exception specification is permitted
2392 /// and preserved. Other type sugar (for instance, typedefs) is not.
2394  ASTContext &Context, QualType Orig,
2396  // Might have some parens.
2397  if (auto *PT = dyn_cast<ParenType>(Orig))
2398  return Context.getParenType(
2399  getFunctionTypeWithExceptionSpec(Context, PT->getInnerType(), ESI));
2400 
2401  // Might have a calling-convention attribute.
2402  if (auto *AT = dyn_cast<AttributedType>(Orig))
2403  return Context.getAttributedType(
2404  AT->getAttrKind(),
2405  getFunctionTypeWithExceptionSpec(Context, AT->getModifiedType(), ESI),
2406  getFunctionTypeWithExceptionSpec(Context, AT->getEquivalentType(),
2407  ESI));
2408 
2409  // Anything else must be a function type. Rebuild it with the new exception
2410  // specification.
2411  const FunctionProtoType *Proto = cast<FunctionProtoType>(Orig);
2412  return Context.getFunctionType(
2413  Proto->getReturnType(), Proto->getParamTypes(),
2414  Proto->getExtProtoInfo().withExceptionSpec(ESI));
2415 }
2416 
2418  QualType U) {
2419  return hasSameType(T, U) ||
2420  (getLangOpts().CPlusPlus1z &&
2423 }
2424 
2427  bool AsWritten) {
2428  // Update the type.
2429  QualType Updated =
2430  getFunctionTypeWithExceptionSpec(*this, FD->getType(), ESI);
2431  FD->setType(Updated);
2432 
2433  if (!AsWritten)
2434  return;
2435 
2436  // Update the type in the type source information too.
2437  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
2438  // If the type and the type-as-written differ, we may need to update
2439  // the type-as-written too.
2440  if (TSInfo->getType() != FD->getType())
2441  Updated = getFunctionTypeWithExceptionSpec(*this, TSInfo->getType(), ESI);
2442 
2443  // FIXME: When we get proper type location information for exceptions,
2444  // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
2445  // up the TypeSourceInfo.
2446  assert(TypeLoc::getFullDataSizeForType(Updated) ==
2447  TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
2448  "TypeLoc size mismatch from updating exception specification");
2449  TSInfo->overrideType(Updated);
2450  }
2451 }
2452 
2453 /// getComplexType - Return the uniqued reference to the type for a complex
2454 /// number with the specified element type.
2455 QualType ASTContext::getComplexType(QualType T) const {
2456  // Unique pointers, to guarantee there is only one pointer of a particular
2457  // structure.
2458  llvm::FoldingSetNodeID ID;
2459  ComplexType::Profile(ID, T);
2460 
2461  void *InsertPos = nullptr;
2462  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
2463  return QualType(CT, 0);
2464 
2465  // If the pointee type isn't canonical, this won't be a canonical type either,
2466  // so fill in the canonical type field.
2467  QualType Canonical;
2468  if (!T.isCanonical()) {
2469  Canonical = getComplexType(getCanonicalType(T));
2470 
2471  // Get the new insert position for the node we care about.
2472  ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
2473  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2474  }
2475  ComplexType *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
2476  Types.push_back(New);
2477  ComplexTypes.InsertNode(New, InsertPos);
2478  return QualType(New, 0);
2479 }
2480 
2481 /// getPointerType - Return the uniqued reference to the type for a pointer to
2482 /// the specified type.
2483 QualType ASTContext::getPointerType(QualType T) const {
2484  // Unique pointers, to guarantee there is only one pointer of a particular
2485  // structure.
2486  llvm::FoldingSetNodeID ID;
2487  PointerType::Profile(ID, T);
2488 
2489  void *InsertPos = nullptr;
2490  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
2491  return QualType(PT, 0);
2492 
2493  // If the pointee type isn't canonical, this won't be a canonical type either,
2494  // so fill in the canonical type field.
2495  QualType Canonical;
2496  if (!T.isCanonical()) {
2497  Canonical = getPointerType(getCanonicalType(T));
2498 
2499  // Get the new insert position for the node we care about.
2500  PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
2501  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2502  }
2503  PointerType *New = new (*this, TypeAlignment) PointerType(T, Canonical);
2504  Types.push_back(New);
2505  PointerTypes.InsertNode(New, InsertPos);
2506  return QualType(New, 0);
2507 }
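// --- Illustrative sketch (editor's addition, not part of ASTContext.cpp) ---
// The Profile / FindNodeOrInsertPos / InsertNode pattern above is what makes
// structurally identical types pointer-identical, so type equality is a cheap
// pointer comparison rather than a structural walk. Assuming ASTContext &Ctx:
static void demoUniquing(const clang::ASTContext &Ctx) {
  clang::QualType P1 = Ctx.getPointerType(Ctx.IntTy);
  clang::QualType P2 = Ctx.getPointerType(Ctx.IntTy);
  assert(P1 == P2 && "both calls return the same uniqued PointerType node");
  assert(P1.getTypePtr() == P2.getTypePtr());
  (void)P1; (void)P2;
}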
2508 
2510  llvm::FoldingSetNodeID ID;
2511  AdjustedType::Profile(ID, Orig, New);
2512  void *InsertPos = nullptr;
2513  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2514  if (AT)
2515  return QualType(AT, 0);
2516 
2517  QualType Canonical = getCanonicalType(New);
2518 
2519  // Get the new insert position for the node we care about.
2520  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2521  assert(!AT && "Shouldn't be in the map!");
2522 
2523  AT = new (*this, TypeAlignment)
2524  AdjustedType(Type::Adjusted, Orig, New, Canonical);
2525  Types.push_back(AT);
2526  AdjustedTypes.InsertNode(AT, InsertPos);
2527  return QualType(AT, 0);
2528 }
2529 
2530 QualType ASTContext::getDecayedType(QualType T) const {
2531  assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
2532 
2533  QualType Decayed;
2534 
2535  // C99 6.7.5.3p7:
2536  // A declaration of a parameter as "array of type" shall be
2537  // adjusted to "qualified pointer to type", where the type
2538  // qualifiers (if any) are those specified within the [ and ] of
2539  // the array type derivation.
2540  if (T->isArrayType())
2541  Decayed = getArrayDecayedType(T);
2542 
2543  // C99 6.7.5.3p8:
2544  // A declaration of a parameter as "function returning type"
2545  // shall be adjusted to "pointer to function returning type", as
2546  // in 6.3.2.1.
2547  if (T->isFunctionType())
2548  Decayed = getPointerType(T);
2549 
2550  llvm::FoldingSetNodeID ID;
2551  AdjustedType::Profile(ID, T, Decayed);
2552  void *InsertPos = nullptr;
2553  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2554  if (AT)
2555  return QualType(AT, 0);
2556 
2557  QualType Canonical = getCanonicalType(Decayed);
2558 
2559  // Get the new insert position for the node we care about.
2560  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2561  assert(!AT && "Shouldn't be in the map!");
2562 
2563  AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical);
2564  Types.push_back(AT);
2565  AdjustedTypes.InsertNode(AT, InsertPos);
2566  return QualType(AT, 0);
2567 }
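// --- Illustrative sketch (editor's addition, not part of ASTContext.cpp) ---
// C99 parameter decay in action: an array parameter type decays to a pointer
// and a function parameter type decays to a pointer to function, with the
// DecayedType sugar remembering the original spelling. Assuming ASTContext &Ctx:
static void demoDecay(const clang::ASTContext &Ctx) {
  // int[10] decays to int*.
  clang::QualType ArrTy = Ctx.getConstantArrayType(
      Ctx.IntTy, llvm::APInt(32, 10), clang::ArrayType::Normal,
      /*IndexTypeQuals=*/0);
  assert(Ctx.getDecayedType(ArrTy)->isPointerType());
  // void() decays to void(*)().
  clang::QualType FnTy = Ctx.getFunctionNoProtoType(Ctx.VoidTy);
  assert(Ctx.getDecayedType(FnTy)->isFunctionPointerType());
  (void)ArrTy; (void)FnTy;
}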
2568 
2569 /// getBlockPointerType - Return the uniqued reference to the type for
2570 /// a pointer to the specified block.
2571 QualType ASTContext::getBlockPointerType(QualType T) const {
2572  assert(T->isFunctionType() && "block of function types only");
2573  // Unique pointers, to guarantee there is only one block of a particular
2574  // structure.
2575  llvm::FoldingSetNodeID ID;
2576  BlockPointerType::Profile(ID, T);
2577 
2578  void *InsertPos = nullptr;
2579  if (BlockPointerType *PT =
2580  BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
2581  return QualType(PT, 0);
2582 
2583  // If the block pointee type isn't canonical, this won't be a canonical
2584  // type either so fill in the canonical type field.
2585  QualType Canonical;
2586  if (!T.isCanonical()) {
2587  Canonical = getBlockPointerType(getCanonicalType(T));
2588 
2589  // Get the new insert position for the node we care about.
2590  BlockPointerType *NewIP =
2591  BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
2592  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2593  }
2594  BlockPointerType *New
2595  = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
2596  Types.push_back(New);
2597  BlockPointerTypes.InsertNode(New, InsertPos);
2598  return QualType(New, 0);
2599 }
2600 
2601 /// getLValueReferenceType - Return the uniqued reference to the type for an
2602 /// lvalue reference to the specified type.
2603 QualType
2604 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
2605  assert(getCanonicalType(T) != OverloadTy &&
2606  "Unresolved overloaded function type");
2607 
2608  // Unique pointers, to guarantee there is only one pointer of a particular
2609  // structure.
2610  llvm::FoldingSetNodeID ID;
2611  ReferenceType::Profile(ID, T, SpelledAsLValue);
2612 
2613  void *InsertPos = nullptr;
2614  if (LValueReferenceType *RT =
2615  LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
2616  return QualType(RT, 0);
2617 
2618  const ReferenceType *InnerRef = T->getAs<ReferenceType>();
2619 
2620  // If the referencee type isn't canonical, this won't be a canonical type
2621  // either, so fill in the canonical type field.
2622  QualType Canonical;
2623  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
2624  QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
2625  Canonical = getLValueReferenceType(getCanonicalType(PointeeType));
2626 
2627  // Get the new insert position for the node we care about.
2628  LValueReferenceType *NewIP =
2629  LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
2630  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2631  }
2632 
2633  LValueReferenceType *New
2634  = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
2635  SpelledAsLValue);
2636  Types.push_back(New);
2637  LValueReferenceTypes.InsertNode(New, InsertPos);
2638 
2639  return QualType(New, 0);
2640 }
2641 
2642 /// getRValueReferenceType - Return the uniqued reference to the type for an
2643 /// rvalue reference to the specified type.
2645  // Unique pointers, to guarantee there is only one pointer of a particular
2646  // structure.
2647  llvm::FoldingSetNodeID ID;
2648  ReferenceType::Profile(ID, T, false);
2649 
2650  void *InsertPos = nullptr;
2651  if (RValueReferenceType *RT =
2652  RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
2653  return QualType(RT, 0);
2654 
2655  const ReferenceType *InnerRef = T->getAs<ReferenceType>();
2656 
2657  // If the referencee type isn't canonical, this won't be a canonical type
2658  // either, so fill in the canonical type field.
2659  QualType Canonical;
2660  if (InnerRef || !T.isCanonical()) {
2661  QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
2662  Canonical = getRValueReferenceType(getCanonicalType(PointeeType));
2663 
2664  // Get the new insert position for the node we care about.
2665  RValueReferenceType *NewIP =
2666  RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
2667  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2668  }
2669 
2670  RValueReferenceType *New
2671  = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
2672  Types.push_back(New);
2673  RValueReferenceTypes.InsertNode(New, InsertPos);
2674  return QualType(New, 0);
2675 }
2676 
2677 /// getMemberPointerType - Return the uniqued reference to the type for a
2678 /// member pointer to the specified type, in the specified class.
2680  // Unique pointers, to guarantee there is only one pointer of a particular
2681  // structure.
2682  llvm::FoldingSetNodeID ID;
2683  MemberPointerType::Profile(ID, T, Cls);
2684 
2685  void *InsertPos = nullptr;
2686  if (MemberPointerType *PT =
2687  MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
2688  return QualType(PT, 0);
2689 
2690  // If the pointee or class type isn't canonical, this won't be a canonical
2691  // type either, so fill in the canonical type field.
2692  QualType Canonical;
2693  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
2695 
2696  // Get the new insert position for the node we care about.
2697  MemberPointerType *NewIP =
2698  MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
2699  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2700  }
2701  MemberPointerType *New
2702  = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
2703  Types.push_back(New);
2704  MemberPointerTypes.InsertNode(New, InsertPos);
2705  return QualType(New, 0);
2706 }
2707 
2708 /// getConstantArrayType - Return the unique reference to the type for an
2709 /// array of the specified element type.
2710 QualType ASTContext::getConstantArrayType(QualType EltTy,
2711  const llvm::APInt &ArySizeIn,
2712  ArrayType::ArraySizeModifier ASM,
2713  unsigned IndexTypeQuals) const {
2714  assert((EltTy->isDependentType() ||
2715  EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
2716  "Constant array of VLAs is illegal!");
2717 
2718  // Convert the array size into a canonical width matching the pointer size for
2719  // the target.
2720  llvm::APInt ArySize(ArySizeIn);
2721  ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());
2722 
2723  llvm::FoldingSetNodeID ID;
2724  ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, IndexTypeQuals);
2725 
2726  void *InsertPos = nullptr;
2727  if (ConstantArrayType *ATP =
2728  ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
2729  return QualType(ATP, 0);
2730 
2731  // If the element type isn't canonical or has qualifiers, this won't
2732  // be a canonical type either, so fill in the canonical type field.
2733  QualType Canon;
2734  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
2735  SplitQualType canonSplit = getCanonicalType(EltTy).split();
2736  Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize,
2737  ASM, IndexTypeQuals);
2738  Canon = getQualifiedType(Canon, canonSplit.Quals);
2739 
2740  // Get the new insert position for the node we care about.
2741  ConstantArrayType *NewIP =
2742  ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
2743  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2744  }
2745 
2746  ConstantArrayType *New = new(*this,TypeAlignment)
2747  ConstantArrayType(EltTy, Canon, ArySize, ASM, IndexTypeQuals);
2748  ConstantArrayTypes.InsertNode(New, InsertPos);
2749  Types.push_back(New);
2750  return QualType(New, 0);
2751 }
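// --- Illustrative sketch (editor's addition, not part of ASTContext.cpp) ---
// Building a concrete array type ties this back to getTypeInfoImpl above:
// the array's width is the element width times the element count (padded to
// the element alignment). Assuming ASTContext &Ctx:
static void demoConstantArray(const clang::ASTContext &Ctx) {
  clang::QualType Int10 = Ctx.getConstantArrayType(
      Ctx.IntTy, llvm::APInt(32, 10), clang::ArrayType::Normal,
      /*IndexTypeQuals=*/0);
  clang::CharUnits EltSize = Ctx.getTypeSizeInChars(Ctx.IntTy);
  assert(Ctx.getTypeSizeInChars(Int10) == EltSize * 10);
  (void)Int10; (void)EltSize;
}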
2752 
2753 /// getVariableArrayDecayedType - Turns the given type, which may be
2754 /// variably-modified, into the corresponding type with all the known
2755 /// sizes replaced with [*].
2756 QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
2757  // By far the most common case.
2758  if (!type->isVariablyModifiedType()) return type;
2759 
2760  QualType result;
2761 
2762  SplitQualType split = type.getSplitDesugaredType();
2763  const Type *ty = split.Ty;
2764  switch (ty->getTypeClass()) {
2765 #define TYPE(Class, Base)
2766 #define ABSTRACT_TYPE(Class, Base)
2767 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
2768 #include "clang/AST/TypeNodes.def"
2769  llvm_unreachable("didn't desugar past all non-canonical types?");
2770 
2771  // These types should never be variably-modified.
2772  case Type::Builtin:
2773  case Type::Complex:
2774  case Type::Vector:
2775  case Type::ExtVector:
2776  case Type::DependentSizedExtVector:
2777  case Type::DependentAddressSpace:
2778  case Type::ObjCObject:
2779  case Type::ObjCInterface:
2780  case Type::ObjCObjectPointer:
2781  case Type::Record:
2782  case Type::Enum:
2783  case Type::UnresolvedUsing:
2784  case Type::TypeOfExpr:
2785  case Type::TypeOf:
2786  case Type::Decltype:
2787  case Type::UnaryTransform:
2788  case Type::DependentName:
2789  case Type::InjectedClassName:
2790  case Type::TemplateSpecialization:
2791  case Type::DependentTemplateSpecialization:
2792  case Type::TemplateTypeParm:
2793  case Type::SubstTemplateTypeParmPack:
2794  case Type::Auto:
2795  case Type::DeducedTemplateSpecialization:
2796  case Type::PackExpansion:
2797  llvm_unreachable("type should never be variably-modified");
2798 
2799  // These types can be variably-modified but should never need to
2800  // further decay.
2801  case Type::FunctionNoProto:
2802  case Type::FunctionProto:
2803  case Type::BlockPointer:
2804  case Type::MemberPointer:
2805  case Type::Pipe:
2806  return type;
2807 
2808  // These types can be variably-modified. All these modifications
2809  // preserve structure except as noted by comments.
2810  // TODO: if we ever care about optimizing VLAs, there are no-op
2811  // optimizations available here.
2812  case Type::Pointer:
2814  cast<PointerType>(ty)->getPointeeType()));
2815  break;
2816 
2817  case Type::LValueReference: {
2818  const LValueReferenceType *lv = cast<LValueReferenceType>(ty);
2819  result = getLValueReferenceType(
2821  lv->isSpelledAsLValue());
2822  break;
2823  }
2824 
2825  case Type::RValueReference: {
2826  const RValueReferenceType *lv = cast<RValueReferenceType>(ty);
2827  result = getRValueReferenceType(
2829  break;
2830  }
2831 
2832  case Type::Atomic: {
2833  const AtomicType *at = cast<AtomicType>(ty);
2835  break;
2836  }
2837 
2838  case Type::ConstantArray: {
2839  const ConstantArrayType *cat = cast<ConstantArrayType>(ty);
2840  result = getConstantArrayType(
2842  cat->getSize(),
2843  cat->getSizeModifier(),
2844  cat->getIndexTypeCVRQualifiers());
2845  break;
2846  }
2847 
2848  case Type::DependentSizedArray: {
2849  const DependentSizedArrayType *dat = cast<DependentSizedArrayType>(ty);
2850  result = getDependentSizedArrayType(
2852  dat->getSizeExpr(),
2853  dat->getSizeModifier(),
2855  dat->getBracketsRange());
2856  break;
2857  }
2858 
2859  // Turn incomplete types into [*] types.
2860  case Type::IncompleteArray: {
2861  const IncompleteArrayType *iat = cast<IncompleteArrayType>(ty);
2862  result = getVariableArrayType(
2864  /*size*/ nullptr,
2867  SourceRange());
2868  break;
2869  }
2870 
2871  // Turn VLA types into [*] types.
2872  case Type::VariableArray: {
2873  const VariableArrayType *vat = cast<VariableArrayType>(ty);
2874  result = getVariableArrayType(
2876  /*size*/ nullptr,
2879  vat->getBracketsRange());
2880  break;
2881  }
2882  }
2883 
2884  // Apply the top-level qualifiers from the original.
2885  return getQualifiedType(result, split.Quals);
2886 }
2887 
2888 /// getVariableArrayType - Returns a non-unique reference to the type for a
2889 /// variable array of the specified element type.
2891  Expr *NumElts,
2893  unsigned IndexTypeQuals,
2894  SourceRange Brackets) const {
2895  // Since we don't unique expressions, it isn't possible to unique VLAs
2896  // that have an expression provided for their size.
2897  QualType Canon;
2898 
2899  // Be sure to pull qualifiers off the element type.
2900  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
2901  SplitQualType canonSplit = getCanonicalType(EltTy).split();
2902  Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
2903  IndexTypeQuals, Brackets);
2904  Canon = getQualifiedType(Canon, canonSplit.Quals);
2905  }
2906 
2907  VariableArrayType *New = new(*this, TypeAlignment)
2908  VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
2909 
2910  VariableArrayTypes.push_back(New);
2911  Types.push_back(New);
2912  return QualType(New, 0);
2913 }
2914 
2915 /// getDependentSizedArrayType - Returns a non-unique reference to
2916 /// the type for a dependently-sized array of the specified element
2917 /// type.
2919  Expr *numElements,
2921  unsigned elementTypeQuals,
2922  SourceRange brackets) const {
2923  assert((!numElements || numElements->isTypeDependent() ||
2924  numElements->isValueDependent()) &&
2925  "Size must be type- or value-dependent!");
2926 
2927  // Dependently-sized array types that do not have a specified number
2928  // of elements will have their sizes deduced from a dependent
2929  // initializer. We do no canonicalization here at all, which is okay
2930  // because they can't be used in most locations.
2931  if (!numElements) {
2932  DependentSizedArrayType *newType
2933  = new (*this, TypeAlignment)
2934  DependentSizedArrayType(*this, elementType, QualType(),
2935  numElements, ASM, elementTypeQuals,
2936  brackets);
2937  Types.push_back(newType);
2938  return QualType(newType, 0);
2939  }
2940 
2941  // Otherwise, we actually build a new type every time, but we
2942  // also build a canonical type.
2943 
2944  SplitQualType canonElementType = getCanonicalType(elementType).split();
2945 
2946  void *insertPos = nullptr;
2947  llvm::FoldingSetNodeID ID;
2949  QualType(canonElementType.Ty, 0),
2950  ASM, elementTypeQuals, numElements);
2951 
2952  // Look for an existing type with these properties.
2953  DependentSizedArrayType *canonTy =
2954  DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);
2955 
2956  // If we don't have one, build one.
2957  if (!canonTy) {
2958  canonTy = new (*this, TypeAlignment)
2959  DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
2960  QualType(), numElements, ASM, elementTypeQuals,
2961  brackets);
2962  DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
2963  Types.push_back(canonTy);
2964  }
2965 
2966  // Apply qualifiers from the element type to the array.
2967  QualType canon = getQualifiedType(QualType(canonTy,0),
2968  canonElementType.Quals);
2969 
2970  // If we didn't need extra canonicalization for the element type or the size
2971  // expression, then just use that as our result.
2972  if (QualType(canonElementType.Ty, 0) == elementType &&
2973  canonTy->getSizeExpr() == numElements)
2974  return canon;
2975 
2976  // Otherwise, we need to build a type which follows the spelling
2977  // of the element type.
2978  DependentSizedArrayType *sugaredType
2979  = new (*this, TypeAlignment)
2980  DependentSizedArrayType(*this, elementType, canon, numElements,
2981  ASM, elementTypeQuals, brackets);
2982  Types.push_back(sugaredType);
2983  return QualType(sugaredType, 0);
2984 }
2985 
2988  unsigned elementTypeQuals) const {
2989  llvm::FoldingSetNodeID ID;
2990  IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);
2991 
2992  void *insertPos = nullptr;
2993  if (IncompleteArrayType *iat =
2994  IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
2995  return QualType(iat, 0);
2996 
2997  // If the element type isn't canonical, this won't be a canonical type
2998  // either, so fill in the canonical type field. We also have to pull
2999  // qualifiers off the element type.
3000  QualType canon;
3001 
3002  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
3003  SplitQualType canonSplit = getCanonicalType(elementType).split();
3004  canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
3005  ASM, elementTypeQuals);
3006  canon = getQualifiedType(canon, canonSplit.Quals);
3007 
3008  // Get the new insert position for the node we care about.
3009  IncompleteArrayType *existing =
3010  IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
3011  assert(!existing && "Shouldn't be in the map!"); (void) existing;
3012  }
3013 
3014  IncompleteArrayType *newType = new (*this, TypeAlignment)
3015  IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
3016 
3017  IncompleteArrayTypes.InsertNode(newType, insertPos);
3018  Types.push_back(newType);
3019  return QualType(newType, 0);
3020 }
3021 
3022 /// getVectorType - Return the unique reference to a vector type of
3023 /// the specified element type and size. The element type must be a built-in type.
3024 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
3025  VectorType::VectorKind VecKind) const {
3026  assert(vecType->isBuiltinType());
3027 
3028  // Check if we've already instantiated a vector of this type.
3029  llvm::FoldingSetNodeID ID;
3030  VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
3031 
3032  void *InsertPos = nullptr;
3033  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
3034  return QualType(VTP, 0);
3035 
3036  // If the element type isn't canonical, this won't be a canonical type either,
3037  // so fill in the canonical type field.
3038  QualType Canonical;
3039  if (!vecType.isCanonical()) {
3040  Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);
3041 
3042  // Get the new insert position for the node we care about.
3043  VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3044  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3045  }
3046  VectorType *New = new (*this, TypeAlignment)
3047  VectorType(vecType, NumElts, Canonical, VecKind);
3048  VectorTypes.InsertNode(New, InsertPos);
3049  Types.push_back(New);
3050  return QualType(New, 0);
3051 }
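// --- Illustrative sketch (editor's addition, not part of ASTContext.cpp) ---
// A GCC-style 'int __attribute__((vector_size(16)))' on a target with 32-bit
// int is modeled as a GenericVector of four ints; its size is simply the
// element size times the element count. Assuming ASTContext &Ctx:
static void demoVector(const clang::ASTContext &Ctx) {
  clang::QualType V4i = Ctx.getVectorType(Ctx.IntTy, /*NumElts=*/4,
                                          clang::VectorType::GenericVector);
  assert(V4i->isVectorType());
  assert(Ctx.getTypeSize(V4i) == 4 * Ctx.getTypeSize(Ctx.IntTy));
  (void)V4i;
}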
3052 
3053 /// getExtVectorType - Return the unique reference to an extended vector type of
3054 /// the specified element type and size. The element type must be a built-in type.
3055 QualType
3056 ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
3057  assert(vecType->isBuiltinType() || vecType->isDependentType());
3058 
3059  // Check if we've already instantiated a vector of this type.
3060  llvm::FoldingSetNodeID ID;
3061  VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
3063  void *InsertPos = nullptr;
3064  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
3065  return QualType(VTP, 0);
3066 
3067  // If the element type isn't canonical, this won't be a canonical type either,
3068  // so fill in the canonical type field.
3069  QualType Canonical;
3070  if (!vecType.isCanonical()) {
3071  Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);
3072 
3073  // Get the new insert position for the node we care about.
3074  VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3075  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3076  }
3077  ExtVectorType *New = new (*this, TypeAlignment)
3078  ExtVectorType(vecType, NumElts, Canonical);
3079  VectorTypes.InsertNode(New, InsertPos);
3080  Types.push_back(New);
3081  return QualType(New, 0);
3082 }
3083 
3084 QualType
3086  Expr *SizeExpr,
3087  SourceLocation AttrLoc) const {
3088  llvm::FoldingSetNodeID ID;
3090  SizeExpr);
3091 
3092  void *InsertPos = nullptr;
3094  = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3096  if (Canon) {
3097  // We already have a canonical version of this array type; use it as
3098  // the canonical type for a newly-built type.
3099  New = new (*this, TypeAlignment)
3100  DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0),
3101  SizeExpr, AttrLoc);
3102  } else {
3103  QualType CanonVecTy = getCanonicalType(vecType);
3104  if (CanonVecTy == vecType) {
3105  New = new (*this, TypeAlignment)
3106  DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr,
3107  AttrLoc);
3108 
3109  DependentSizedExtVectorType *CanonCheck
3110  = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3111  assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
3112  (void)CanonCheck;
3113  DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
3114  } else {
3115  QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
3116  SourceLocation());
3117  New = new (*this, TypeAlignment)
3118  DependentSizedExtVectorType(*this, vecType, Canon, SizeExpr, AttrLoc);
3119  }
3120  }
3121 
3122  Types.push_back(New);
3123  return QualType(New, 0);
3124 }
3125 
3127  Expr *AddrSpaceExpr,
3128  SourceLocation AttrLoc) const {
3129  assert(AddrSpaceExpr->isInstantiationDependent());
3130 
3131  QualType canonPointeeType = getCanonicalType(PointeeType);
3132 
3133  void *insertPos = nullptr;
3134  llvm::FoldingSetNodeID ID;
3135  DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
3136  AddrSpaceExpr);
3137 
3138  DependentAddressSpaceType *canonTy =
3139  DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);
3140 
3141  if (!canonTy) {
3142  canonTy = new (*this, TypeAlignment)
3143  DependentAddressSpaceType(*this, canonPointeeType,
3144  QualType(), AddrSpaceExpr, AttrLoc);
3145  DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
3146  Types.push_back(canonTy);
3147  }
3148 
3149  if (canonPointeeType == PointeeType &&
3150  canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
3151  return QualType(canonTy, 0);
3152 
3153  DependentAddressSpaceType *sugaredType
3154  = new (*this, TypeAlignment)
3155  DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0),
3156  AddrSpaceExpr, AttrLoc);
3157  Types.push_back(sugaredType);
3158  return QualType(sugaredType, 0);
3159 }
3160 
3161 /// \brief Determine whether \p T is canonical as the result type of a function.
3163  return T.isCanonical() &&
3166 }
3167 
3168 /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
3169 ///
3170 QualType
3172  const FunctionType::ExtInfo &Info) const {
3173  // Unique functions, to guarantee there is only one function of a particular
3174  // structure.
3175  llvm::FoldingSetNodeID ID;
3176  FunctionNoProtoType::Profile(ID, ResultTy, Info);
3177 
3178  void *InsertPos = nullptr;
3179  if (FunctionNoProtoType *FT =
3180  FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
3181  return QualType(FT, 0);
3182 
3183  QualType Canonical;
3184  if (!isCanonicalResultType(ResultTy)) {
3185  Canonical =
3187 
3188  // Get the new insert position for the node we care about.
3189  FunctionNoProtoType *NewIP =
3190  FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
3191  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3192  }
3193 
3194  FunctionNoProtoType *New = new (*this, TypeAlignment)
3195  FunctionNoProtoType(ResultTy, Canonical, Info);
3196  Types.push_back(New);
3197  FunctionNoProtoTypes.InsertNode(New, InsertPos);
3198  return QualType(New, 0);
3199 }
3200 
3201 CanQualType
3202 ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
3203  CanQualType CanResultType = getCanonicalType(ResultType);
3204 
3205  // Canonical result types do not have ARC lifetime qualifiers.
3206  if (CanResultType.getQualifiers().hasObjCLifetime()) {
3207  Qualifiers Qs = CanResultType.getQualifiers();
3208  Qs.removeObjCLifetime();
3209  return CanQualType::CreateUnsafe(
3210  getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
3211  }
3212 
3213  return CanResultType;
3214 }
3215 
3217  const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
3218  if (ESI.Type == EST_None)
3219  return true;
3220  if (!NoexceptInType)
3221  return false;
3222 
3223  // C++17 onwards: exception specification is part of the type, as a simple
3224  // boolean "can this function type throw".
3225  if (ESI.Type == EST_BasicNoexcept)
3226  return true;
3227 
3228  // A dynamic exception specification is canonical if it only contains pack
3229  // expansions (so we can't tell whether it's non-throwing) and all its
3230  // contained types are canonical.
3231  if (ESI.Type == EST_Dynamic) {
3232  bool AnyPackExpansions = false;
3233  for (QualType ET : ESI.Exceptions) {
3234  if (!ET.isCanonical())
3235  return false;
3236  if (ET->getAs<PackExpansionType>())
3237  AnyPackExpansions = true;
3238  }
3239  return AnyPackExpansions;
3240  }
3241 
3242  // A noexcept(expr) specification is (possibly) canonical if expr is
3243  // value-dependent.
3244  if (ESI.Type == EST_ComputedNoexcept)
3245  return ESI.NoexceptExpr && ESI.NoexceptExpr->isValueDependent();
3246 
3247  return false;
3248 }
3249 
3250 QualType ASTContext::getFunctionTypeInternal(
3251  QualType ResultTy, ArrayRef<QualType> ArgArray,
3252  const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
3253  size_t NumArgs = ArgArray.size();
3254 
3255  // Unique functions, to guarantee there is only one function of a particular
3256  // structure.
3257  llvm::FoldingSetNodeID ID;
3258  FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
3259  *this, true);
3260 
3261  QualType Canonical;
3262  bool Unique = false;
3263 
3264  void *InsertPos = nullptr;
3265  if (FunctionProtoType *FPT =
3266  FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
3267  QualType Existing = QualType(FPT, 0);
3268 
3269  // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
3270  // it so long as our exception specification doesn't contain a dependent
3271  // noexcept expression, or we're just looking for a canonical type.
3272  // Otherwise, we're going to need to create a type
3273  // sugar node to hold the concrete expression.
3274  if (OnlyWantCanonical || EPI.ExceptionSpec.Type != EST_ComputedNoexcept ||
3275  EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
3276  return Existing;
3277 
3278  // We need a new type sugar node for this one, to hold the new noexcept
3279  // expression. We do no canonicalization here, but that's OK since we don't
3280  // expect to see the same noexcept expression much more than once.
3281  Canonical = getCanonicalType(Existing);
3282  Unique = true;
3283  }
3284 
3285  bool NoexceptInType = getLangOpts().CPlusPlus1z;
3286  bool IsCanonicalExceptionSpec =
3287  isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);
3288 
3289  // Determine whether the type being created is already canonical or not.
3290  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
3291  isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
3292  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
3293  if (!ArgArray[i].isCanonicalAsParam())
3294  isCanonical = false;
3295 
3296  if (OnlyWantCanonical)
3297  assert(isCanonical &&
3298  "given non-canonical parameters constructing canonical type");
3299 
3300  // If this type isn't canonical, get the canonical version of it if we don't
3301  // already have it. The exception spec is only partially part of the
3302  // canonical type, and only in C++17 onwards.
3303  if (!isCanonical && Canonical.isNull()) {
3304  SmallVector<QualType, 16> CanonicalArgs;
3305  CanonicalArgs.reserve(NumArgs);
3306  for (unsigned i = 0; i != NumArgs; ++i)
3307  CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
3308 
3309  llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
3310  FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
3311  CanonicalEPI.HasTrailingReturn = false;
3312 
3313  if (IsCanonicalExceptionSpec) {
3314  // Exception spec is already OK.
3315  } else if (NoexceptInType) {
3316  switch (EPI.ExceptionSpec.Type) {
3318  // We don't know yet. It shouldn't matter what we pick here; no-one
3319  // should ever look at this.
3320  LLVM_FALLTHROUGH;
3321  case EST_None: case EST_MSAny:
3322  CanonicalEPI.ExceptionSpec.Type = EST_None;
3323  break;
3324 
3325  // A dynamic exception specification is almost always "not noexcept",
3326  // with the exception that a pack expansion might expand to no types.
3327  case EST_Dynamic: {
3328  bool AnyPacks = false;
3329  for (QualType ET : EPI.ExceptionSpec.Exceptions) {
3330  if (ET->getAs<PackExpansionType>())
3331  AnyPacks = true;
3332  ExceptionTypeStorage.push_back(getCanonicalType(ET));
3333  }
3334  if (!AnyPacks)
3335  CanonicalEPI.ExceptionSpec.Type = EST_None;
3336  else {
3337  CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
3338  CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
3339  }
3340  break;
3341  }
3342 
3344  CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
3345  break;
3346 
3347  case EST_ComputedNoexcept:
3348  llvm::APSInt Value(1);
3349  auto *E = CanonicalEPI.ExceptionSpec.NoexceptExpr;
3350  if (!E || !E->isIntegerConstantExpr(Value, *this, nullptr,
3351  /*IsEvaluated*/false)) {
3352  // This noexcept specification is invalid.
3353  // FIXME: Should this be able to happen?
3354  CanonicalEPI.ExceptionSpec.Type = EST_None;
3355  break;
3356  }
3357 
3358  CanonicalEPI.ExceptionSpec.Type =
3359  Value.getBoolValue() ? EST_BasicNoexcept : EST_None;
3360  break;
3361  }
3362  } else {
3363  CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
3364  }
3365 
3366  // Adjust the canonical function result type.
3367  CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
3368  Canonical =
3369  getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);
3370 
3371  // Get the new insert position for the node we care about.
3372  FunctionProtoType *NewIP =
3373  FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
3374  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3375  }
3376 
3377  // FunctionProtoType objects are allocated with extra bytes after
3378  // them for three variable size arrays at the end:
3379  // - parameter types
3380  // - exception types
3381  // - extended parameter information
3382  // Instead of the exception types, there could be a noexcept
3383  // expression, or information used to resolve the exception
3384  // specification.
3385  size_t Size = sizeof(FunctionProtoType) +
3386  NumArgs * sizeof(QualType);
3387 
3388  if (EPI.ExceptionSpec.Type == EST_Dynamic) {
3389  Size += EPI.ExceptionSpec.Exceptions.size() * sizeof(QualType);
3390  } else if (EPI.ExceptionSpec.Type == EST_ComputedNoexcept) {
3391  Size += sizeof(Expr*);
3392  } else if (EPI.ExceptionSpec.Type == EST_Uninstantiated) {
3393  Size += 2 * sizeof(FunctionDecl*);
3394  } else if (EPI.ExceptionSpec.Type == EST_Unevaluated) {
3395  Size += sizeof(FunctionDecl*);
3396  }
3397 
3398  // Put the ExtParameterInfos last. If all were equal, it would make
3399  // more sense to put these before the exception specification, because
3400  // it's much easier to skip past them compared to the elaborate switch
3401  // required to skip the exception specification. However, all is not
3402  // equal; ExtParameterInfos are used to model very uncommon features,
3403  // and it's better not to burden the more common paths.
3404  if (EPI.ExtParameterInfos) {
3405  Size += NumArgs * sizeof(FunctionProtoType::ExtParameterInfo);
3406  }
3407 
3408  FunctionProtoType *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment);
3409  FunctionProtoType::ExtProtoInfo newEPI = EPI;
3410  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
3411  Types.push_back(FTP);
3412  if (!Unique)
3413  FunctionProtoTypes.InsertNode(FTP, InsertPos);
3414  return QualType(FTP, 0);
3415 }
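// Editor's note: illustrative sketch, not part of the original file. Callers
// normally reach getFunctionTypeInternal through the public getFunctionType
// wrapper; this hypothetical helper only demonstrates the calling convention,
// assuming an ASTContext &Ctx is available.
static QualType buildSimplePrototypeExample(ASTContext &Ctx) {
  QualType Params[] = { Ctx.FloatTy, Ctx.DoubleTy };
  FunctionProtoType::ExtProtoInfo EPI;  // defaults: not variadic, no exception spec
  return Ctx.getFunctionType(Ctx.IntTy, Params, EPI);  // int(float, double)
}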
3416 
3417 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
3418  llvm::FoldingSetNodeID ID;
3419  PipeType::Profile(ID, T, ReadOnly);
3420 
3421  void *InsertPos = 0;
3422  if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
3423  return QualType(PT, 0);
3424 
3425  // If the pipe element type isn't canonical, this won't be a canonical type
3426  // either, so fill in the canonical type field.
3427  QualType Canonical;
3428  if (!T.isCanonical()) {
3429  Canonical = getPipeType(getCanonicalType(T), ReadOnly);
3430 
3431  // Get the new insert position for the node we care about.
3432  PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
3433  assert(!NewIP && "Shouldn't be in the map!");
3434  (void)NewIP;
3435  }
3436  PipeType *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly);
3437  Types.push_back(New);
3438  PipeTypes.InsertNode(New, InsertPos);
3439  return QualType(New, 0);
3440 }
3441 
3442 QualType ASTContext::getReadPipeType(QualType T) const {
3443  return getPipeType(T, true);
3444 }
3445 
3446 QualType ASTContext::getWritePipeType(QualType T) const {
3447  return getPipeType(T, false);
3448 }
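// Editor's note: illustrative sketch, not part of the original file. For an
// OpenCL declaration such as "read_only pipe int p;" the pipe types would be
// requested roughly as below; the helper itself is hypothetical and assumes an
// ASTContext &Ctx.
static void buildPipeTypeExample(ASTContext &Ctx) {
  QualType RO = Ctx.getReadPipeType(Ctx.IntTy);   // read_only pipe int
  QualType WO = Ctx.getWritePipeType(Ctx.IntTy);  // write_only pipe int
  (void)RO; (void)WO;
}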
3449 
3450 #ifndef NDEBUG
3451 static bool NeedsInjectedClassNameType(const RecordDecl *D) {
3452  if (!isa<CXXRecordDecl>(D)) return false;
3453  const CXXRecordDecl *RD = cast<CXXRecordDecl>(D);
3454  if (isa<ClassTemplatePartialSpecializationDecl>(RD))
3455  return true;
3456  if (RD->getDescribedClassTemplate() &&
3457  !isa<ClassTemplateSpecializationDecl>(RD))
3458  return true;
3459  return false;
3460 }
3461 #endif
3462 
3463 /// getInjectedClassNameType - Return the unique reference to the
3464 /// injected class name type for the specified templated declaration.
3465 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
3466  QualType TST) const {
3467  assert(NeedsInjectedClassNameType(Decl));
3468  if (Decl->TypeForDecl) {
3469  assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
3470  } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
3471  assert(PrevDecl->TypeForDecl && "previous declaration has no type");
3472  Decl->TypeForDecl = PrevDecl->TypeForDecl;
3473  assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
3474  } else {
3475  Type *newType =
3476  new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
3477  Decl->TypeForDecl = newType;
3478  Types.push_back(newType);
3479  }
3480  return QualType(Decl->TypeForDecl, 0);
3481 }
3482 
3483 /// getTypeDeclType - Return the unique reference to the type for the
3484 /// specified type declaration.
3485 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
3486  assert(Decl && "Passed null for Decl param");
3487  assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
3488 
3489  if (const TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Decl))
3490  return getTypedefType(Typedef);
3491 
3492  assert(!isa<TemplateTypeParmDecl>(Decl) &&
3493  "Template type parameter types are always available.");
3494 
3495  if (const RecordDecl *Record = dyn_cast<RecordDecl>(Decl)) {
3496  assert(Record->isFirstDecl() && "struct/union has previous declaration");
3497  assert(!NeedsInjectedClassNameType(Record));
3498  return getRecordType(Record);
3499  } else if (const EnumDecl *Enum = dyn_cast<EnumDecl>(Decl)) {
3500  assert(Enum->isFirstDecl() && "enum has previous declaration");
3501  return getEnumType(Enum);
3502  } else if (const UnresolvedUsingTypenameDecl *Using =
3503  dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
3504  Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
3505  Decl->TypeForDecl = newType;
3506  Types.push_back(newType);
3507  } else
3508  llvm_unreachable("TypeDecl without a type?");
3509 
3510  return QualType(Decl->TypeForDecl, 0);
3511 }
3512 
3513 /// getTypedefType - Return the unique reference to the type for the
3514 /// specified typedef name decl.
3515 QualType
3516 ASTContext::getTypedefType(const TypedefNameDecl *Decl,
3517  QualType Canonical) const {
3518  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
3519 
3520  if (Canonical.isNull())
3521  Canonical = getCanonicalType(Decl->getUnderlyingType());
3522  TypedefType *newType = new(*this, TypeAlignment)
3523  TypedefType(Type::Typedef, Decl, Canonical);
3524  Decl->TypeForDecl = newType;
3525  Types.push_back(newType);
3526  return QualType(newType, 0);
3527 }
3528 
3529 QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
3530  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
3531 
3532  if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
3533  if (PrevDecl->TypeForDecl)
3534  return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
3535 
3536  RecordType *newType = new (*this, TypeAlignment) RecordType(Decl);
3537  Decl->TypeForDecl = newType;
3538  Types.push_back(newType);
3539  return QualType(newType, 0);
3540 }
3541 
3542 QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
3543  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
3544 
3545  if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
3546  if (PrevDecl->TypeForDecl)
3547  return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
3548 
3549  EnumType *newType = new (*this, TypeAlignment) EnumType(Decl);
3550  Decl->TypeForDecl = newType;
3551  Types.push_back(newType);
3552  return QualType(newType, 0);
3553 }
3554 
3555 QualType ASTContext::getAttributedType(AttributedType::Kind attrKind,
3556  QualType modifiedType,
3557  QualType equivalentType) {
3558  llvm::FoldingSetNodeID id;
3559  AttributedType::Profile(id, attrKind, modifiedType, equivalentType);
3560 
3561  void *insertPos = nullptr;
3562  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
3563  if (type) return QualType(type, 0);
3564 
3565  QualType canon = getCanonicalType(equivalentType);
3566  type = new (*this, TypeAlignment)
3567  AttributedType(canon, attrKind, modifiedType, equivalentType);
3568 
3569  Types.push_back(type);
3570  AttributedTypes.InsertNode(type, insertPos);
3571 
3572  return QualType(type, 0);
3573 }
3574 
3575 /// \brief Retrieve a substitution-result type.
3576 QualType
3577 ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
3578  QualType Replacement) const {
3579  assert(Replacement.isCanonical()
3580  && "replacement types must always be canonical");
3581 
3582  llvm::FoldingSetNodeID ID;
3583  SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
3584  void *InsertPos = nullptr;
3585  SubstTemplateTypeParmType *SubstParm
3586  = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
3587 
3588  if (!SubstParm) {
3589  SubstParm = new (*this, TypeAlignment)
3590  SubstTemplateTypeParmType(Parm, Replacement);
3591  Types.push_back(SubstParm);
3592  SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
3593  }
3594 
3595  return QualType(SubstParm, 0);
3596 }
3597 
3598 /// \brief Retrieve the type substituted for a template type parameter pack.
3599 QualType ASTContext::getSubstTemplateTypeParmPackType(
3600  const TemplateTypeParmType *Parm,
3601  const TemplateArgument &ArgPack) {
3602 #ifndef NDEBUG
3603  for (const auto &P : ArgPack.pack_elements()) {
3604  assert(P.getKind() == TemplateArgument::Type &&"Pack contains a non-type");
3605  assert(P.getAsType().isCanonical() && "Pack contains non-canonical type");
3606  }
3607 #endif
3608 
3609  llvm::FoldingSetNodeID ID;
3610  SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
3611  void *InsertPos = nullptr;
3612  if (SubstTemplateTypeParmPackType *SubstParm
3613  = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
3614  return QualType(SubstParm, 0);
3615 
3616  QualType Canon;
3617  if (!Parm->isCanonicalUnqualified()) {
3618  Canon = getCanonicalType(QualType(Parm, 0));
3619  Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
3620  ArgPack);
3621  SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
3622  }
3623 
3624  SubstTemplateTypeParmPackType *SubstParm
3625  = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
3626  ArgPack);
3627  Types.push_back(SubstParm);
3628  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
3629  return QualType(SubstParm, 0);
3630 }
3631 
3632 /// \brief Retrieve the template type parameter type for a template
3633 /// parameter or parameter pack with the given depth, index, and (optionally)
3634 /// name.
3635 QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
3636  bool ParameterPack,
3637  TemplateTypeParmDecl *TTPDecl) const {
3638  llvm::FoldingSetNodeID ID;
3639  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
3640  void *InsertPos = nullptr;
3641  TemplateTypeParmType *TypeParm
3642  = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
3643 
3644  if (TypeParm)
3645  return QualType(TypeParm, 0);
3646 
3647  if (TTPDecl) {
3648  QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
3649  TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);
3650 
3651  TemplateTypeParmType *TypeCheck
3652  = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
3653  assert(!TypeCheck && "Template type parameter canonical type broken");
3654  (void)TypeCheck;
3655  } else
3656  TypeParm = new (*this, TypeAlignment)
3657  TemplateTypeParmType(Depth, Index, ParameterPack);
3658 
3659  Types.push_back(TypeParm);
3660  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
3661 
3662  return QualType(TypeParm, 0);
3663 }
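// Editor's note: illustrative example, not part of the original file. The
// (Depth, Index) pair locates a template parameter positionally, e.g.:
//
//   template <typename T>             // T is depth 0, index 0
//   struct Outer {
//     template <typename U, int N>    // U is depth 1, index 0; N is depth 1, index 1
//     void f();
//   };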
3664 
3665 TypeSourceInfo *
3666 ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
3667  SourceLocation NameLoc,
3668  const TemplateArgumentListInfo &Args,
3669  QualType Underlying) const {
3670  assert(!Name.getAsDependentTemplateName() &&
3671  "No dependent template names here!");
3672  QualType TST = getTemplateSpecializationType(Name, Args, Underlying);
3673 
3674  TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
3675  TemplateSpecializationTypeLoc TL =
3676  DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
3677  TL.setTemplateKeywordLoc(SourceLocation());
3678  TL.setTemplateNameLoc(NameLoc);
3679  TL.setLAngleLoc(Args.getLAngleLoc());
3680  TL.setRAngleLoc(Args.getRAngleLoc());
3681  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
3682  TL.setArgLocInfo(i, Args[i].getLocInfo());
3683  return DI;
3684 }
3685 
3686 QualType
3687 ASTContext::getTemplateSpecializationType(TemplateName Template,
3688  const TemplateArgumentListInfo &Args,
3689  QualType Underlying) const {
3690  assert(!Template.getAsDependentTemplateName() &&
3691  "No dependent template names here!");
3692 
3693  SmallVector<TemplateArgument, 4> ArgVec;
3694  ArgVec.reserve(Args.size());
3695  for (const TemplateArgumentLoc &Arg : Args.arguments())
3696  ArgVec.push_back(Arg.getArgument());
3697 
3698  return getTemplateSpecializationType(Template, ArgVec, Underlying);
3699 }
3700 
3701 #ifndef NDEBUG
3702 static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
3703  for (const TemplateArgument &Arg : Args)
3704  if (Arg.isPackExpansion())
3705  return true;
3706 
3707  return true;
3708 }
3709 #endif
3710 
3711 QualType
3712 ASTContext::getTemplateSpecializationType(TemplateName Template,
3713  ArrayRef<TemplateArgument> Args,
3714  QualType Underlying) const {
3715  assert(!Template.getAsDependentTemplateName() &&
3716  "No dependent template names here!");
3717  // Look through qualified template names.
3718  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
3719  Template = TemplateName(QTN->getTemplateDecl());
3720 
3721  bool IsTypeAlias =
3722  Template.getAsTemplateDecl() &&
3723  isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
3724  QualType CanonType;
3725  if (!Underlying.isNull())
3726  CanonType = getCanonicalType(Underlying);
3727  else {
3728  // We can get here with an alias template when the specialization contains
3729  // a pack expansion that does not match up with a parameter pack.
3730  assert((!IsTypeAlias || hasAnyPackExpansions(Args)) &&
3731  "Caller must compute aliased type");
3732  IsTypeAlias = false;
3733  CanonType = getCanonicalTemplateSpecializationType(Template, Args);
3734  }
3735 
3736  // Allocate the (non-canonical) template specialization type, but don't
3737  // try to unique it: these types typically have location information that
3738  // we don't unique and don't want to lose.
3739  void *Mem = Allocate(sizeof(TemplateSpecializationType) +
3740  sizeof(TemplateArgument) * Args.size() +
3741  (IsTypeAlias? sizeof(QualType) : 0),
3742  TypeAlignment);
3743  TemplateSpecializationType *Spec
3744  = new (Mem) TemplateSpecializationType(Template, Args, CanonType,
3745  IsTypeAlias ? Underlying : QualType());
3746 
3747  Types.push_back(Spec);
3748  return QualType(Spec, 0);
3749 }
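// Editor's note: illustrative example, not part of the original file. Given
//   template <typename T> using Ptr = T*;
// the written type Ptr<int> is represented by a (non-uniqued)
// TemplateSpecializationType that keeps the alias and its argument as sugar,
// while its canonical type is simply "int *".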
3750 
3751 QualType ASTContext::getCanonicalTemplateSpecializationType(
3752  TemplateName Template, ArrayRef<TemplateArgument> Args) const {
3753  assert(!Template.getAsDependentTemplateName() &&
3754  "No dependent template names here!");
3755 
3756  // Look through qualified template names.
3757  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
3758  Template = TemplateName(QTN->getTemplateDecl());
3759 
3760  // Build the canonical template specialization type.
3761  TemplateName CanonTemplate = getCanonicalTemplateName(Template);
3762  SmallVector<TemplateArgument, 4> CanonArgs;
3763  unsigned NumArgs = Args.size();
3764  CanonArgs.reserve(NumArgs);
3765  for (const TemplateArgument &Arg : Args)
3766  CanonArgs.push_back(getCanonicalTemplateArgument(Arg));
3767 
3768  // Determine whether this canonical template specialization type already
3769  // exists.
3770  llvm::FoldingSetNodeID ID;
3771  TemplateSpecializationType::Profile(ID, CanonTemplate,
3772  CanonArgs, *this);
3773 
3774  void *InsertPos = nullptr;
3775  TemplateSpecializationType *Spec
3776  = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
3777 
3778  if (!Spec) {
3779  // Allocate a new canonical template specialization type.
3780  void *Mem = Allocate((sizeof(TemplateSpecializationType) +
3781  sizeof(TemplateArgument) * NumArgs),
3782  TypeAlignment);
3783  Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
3784  CanonArgs,
3785  QualType(), QualType());
3786  Types.push_back(Spec);
3787  TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
3788  }
3789 
3790  assert(Spec->isDependentType() &&
3791  "Non-dependent template-id type must have a canonical type");
3792  return QualType(Spec, 0);
3793 }
3794 
3795 QualType
3796 ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
3797  NestedNameSpecifier *NNS,
3798  QualType NamedType) const {
3799  llvm::FoldingSetNodeID ID;
3800  ElaboratedType::Profile(ID, Keyword, NNS, NamedType);
3801 
3802  void *InsertPos = nullptr;
3803  ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
3804  if (T)
3805  return QualType(T, 0);
3806 
3807  QualType Canon = NamedType;
3808  if (!Canon.isCanonical()) {
3809  Canon = getCanonicalType(NamedType);
3810  ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
3811  assert(!CheckT && "Elaborated canonical type broken");
3812  (void)CheckT;
3813  }
3814 
3815  T = new (*this, TypeAlignment) ElaboratedType(Keyword, NNS, NamedType, Canon);
3816  Types.push_back(T);
3817  ElaboratedTypes.InsertNode(T, InsertPos);
3818  return QualType(T, 0);
3819 }
3820 
3821 QualType
3822 ASTContext::getParenType(QualType InnerType) const {
3823  llvm::FoldingSetNodeID ID;
3824  ParenType::Profile(ID, InnerType);
3825 
3826  void *InsertPos = nullptr;
3827  ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
3828  if (T)
3829  return QualType(T, 0);
3830 
3831  QualType Canon = InnerType;
3832  if (!Canon.isCanonical()) {
3833  Canon = getCanonicalType(InnerType);
3834  ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
3835  assert(!CheckT && "Paren canonical type broken");
3836  (void)CheckT;
3837  }
3838 
3839  T = new (*this, TypeAlignment) ParenType(InnerType, Canon);
3840  Types.push_back(T);
3841  ParenTypes.InsertNode(T, InsertPos);
3842  return QualType(T, 0);
3843 }
3844 
3845 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
3846  NestedNameSpecifier *NNS,
3847  const IdentifierInfo *Name,
3848  QualType Canon) const {
3849  if (Canon.isNull()) {
3850  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
3851  if (CanonNNS != NNS)
3852  Canon = getDependentNameType(Keyword, CanonNNS, Name);
3853  }
3854 
3855  llvm::FoldingSetNodeID ID;
3856  DependentNameType::Profile(ID, Keyword, NNS, Name);
3857 
3858  void *InsertPos = nullptr;
3859  DependentNameType *T
3860  = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
3861  if (T)
3862  return QualType(T, 0);
3863 
3864  T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon);
3865  Types.push_back(T);
3866  DependentNameTypes.InsertNode(T, InsertPos);
3867  return QualType(T, 0);
3868 }
3869 
3870 QualType
3871 ASTContext::getDependentTemplateSpecializationType(
3872  ElaboratedTypeKeyword Keyword,
3873  NestedNameSpecifier *NNS,
3874  const IdentifierInfo *Name,
3875  const TemplateArgumentListInfo &Args) const {
3876  // TODO: avoid this copy
3877  SmallVector<TemplateArgument, 16> ArgCopy;
3878  for (unsigned I = 0, E = Args.size(); I != E; ++I)
3879  ArgCopy.push_back(Args[I].getArgument());
3880  return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy);
3881 }
3882 
3883 QualType
3884 ASTContext::getDependentTemplateSpecializationType(
3885  ElaboratedTypeKeyword Keyword,
3886  NestedNameSpecifier *NNS,
3887  const IdentifierInfo *Name,
3888  ArrayRef<TemplateArgument> Args) const {
3889  assert((!NNS || NNS->isDependent()) &&
3890  "nested-name-specifier must be dependent");
3891 
3892  llvm::FoldingSetNodeID ID;
3893  DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
3894  Name, Args);
3895 
3896  void *InsertPos = nullptr;
3897  DependentTemplateSpecializationType *T
3898  = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
3899  if (T)
3900  return QualType(T, 0);
3901 
3902  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
3903 
3904  ElaboratedTypeKeyword CanonKeyword = Keyword;
3905  if (Keyword == ETK_None) CanonKeyword = ETK_Typename;
3906 
3907  bool AnyNonCanonArgs = false;
3908  unsigned NumArgs = Args.size();
3909  SmallVector<TemplateArgument, 16> CanonArgs(NumArgs);
3910  for (unsigned I = 0; I != NumArgs; ++I) {
3911  CanonArgs[I] = getCanonicalTemplateArgument(Args[I]);
3912  if (!CanonArgs[I].structurallyEquals(Args[I]))
3913  AnyNonCanonArgs = true;
3914  }
3915 
3916  QualType Canon;
3917  if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
3918  Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
3919  Name,
3920  CanonArgs);
3921 
3922  // Find the insert position again.
3923  DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
3924  }
3925 
3926  void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
3927  sizeof(TemplateArgument) * NumArgs),
3928  TypeAlignment);
3929  T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
3930  Name, Args, Canon);
3931  Types.push_back(T);
3932  DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
3933  return QualType(T, 0);
3934 }
3935 
3936 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
3937  TemplateArgument Arg;
3938  if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
3939  QualType ArgType = getTypeDeclType(TTP);
3940  if (TTP->isParameterPack())
3941  ArgType = getPackExpansionType(ArgType, None);
3942 
3943  Arg = TemplateArgument(ArgType);
3944  } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
3945  Expr *E = new (*this) DeclRefExpr(
3946  NTTP, /*enclosing*/false,
3947  NTTP->getType().getNonLValueExprType(*this),
3948  Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());
3949 
3950  if (NTTP->isParameterPack())
3951  E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(),
3952  None);
3953  Arg = TemplateArgument(E);
3954  } else {
3955  auto *TTP = cast<TemplateTemplateParmDecl>(Param);
3956  if (TTP->isParameterPack())
3957  Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>());
3958  else
3959  Arg = TemplateArgument(TemplateName(TTP));
3960  }
3961 
3962  if (Param->isTemplateParameterPack())
3963  Arg = TemplateArgument::CreatePackCopy(*this, Arg);
3964 
3965  return Arg;
3966 }
3967 
3968 void
3969 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params,
3970  SmallVectorImpl<TemplateArgument> &Args) {
3971  Args.reserve(Args.size() + Params->size());
3972 
3973  for (NamedDecl *Param : *Params)
3974  Args.push_back(getInjectedTemplateArg(Param));
3975 }
3976 
3977 QualType ASTContext::getPackExpansionType(QualType Pattern,
3978  Optional<unsigned> NumExpansions) {
3979  llvm::FoldingSetNodeID ID;
3980  PackExpansionType::Profile(ID, Pattern, NumExpansions);
3981 
3982  assert(Pattern->containsUnexpandedParameterPack() &&
3983  "Pack expansions must expand one or more parameter packs");
3984  void *InsertPos = nullptr;
3985  PackExpansionType *T
3986  = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
3987  if (T)
3988  return QualType(T, 0);
3989 
3990  QualType Canon;
3991  if (!Pattern.isCanonical()) {
3992  Canon = getCanonicalType(Pattern);
3993  // The canonical type might not contain an unexpanded parameter pack, if it
3994  // contains an alias template specialization which ignores one of its
3995  // parameters.
3996  if (Canon->containsUnexpandedParameterPack()) {
3997  Canon = getPackExpansionType(Canon, NumExpansions);
3998 
3999  // Find the insert position again, in case we inserted an element into
4000  // PackExpansionTypes and invalidated our insert position.
4001  PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
4002  }
4003  }
4004 
4005  T = new (*this, TypeAlignment)
4006  PackExpansionType(Pattern, Canon, NumExpansions);
4007  Types.push_back(T);
4008  PackExpansionTypes.InsertNode(T, InsertPos);
4009  return QualType(T, 0);
4010 }
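// Editor's note: illustrative example, not part of the original file. In
//   template <typename ...Ts> void f(Ts ...args);
// the written parameter type "Ts..." is modeled as a PackExpansionType whose
// pattern is the TemplateTypeParmType for Ts, with the number of expansions
// unknown until the pack is actually expanded.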
4011 
4012 /// CmpProtocolNames - Comparison predicate for sorting protocols
4013 /// alphabetically.
4014 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
4015  ObjCProtocolDecl *const *RHS) {
4016  return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName());
4017 }
4018 
4019 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
4020  if (Protocols.empty()) return true;
4021 
4022  if (Protocols[0]->getCanonicalDecl() != Protocols[0])
4023  return false;
4024 
4025  for (unsigned i = 1; i != Protocols.size(); ++i)
4026  if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
4027  Protocols[i]->getCanonicalDecl() != Protocols[i])
4028  return false;
4029  return true;
4030 }
4031 
4032 static void
4033 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
4034  // Sort protocols, keyed by name.
4035  llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames);
4036 
4037  // Canonicalize.
4038  for (ObjCProtocolDecl *&P : Protocols)
4039  P = P->getCanonicalDecl();
4040 
4041  // Remove duplicates.
4042  auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end());
4043  Protocols.erase(ProtocolsEnd, Protocols.end());
4044 }
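// Editor's note: minimal sketch of the same sort/unique idiom on plain data,
// not part of the original file; it assumes llvm/ADT/STLExtras.h and
// <algorithm> are available.
static void sortAndUniqueExample(llvm::SmallVectorImpl<int> &V) {
  llvm::array_pod_sort(V.begin(), V.end());           // sort the POD elements
  V.erase(std::unique(V.begin(), V.end()), V.end());  // drop adjacent duplicates
}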
4045 
4046 QualType ASTContext::getObjCObjectType(QualType BaseType,
4047  ObjCProtocolDecl * const *Protocols,
4048  unsigned NumProtocols) const {
4049  return getObjCObjectType(BaseType, { },
4050  llvm::makeArrayRef(Protocols, NumProtocols),
4051  /*isKindOf=*/false);
4052 }
4053 
4054 QualType ASTContext::getObjCObjectType(
4055  QualType baseType,
4056  ArrayRef<QualType> typeArgs,
4057  ArrayRef<ObjCProtocolDecl *> protocols,
4058  bool isKindOf) const {
4059  // If the base type is an interface and there aren't any protocols or
4060  // type arguments to add, then the interface type will do just fine.
4061  if (typeArgs.empty() && protocols.empty() && !isKindOf &&
4062  isa<ObjCInterfaceType>(baseType))
4063  return baseType;
4064 
4065  // Look in the folding set for an existing type.
4066  llvm::FoldingSetNodeID ID;
4067  ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf);
4068  void *InsertPos = nullptr;
4069  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
4070  return QualType(QT, 0);
4071 
4072  // Determine the type arguments to be used for canonicalization,
4073  // which may be explicitly specified here or written on the base
4074  // type.
4075  ArrayRef<QualType> effectiveTypeArgs = typeArgs;
4076  if (effectiveTypeArgs.empty()) {
4077  if (auto baseObject = baseType->getAs<ObjCObjectType>())
4078  effectiveTypeArgs = baseObject->getTypeArgs();
4079  }
4080 
4081  // Build the canonical type, which has the canonical base type and a
4082  // sorted-and-uniqued list of protocols and the type arguments
4083  // canonicalized.
4084  QualType canonical;
4085  bool typeArgsAreCanonical = std::all_of(effectiveTypeArgs.begin(),
4086  effectiveTypeArgs.end(),
4087  [&](QualType type) {
4088  return type.isCanonical();
4089  });
4090  bool protocolsSorted = areSortedAndUniqued(protocols);
4091  if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
4092  // Determine the canonical type arguments.
4093  ArrayRef<QualType> canonTypeArgs;
4094  SmallVector<QualType, 4> canonTypeArgsVec;
4095  if (!typeArgsAreCanonical) {
4096  canonTypeArgsVec.reserve(effectiveTypeArgs.size());
4097  for (auto typeArg : effectiveTypeArgs)
4098  canonTypeArgsVec.push_back(getCanonicalType(typeArg));
4099  canonTypeArgs = canonTypeArgsVec;
4100  } else {
4101  canonTypeArgs = effectiveTypeArgs;
4102  }
4103 
4104  ArrayRef<ObjCProtocolDecl *> canonProtocols;
4105  SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
4106  if (!protocolsSorted) {
4107  canonProtocolsVec.append(protocols.begin(), protocols.end());
4108  SortAndUniqueProtocols(canonProtocolsVec);
4109  canonProtocols = canonProtocolsVec;
4110  } else {
4111  canonProtocols = protocols;
4112  }
4113 
4114  canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs,
4115  canonProtocols, isKindOf);
4116 
4117  // Regenerate InsertPos.
4118  ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
4119  }
4120 
4121  unsigned size = sizeof(ObjCObjectTypeImpl);
4122  size += typeArgs.size() * sizeof(QualType);
4123  size += protocols.size() * sizeof(ObjCProtocolDecl *);
4124  void *mem = Allocate(size, TypeAlignment);
4125  ObjCObjectTypeImpl *T =
4126  new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
4127  isKindOf);
4128 
4129  Types.push_back(T);
4130  ObjCObjectTypes.InsertNode(T, InsertPos);
4131  return QualType(T, 0);
4132 }
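// Editor's note: illustrative example, not part of the original file. An
// Objective-C type written as "NSArray<NSString *> *" is an
// ObjCObjectPointerType whose pointee is an ObjCObjectType with base type
// NSArray and the single type argument NSString *; its protocol list is empty.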
4133 
4134 /// Apply Objective-C protocol qualifiers to the given type.
4135 /// If this is for the canonical type of a type parameter, we can apply
4136 /// protocol qualifiers on the ObjCObjectPointerType.
4137 QualType
4138 ASTContext::applyObjCProtocolQualifiers(QualType type,
4139  ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
4140  bool allowOnPointerType) const {
4141  hasError = false;
4142 
4143  if (const ObjCTypeParamType *objT =
4144  dyn_cast<ObjCTypeParamType>(type.getTypePtr())) {
4145  return getObjCTypeParamType(objT->getDecl(), protocols);
4146  }
4147 
4148  // Apply protocol qualifiers to ObjCObjectPointerType.
4149  if (allowOnPointerType) {
4150  if (const ObjCObjectPointerType *objPtr =
4151  dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) {
4152  const ObjCObjectType *objT = objPtr->getObjectType();
4153  // Merge protocol lists and construct ObjCObjectType.
4154  SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
4155  protocolsVec.append(objT->qual_begin(),
4156  objT->qual_end());
4157  protocolsVec.append(protocols.begin(), protocols.end());
4158  ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
4159  type = getObjCObjectType(
4160  objT->getBaseType(),
4161  objT->getTypeArgsAsWritten(),
4162  protocols,
4163  objT->isKindOfTypeAsWritten());
4164  return getObjCObjectPointerType(type);
4165  }
4166  }
4167 
4168  // Apply protocol qualifiers to ObjCObjectType.
4169  if (const ObjCObjectType *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){
4170  // FIXME: Check for protocols to which the class type is already
4171  // known to conform.
4172 
4173  return getObjCObjectType(objT->getBaseType(),
4174  objT->getTypeArgsAsWritten(),
4175  protocols,
4176  objT->isKindOfTypeAsWritten());
4177  }
4178 
4179  // If the canonical type is ObjCObjectType, ...
4180  if (type->isObjCObjectType()) {
4181  // Silently overwrite any existing protocol qualifiers.
4182  // TODO: determine whether that's the right thing to do.
4183 
4184  // FIXME: Check for protocols to which the class type is already
4185  // known to conform.
4186  return getObjCObjectType(type, { }, protocols, false);
4187  }
4188 
4189  // id<protocol-list>
4190  if (type->isObjCIdType()) {
4191  const ObjCObjectPointerType *objPtr = type->castAs<ObjCObjectPointerType>();
4192  type = getObjCObjectType(ObjCBuiltinIdTy, { }, protocols,
4193  objPtr->isKindOfType());
4194  return getObjCObjectPointerType(type);
4195  }
4196 
4197  // Class<protocol-list>
4198  if (type->isObjCClassType()) {
4199  const ObjCObjectPointerType *objPtr = type->castAs<ObjCObjectPointerType>();
4200  type = getObjCObjectType(ObjCBuiltinClassTy, { }, protocols,
4201  objPtr->isKindOfType());
4202  return getObjCObjectPointerType(type);
4203  }
4204 
4205  hasError = true;
4206  return type;
4207 }
4208 
4209 QualType
4210 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
4211  ArrayRef<ObjCProtocolDecl *> protocols,
4212  QualType Canonical) const {
4213  // Look in the folding set for an existing type.
4214  llvm::FoldingSetNodeID ID;
4215  ObjCTypeParamType::Profile(ID, Decl, protocols);
4216  void *InsertPos = nullptr;
4217  if (ObjCTypeParamType *TypeParam =
4218  ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
4219  return QualType(TypeParam, 0);
4220 
4221  if (Canonical.isNull()) {
4222  // We canonicalize to the underlying type.
4223  Canonical = getCanonicalType(Decl->getUnderlyingType());
4224  if (!protocols.empty()) {
4225  // Apply the protocol qualifiers.
4226  bool hasError;
4227  Canonical = applyObjCProtocolQualifiers(Canonical, protocols, hasError,
4228  true/*allowOnPointerType*/);
4229  assert(!hasError && "Error when applying protocol qualifiers to bound type");
4230  }
4231  }
4232 
4233  unsigned size = sizeof(ObjCTypeParamType);
4234  size += protocols.size() * sizeof(ObjCProtocolDecl *);
4235  void *mem = Allocate(size, TypeAlignment);
4236  ObjCTypeParamType *newType = new (mem)
4237  ObjCTypeParamType(Decl, Canonical, protocols);
4238 
4239  Types.push_back(newType);
4240  ObjCTypeParamTypes.InsertNode(newType, InsertPos);
4241  return QualType(newType, 0);
4242 }
4243 
4244 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
4245 /// protocol list adopt all protocols in QT's qualified-id protocol
4246 /// list.
4247 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
4248  ObjCInterfaceDecl *IC) {
4249  if (!QT->isObjCQualifiedIdType())
4250  return false;
4251 
4252  if (const ObjCObjectPointerType *OPT = QT->getAs<ObjCObjectPointerType>()) {
4253  // If both the right and left sides have qualifiers.
4254  for (auto *Proto : OPT->quals()) {
4255  if (!IC->ClassImplementsProtocol(Proto, false))
4256  return false;
4257  }
4258  return true;
4259  }
4260  return false;
4261 }
4262 
4263 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
4264 /// QT's qualified-id protocol list adopt all protocols in IDecl's list
4265 /// of protocols.
4266 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
4267  ObjCInterfaceDecl *IDecl) {
4268  if (!QT->isObjCQualifiedIdType())
4269  return false;
4270  const ObjCObjectPointerType *OPT = QT->getAs<ObjCObjectPointerType>();
4271  if (!OPT)
4272  return false;
4273  if (!IDecl->hasDefinition())
4274  return false;
4275  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
4276  CollectInheritedProtocols(IDecl, InheritedProtocols);
4277  if (InheritedProtocols.empty())
4278  return false;
4279  // Check that if every protocol in the list of id<plist> conforms to a
4280  // protocol of IDecl's, then bridge casting is OK.
4281  bool Conforms = false;
4282  for (auto *Proto : OPT->quals()) {
4283  Conforms = false;
4284  for (auto *PI : InheritedProtocols) {
4285  if (ProtocolCompatibleWithProtocol(Proto, PI)) {
4286  Conforms = true;
4287  break;
4288  }
4289  }
4290  if (!Conforms)
4291  break;
4292  }
4293  if (Conforms)
4294  return true;
4295 
4296  for (auto *PI : InheritedProtocols) {
4297  // If both the right and left sides have qualifiers.
4298  bool Adopts = false;
4299  for (auto *Proto : OPT->quals()) {
4300  // return 'true' if 'PI' is in the inheritance hierarchy of Proto
4301  if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto)))
4302  break;
4303  }
4304  if (!Adopts)
4305  return false;
4306  }
4307  return true;
4308 }
4309 
4310 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
4311 /// the given object type.
4312 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
4313  llvm::FoldingSetNodeID ID;
4314  ObjCObjectPointerType::Profile(ID, ObjectT);
4315 
4316  void *InsertPos = nullptr;
4317  if (ObjCObjectPointerType *QT =
4318  ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
4319  return QualType(QT, 0);
4320 
4321  // Find the canonical object type.
4322  QualType Canonical;
4323  if (!ObjectT.isCanonical()) {
4324  Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));
4325 
4326  // Regenerate InsertPos.
4327  ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
4328  }
4329 
4330  // No match.
4331  void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
4332  ObjCObjectPointerType *QType =
4333  new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
4334 
4335  Types.push_back(QType);
4336  ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
4337  return QualType(QType, 0);
4338 }
4339 
4340 /// getObjCInterfaceType - Return the unique reference to the type for the
4341 /// specified ObjC interface decl. The list of protocols is optional.
4342 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
4343  ObjCInterfaceDecl *PrevDecl) const {
4344  if (Decl->TypeForDecl)
4345  return QualType(Decl->TypeForDecl, 0);
4346 
4347  if (PrevDecl) {
4348  assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
4349  Decl->TypeForDecl = PrevDecl->TypeForDecl;
4350  return QualType(PrevDecl->TypeForDecl, 0);
4351  }
4352 
4353  // Prefer the definition, if there is one.
4354  if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
4355  Decl = Def;
4356 
4357  void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
4358  ObjCInterfaceType *T = new (Mem) ObjCInterfaceType(Decl);
4359  Decl->TypeForDecl = T;
4360  Types.push_back(T);
4361  return QualType(T, 0);
4362 }
4363 
4364 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
4365 /// TypeOfExprType ASTs (since expressions are never shared). For example,
4366 /// multiple declarations that refer to "typeof(x)" all contain different
4367 /// DeclRefExprs. This doesn't affect the type checker, since it operates
4368 /// on canonical types (which are always unique).
4369 QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
4370  TypeOfExprType *toe;
4371  if (tofExpr->isTypeDependent()) {
4372  llvm::FoldingSetNodeID ID;
4373  DependentTypeOfExprType::Profile(ID, *this, tofExpr);
4374 
4375  void *InsertPos = nullptr;
4376  DependentTypeOfExprType *Canon
4377  = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
4378  if (Canon) {
4379  // We already have a "canonical" version of an identical, dependent
4380  // typeof(expr) type. Use that as our canonical type.
4381  toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr,
4382  QualType((TypeOfExprType*)Canon, 0));
4383  } else {
4384  // Build a new, canonical typeof(expr) type.
4385  Canon
4386  = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr);
4387  DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
4388  toe = Canon;
4389  }
4390  } else {
4391  QualType Canonical = getCanonicalType(tofExpr->getType());
4392  toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical);
4393  }
4394  Types.push_back(toe);
4395  return QualType(toe, 0);
4396 }
4397 
4398 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
4399 /// TypeOfType nodes. The only motivation to unique these nodes would be
4400 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
4401 /// an issue. This doesn't affect the type checker, since it operates
4402 /// on canonical types (which are always unique).
4403 QualType ASTContext::getTypeOfType(QualType tofType) const {
4404  QualType Canonical = getCanonicalType(tofType);
4405  TypeOfType *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical);
4406  Types.push_back(tot);
4407  return QualType(tot, 0);
4408 }
4409 
4410 /// \brief Unlike many "get<Type>" functions, we don't unique DecltypeType
4411 /// nodes. This would never be helpful, since each such type has its own
4412 /// expression, and would not give a significant memory saving, since there
4413 /// is an Expr tree under each such type.
4414 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
4415  DecltypeType *dt;
4416 
4417  // C++11 [temp.type]p2:
4418  // If an expression e involves a template parameter, decltype(e) denotes a
4419  // unique dependent type. Two such decltype-specifiers refer to the same
4420  // type only if their expressions are equivalent (14.5.6.1).
4421  if (e->isInstantiationDependent()) {
4422  llvm::FoldingSetNodeID ID;
4423  DependentDecltypeType::Profile(ID, *this, e);
4424 
4425  void *InsertPos = nullptr;
4426  DependentDecltypeType *Canon
4427  = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
4428  if (!Canon) {
4429  // Build a new, canonical decltype(expr) type.
4430  Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e);
4431  DependentDecltypeTypes.InsertNode(Canon, InsertPos);
4432  }
4433  dt = new (*this, TypeAlignment)
4434  DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0));
4435  } else {
4436  dt = new (*this, TypeAlignment)
4437  DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType));
4438  }
4439  Types.push_back(dt);
4440  return QualType(dt, 0);
4441 }
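// Editor's note: illustrative example, not part of the original file. In
//   template <typename T> auto g(T x) -> decltype(x + 1);
// "decltype(x + 1)" is instantiation-dependent, so the node built here points
// at a uniqued DependentDecltypeType; two equivalent decltype(x + 1)
// specifiers therefore share one canonical type, as C++11 [temp.type]p2 requires.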
4442 
4443 /// getUnaryTransformationType - We don't unique these, since the memory
4444 /// savings are minimal and these are rare.
4445 QualType ASTContext::getUnaryTransformType(QualType BaseType,
4446  QualType UnderlyingType,
4447  UnaryTransformType::UTTKind Kind)
4448  const {
4449  UnaryTransformType *ut = nullptr;
4450 
4451  if (BaseType->isDependentType()) {
4452  // Look in the folding set for an existing type.
4453  llvm::FoldingSetNodeID ID;
4454  DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind);
4455 
4456  void *InsertPos = nullptr;
4457  DependentUnaryTransformType *Canon
4458  = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);
4459 
4460  if (!Canon) {
4461  // Build a new, canonical __underlying_type(type) type.
4462  Canon = new (*this, TypeAlignment)
4463  DependentUnaryTransformType(*this, getCanonicalType(BaseType),
4464  Kind);
4465  DependentUnaryTransformTypes.InsertNode(Canon, InsertPos);
4466  }
4467  ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
4468  QualType(), Kind,
4469  QualType(Canon, 0));
4470  } else {
4471  QualType CanonType = getCanonicalType(UnderlyingType);
4472  ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
4473  UnderlyingType, Kind,
4474  CanonType);
4475  }
4476  Types.push_back(ut);
4477  return QualType(ut, 0);
4478 }
4479 
4480 /// getAutoType - Return the uniqued reference to the 'auto' type which has been
4481 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the
4482 /// canonical deduced-but-dependent 'auto' type.
4483 QualType ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
4484  bool IsDependent) const {
4485  if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && !IsDependent)
4486  return getAutoDeductType();
4487 
4488  // Look in the folding set for an existing type.
4489  void *InsertPos = nullptr;
4490  llvm::FoldingSetNodeID ID;
4491  AutoType::Profile(ID, DeducedType, Keyword, IsDependent);
4492  if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
4493  return QualType(AT, 0);
4494 
4495  AutoType *AT = new (*this, TypeAlignment) AutoType(DeducedType,
4496  Keyword,
4497  IsDependent);
4498  Types.push_back(AT);
4499  if (InsertPos)
4500  AutoTypes.InsertNode(AT, InsertPos);
4501  return QualType(AT, 0);
4502 }
4503 
4504 /// Return the uniqued reference to the deduced template specialization type
4505 /// which has been deduced to the given type, or to the canonical undeduced
4506 /// such type, or the canonical deduced-but-dependent such type.
4507 QualType ASTContext::getDeducedTemplateSpecializationType(
4508  TemplateName Template, QualType DeducedType, bool IsDependent) const {
4509  // Look in the folding set for an existing type.
4510  void *InsertPos = nullptr;
4511  llvm::FoldingSetNodeID ID;
4512  DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType,
4513  IsDependent);
4514  if (DeducedTemplateSpecializationType *DTST =
4515  DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
4516  return QualType(DTST, 0);
4517 
4518  auto *DTST = new (*this, TypeAlignment)
4519  DeducedTemplateSpecializationType(Template, DeducedType, IsDependent);
4520  Types.push_back(DTST);
4521  if (InsertPos)
4522  DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos);
4523  return QualType(DTST, 0);
4524 }
4525 
4526 /// getAtomicType - Return the uniqued reference to the atomic type for
4527 /// the given value type.
4528 QualType ASTContext::getAtomicType(QualType T) const {
4529  // Unique pointers, to guarantee there is only one pointer of a particular
4530  // structure.
4531  llvm::FoldingSetNodeID ID;
4532  AtomicType::Profile(ID, T);
4533 
4534  void *InsertPos = nullptr;
4535  if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
4536  return QualType(AT, 0);
4537 
4538  // If the atomic value type isn't canonical, this won't be a canonical type
4539  // either, so fill in the canonical type field.
4540  QualType Canonical;
4541  if (!T.isCanonical()) {
4542  Canonical = getAtomicType(getCanonicalType(T));
4543 
4544  // Get the new insert position for the node we care about.
4545  AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
4546  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4547  }
4548  AtomicType *New = new (*this, TypeAlignment) AtomicType(T, Canonical);
4549  Types.push_back(New);
4550  AtomicTypes.InsertNode(New, InsertPos);
4551  return QualType(New, 0);
4552 }
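// Editor's note: standalone sketch of the FoldingSet uniquing pattern used by
// getAtomicType and most get*Type methods above; not part of the original
// file, and the node type "InternedNode" is hypothetical. It assumes
// llvm/ADT/FoldingSet.h, which the AST headers already pull in.
namespace {
struct InternedNode : llvm::FoldingSetNode {
  int Key;
  explicit InternedNode(int K) : Key(K) {}
  void Profile(llvm::FoldingSetNodeID &ID) { ID.AddInteger(Key); }
};
} // namespace

static InternedNode *getOrCreateNode(llvm::FoldingSet<InternedNode> &Pool,
                                     int Key) {
  llvm::FoldingSetNodeID ID;
  ID.AddInteger(Key);                       // profile the lookup key
  void *InsertPos = nullptr;
  if (InternedNode *N = Pool.FindNodeOrInsertPos(ID, InsertPos))
    return N;                               // already uniqued
  InternedNode *N = new InternedNode(Key);
  Pool.InsertNode(N, InsertPos);            // remember the insert position
  return N;
}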
4553 
4554 /// getAutoDeductType - Get type pattern for deducing against 'auto'.
4555 QualType ASTContext::getAutoDeductType() const {
4556  if (AutoDeductTy.isNull())
4557  AutoDeductTy = QualType(
4558  new (*this, TypeAlignment) AutoType(QualType(), AutoTypeKeyword::Auto,
4559  /*dependent*/false),
4560  0);
4561  return AutoDeductTy;
4562 }
4563 
4564 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
4565 QualType ASTContext::getAutoRRefDeductType() const {
4566  if (AutoRRefDeductTy.isNull())
4567  AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType());
4568  assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
4569  return AutoRRefDeductTy;
4570 }
4571 
4572 /// getTagDeclType - Return the unique reference to the type for the
4573 /// specified TagDecl (struct/union/class/enum) decl.
4574 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
4575  assert(Decl);
4576  // FIXME: What is the design on getTagDeclType when it requires casting
4577  // away const? mutable?
4578  return getTypeDeclType(const_cast<TagDecl*>(Decl));
4579 }
4580 
4581 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
4582 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
4583 /// needs to agree with the definition in <stddef.h>.
4584 CanQualType ASTContext::getSizeType() const {
4585  return getFromTargetType(Target->getSizeType());
4586 }
4587 
4588 /// Return the unique signed counterpart of the integer type
4589 /// corresponding to size_t.
4590 CanQualType ASTContext::getSignedSizeType() const {
4591  return getFromTargetType(Target->getSignedSizeType());
4592 }
4593 
4594 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
4595 CanQualType ASTContext::getIntMaxType() const {
4596  return getFromTargetType(Target->getIntMaxType());
4597 }
4598 
4599 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
4600 CanQualType ASTContext::getUIntMaxType() const {
4601  return getFromTargetType(Target->getUIntMaxType());
4602 }
4603 
4604 /// getSignedWCharType - Return the type of "signed wchar_t".
4605 /// Used when in C++, as a GCC extension.
4606 QualType ASTContext::getSignedWCharType() const {
4607  // FIXME: derive from "Target" ?
4608  return WCharTy;
4609 }
4610 
4611 /// getUnsignedWCharType - Return the type of "unsigned wchar_t".
4612 /// Used when in C++, as a GCC extension.
4613 QualType ASTContext::getUnsignedWCharType() const {
4614  // FIXME: derive from "Target" ?
4615  return UnsignedIntTy;
4616 }
4617 
4618 QualType ASTContext::getIntPtrType() const {
4619  return getFromTargetType(Target->getIntPtrType());
4620 }
4621 
4622 QualType ASTContext::getUIntPtrType() const {
4623  return getCorrespondingUnsignedType(getIntPtrType());
4624 }
4625 
4626 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
4627 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
4628 QualType ASTContext::getPointerDiffType() const {
4629  return getFromTargetType(Target->getPtrDiffType(0));
4630 }
4631 
4632 /// \brief Return the unique unsigned counterpart of "ptrdiff_t"
4633 /// integer type. The standard (C11 7.21.6.1p7) refers to this type
4634 /// in the definition of %tu format specifier.
4635 QualType ASTContext::getUnsignedPointerDiffType() const {
4636  return getFromTargetType(Target->getUnsignedPtrDiffType(0));
4637 }
4638 
4639 /// \brief Return the unique type for "pid_t" defined in
4640 /// <sys/types.h>. We need this to compute the correct type for vfork().
4641 QualType ASTContext::getProcessIDType() const {
4642  return getFromTargetType(Target->getProcessIDType());
4643 }
4644 
4645 //===----------------------------------------------------------------------===//
4646 // Type Operators
4647 //===----------------------------------------------------------------------===//
4648 
4649 CanQualType ASTContext::getCanonicalParamType(QualType T) const {
4650  // Push qualifiers into arrays, and then discard any remaining
4651  // qualifiers.
4652  T = getCanonicalType(T);
4653  T = getVariableArrayDecayedType(T);
4654  const Type *Ty = T.getTypePtr();
4655  QualType Result;
4656  if (isa<ArrayType>(Ty)) {
4657  Result = getArrayDecayedType(QualType(Ty,0));
4658  } else if (isa<FunctionType>(Ty)) {
4659  Result = getPointerType(QualType(Ty, 0));
4660  } else {
4661  Result = QualType(Ty, 0);
4662  }
4663 
4664  return CanQualType::CreateUnsafe(Result);
4665 }
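// Editor's note: illustrative sketch, not part of the original file, assuming
// an ASTContext &Ctx. Array parameters decay to pointers when the canonical
// parameter type is formed, e.g. int[4] becomes int *.
static CanQualType canonicalParamExample(ASTContext &Ctx) {
  QualType ArrTy = Ctx.getConstantArrayType(Ctx.IntTy, llvm::APInt(32, 4),
                                            ArrayType::Normal,
                                            /*IndexTypeQuals=*/0);
  return Ctx.getCanonicalParamType(ArrTy);  // canonically "int *"
}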
4666 
4667 QualType ASTContext::getUnqualifiedArrayType(QualType type,
4668  Qualifiers &quals) {
4669  SplitQualType splitType = type.getSplitUnqualifiedType();
4670 
4671  // FIXME: getSplitUnqualifiedType() actually walks all the way to
4672  // the unqualified desugared type and then drops it on the floor.
4673  // We then have to strip that sugar back off with
4674  // getUnqualifiedDesugaredType(), which is silly.
4675  const ArrayType *AT =
4676  dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());
4677 
4678  // If we don't have an array, just use the results in splitType.
4679  if (!AT) {
4680  quals = splitType.Quals;
4681  return QualType(splitType.Ty, 0);
4682  }
4683 
4684  // Otherwise, recurse on the array's element type.
4685  QualType elementType = AT->getElementType();
4686  QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);
4687 
4688  // If that didn't change the element type, AT has no qualifiers, so we
4689  // can just use the results in splitType.
4690  if (elementType == unqualElementType) {
4691  assert(quals.empty()); // from the recursive call
4692  quals = splitType.Quals;
4693  return QualType(splitType.Ty, 0);
4694  }
4695 
4696  // Otherwise, add in the qualifiers from the outermost type, then
4697  // build the type back up.
4698  quals.addConsistentQualifiers(splitType.Quals);
4699 
4700  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) {
4701  return getConstantArrayType(unqualElementType, CAT->getSize(),
4702  CAT->getSizeModifier(), 0);
4703  }
4704 
4705  if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT)) {
4706  return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
4707  }
4708 
4709  if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(AT)) {
4710  return getVariableArrayType(unqualElementType,
4711  VAT->getSizeExpr(),
4712  VAT->getSizeModifier(),
4713  VAT->getIndexTypeCVRQualifiers(),
4714  VAT->getBracketsRange());
4715  }
4716 
4717  const DependentSizedArrayType *DSAT = cast<DependentSizedArrayType>(AT);
4718  return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
4719  DSAT->getSizeModifier(), 0,
4720  SourceRange());
4721 }
4722 
4723 /// UnwrapSimilarPointerTypes - If T1 and T2 are pointer types that
4724 /// may be similar (C++ 4.4), replaces T1 and T2 with the type that
4725 /// they point to and return true. If T1 and T2 aren't pointer types
4726 /// or pointer-to-member types, or if they are not similar at this
4727 /// level, returns false and leaves T1 and T2 unchanged. Top-level
4728 /// qualifiers on T1 and T2 are ignored. This function will typically
4729 /// be called in a loop that successively "unwraps" pointer and
4730 /// pointer-to-member types to compare them at each level.
4731 bool ASTContext::UnwrapSimilarPointerTypes(QualType &T1, QualType &T2) {
4732  const PointerType *T1PtrType = T1->getAs<PointerType>(),
4733  *T2PtrType = T2->getAs<PointerType>();
4734  if (T1PtrType && T2PtrType) {
4735  T1 = T1PtrType->getPointeeType();
4736  T2 = T2PtrType->getPointeeType();
4737  return true;
4738  }
4739 
4740  const MemberPointerType *T1MPType = T1->getAs<MemberPointerType>(),
4741  *T2MPType = T2->getAs<MemberPointerType>();
4742  if (T1MPType && T2MPType &&
4743  hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
4744  QualType(T2MPType->getClass(), 0))) {
4745  T1 = T1MPType->getPointeeType();
4746  T2 = T2MPType->getPointeeType();
4747  return true;
4748  }
4749 
4750  if (getLangOpts().ObjC1) {
4751  const ObjCObjectPointerType *T1OPType = T1->getAs<ObjCObjectPointerType>(),
4752  *T2OPType = T2->getAs<ObjCObjectPointerType>();
4753  if (T1OPType && T2OPType) {
4754  T1 = T1OPType->getPointeeType();
4755  T2 = T2OPType->getPointeeType();
4756  return true;
4757  }
4758  }
4759 
4760  // FIXME: Block pointers, too?
4761 
4762  return false;
4763 }
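// Editor's note: illustrative sketch of the loop usage described above; not
// part of the original file, and the helper is hypothetical (it assumes an
// ASTContext &Ctx and two QualTypes).
static void unwrapExample(ASTContext &Ctx, QualType T1, QualType T2) {
  while (Ctx.UnwrapSimilarPointerTypes(T1, T2)) {
    // T1 and T2 now name the pointee types at this level; qualifier
    // comparisons for similarity checks would happen here.
  }
}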
4764 
4765 DeclarationNameInfo
4766 ASTContext::getNameForTemplate(TemplateName Name,
4767  SourceLocation NameLoc) const {
4768  switch (Name.getKind()) {
4769  case TemplateName::QualifiedTemplate:
4770  case TemplateName::Template:
4771  // DNInfo work in progress: CHECKME: what about DNLoc?
4772  return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
4773  NameLoc);
4774 
4775  case TemplateName::OverloadedTemplate: {
4776  OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
4777  // DNInfo work in progress: CHECKME: what about DNLoc?
4778  return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
4779  }
4780 
4781  case TemplateName::DependentTemplate: {
4782  DependentTemplateName *DTN = Name.getAsDependentTemplateName();
4783  DeclarationName DName;
4784  if (DTN->isIdentifier()) {
4785  DName = DeclarationNames.getIdentifier(DTN->getIdentifier());
4786  return DeclarationNameInfo(DName, NameLoc);
4787  } else {
4788  DName = DeclarationNames.getCXXOperatorName(DTN->getOperator());
4789  // DNInfo work in progress: FIXME: source locations?
4790  DeclarationNameLoc DNLoc;
4791  DNLoc.CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding();
4792  DNLoc.CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding();
4793  return DeclarationNameInfo(DName, NameLoc, DNLoc);
4794  }
4795  }
4796 
4797  case TemplateName::SubstTemplateTemplateParm: {
4798  SubstTemplateTemplateParmStorage *subst
4799  = Name.getAsSubstTemplateTemplateParm();
4800  return DeclarationNameInfo(subst->getParameter()->getDeclName(),
4801  NameLoc);
4802  }
4803 
4804  case TemplateName::SubstTemplateTemplateParmPack: {
4805  SubstTemplateTemplateParmPackStorage *subst
4806  = Name.getAsSubstTemplateTemplateParmPack();
4807  return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
4808  NameLoc);
4809  }
4810  }
4811 
4812  llvm_unreachable("bad template name kind!");
4813 }
4814 
4815 TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
4816  switch (Name.getKind()) {
4817  case TemplateName::QualifiedTemplate:
4818  case TemplateName::Template: {
4819  TemplateDecl *Template = Name.getAsTemplateDecl();
4820  if (TemplateTemplateParmDecl *TTP
4821  = dyn_cast<TemplateTemplateParmDecl>(Template))
4822  Template = getCanonicalTemplateTemplateParmDecl(TTP);
4823 
4824  // The canonical template name is the canonical template declaration.
4825  return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
4826  }
4827 
4828  case TemplateName::OverloadedTemplate:
4829  llvm_unreachable("cannot canonicalize overloaded template");
4830 
4831  case TemplateName::DependentTemplate: {
4832  DependentTemplateName *DTN = Name.getAsDependentTemplateName();
4833  assert(DTN && "Non-dependent template names must refer to template decls.");
4834  return DTN->CanonicalTemplateName;
4835  }
4836 
4837  case TemplateName::SubstTemplateTemplateParm: {
4838  SubstTemplateTemplateParmStorage *subst
4839  = Name.getAsSubstTemplateTemplateParm();
4840  return getCanonicalTemplateName(subst->getReplacement());
4841  }
4842 
4843  case TemplateName::SubstTemplateTemplateParmPack: {
4844  SubstTemplateTemplateParmPackStorage *subst
4845  = Name.getAsSubstTemplateTemplateParmPack();
4846  TemplateTemplateParmDecl *canonParameter
4847  = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack());
4848  TemplateArgument canonArgPack
4849  = getCanonicalTemplateArgument(subst->getArgumentPack());
4850  return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack);
4851  }
4852  }
4853 
4854  llvm_unreachable("bad template name!");
4855 }
4856 
4857 bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) {
4858  X = getCanonicalTemplateName(X);
4859  Y = getCanonicalTemplateName(Y);
4860  return X.getAsVoidPointer() == Y.getAsVoidPointer();
4861 }
4862 
4863 TemplateArgument
4864 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
4865  switch (Arg.getKind()) {
4866  case TemplateArgument::Null:
4867  return Arg;
4868 
4869  case TemplateArgument::Expression:
4870  return Arg;
4871 
4872  case TemplateArgument::Declaration: {
4873  ValueDecl *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
4874  return TemplateArgument(D, Arg.getParamTypeForDecl());
4875  }
4876 
4877  case TemplateArgument::NullPtr:
4878  return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
4879  /*isNullPtr*/true);
4880 
4881  case TemplateArgument::Template:
4882  return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));
4883 
4884  case TemplateArgument::TemplateExpansion:
4885  return TemplateArgument(getCanonicalTemplateName(
4886  Arg.getAsTemplateOrTemplatePattern()),
4887  Arg.getNumTemplateExpansions());
4888 
4889  case TemplateArgument::Integral:
4890  return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));
4891 
4892  case TemplateArgument::Type:
4893  return TemplateArgument(getCanonicalType(Arg.getAsType()));
4894 
4895  case TemplateArgument::Pack: {
4896  if (Arg.pack_size() == 0)
4897  return Arg;
4898 
4899  TemplateArgument *CanonArgs
4900  = new (*this) TemplateArgument[Arg.pack_size()];
4901  unsigned Idx = 0;
4903  AEnd = Arg.pack_end();
4904  A != AEnd; (void)++A, ++Idx)
4905  CanonArgs[Idx] = getCanonicalTemplateArgument(*A);
4906 
4907  return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size()));
4908  }
4909  }
4910 
4911  // Silence GCC warning
4912  llvm_unreachable("Unhandled template argument kind");
4913 }
4914 
4915 NestedNameSpecifier *
4916 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
4917  if (!NNS)
4918  return nullptr;
4919 
4920  switch (NNS->getKind()) {
4921  case NestedNameSpecifier::Identifier:
4922  // Canonicalize the prefix but keep the identifier the same.
4923  return NestedNameSpecifier::Create(*this,
4924  getCanonicalNestedNameSpecifier(NNS->getPrefix()),
4925  NNS->getAsIdentifier());
4926 
4927  case NestedNameSpecifier::Namespace:
4928  // A namespace is canonical; build a nested-name-specifier with
4929  // this namespace and no prefix.
4930  return NestedNameSpecifier::Create(*this, nullptr,
4931  NNS->getAsNamespace()->getOriginalNamespace());
4932 
4933  case NestedNameSpecifier::NamespaceAlias:
4934  // A namespace is canonical; build a nested-name-specifier with
4935  // this namespace and no prefix.
4936  return NestedNameSpecifier::Create(*this, nullptr,
4937  NNS->getAsNamespaceAlias()->getNamespace()
4938  ->getOriginalNamespace());
4939 
4940  case NestedNameSpecifier::TypeSpec:
4941  case NestedNameSpecifier::TypeSpecWithTemplate: {
4942  QualType T = getCanonicalType(QualType(NNS->getAsType(), 0));
4943 
4944  // If we have some kind of dependent-named type (e.g., "typename T::type"),
4945  // break it apart into its prefix and identifier, then reconstitute those
4946  // as the canonical nested-name-specifier. This is required to canonicalize
4947  // a dependent nested-name-specifier involving typedefs of dependent-name
4948  // types, e.g.,
4949  // typedef typename T::type T1;
4950  // typedef typename T1::type T2;
4951  if (const DependentNameType *DNT = T->getAs<DependentNameType>())
4952  return NestedNameSpecifier::Create(*this, DNT->getQualifier(),
4953  const_cast<IdentifierInfo *>(DNT->getIdentifier()));
4954 
4955  // Otherwise, just canonicalize the type, and force it to be a TypeSpec.
4956  // FIXME: Why are TypeSpec and TypeSpecWithTemplate distinct in the
4957  // first place?
4958  return NestedNameSpecifier::Create(*this, nullptr, false,
4959  const_cast<Type *>(T.getTypePtr()));
4960  }
4961 
4962  case NestedNameSpecifier::Global:
4963  case NestedNameSpecifier::Super:
4964  // The global specifier and __super specifier are canonical and unique.
4965  return NNS;
4966  }
4967 
4968  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
4969 }
4970 
4971 const ArrayType *ASTContext::getAsArrayType(QualType T) const {
4972  // Handle the non-qualified case efficiently.
4973  if (!T.hasLocalQualifiers()) {
4974  // Handle the common positive case fast.
4975  if (const ArrayType *AT = dyn_cast<ArrayType>(T))
4976  return AT;
4977  }
4978 
4979  // Handle the common negative case fast.
4980  if (!isa<ArrayType>(T.getCanonicalType()))
4981  return nullptr;
4982 
4983  // Apply any qualifiers from the array type to the element type. This
4984  // implements C99 6.7.3p8: "If the specification of an array type includes
4985  // any type qualifiers, the element type is so qualified, not the array type."
4986 
4987  // If we get here, we either have type qualifiers on the type, or we have
4988  // sugar such as a typedef in the way. If we have type qualifiers on the type
4989  // we must propagate them down into the element type.
4990 
4991  SplitQualType split = T.getSplitDesugaredType();
4992  Qualifiers qs = split.Quals;
4993 
4994  // If we have a simple case, just return now.
4995  const ArrayType *ATy = dyn_cast<ArrayType>(split.Ty);
4996  if (!ATy || qs.empty())
4997  return ATy;
4998 
4999  // Otherwise, we have an array and we have qualifiers on it. Push the
5000  // qualifiers into the array element type and return a new array type.
5001  QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);
5002 
5003  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(ATy))
5004  return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
5005  CAT->getSizeModifier(),
5006  CAT->getIndexTypeCVRQualifiers()));
5007  if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(ATy))
5008  return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
5009  IAT->getSizeModifier(),
5010  IAT->getIndexTypeCVRQualifiers()));
5011 
5012  if (const DependentSizedArrayType *DSAT
5013  = dyn_cast<DependentSizedArrayType>(ATy))
5014  return cast<ArrayType>(
5015  getDependentSizedArrayType(NewEltTy,
5016  DSAT->getSizeExpr(),
5017  DSAT->getSizeModifier(),
5018  DSAT->getIndexTypeCVRQualifiers(),
5019  DSAT->getBracketsRange()));
5020 
5021  const VariableArrayType *VAT = cast<VariableArrayType>(ATy);
5022  return cast<ArrayType>(getVariableArrayType(NewEltTy,
5023  VAT->getSizeExpr(),
5024  VAT->getSizeModifier(),
5025  VAT->getIndexTypeCVRQualifiers(),
5026  VAT->getBracketsRange()));
5027 }
5028 
5029 QualType ASTContext::getAdjustedParameterType(QualType T) const {
5030  if (T->isArrayType() || T->isFunctionType())
5031  return getDecayedType(T);
5032  return T;
5033 }
5034 
5035 QualType ASTContext::getSignatureParameterType(QualType T) const {
5036  T = getVariableArrayDecayedType(T);
5037  T = getAdjustedParameterType(T);
5038  return T.getUnqualifiedType();
5039 }
5040 
5041 QualType ASTContext::getExceptionObjectType(QualType T) const {
5042  // C++ [except.throw]p3:
5043  // A throw-expression initializes a temporary object, called the exception
5044  // object, the type of which is determined by removing any top-level
5045  // cv-qualifiers from the static type of the operand of throw and adjusting
5046  // the type from "array of T" or "function returning T" to "pointer to T"
5047  // or "pointer to function returning T", [...]
5048  T = getVariableArrayDecayedType(T.getUnqualifiedType());
5049  if (T->isArrayType() || T->isFunctionType())
5050  T = getDecayedType(T);
5051  return T.getUnqualifiedType();
5052 }
5053 
5054 /// getArrayDecayedType - Return the properly qualified result of decaying the
5055 /// specified array type to a pointer. This operation is non-trivial when
5056 /// handling typedefs etc. The canonical type of "T" must be an array type,
5057 /// this returns a pointer to a properly qualified element of the array.
5058 ///
5059 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
5060 QualType ASTContext::getArrayDecayedType(QualType Ty) const {
5061  // Get the element type with 'getAsArrayType' so that we don't lose any
5062  // typedefs in the element type of the array. This also handles propagation
5063  // of type qualifiers from the array type into the element type if present
5064  // (C99 6.7.3p8).
5065  const ArrayType *PrettyArrayType = getAsArrayType(Ty);
5066  assert(PrettyArrayType && "Not an array type!");
5067 
5068  QualType PtrTy = getPointerType(PrettyArrayType->getElementType());
5069 
5070  // int x[restrict 4] -> int *restrict
5071  QualType Result = getQualifiedType(PtrTy,
5072  PrettyArrayType->getIndexTypeQualifiers());
5073 
5074  // int x[_Nullable] -> int * _Nullable
5075  if (auto Nullability = Ty->getNullability(*this)) {
5076  Result = const_cast<ASTContext *>(this)->getAttributedType(
5077  AttributedType::getNullabilityAttrKind(*Nullability), Result, Result);
5078  }
5079  return Result;
5080 }
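// Illustrative example (not part of ASTContext.cpp): array-to-pointer decay
// in an expression produces a pointer to the properly qualified element, as
// computed above.
#include <type_traits>
const int table[4] = {1, 2, 3, 4};
static_assert(std::is_same<decltype(+table), const int *>::value,
              "decay yields a pointer to the qualified element type");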
5081 
5082 QualType ASTContext::getBaseElementType(const ArrayType *array) const {
5083  return getBaseElementType(array->getElementType());
5084 }
5085 
5086 QualType ASTContext::getBaseElementType(QualType type) const {
5087  Qualifiers qs;
5088  while (true) {
5089  SplitQualType split = type.getSplitDesugaredType();
5090  const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
5091  if (!array) break;
5092 
5093  type = array->getElementType();
5094  qs.addConsistentQualifiers(split.Quals);
5095  }
5096 
5097  return getQualifiedType(type, qs);
5098 }
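// Illustrative example (not part of ASTContext.cpp): the base element type
// strips every array level while keeping the accumulated qualifiers.
#include <type_traits>
typedef const int Grid[3][4];
static_assert(std::is_same<std::remove_all_extents<Grid>::type,
                           const int>::value,
              "all array extents removed; 'const' stays on the element type");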
5099 
5100 /// getConstantArrayElementCount - Returns number of constant array elements.
5101 uint64_t
5102 ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
5103  uint64_t ElementCount = 1;
5104  do {
5105  ElementCount *= CA->getSize().getZExtValue();
5106  CA = dyn_cast_or_null<ConstantArrayType>(
5107  CA->getElementType()->getAsArrayTypeUnsafe());
5108  } while (CA);
5109  return ElementCount;
5110 }
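// Illustrative example (not part of ASTContext.cpp): for nested constant
// arrays the total element count is the product of all extents, which is
// what the loop above computes.
typedef int Block[3][4][5];
static_assert(sizeof(Block) / sizeof(int) == 3 * 4 * 5,
              "element count of int[3][4][5] is 60");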
5111 
5112 /// getFloatingRank - Return a relative rank for floating point types.
5113 /// This routine will assert if passed a built-in type that isn't a float.
5114 static FloatingRank getFloatingRank(QualType T) {
5115  if (const ComplexType *CT = T->getAs<ComplexType>())
5116  return getFloatingRank(CT->getElementType());
5117 
5118  assert(T->getAs<BuiltinType>() && "getFloatingRank(): not a floating type");
5119  switch (T->getAs<BuiltinType>()->getKind()) {
5120  default: llvm_unreachable("getFloatingRank(): not a floating type");
5121  case BuiltinType::Float16: return Float16Rank;
5122  case BuiltinType::Half: return HalfRank;
5123  case BuiltinType::Float: return FloatRank;
5124  case BuiltinType::Double: return DoubleRank;
5125  case BuiltinType::LongDouble: return LongDoubleRank;
5126  case BuiltinType::Float128: return Float128Rank;
5127  }
5128 }
5129 
5130 /// getFloatingTypeOfSizeWithinDomain - Returns a real floating
5131 /// point or a complex type (based on Domain/Size).
5132 /// 'Domain' is a real floating point or complex type.
5133 /// 'Size' is a real floating point or complex type whose rank selects the result.
5134 QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
5135  QualType Domain) const {
5136  FloatingRank EltRank = getFloatingRank(Size);
5137  if (Domain->isComplexType()) {
5138  switch (EltRank) {
5139  case Float16Rank:
5140  case HalfRank: llvm_unreachable("Complex half is not supported");
5141  case FloatRank: return FloatComplexTy;
5142  case DoubleRank: return DoubleComplexTy;
5143  case LongDoubleRank: return LongDoubleComplexTy;
5144  case Float128Rank: return Float128ComplexTy;
5145  }
5146  }
5147 
5148  assert(Domain->isRealFloatingType() && "Unknown domain!");
5149  switch (EltRank) {
5150  case Float16Rank: return HalfTy;
5151  case HalfRank: return HalfTy;
5152  case FloatRank: return FloatTy;
5153  case DoubleRank: return DoubleTy;
5154  case LongDoubleRank: return LongDoubleTy;
5155  case Float128Rank: return Float128Ty;
5156  }
5157  llvm_unreachable("getFloatingRank(): illegal value for rank");
5158 }
5159 
5160 /// getFloatingTypeOrder - Compare the rank of the two specified floating
5161 /// point types, ignoring the domain of the type (i.e. 'double' ==
5162 /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
5163 /// LHS < RHS, return -1.
5164 int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
5165  FloatingRank LHSR = getFloatingRank(LHS);
5166  FloatingRank RHSR = getFloatingRank(RHS);
5167 
5168  if (LHSR == RHSR)
5169  return 0;
5170  if (LHSR > RHSR)
5171  return 1;
5172  return -1;
5173 }
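// Illustrative example (not part of ASTContext.cpp): this ranking is what
// drives the usual arithmetic conversions -- the higher-ranked floating
// type wins.
#include <type_traits>
static_assert(std::is_same<decltype(1.0f + 1.0), double>::value,
              "float + double -> double");
static_assert(std::is_same<decltype(1.0 + 1.0L), long double>::value,
              "double + long double -> long double");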
5174 
5175 /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
5176 /// routine will assert if passed a built-in type that isn't an integer or enum,
5177 /// or if it is not canonicalized.
5178 unsigned ASTContext::getIntegerRank(const Type *T) const {
5179  assert(T->isCanonicalUnqualified() && "T should be canonicalized");
5180 
5181  switch (cast<BuiltinType>(T)->getKind()) {
5182  default: llvm_unreachable("getIntegerRank(): not a built-in integer");
5183  case BuiltinType::Bool:
5184  return 1 + (getIntWidth(BoolTy) << 3);
5185  case BuiltinType::Char_S:
5186  case BuiltinType::Char_U:
5187  case BuiltinType::SChar:
5188  case BuiltinType::UChar:
5189  return 2 + (getIntWidth(CharTy) << 3);
5190  case BuiltinType::Short:
5191  case BuiltinType::UShort:
5192  return 3 + (getIntWidth(ShortTy) << 3);
5193  case BuiltinType::Int:
5194  case BuiltinType::UInt:
5195  return 4 + (getIntWidth(IntTy) << 3);
5196  case BuiltinType::Long:
5197  case BuiltinType::ULong:
5198  return 5 + (getIntWidth(LongTy) << 3);
5199  case BuiltinType::LongLong:
5200  case BuiltinType::ULongLong:
5201  return 6 + (getIntWidth(LongLongTy) << 3);
5202  case BuiltinType::Int128:
5203  case BuiltinType::UInt128:
5204  return 7 + (getIntWidth(Int128Ty) << 3);
5205  }
5206 }
5207 
5208 /// \brief Whether this is a promotable bitfield reference according
5209 /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
5210 ///
5211 /// \returns the type this bit-field will promote to, or NULL if no
5212 /// promotion occurs.
5213 QualType ASTContext::isPromotableBitField(Expr *E) const {
5214  if (E->isTypeDependent() || E->isValueDependent())
5215  return QualType();
5216 
5217  // FIXME: We should not do this unless E->refersToBitField() is true. This
5218  // matters in C where getSourceBitField() will find bit-fields for various
5219  // cases where the source expression is not a bit-field designator.
5220 
5221  FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
5222  if (!Field)
5223  return QualType();
5224 
5225  QualType FT = Field->getType();
5226 
5227  uint64_t BitWidth = Field->getBitWidthValue(*this);
5228  uint64_t IntSize = getTypeSize(IntTy);
5229  // C++ [conv.prom]p5:
5230  // A prvalue for an integral bit-field can be converted to a prvalue of type
5231  // int if int can represent all the values of the bit-field; otherwise, it
5232  // can be converted to unsigned int if unsigned int can represent all the
5233  // values of the bit-field. If the bit-field is larger yet, no integral
5234  // promotion applies to it.
5235  // C11 6.3.1.1/2:
5236  // [For a bit-field of type _Bool, int, signed int, or unsigned int:]
5237  // If an int can represent all values of the original type (as restricted by
5238  // the width, for a bit-field), the value is converted to an int; otherwise,
5239  // it is converted to an unsigned int.
5240  //
5241  // FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
5242  // We perform that promotion here to match GCC and C++.
5243  if (BitWidth < IntSize)
5244  return IntTy;
5245 
5246  if (BitWidth == IntSize)
5247  return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;
5248 
5249  // Types bigger than int are not subject to promotions, and therefore act
5250  // like the base type. GCC has some weird bugs in this area that we
5251  // deliberately do not follow (GCC follows a pre-standard resolution to
5252  // C's DR315 which treats bit-width as being part of the type, and this leaks
5253  // into their semantics in some cases).
5254  return QualType();
5255 }
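// Illustrative example (not part of ASTContext.cpp): a bit-field narrower
// than int promotes to int, exactly as computed above.
#include <type_traits>
struct Packed { unsigned int tag : 3; };
static_assert(std::is_same<decltype(Packed().tag + 0), int>::value,
              "a 3-bit unsigned bit-field promotes to (signed) int");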
5256 
5257 /// getPromotedIntegerType - Returns the type that Promotable will
5258 /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
5259 /// integer type.
5260 QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
5261  assert(!Promotable.isNull());
5262  assert(Promotable->isPromotableIntegerType());
5263  if (const EnumType *ET = Promotable->getAs<EnumType>())
5264  return ET->getDecl()->getPromotionType();
5265 
5266  if (const BuiltinType *BT = Promotable->getAs<BuiltinType>()) {
5267  // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
5268  // (3.9.1) can be converted to a prvalue of the first of the following
5269  // types that can represent all the values of its underlying type:
5270  // int, unsigned int, long int, unsigned long int, long long int, or
5271  // unsigned long long int [...]
5272  // FIXME: Is there some better way to compute this?
5273  if (BT->getKind() == BuiltinType::WChar_S ||
5274  BT->getKind() == BuiltinType::WChar_U ||
5275  BT->getKind() == BuiltinType::Char16 ||
5276  BT->getKind() == BuiltinType::Char32) {
5277  bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
5278  uint64_t FromSize = getTypeSize(BT);
5279  QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
5280  LongLongTy, UnsignedLongLongTy };
5281  for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) {
5282  uint64_t ToSize = getTypeSize(PromoteTypes[Idx]);
5283  if (FromSize < ToSize ||
5284  (FromSize == ToSize &&
5285  FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType()))
5286  return PromoteTypes[Idx];
5287  }
5288  llvm_unreachable("char type should fit into long long");
5289  }
5290  }
5291 
5292  // At this point, we should have a signed or unsigned integer type.
5293  if (Promotable->isSignedIntegerType())
5294  return IntTy;
5295  uint64_t PromotableSize = getIntWidth(Promotable);
5296  uint64_t IntSize = getIntWidth(IntTy);
5297  assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
5298  return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
5299 }
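// Illustrative example (not part of ASTContext.cpp): unary '+' forces the
// integral promotion, so the promoted type chosen above can be observed.
// Both assertions assume a target whose int is wider than 16 bits.
#include <type_traits>
static_assert(std::is_same<decltype(+u'x'), int>::value,
              "char16_t promotes to int when int can hold all its values");
static_assert(std::is_same<decltype(+static_cast<unsigned short>(1)), int>::value,
              "unsigned short promotes to int when int is strictly wider");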
5300 
5301 /// \brief Recurses in pointer/array types until it finds an objc retainable
5302 /// type and returns its ownership.
5303 Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
5304  while (!T.isNull()) {
5305  if (T.getObjCLifetime() != Qualifiers::OCL_None)
5306  return T.getObjCLifetime();
5307  if (T->isArrayType())
5308  T = getBaseElementType(T);
5309  else if (const PointerType *PT = T->getAs<PointerType>())
5310  T = PT->getPointeeType();
5311  else if (const ReferenceType *RT = T->getAs<ReferenceType>())
5312  T = RT->getPointeeType();
5313  else
5314  break;
5315  }
5316 
5317  return Qualifiers::OCL_None;
5318 }
5319 
5320 static const Type *getIntegerTypeForEnum(const EnumType *ET) {
5321  // Incomplete enum types are not treated as integer types.
5322  // FIXME: In C++, enum types are never integer types.
5323  if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
5324  return ET->getDecl()->getIntegerType().getTypePtr();
5325  return nullptr;
5326 }
5327 
5328 /// getIntegerTypeOrder - Returns the highest ranked integer type:
5329 /// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
5330 /// LHS < RHS, return -1.
5331 int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
5332  const Type *LHSC = getCanonicalType(LHS).getTypePtr();
5333  const Type *RHSC = getCanonicalType(RHS).getTypePtr();
5334 
5335  // Unwrap enums to their underlying type.
5336  if (const EnumType *ET = dyn_cast<EnumType>(LHSC))
5337  LHSC = getIntegerTypeForEnum(ET);
5338  if (const EnumType *ET = dyn_cast<EnumType>(RHSC))
5339  RHSC = getIntegerTypeForEnum(ET);
5340 
5341  if (LHSC == RHSC) return 0;
5342 
5343  bool LHSUnsigned = LHSC->isUnsignedIntegerType();
5344  bool RHSUnsigned = RHSC->isUnsignedIntegerType();
5345 
5346  unsigned LHSRank = getIntegerRank(LHSC);
5347  unsigned RHSRank = getIntegerRank(RHSC);
5348 
5349  if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned.
5350  if (LHSRank == RHSRank) return 0;
5351  return LHSRank > RHSRank ? 1 : -1;
5352  }
5353 
5354  // Otherwise, the LHS is signed and the RHS is unsigned or vice versa.
5355  if (LHSUnsigned) {
5356  // If the unsigned [LHS] type is larger, return it.
5357  if (LHSRank >= RHSRank)
5358  return 1;
5359 
5360  // If the signed type can represent all values of the unsigned type, it
5361  // wins. Because we are dealing with 2's complement and types that are
5362  // powers of two larger than each other, this is always safe.
5363  return -1;
5364  }
5365 
5366  // If the unsigned [RHS] type is larger, return it.
5367  if (RHSRank >= LHSRank)
5368  return -1;
5369 
5370  // If the signed type can represent all values of the unsigned type, it
5371  // wins. Because we are dealing with 2's complement and types that are
5372  // powers of two larger than each other, this is always safe.
5373  return 1;
5374 }
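// Illustrative example (not part of ASTContext.cpp): the sign/rank
// tie-breaking above is visible in the usual arithmetic conversions. The
// second assertion assumes long long is wider than unsigned int, as on
// common 32- and 64-bit targets.
#include <type_traits>
static_assert(std::is_same<decltype(-1 + 1u), unsigned int>::value,
              "equal rank, mixed sign -> the unsigned type wins");
static_assert(std::is_same<decltype(1u + 1ll), long long>::value,
              "a wider signed type that can represent every unsigned value wins");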
5375 
5376 TypedefDecl *ASTContext::getCFConstantStringDecl() const {
5377  if (!CFConstantStringTypeDecl) {
5378  assert(!CFConstantStringTagDecl &&
5379  "tag and typedef should be initialized together");
5380  CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag");
5381  CFConstantStringTagDecl->startDefinition();
5382 
5383  QualType FieldTypes[4];
5384  const char *FieldNames[4];
5385 
5386  // const int *isa;
5387  FieldTypes[0] = getPointerType(IntTy.withConst());
5388  FieldNames[0] = "isa";
5389  // int flags;
5390  FieldTypes[1] = IntTy;
5391  FieldNames[1] = "flags";
5392  // const char *str;
5393  FieldTypes[2] = getPointerType(CharTy.withConst());
5394  FieldNames[2] = "str";
5395  // long length;
5396  FieldTypes[3] = LongTy;
5397  FieldNames[3] = "length";
5398 
5399  // Create fields
5400  for (unsigned i = 0; i < 4; ++i) {
5401  FieldDecl *Field = FieldDecl::Create(*this, CFConstantStringTagDecl,
5402  SourceLocation(),
5403  SourceLocation(),
5404  &Idents.get(FieldNames[i]),
5405  FieldTypes[i], /*TInfo=*/nullptr,
5406  /*BitWidth=*/nullptr,
5407  /*Mutable=*/false,
5408  ICIS_NoInit);
5409  Field->setAccess(AS_public);
5410  CFConstantStringTagDecl->addDecl(Field);
5411  }
5412 
5413  CFConstantStringTagDecl->completeDefinition();
5414  // This type is designed to be compatible with NSConstantString, but cannot
5415  // use the same name, since NSConstantString is an interface.
5416  auto tagType = getTagDeclType(CFConstantStringTagDecl);
5417  CFConstantStringTypeDecl =
5418  buildImplicitTypedef(tagType, "__NSConstantString");
5419  }
5420 
5421  return CFConstantStringTypeDecl;
5422 }
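// Illustrative sketch (not part of ASTContext.cpp): the record built above
// corresponds to this declaration, with __NSConstantString as a typedef of
// the tag:
//
//   struct __NSConstantString_tag {
//     const int  *isa;
//     int         flags;
//     const char *str;
//     long        length;
//   };
//   typedef struct __NSConstantString_tag __NSConstantString;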
5423 
5424 RecordDecl *ASTContext::getCFConstantStringTagDecl() const {
5425  if (!CFConstantStringTagDecl)
5426  getCFConstantStringDecl(); // Build the tag and the typedef.
5427  return CFConstantStringTagDecl;
5428 }
5429 
5430 // getCFConstantStringType - Return the type used for constant CFStrings.
5431 QualType ASTContext::getCFConstantStringType() const {
5432  return getTypedefType(getCFConstantStringDecl());
5433 }
5434 
5435 QualType ASTContext::getObjCSuperType() const {
5436  if (ObjCSuperType.isNull()) {
5437  RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super");
5438  TUDecl->addDecl(ObjCSuperTypeDecl);
5439  ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl);
5440  }
5441  return ObjCSuperType;
5442 }
5443 
5444 void ASTContext::setCFConstantStringType(QualType T) {
5445  const TypedefType *TD = T->getAs<TypedefType>();
5446  assert(TD && "Invalid CFConstantStringType");
5447  CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl());