ASTContext.cpp (clang 7.0.0svn)
1 //===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the ASTContext interface.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "clang/AST/ASTContext.h"
15 #include "CXXABI.h"
16 #include "clang/AST/APValue.h"
19 #include "clang/AST/Attr.h"
20 #include "clang/AST/AttrIterator.h"
21 #include "clang/AST/CharUnits.h"
22 #include "clang/AST/Comment.h"
23 #include "clang/AST/Decl.h"
24 #include "clang/AST/DeclBase.h"
25 #include "clang/AST/DeclCXX.h"
27 #include "clang/AST/DeclObjC.h"
28 #include "clang/AST/DeclOpenMP.h"
29 #include "clang/AST/DeclTemplate.h"
31 #include "clang/AST/Expr.h"
32 #include "clang/AST/ExprCXX.h"
34 #include "clang/AST/Mangle.h"
38 #include "clang/AST/RecordLayout.h"
40 #include "clang/AST/Stmt.h"
41 #include "clang/AST/TemplateBase.h"
42 #include "clang/AST/TemplateName.h"
43 #include "clang/AST/Type.h"
44 #include "clang/AST/TypeLoc.h"
48 #include "clang/Basic/Builtins.h"
52 #include "clang/Basic/LLVM.h"
54 #include "clang/Basic/Linkage.h"
59 #include "clang/Basic/Specifiers.h"
61 #include "clang/Basic/TargetInfo.h"
62 #include "clang/Basic/XRayLists.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/APSInt.h"
65 #include "llvm/ADT/ArrayRef.h"
66 #include "llvm/ADT/DenseMap.h"
67 #include "llvm/ADT/DenseSet.h"
68 #include "llvm/ADT/FoldingSet.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/PointerUnion.h"
72 #include "llvm/ADT/STLExtras.h"
73 #include "llvm/ADT/SmallPtrSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/StringExtras.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Triple.h"
78 #include "llvm/Support/Capacity.h"
79 #include "llvm/Support/Casting.h"
80 #include "llvm/Support/Compiler.h"
81 #include "llvm/Support/ErrorHandling.h"
82 #include "llvm/Support/MathExtras.h"
83 #include "llvm/Support/raw_ostream.h"
84 #include <algorithm>
85 #include <cassert>
86 #include <cstddef>
87 #include <cstdint>
88 #include <cstdlib>
89 #include <map>
90 #include <memory>
91 #include <string>
92 #include <tuple>
93 #include <utility>
94 
95 using namespace clang;
96 
109 
110 enum FloatingRank {
111   Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank
112 };
113 
114 RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
115  if (!CommentsLoaded && ExternalSource) {
116  ExternalSource->ReadComments();
117 
118 #ifndef NDEBUG
119  ArrayRef<RawComment *> RawComments = Comments.getComments();
120  assert(std::is_sorted(RawComments.begin(), RawComments.end(),
121  BeforeThanCompare<RawComment>(SourceMgr)));
122 #endif
123 
124  CommentsLoaded = true;
125  }
126 
127  assert(D);
128 
129  // Users cannot attach documentation to implicit declarations.
130  if (D->isImplicit())
131  return nullptr;
132 
133  // Users cannot attach documentation to implicit instantiations.
134  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
135  if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
136  return nullptr;
137  }
138 
139  if (const auto *VD = dyn_cast<VarDecl>(D)) {
140  if (VD->isStaticDataMember() &&
141  VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
142  return nullptr;
143  }
144 
145  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
146  if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
147  return nullptr;
148  }
149 
150  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
151  TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
152  if (TSK == TSK_ImplicitInstantiation ||
153  TSK == TSK_Undeclared)
154  return nullptr;
155  }
156 
157  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
158  if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
159  return nullptr;
160  }
161  if (const auto *TD = dyn_cast<TagDecl>(D)) {
162  // When a tag declaration (but not a definition!) is part of the
163  // decl-specifier-seq of some other declaration, it doesn't get a comment.
164  if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
165  return nullptr;
166  }
167  // TODO: handle comments for function parameters properly.
168  if (isa<ParmVarDecl>(D))
169  return nullptr;
170 
171  // TODO: we could look up template parameter documentation in the template
172  // documentation.
173  if (isa<TemplateTypeParmDecl>(D) ||
174  isa<NonTypeTemplateParmDecl>(D) ||
175  isa<TemplateTemplateParmDecl>(D))
176  return nullptr;
177 
178  ArrayRef<RawComment *> RawComments = Comments.getComments();
179 
180  // If there are no comments anywhere, we won't find anything.
181  if (RawComments.empty())
182  return nullptr;
183 
184  // Find declaration location.
185  // For Objective-C declarations we generally don't expect to have multiple
186  // declarators, thus use declaration starting location as the "declaration
187  // location".
188  // For all other declarations multiple declarators are used quite frequently,
189  // so we use the location of the identifier as the "declaration location".
190  SourceLocation DeclLoc;
191  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
192  isa<ObjCPropertyDecl>(D) ||
193  isa<RedeclarableTemplateDecl>(D) ||
194  isa<ClassTemplateSpecializationDecl>(D))
195  DeclLoc = D->getLocStart();
196  else {
197  DeclLoc = D->getLocation();
198  if (DeclLoc.isMacroID()) {
199  if (isa<TypedefDecl>(D)) {
200  // If the location of the typedef name is in a macro, it is because the
201  // typedef is declared via a macro. Try using the declaration's starting
202  // location as the "declaration location".
203  DeclLoc = D->getLocStart();
204  } else if (const auto *TD = dyn_cast<TagDecl>(D)) {
205  // If location of the tag decl is inside a macro, but the spelling of
206  // the tag name comes from a macro argument, it looks like a special
207  // macro like NS_ENUM is being used to define the tag decl. In that
208  // case, adjust the source location to the expansion loc so that we can
209  // attach the comment to the tag decl.
210  if (SourceMgr.isMacroArgExpansion(DeclLoc) &&
211  TD->isCompleteDefinition())
212  DeclLoc = SourceMgr.getExpansionLoc(DeclLoc);
213  }
214  }
215  }
216 
217  // If the declaration doesn't map directly to a location in a file, we
218  // can't find the comment.
219  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
220  return nullptr;
221 
222  // Find the comment that occurs just after this declaration.
223  ArrayRef<RawComment *>::iterator Comment;
224  {
225  // When searching for comments during parsing, the comment we are looking
226  // for is usually among the last two comments we parsed -- check them
227  // first.
228  RawComment CommentAtDeclLoc(
229  SourceMgr, SourceRange(DeclLoc), LangOpts.CommentOpts, false);
230  BeforeThanCompare<RawComment> Compare(SourceMgr);
231  ArrayRef<RawComment *>::iterator MaybeBeforeDecl = RawComments.end() - 1;
232  bool Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
233  if (!Found && RawComments.size() >= 2) {
234  MaybeBeforeDecl--;
235  Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
236  }
237 
238  if (Found) {
239  Comment = MaybeBeforeDecl + 1;
240  assert(Comment == std::lower_bound(RawComments.begin(), RawComments.end(),
241  &CommentAtDeclLoc, Compare));
242  } else {
243  // Slow path.
244  Comment = std::lower_bound(RawComments.begin(), RawComments.end(),
245  &CommentAtDeclLoc, Compare);
246  }
247  }
248 
249  // Decompose the location for the declaration and find the beginning of the
250  // file buffer.
251  std::pair<FileID, unsigned> DeclLocDecomp = SourceMgr.getDecomposedLoc(DeclLoc);
252 
253  // First check whether we have a trailing comment.
254  if (Comment != RawComments.end() &&
255  ((*Comment)->isDocumentation() || LangOpts.CommentOpts.ParseAllComments)
256  && (*Comment)->isTrailingComment() &&
257  (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
258  isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {
259  std::pair<FileID, unsigned> CommentBeginDecomp
260  = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getBegin());
261  // Check that Doxygen trailing comment comes after the declaration, starts
262  // on the same line and in the same file as the declaration.
263  if (DeclLocDecomp.first == CommentBeginDecomp.first &&
264  SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second)
265  == SourceMgr.getLineNumber(CommentBeginDecomp.first,
266  CommentBeginDecomp.second)) {
267  return *Comment;
268  }
269  }
270 
271  // The comment just after the declaration was not a trailing comment.
272  // Let's look at the previous comment.
273  if (Comment == RawComments.begin())
274  return nullptr;
275  --Comment;
276 
277  // Check that we actually have a non-member Doxygen comment.
278  if (!((*Comment)->isDocumentation() ||
279  LangOpts.CommentOpts.ParseAllComments) ||
280  (*Comment)->isTrailingComment())
281  return nullptr;
282 
283  // Decompose the end of the comment.
284  std::pair<FileID, unsigned> CommentEndDecomp
285  = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getEnd());
286 
287  // If the comment and the declaration aren't in the same file, then they
288  // aren't related.
289  if (DeclLocDecomp.first != CommentEndDecomp.first)
290  return nullptr;
291 
292  // Get the corresponding buffer.
293  bool Invalid = false;
294  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
295  &Invalid).data();
296  if (Invalid)
297  return nullptr;
298 
299  // Extract text between the comment and declaration.
300  StringRef Text(Buffer + CommentEndDecomp.second,
301  DeclLocDecomp.second - CommentEndDecomp.second);
302 
303  // There should be no other declarations or preprocessor directives between
304  // comment and declaration.
305  if (Text.find_first_of(";{}#@") != StringRef::npos)
306  return nullptr;
307 
308  return *Comment;
309 }
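// For example, the search above will associate either of these comments with
// the declaration of 'x':
//
//   /// Preceding documentation comment.
//   int x;
//
//   int x; ///< Trailing comment on the same line.
//
// but not a comment separated from 'x' by another declaration or a
// preprocessor directive, which is what the ";{}#@" check rejects.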
310 
311 /// If we have a 'templated' declaration for a template, adjust 'D' to
312 /// refer to the actual template.
313 /// If we have an implicit instantiation, adjust 'D' to refer to the template.
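/// For example, for a member function of an implicitly instantiated class
/// template specialization, this returns the corresponding member of the
/// class template, so that its documentation comment can be reused.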
314 static const Decl *adjustDeclToTemplate(const Decl *D) {
315  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
316  // Is this function declaration part of a function template?
317  if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
318  return FTD;
319 
320  // Nothing to do if function is not an implicit instantiation.
321  if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
322  return D;
323 
324  // Function is an implicit instantiation of a function template?
325  if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
326  return FTD;
327 
328  // Function is instantiated from a member definition of a class template?
329  if (const FunctionDecl *MemberDecl =
330  FD->getInstantiatedFromMemberFunction())
331  return MemberDecl;
332 
333  return D;
334  }
335  if (const auto *VD = dyn_cast<VarDecl>(D)) {
336  // Static data member is instantiated from a member definition of a class
337  // template?
338  if (VD->isStaticDataMember())
339  if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
340  return MemberDecl;
341 
342  return D;
343  }
344  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
345  // Is this class declaration part of a class template?
346  if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
347  return CTD;
348 
349  // Class is an implicit instantiation of a class template or partial
350  // specialization?
351  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
352  if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
353  return D;
354  llvm::PointerUnion<ClassTemplateDecl *,
355  ClassTemplatePartialSpecializationDecl *>
356  PU = CTSD->getSpecializedTemplateOrPartial();
357  return PU.is<ClassTemplateDecl*>() ?
358  static_cast<const Decl*>(PU.get<ClassTemplateDecl *>()) :
359  static_cast<const Decl*>(
360  PU.get<ClassTemplatePartialSpecializationDecl *>());
361  }
362 
363  // Class is instantiated from a member definition of a class template?
364  if (const MemberSpecializationInfo *Info =
365  CRD->getMemberSpecializationInfo())
366  return Info->getInstantiatedFrom();
367 
368  return D;
369  }
370  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
371  // Enum is instantiated from a member definition of a class template?
372  if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
373  return MemberDecl;
374 
375  return D;
376  }
377  // FIXME: Adjust alias templates?
378  return D;
379 }
380 
381 const RawComment *ASTContext::getRawCommentForAnyRedecl(
382  const Decl *D,
383  const Decl **OriginalDecl) const {
384  D = adjustDeclToTemplate(D);
385 
386  // Check whether we have cached a comment for this declaration already.
387  {
388  llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
389  RedeclComments.find(D);
390  if (Pos != RedeclComments.end()) {
391  const RawCommentAndCacheFlags &Raw = Pos->second;
392  if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
393  if (OriginalDecl)
394  *OriginalDecl = Raw.getOriginalDecl();
395  return Raw.getRaw();
396  }
397  }
398  }
399 
400  // Search for comments attached to declarations in the redeclaration chain.
401  const RawComment *RC = nullptr;
402  const Decl *OriginalDeclForRC = nullptr;
403  for (auto I : D->redecls()) {
404  llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
405  RedeclComments.find(I);
406  if (Pos != RedeclComments.end()) {
407  const RawCommentAndCacheFlags &Raw = Pos->second;
408  if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
409  RC = Raw.getRaw();
410  OriginalDeclForRC = Raw.getOriginalDecl();
411  break;
412  }
413  } else {
414  RC = getRawCommentForDeclNoCache(I);
415  OriginalDeclForRC = I;
416  RawCommentAndCacheFlags Raw;
417  if (RC) {
418  // Call order swapped to work around ICE in VS2015 RTM (Release Win32)
419  // https://connect.microsoft.com/VisualStudio/feedback/details/1741530
420  Raw.setKind(RawCommentAndCacheFlags::FromDecl);
421  Raw.setRaw(RC);
422  } else
423  Raw.setKind(RawCommentAndCacheFlags::NoCommentInDecl);
424  Raw.setOriginalDecl(I);
425  RedeclComments[I] = Raw;
426  if (RC)
427  break;
428  }
429  }
430 
431  // If we found a comment, it should be a documentation comment.
432  assert(!RC || RC->isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
433 
434  if (OriginalDecl)
435  *OriginalDecl = OriginalDeclForRC;
436 
437  // Update cache for every declaration in the redeclaration chain.
438  RawCommentAndCacheFlags Raw;
439  Raw.setRaw(RC);
440  Raw.setKind(RawCommentAndCacheFlags::FromRedecl);
441  Raw.setOriginalDecl(OriginalDeclForRC);
442 
443  for (auto I : D->redecls()) {
444  RawCommentAndCacheFlags &R = RedeclComments[I];
445  if (R.getKind() == RawCommentAndCacheFlags::NoCommentInDecl)
446  R = Raw;
447  }
448 
449  return RC;
450 }
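// For example, given
//
//   /// Frobnicates the widget.
//   void f();
//   void f() {}
//
// querying the second declaration of f() walks the redeclaration chain, finds
// the comment attached to the first declaration, returns it with that
// declaration as OriginalDecl, and caches the result on every redeclaration.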
451 
452 static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
453  SmallVectorImpl<const NamedDecl *> &Redeclared) {
454  const DeclContext *DC = ObjCMethod->getDeclContext();
455  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
456  const ObjCInterfaceDecl *ID = IMD->getClassInterface();
457  if (!ID)
458  return;
459  // Add redeclared method here.
460  for (const auto *Ext : ID->known_extensions()) {
461  if (ObjCMethodDecl *RedeclaredMethod =
462  Ext->getMethod(ObjCMethod->getSelector(),
463  ObjCMethod->isInstanceMethod()))
464  Redeclared.push_back(RedeclaredMethod);
465  }
466  }
467 }
468 
469 comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
470  const Decl *D) const {
471  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
472  ThisDeclInfo->CommentDecl = D;
473  ThisDeclInfo->IsFilled = false;
474  ThisDeclInfo->fill();
475  ThisDeclInfo->CommentDecl = FC->getDecl();
476  if (!ThisDeclInfo->TemplateParameters)
477  ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
478  comments::FullComment *CFC =
479  new (*this) comments::FullComment(FC->getBlocks(),
480  ThisDeclInfo);
481  return CFC;
482 }
483 
484 comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
485  const RawComment *RC = getRawCommentForDeclNoCache(D);
486  return RC ? RC->parse(*this, nullptr, D) : nullptr;
487 }
488 
489 comments::FullComment *ASTContext::getCommentForDecl(
490  const Decl *D,
491  const Preprocessor *PP) const {
492  if (D->isInvalidDecl())
493  return nullptr;
494  D = adjustDeclToTemplate(D);
495 
496  const Decl *Canonical = D->getCanonicalDecl();
497  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
498  ParsedComments.find(Canonical);
499 
500  if (Pos != ParsedComments.end()) {
501  if (Canonical != D) {
502  comments::FullComment *FC = Pos->second;
503  comments::FullComment *CFC = cloneFullComment(FC, D);
504  return CFC;
505  }
506  return Pos->second;
507  }
508 
509  const Decl *OriginalDecl;
510 
511  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
512  if (!RC) {
513  if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
514  SmallVector<const NamedDecl*, 8> Overridden;
515  const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
516  if (OMD && OMD->isPropertyAccessor())
517  if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
518  if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
519  return cloneFullComment(FC, D);
520  if (OMD)
521  addRedeclaredMethods(OMD, Overridden);
522  getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
523  for (unsigned i = 0, e = Overridden.size(); i < e; i++)
524  if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
525  return cloneFullComment(FC, D);
526  }
527  else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
528  // Attach any tag type's documentation to its typedef if the latter
529  // does not have one of its own.
530  QualType QT = TD->getUnderlyingType();
531  if (const auto *TT = QT->getAs<TagType>())
532  if (const Decl *TD = TT->getDecl())
533  if (comments::FullComment *FC = getCommentForDecl(TD, PP))
534  return cloneFullComment(FC, D);
535  }
536  else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
537  while (IC->getSuperClass()) {
538  IC = IC->getSuperClass();
539  if (comments::FullComment *FC = getCommentForDecl(IC, PP))
540  return cloneFullComment(FC, D);
541  }
542  }
543  else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
544  if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
545  if (comments::FullComment *FC = getCommentForDecl(IC, PP))
546  return cloneFullComment(FC, D);
547  }
548  else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
549  if (!(RD = RD->getDefinition()))
550  return nullptr;
551  // Check non-virtual bases.
552  for (const auto &I : RD->bases()) {
553  if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
554  continue;
555  QualType Ty = I.getType();
556  if (Ty.isNull())
557  continue;
558  if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
559  if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
560  continue;
561 
562  if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
563  return cloneFullComment(FC, D);
564  }
565  }
566  // Check virtual bases.
567  for (const auto &I : RD->vbases()) {
568  if (I.getAccessSpecifier() != AS_public)
569  continue;
570  QualType Ty = I.getType();
571  if (Ty.isNull())
572  continue;
573  if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
574  if (!(VirtualBase= VirtualBase->getDefinition()))
575  continue;
576  if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
577  return cloneFullComment(FC, D);
578  }
579  }
580  }
581  return nullptr;
582  }
583 
584  // If the RawComment was attached to other redeclaration of this Decl, we
585  // should parse the comment in context of that other Decl. This is important
586  // because comments can contain references to parameter names which can be
587  // different across redeclarations.
588  if (D != OriginalDecl)
589  return getCommentForDecl(OriginalDecl, PP);
590 
591  comments::FullComment *FC = RC->parse(*this, PP, D);
592  ParsedComments[Canonical] = FC;
593  return FC;
594 }
595 
596 void
597 ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
598  TemplateTemplateParmDecl *Parm) {
599  ID.AddInteger(Parm->getDepth());
600  ID.AddInteger(Parm->getPosition());
601  ID.AddBoolean(Parm->isParameterPack());
602 
603  TemplateParameterList *Params = Parm->getTemplateParameters();
604  ID.AddInteger(Params->size());
605  for (TemplateParameterList::const_iterator P = Params->begin(),
606  PEnd = Params->end();
607  P != PEnd; ++P) {
608  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
609  ID.AddInteger(0);
610  ID.AddBoolean(TTP->isParameterPack());
611  continue;
612  }
613 
614  if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
615  ID.AddInteger(1);
616  ID.AddBoolean(NTTP->isParameterPack());
617  ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
618  if (NTTP->isExpandedParameterPack()) {
619  ID.AddBoolean(true);
620  ID.AddInteger(NTTP->getNumExpansionTypes());
621  for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
622  QualType T = NTTP->getExpansionType(I);
623  ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
624  }
625  } else
626  ID.AddBoolean(false);
627  continue;
628  }
629 
630  auto *TTP = cast<TemplateTemplateParmDecl>(*P);
631  ID.AddInteger(2);
632  Profile(ID, TTP);
633  }
634 }
635 
636 TemplateTemplateParmDecl *
637 ASTContext::getCanonicalTemplateTemplateParmDecl(
638  TemplateTemplateParmDecl *TTP) const {
639  // Check if we already have a canonical template template parameter.
640  llvm::FoldingSetNodeID ID;
641  CanonicalTemplateTemplateParm::Profile(ID, TTP);
642  void *InsertPos = nullptr;
643  CanonicalTemplateTemplateParm *Canonical
644  = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
645  if (Canonical)
646  return Canonical->getParam();
647 
648  // Build a canonical template parameter list.
649  TemplateParameterList *Params = TTP->getTemplateParameters();
650  SmallVector<NamedDecl *, 4> CanonParams;
651  CanonParams.reserve(Params->size());
652  for (TemplateParameterList::const_iterator P = Params->begin(),
653  PEnd = Params->end();
654  P != PEnd; ++P) {
655  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
656  CanonParams.push_back(
657  TemplateTypeParmDecl::Create(*this, getTranslationUnitDecl(),
658  SourceLocation(),
659  SourceLocation(),
660  TTP->getDepth(),
661  TTP->getIndex(), nullptr, false,
662  TTP->isParameterPack()));
663  else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
664  QualType T = getCanonicalType(NTTP->getType());
665  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
666  NonTypeTemplateParmDecl *Param;
667  if (NTTP->isExpandedParameterPack()) {
668  SmallVector<QualType, 2> ExpandedTypes;
669  SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
670  for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
671  ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
672  ExpandedTInfos.push_back(
673  getTrivialTypeSourceInfo(ExpandedTypes.back()));
674  }
675 
676  Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
677  SourceLocation(),
678  SourceLocation(),
679  NTTP->getDepth(),
680  NTTP->getPosition(), nullptr,
681  T,
682  TInfo,
683  ExpandedTypes,
684  ExpandedTInfos);
685  } else {
686  Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
687  SourceLocation(),
688  SourceLocation(),
689  NTTP->getDepth(),
690  NTTP->getPosition(), nullptr,
691  T,
692  NTTP->isParameterPack(),
693  TInfo);
694  }
695  CanonParams.push_back(Param);
696 
697  } else
698  CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
699  cast<TemplateTemplateParmDecl>(*P)));
700  }
701 
702  assert(!TTP->getRequiresClause() &&
703  "Unexpected requires-clause on template template-parameter");
704  Expr *const CanonRequiresClause = nullptr;
705 
706  TemplateTemplateParmDecl *CanonTTP
707  = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
708  SourceLocation(), TTP->getDepth(),
709  TTP->getPosition(),
710  TTP->isParameterPack(),
711  nullptr,
712  TemplateParameterList::Create(*this, SourceLocation(),
713  SourceLocation(),
714  CanonParams,
715  SourceLocation(),
716  CanonRequiresClause));
717 
718  // Get the new insert position for the node we care about.
719  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
720  assert(!Canonical && "Shouldn't be in the map!");
721  (void)Canonical;
722 
723  // Create the canonical template template parameter entry.
724  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
725  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
726  return CanonTTP;
727 }
728 
729 CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
730  if (!LangOpts.CPlusPlus) return nullptr;
731 
732  switch (T.getCXXABI().getKind()) {
733  case TargetCXXABI::GenericARM: // Same as Itanium at this level
734  case TargetCXXABI::iOS:
735  case TargetCXXABI::iOS64:
741  return CreateItaniumCXXABI(*this);
742  case TargetCXXABI::Microsoft:
743  return CreateMicrosoftCXXABI(*this);
744  }
745  llvm_unreachable("Invalid CXXABI type!");
746 }
747 
748 static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
749  const LangOptions &LOpts) {
750  if (LOpts.FakeAddressSpaceMap) {
751  // The fake address space map must have a distinct entry for each
752  // language-specific address space.
753  static const unsigned FakeAddrSpaceMap[] = {
754  0, // Default
755  1, // opencl_global
756  3, // opencl_local
757  2, // opencl_constant
758  0, // opencl_private
759  4, // opencl_generic
760  5, // cuda_device
761  6, // cuda_constant
762  7 // cuda_shared
763  };
764  return &FakeAddrSpaceMap;
765  } else {
766  return &T.getAddressSpaceMap();
767  }
768 }
769 
770 static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
771  const LangOptions &LangOpts) {
772  switch (LangOpts.getAddressSpaceMapMangling()) {
773  case LangOptions::ASMM_Target:
774  return TI.useAddressSpaceMapMangling();
775  case LangOptions::ASMM_On:
776  return true;
777  case LangOptions::ASMM_Off:
778  return false;
779  }
780  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
781 }
782 
783 ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
784  IdentifierTable &idents, SelectorTable &sels,
785  Builtin::Context &builtins)
786  : FunctionProtoTypes(this_()), TemplateSpecializationTypes(this_()),
787  DependentTemplateSpecializationTypes(this_()),
788  SubstTemplateTemplateParmPacks(this_()), SourceMgr(SM), LangOpts(LOpts),
789  SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFiles, SM)),
790  XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
791  LangOpts.XRayNeverInstrumentFiles,
792  LangOpts.XRayAttrListFiles, SM)),
793  PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
794  BuiltinInfo(builtins), DeclarationNames(*this), Comments(SM),
795  CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
796  CompCategories(this_()), LastSDM(nullptr, 0) {
797  TUDecl = TranslationUnitDecl::Create(*this);
798 }
799 
800 ASTContext::~ASTContext() {
801  ReleaseParentMapEntries();
802 
803  // Release the DenseMaps associated with DeclContext objects.
804  // FIXME: Is this the ideal solution?
805  ReleaseDeclContextMaps();
806 
807  // Call all of the deallocation functions on all of their targets.
808  for (auto &Pair : Deallocations)
809  (Pair.first)(Pair.second);
810 
811  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
812  // because they can contain DenseMaps.
813  for (llvm::DenseMap<const ObjCContainerDecl*,
814  const ASTRecordLayout*>::iterator
815  I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
816  // Increment in loop to prevent using deallocated memory.
817  if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
818  R->Destroy(*this);
819 
820  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
821  I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
822  // Increment in loop to prevent using deallocated memory.
823  if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
824  R->Destroy(*this);
825  }
826 
827  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
828  AEnd = DeclAttrs.end();
829  A != AEnd; ++A)
830  A->second->~AttrVec();
831 
832  for (std::pair<const MaterializeTemporaryExpr *, APValue *> &MTVPair :
833  MaterializedTemporaryValues)
834  MTVPair.second->~APValue();
835 
836  for (const auto &Value : ModuleInitializers)
837  Value.second->~PerModuleInitializers();
838 }
839 
840 void ASTContext::ReleaseParentMapEntries() {
841  if (!PointerParents) return;
842  for (const auto &Entry : *PointerParents) {
843  if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
844  delete Entry.second.get<ast_type_traits::DynTypedNode *>();
845  } else if (Entry.second.is<ParentVector *>()) {
846  delete Entry.second.get<ParentVector *>();
847  }
848  }
849  for (const auto &Entry : *OtherParents) {
850  if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
851  delete Entry.second.get<ast_type_traits::DynTypedNode *>();
852  } else if (Entry.second.is<ParentVector *>()) {
853  delete Entry.second.get<ParentVector *>();
854  }
855  }
856 }
857 
858 void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
859  Deallocations.push_back({Callback, Data});
860 }
861 
862 void
863 ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
864  ExternalSource = std::move(Source);
865 }
866 
867 void ASTContext::PrintStats() const {
868  llvm::errs() << "\n*** AST Context Stats:\n";
869  llvm::errs() << " " << Types.size() << " types total.\n";
870 
871  unsigned counts[] = {
872 #define TYPE(Name, Parent) 0,
873 #define ABSTRACT_TYPE(Name, Parent)
874 #include "clang/AST/TypeNodes.def"
875  0 // Extra
876  };
877 
878  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
879  Type *T = Types[i];
880  counts[(unsigned)T->getTypeClass()]++;
881  }
882 
883  unsigned Idx = 0;
884  unsigned TotalBytes = 0;
885 #define TYPE(Name, Parent) \
886  if (counts[Idx]) \
887  llvm::errs() << " " << counts[Idx] << " " << #Name \
888  << " types\n"; \
889  TotalBytes += counts[Idx] * sizeof(Name##Type); \
890  ++Idx;
891 #define ABSTRACT_TYPE(Name, Parent)
892 #include "clang/AST/TypeNodes.def"
893 
894  llvm::errs() << "Total bytes = " << TotalBytes << "\n";
895 
896  // Implicit special member functions.
897  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
898  << NumImplicitDefaultConstructors
899  << " implicit default constructors created\n";
900  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
901  << NumImplicitCopyConstructors
902  << " implicit copy constructors created\n";
903  if (getLangOpts().CPlusPlus)
904  llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
905  << NumImplicitMoveConstructors
906  << " implicit move constructors created\n";
907  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
908  << NumImplicitCopyAssignmentOperators
909  << " implicit copy assignment operators created\n";
910  if (getLangOpts().CPlusPlus)
911  llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
912  << NumImplicitMoveAssignmentOperators
913  << " implicit move assignment operators created\n";
914  llvm::errs() << NumImplicitDestructorsDeclared << "/"
915  << NumImplicitDestructors
916  << " implicit destructors created\n";
917 
918  if (ExternalSource) {
919  llvm::errs() << "\n";
920  ExternalSource->PrintStats();
921  }
922 
923  BumpAlloc.PrintStats();
924 }
925 
927  bool NotifyListeners) {
928  if (NotifyListeners)
929  if (auto *Listener = getASTMutationListener())
931 
932  if (getLangOpts().ModulesLocalVisibility)
933  MergedDefModules[ND].push_back(M);
934  else
936 }
937 
939  auto It = MergedDefModules.find(ND);
940  if (It == MergedDefModules.end())
941  return;
942 
943  auto &Merged = It->second;
945  for (Module *&M : Merged)
946  if (!Found.insert(M).second)
947  M = nullptr;
948  Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end());
949 }
950 
951 void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
952  if (LazyInitializers.empty())
953  return;
954 
955  auto *Source = Ctx.getExternalSource();
956  assert(Source && "lazy initializers but no external source");
957 
958  auto LazyInits = std::move(LazyInitializers);
959  LazyInitializers.clear();
960 
961  for (auto ID : LazyInits)
962  Initializers.push_back(Source->GetExternalDecl(ID));
963 
964  assert(LazyInitializers.empty() &&
965  "GetExternalDecl for lazy module initializer added more inits");
966 }
967 
969  // One special case: if we add a module initializer that imports another
970  // module, and that module's only initializer is an ImportDecl, simplify.
971  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
972  auto It = ModuleInitializers.find(ID->getImportedModule());
973 
974  // Maybe the ImportDecl does nothing at all. (Common case.)
975  if (It == ModuleInitializers.end())
976  return;
977 
978  // Maybe the ImportDecl only imports another ImportDecl.
979  auto &Imported = *It->second;
980  if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
981  Imported.resolve(*this);
982  auto *OnlyDecl = Imported.Initializers.front();
983  if (isa<ImportDecl>(OnlyDecl))
984  D = OnlyDecl;
985  }
986  }
987 
988  auto *&Inits = ModuleInitializers[M];
989  if (!Inits)
990  Inits = new (*this) PerModuleInitializers;
991  Inits->Initializers.push_back(D);
992 }
993 
995  auto *&Inits = ModuleInitializers[M];
996  if (!Inits)
997  Inits = new (*this) PerModuleInitializers;
998  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
999  IDs.begin(), IDs.end());
1000 }
1001 
1003  auto It = ModuleInitializers.find(M);
1004  if (It == ModuleInitializers.end())
1005  return None;
1006 
1007  auto *Inits = It->second;
1008  Inits->resolve(*this);
1009  return Inits->Initializers;
1010 }
1011 
1013  if (!ExternCContext)
1014  ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());
1015 
1016  return ExternCContext;
1017 }
1018 
1021  const IdentifierInfo *II) const {
1022  auto *BuiltinTemplate = BuiltinTemplateDecl::Create(*this, TUDecl, II, BTK);
1023  BuiltinTemplate->setImplicit();
1024  TUDecl->addDecl(BuiltinTemplate);
1025 
1026  return BuiltinTemplate;
1027 }
1028 
1031  if (!MakeIntegerSeqDecl)
1032  MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
1034  return MakeIntegerSeqDecl;
1035 }
1036 
1039  if (!TypePackElementDecl)
1040  TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
1042  return TypePackElementDecl;
1043 }
1044 
1046  RecordDecl::TagKind TK) const {
1047  SourceLocation Loc;
1048  RecordDecl *NewDecl;
1049  if (getLangOpts().CPlusPlus)
1050  NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
1051  Loc, &Idents.get(Name));
1052  else
1053  NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
1054  &Idents.get(Name));
1055  NewDecl->setImplicit();
1056  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
1057  const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
1058  return NewDecl;
1059 }
1060 
1062  StringRef Name) const {
1064  TypedefDecl *NewDecl = TypedefDecl::Create(
1065  const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
1066  SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
1067  NewDecl->setImplicit();
1068  return NewDecl;
1069 }
1070 
1072  if (!Int128Decl)
1073  Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
1074  return Int128Decl;
1075 }
1076 
1078  if (!UInt128Decl)
1079  UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
1080  return UInt128Decl;
1081 }
1082 
1083 void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
1084  auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
1085  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
1086  Types.push_back(Ty);
1087 }
1088 
1089 void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
1090  const TargetInfo *AuxTarget) {
1091  assert((!this->Target || this->Target == &Target) &&
1092  "Incorrect target reinitialization");
1093  assert(VoidTy.isNull() && "Context reinitialized?");
1094 
1095  this->Target = &Target;
1096  this->AuxTarget = AuxTarget;
1097 
1098  ABI.reset(createCXXABI(Target));
1099  AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
1100  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);
1101 
1102  // C99 6.2.5p19.
1103  InitBuiltinType(VoidTy, BuiltinType::Void);
1104 
1105  // C99 6.2.5p2.
1106  InitBuiltinType(BoolTy, BuiltinType::Bool);
1107  // C99 6.2.5p3.
1108  if (LangOpts.CharIsSigned)
1109  InitBuiltinType(CharTy, BuiltinType::Char_S);
1110  else
1111  InitBuiltinType(CharTy, BuiltinType::Char_U);
1112  // C99 6.2.5p4.
1113  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
1114  InitBuiltinType(ShortTy, BuiltinType::Short);
1115  InitBuiltinType(IntTy, BuiltinType::Int);
1116  InitBuiltinType(LongTy, BuiltinType::Long);
1117  InitBuiltinType(LongLongTy, BuiltinType::LongLong);
1118 
1119  // C99 6.2.5p6.
1120  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
1121  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
1122  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
1123  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
1124  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);
1125 
1126  // C99 6.2.5p10.
1127  InitBuiltinType(FloatTy, BuiltinType::Float);
1128  InitBuiltinType(DoubleTy, BuiltinType::Double);
1129  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);
1130 
1131  // GNU extension, __float128 for IEEE quadruple precision
1132  InitBuiltinType(Float128Ty, BuiltinType::Float128);
1133 
1134  // C11 extension ISO/IEC TS 18661-3
1135  InitBuiltinType(Float16Ty, BuiltinType::Float16);
1136 
1137  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
1138  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
1139  InitBuiltinType(AccumTy, BuiltinType::Accum);
1140  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
1141  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
1142  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
1143  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
1144  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
1145  InitBuiltinType(FractTy, BuiltinType::Fract);
1146  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
1147  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
1148  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
1149  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
1150  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
1151  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
1152  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
1153  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
1154  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
1155  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
1156  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
1157  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
1158  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
1159  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
1160  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
1161  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);
1162 
1163  // GNU extension, 128-bit integers.
1164  InitBuiltinType(Int128Ty, BuiltinType::Int128);
1165  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
1166 
1167  // C++ 3.9.1p5
1168  if (TargetInfo::isTypeSigned(Target.getWCharType()))
1169  InitBuiltinType(WCharTy, BuiltinType::WChar_S);
1170  else // -fshort-wchar makes wchar_t be unsigned.
1171  InitBuiltinType(WCharTy, BuiltinType::WChar_U);
1172  if (LangOpts.CPlusPlus && LangOpts.WChar)
1173  WideCharTy = WCharTy;
1174  else {
1175  // C99 (or C++ using -fno-wchar).
1176  WideCharTy = getFromTargetType(Target.getWCharType());
1177  }
1178 
1179  WIntTy = getFromTargetType(Target.getWIntType());
1180 
1181  // C++20 (proposed)
1182  InitBuiltinType(Char8Ty, BuiltinType::Char8);
1183 
1184  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1185  InitBuiltinType(Char16Ty, BuiltinType::Char16);
1186  else // C99
1187  Char16Ty = getFromTargetType(Target.getChar16Type());
1188 
1189  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1190  InitBuiltinType(Char32Ty, BuiltinType::Char32);
1191  else // C99
1192  Char32Ty = getFromTargetType(Target.getChar32Type());
1193 
1194  // Placeholder type for type-dependent expressions whose type is
1195  // completely unknown. No code should ever check a type against
1196  // DependentTy and users should never see it; however, it is here to
1197  // help diagnose failures to properly check for type-dependent
1198  // expressions.
1199  InitBuiltinType(DependentTy, BuiltinType::Dependent);
1200 
1201  // Placeholder type for functions.
1202  InitBuiltinType(OverloadTy, BuiltinType::Overload);
1203 
1204  // Placeholder type for bound members.
1205  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);
1206 
1207  // Placeholder type for pseudo-objects.
1208  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);
1209 
1210  // "any" type; useful for debugger-like clients.
1211  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);
1212 
1213  // Placeholder type for unbridged ARC casts.
1214  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);
1215 
1216  // Placeholder type for builtin functions.
1217  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
1218 
1219  // Placeholder type for OMP array sections.
1220  if (LangOpts.OpenMP)
1221  InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
1222 
1223  // C99 6.2.5p11.
1224  FloatComplexTy      = getComplexType(FloatTy);
1225  DoubleComplexTy     = getComplexType(DoubleTy);
1226  LongDoubleComplexTy = getComplexType(LongDoubleTy);
1227  Float128ComplexTy   = getComplexType(Float128Ty);
1228 
1229  // Builtin types for 'id', 'Class', and 'SEL'.
1230  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
1231  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
1232  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);
1233 
1234  if (LangOpts.OpenCL) {
1235 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
1236  InitBuiltinType(SingletonId, BuiltinType::Id);
1237 #include "clang/Basic/OpenCLImageTypes.def"
1238 
1239  InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
1240  InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
1241  InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
1242  InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
1243  InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);
1244  }
1245 
1246  // Builtin type for __objc_yes and __objc_no
1247  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
1248  SignedCharTy : BoolTy);
1249 
1250  ObjCConstantStringType = QualType();
1251 
1252  ObjCSuperType = QualType();
1253 
1254  // void * type
1255  if (LangOpts.OpenCLVersion >= 200) {
1256  auto Q = VoidTy.getQualifiers();
1257  Q.setAddressSpace(LangAS::opencl_generic);
1258  VoidPtrTy = getPointerType(getCanonicalType(
1259  getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
1260  } else {
1261  VoidPtrTy = getPointerType(VoidTy);
1262  }
1263 
1264  // nullptr type (C++0x 2.14.7)
1265  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
1266 
1267  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
1268  InitBuiltinType(HalfTy, BuiltinType::Half);
1269 
1270  // Builtin type used to help define __builtin_va_list.
1271  VaListTagDecl = nullptr;
1272 }
1273 
1275  return SourceMgr.getDiagnostics();
1276 }
1277 
1279  AttrVec *&Result = DeclAttrs[D];
1280  if (!Result) {
1281  void *Mem = Allocate(sizeof(AttrVec));
1282  Result = new (Mem) AttrVec;
1283  }
1284 
1285  return *Result;
1286 }
1287 
1288 /// Erase the attributes corresponding to the given declaration.
1290  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
1291  if (Pos != DeclAttrs.end()) {
1292  Pos->second->~AttrVec();
1293  DeclAttrs.erase(Pos);
1294  }
1295 }
1296 
1297 // FIXME: Remove ?
1298 MemberSpecializationInfo *
1299 ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
1300  assert(Var->isStaticDataMember() && "Not a static data member");
1301  return getTemplateOrSpecializationInfo(Var)
1302  .dyn_cast<MemberSpecializationInfo *>();
1303 }
1304 
1307  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
1308  TemplateOrInstantiation.find(Var);
1309  if (Pos == TemplateOrInstantiation.end())
1310  return {};
1311 
1312  return Pos->second;
1313 }
1314 
1315 void
1318  SourceLocation PointOfInstantiation) {
1319  assert(Inst->isStaticDataMember() && "Not a static data member");
1320  assert(Tmpl->isStaticDataMember() && "Not a static data member");
1322  Tmpl, TSK, PointOfInstantiation));
1323 }
1324 
1325 void
1328  assert(!TemplateOrInstantiation[Inst] &&
1329  "Already noted what the variable was instantiated from");
1330  TemplateOrInstantiation[Inst] = TSI;
1331 }
1332 
1334  const FunctionDecl *FD){
1335  assert(FD && "Specialization is 0");
1336  llvm::DenseMap<const FunctionDecl*, FunctionDecl *>::const_iterator Pos
1337  = ClassScopeSpecializationPattern.find(FD);
1338  if (Pos == ClassScopeSpecializationPattern.end())
1339  return nullptr;
1340 
1341  return Pos->second;
1342 }
1343 
1345  FunctionDecl *Pattern) {
1346  assert(FD && "Specialization is 0");
1347  assert(Pattern && "Class scope specialization pattern is 0");
1348  ClassScopeSpecializationPattern[FD] = Pattern;
1349 }
1350 
1351 NamedDecl *
1353  auto Pos = InstantiatedFromUsingDecl.find(UUD);
1354  if (Pos == InstantiatedFromUsingDecl.end())
1355  return nullptr;
1356 
1357  return Pos->second;
1358 }
1359 
1360 void
1362  assert((isa<UsingDecl>(Pattern) ||
1363  isa<UnresolvedUsingValueDecl>(Pattern) ||
1364  isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
1365  "pattern decl is not a using decl");
1366  assert((isa<UsingDecl>(Inst) ||
1367  isa<UnresolvedUsingValueDecl>(Inst) ||
1368  isa<UnresolvedUsingTypenameDecl>(Inst)) &&
1369  "instantiation did not produce a using decl");
1370  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
1371  InstantiatedFromUsingDecl[Inst] = Pattern;
1372 }
1373 
1376  llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
1377  = InstantiatedFromUsingShadowDecl.find(Inst);
1378  if (Pos == InstantiatedFromUsingShadowDecl.end())
1379  return nullptr;
1380 
1381  return Pos->second;
1382 }
1383 
1384 void
1386  UsingShadowDecl *Pattern) {
1387  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
1388  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
1389 }
1390 
1392  llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
1393  = InstantiatedFromUnnamedFieldDecl.find(Field);
1394  if (Pos == InstantiatedFromUnnamedFieldDecl.end())
1395  return nullptr;
1396 
1397  return Pos->second;
1398 }
1399 
1401  FieldDecl *Tmpl) {
1402  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
1403  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
1404  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1405  "Already noted what unnamed field was instantiated from");
1406 
1407  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1408 }
1409 
1412  return overridden_methods(Method).begin();
1413 }
1414 
1417  return overridden_methods(Method).end();
1418 }
1419 
1420 unsigned
1422  auto Range = overridden_methods(Method);
1423  return Range.end() - Range.begin();
1424 }
1425 
1428  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
1429  OverriddenMethods.find(Method->getCanonicalDecl());
1430  if (Pos == OverriddenMethods.end())
1431  return overridden_method_range(nullptr, nullptr);
1432  return overridden_method_range(Pos->second.begin(), Pos->second.end());
1433 }
1434 
1436  const CXXMethodDecl *Overridden) {
1437  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
1438  OverriddenMethods[Method].push_back(Overridden);
1439 }
1440 
1442  const NamedDecl *D,
1443  SmallVectorImpl<const NamedDecl *> &Overridden) const {
1444  assert(D);
1445 
1446  if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
1447  Overridden.append(overridden_methods_begin(CXXMethod),
1448  overridden_methods_end(CXXMethod));
1449  return;
1450  }
1451 
1452  const auto *Method = dyn_cast<ObjCMethodDecl>(D);
1453  if (!Method)
1454  return;
1455 
1456  SmallVector<const ObjCMethodDecl *, 8> OverDecls;
1457  Method->getOverriddenMethods(OverDecls);
1458  Overridden.append(OverDecls.begin(), OverDecls.end());
1459 }
1460 
1462  assert(!Import->NextLocalImport && "Import declaration already in the chain");
1463  assert(!Import->isFromASTFile() && "Non-local import declaration");
1464  if (!FirstLocalImport) {
1465  FirstLocalImport = Import;
1466  LastLocalImport = Import;
1467  return;
1468  }
1469 
1470  LastLocalImport->NextLocalImport = Import;
1471  LastLocalImport = Import;
1472 }
1473 
1474 //===----------------------------------------------------------------------===//
1475 // Type Sizing and Analysis
1476 //===----------------------------------------------------------------------===//
1477 
1478 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1479 /// scalar floating point type.
1480 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1481  const auto *BT = T->getAs<BuiltinType>();
1482  assert(BT && "Not a floating point type!");
1483  switch (BT->getKind()) {
1484  default: llvm_unreachable("Not a floating point type!");
1485  case BuiltinType::Float16:
1486  case BuiltinType::Half:
1487  return Target->getHalfFormat();
1488  case BuiltinType::Float: return Target->getFloatFormat();
1489  case BuiltinType::Double: return Target->getDoubleFormat();
1490  case BuiltinType::LongDouble: return Target->getLongDoubleFormat();
1491  case BuiltinType::Float128: return Target->getFloat128Format();
1492  }
1493 }
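// For example, getFloatTypeSemantics(DoubleTy) is typically
// llvm::APFloat::IEEEdouble(), while the semantics returned for 'long double'
// vary by target (x87 extended, IEEE quad, or PPC double-double).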
1494 
1495 CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
1496  unsigned Align = Target->getCharWidth();
1497 
1498  bool UseAlignAttrOnly = false;
1499  if (unsigned AlignFromAttr = D->getMaxAlignment()) {
1500  Align = AlignFromAttr;
1501 
1502  // __attribute__((aligned)) can increase or decrease alignment
1503  // *except* on a struct or struct member, where it only increases
1504  // alignment unless 'packed' is also specified.
1505  //
1506  // It is an error for alignas to decrease alignment, so we can
1507  // ignore that possibility; Sema should diagnose it.
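  // For example, assuming a 4-byte, 4-byte-aligned 'int':
  //   struct S { int i __attribute__((aligned(2))); };          // i stays 4-byte aligned
  //   struct T { int i __attribute__((aligned(2), packed)); };  // i drops to 2-byte alignment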
1508  if (isa<FieldDecl>(D)) {
1509  UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
1510  cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
1511  } else {
1512  UseAlignAttrOnly = true;
1513  }
1514  }
1515  else if (isa<FieldDecl>(D))
1516  UseAlignAttrOnly =
1517  D->hasAttr<PackedAttr>() ||
1518  cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
1519 
1520  // If we're using the align attribute only, just ignore everything
1521  // else about the declaration and its type.
1522  if (UseAlignAttrOnly) {
1523  // do nothing
1524  } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
1525  QualType T = VD->getType();
1526  if (const auto *RT = T->getAs<ReferenceType>()) {
1527  if (ForAlignof)
1528  T = RT->getPointeeType();
1529  else
1530  T = getPointerType(RT->getPointeeType());
1531  }
1532  QualType BaseT = getBaseElementType(T);
1533  if (T->isFunctionType())
1534  Align = getTypeInfoImpl(T.getTypePtr()).Align;
1535  else if (!BaseT->isIncompleteType()) {
1536  // Adjust alignments of declarations with array type by the
1537  // large-array alignment on the target.
1538  if (const ArrayType *arrayType = getAsArrayType(T)) {
1539  unsigned MinWidth = Target->getLargeArrayMinWidth();
1540  if (!ForAlignof && MinWidth) {
1541  if (isa<VariableArrayType>(arrayType))
1542  Align = std::max(Align, Target->getLargeArrayAlign());
1543  else if (isa<ConstantArrayType>(arrayType) &&
1544  MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
1545  Align = std::max(Align, Target->getLargeArrayAlign());
1546  }
1547  }
1548  Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
1549  if (BaseT.getQualifiers().hasUnaligned())
1550  Align = Target->getCharWidth();
1551  if (const auto *VD = dyn_cast<VarDecl>(D)) {
1552  if (VD->hasGlobalStorage() && !ForAlignof)
1553  Align = std::max(Align, getTargetInfo().getMinGlobalAlign());
1554  }
1555  }
1556 
1557  // Fields can be subject to extra alignment constraints, like if
1558  // the field is packed, the struct is packed, or the struct has a
1559  // max-field-alignment constraint (#pragma pack). So calculate
1560  // the actual alignment of the field within the struct, and then
1561  // (as we're expected to) constrain that by the alignment of the type.
1562  if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
1563  const RecordDecl *Parent = Field->getParent();
1564  // We can only produce a sensible answer if the record is valid.
1565  if (!Parent->isInvalidDecl()) {
1566  const ASTRecordLayout &Layout = getASTRecordLayout(Parent);
1567 
1568  // Start with the record's overall alignment.
1569  unsigned FieldAlign = toBits(Layout.getAlignment());
1570 
1571  // Use the GCD of that and the offset within the record.
1572  uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
1573  if (Offset > 0) {
1574  // Alignment is always a power of 2, so the GCD will be a power of 2,
1575  // which means we get to do this crazy thing instead of Euclid's.
1576  uint64_t LowBitOfOffset = Offset & (~Offset + 1);
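  // For example, Offset == 40 (binary 101000) yields LowBitOfOffset == 8: the
  // largest power of two dividing the offset, and hence the gcd of the offset
  // and the (power-of-two) record alignment.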
1577  if (LowBitOfOffset < FieldAlign)
1578  FieldAlign = static_cast<unsigned>(LowBitOfOffset);
1579  }
1580 
1581  Align = std::min(Align, FieldAlign);
1582  }
1583  }
1584  }
1585 
1586  return toCharUnitsFromBits(Align);
1587 }
1588 
1589 // getTypeInfoDataSizeInChars - Return the size of a type, in
1590 // chars. If the type is a record, its data size is returned. This is
1591 // the size of the memcpy that's performed when assigning this type
1592 // using a trivial copy/move assignment operator.
1593 std::pair<CharUnits, CharUnits>
1595  std::pair<CharUnits, CharUnits> sizeAndAlign = getTypeInfoInChars(T);
1596 
1597  // In C++, objects can sometimes be allocated into the tail padding
1598  // of a base-class subobject. We decide whether that's possible
1599  // during class layout, so here we can just trust the layout results.
1600  if (getLangOpts().CPlusPlus) {
1601  if (const auto *RT = T->getAs<RecordType>()) {
1602  const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
1603  sizeAndAlign.first = layout.getDataSize();
1604  }
1605  }
1606 
1607  return sizeAndAlign;
1608 }
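// For example, on an Itanium-ABI target a non-POD class such as
//   struct B { int i; char c; B(); };   // sizeof(B) == 8, data size == 5
// allows a derived class to reuse B's tail padding, so only the 5-byte data
// size is copied when a B subobject is assigned via the trivial operator.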
1609 
1610 /// getConstantArrayInfoInChars - Performing the computation in CharUnits
1611 /// instead of in bits prevents overflowing the uint64_t for some large arrays.
1612 std::pair<CharUnits, CharUnits>
1614  const ConstantArrayType *CAT) {
1615  std::pair<CharUnits, CharUnits> EltInfo =
1616  Context.getTypeInfoInChars(CAT->getElementType());
1617  uint64_t Size = CAT->getSize().getZExtValue();
1618  assert((Size == 0 || static_cast<uint64_t>(EltInfo.first.getQuantity()) <=
1619  (uint64_t)(-1)/Size) &&
1620  "Overflow in array type char size evaluation");
1621  uint64_t Width = EltInfo.first.getQuantity() * Size;
1622  unsigned Align = EltInfo.second.getQuantity();
1623  if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
1624  Context.getTargetInfo().getPointerWidth(0) == 64)
1625  Width = llvm::alignTo(Width, Align);
1626  return std::make_pair(CharUnits::fromQuantity(Width),
1627  CharUnits::fromQuantity(Align));
1628 }
1629 
1630 std::pair<CharUnits, CharUnits>
1632  if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
1633  return getConstantArrayInfoInChars(*this, CAT);
1634  TypeInfo Info = getTypeInfo(T);
1635  return std::make_pair(toCharUnitsFromBits(Info.Width),
1636  toCharUnitsFromBits(Info.Align));
1637 }
1638 
1639 std::pair<CharUnits, CharUnits>
1641  return getTypeInfoInChars(T.getTypePtr());
1642 }
1643 
1645  return getTypeInfo(T).AlignIsRequired;
1646 }
1647 
1649  return isAlignmentRequired(T.getTypePtr());
1650 }
1651 
1653  // An alignment on a typedef overrides anything else.
1654  if (const auto *TT = T->getAs<TypedefType>())
1655  if (unsigned Align = TT->getDecl()->getMaxAlignment())
1656  return Align;
1657 
1658  // If we have an (array of) complete type, we're done.
1659  T = getBaseElementType(T);
1660  if (!T->isIncompleteType())
1661  return getTypeAlign(T);
1662 
1663  // If we had an array type, its element type might be a typedef
1664  // type with an alignment attribute.
1665  if (const auto *TT = T->getAs<TypedefType>())
1666  if (unsigned Align = TT->getDecl()->getMaxAlignment())
1667  return Align;
1668 
1669  // Otherwise, see if the declaration of the type had an attribute.
1670  if (const auto *TT = T->getAs<TagType>())
1671  return TT->getDecl()->getMaxAlignment();
1672 
1673  return 0;
1674 }
1675 
1677  TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
1678  if (I != MemoizedTypeInfo.end())
1679  return I->second;
1680 
1681  // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
1682  TypeInfo TI = getTypeInfoImpl(T);
1683  MemoizedTypeInfo[T] = TI;
1684  return TI;
1685 }
1686 
1687 /// getTypeInfoImpl - Return the size of the specified type, in bits. This
1688 /// method does not work on incomplete types.
1689 ///
1690 /// FIXME: Pointers into different addr spaces could have different sizes and
1691 /// alignment requirements: getPointerInfo should take an AddrSpace, this
1692 /// should take a QualType, &c.
1693 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
1694  uint64_t Width = 0;
1695  unsigned Align = 8;
1696  bool AlignIsRequired = false;
1697  unsigned AS = 0;
1698  switch (T->getTypeClass()) {
1699 #define TYPE(Class, Base)
1700 #define ABSTRACT_TYPE(Class, Base)
1701 #define NON_CANONICAL_TYPE(Class, Base)
1702 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
1703 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
1704  case Type::Class: \
1705  assert(!T->isDependentType() && "should not see dependent types here"); \
1706  return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
1707 #include "clang/AST/TypeNodes.def"
1708  llvm_unreachable("Should not see dependent types");
1709 
1710  case Type::FunctionNoProto:
1711  case Type::FunctionProto:
1712  // GCC extension: alignof(function) = 32 bits
1713  Width = 0;
1714  Align = 32;
1715  break;
1716 
1717  case Type::IncompleteArray:
1718  case Type::VariableArray:
1719  Width = 0;
1720  Align = getTypeAlign(cast<ArrayType>(T)->getElementType());
1721  break;
1722 
1723  case Type::ConstantArray: {
1724  const auto *CAT = cast<ConstantArrayType>(T);
1725 
1726  TypeInfo EltInfo = getTypeInfo(CAT->getElementType());
1727  uint64_t Size = CAT->getSize().getZExtValue();
1728  assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
1729  "Overflow in array type bit size evaluation");
1730  Width = EltInfo.Width * Size;
1731  Align = EltInfo.Align;
1732  if (!getTargetInfo().getCXXABI().isMicrosoft() ||
1733  getTargetInfo().getPointerWidth(0) == 64)
1734  Width = llvm::alignTo(Width, Align);
1735  break;
1736  }
1737  case Type::ExtVector:
1738  case Type::Vector: {
1739  const auto *VT = cast<VectorType>(T);
1740  TypeInfo EltInfo = getTypeInfo(VT->getElementType());
1741  Width = EltInfo.Width * VT->getNumElements();
1742  Align = Width;
1743  // If the alignment is not a power of 2, round up to the next power of 2.
1744  // This happens for non-power-of-2 length vectors.
1745  if (Align & (Align-1)) {
1746  Align = llvm::NextPowerOf2(Align);
1747  Width = llvm::alignTo(Width, Align);
1748  }
1749  // Adjust the alignment based on the target max.
1750  uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
1751  if (TargetVectorAlign && TargetVectorAlign < Align)
1752  Align = TargetVectorAlign;
1753  break;
1754  }
1755 
1756  case Type::Builtin:
1757  switch (cast<BuiltinType>(T)->getKind()) {
1758  default: llvm_unreachable("Unknown builtin type!");
1759  case BuiltinType::Void:
1760  // GCC extension: alignof(void) = 8 bits.
1761  Width = 0;
1762  Align = 8;
1763  break;
1764  case BuiltinType::Bool:
1765  Width = Target->getBoolWidth();
1766  Align = Target->getBoolAlign();
1767  break;
1768  case BuiltinType::Char_S:
1769  case BuiltinType::Char_U:
1770  case BuiltinType::UChar:
1771  case BuiltinType::SChar:
1772  case BuiltinType::Char8:
1773  Width = Target->getCharWidth();
1774  Align = Target->getCharAlign();
1775  break;
1776  case BuiltinType::WChar_S:
1777  case BuiltinType::WChar_U:
1778  Width = Target->getWCharWidth();
1779  Align = Target->getWCharAlign();
1780  break;
1781  case BuiltinType::Char16:
1782  Width = Target->getChar16Width();
1783  Align = Target->getChar16Align();
1784  break;
1785  case BuiltinType::Char32:
1786  Width = Target->getChar32Width();
1787  Align = Target->getChar32Align();
1788  break;
1789  case BuiltinType::UShort:
1790  case BuiltinType::Short:
1791  Width = Target->getShortWidth();
1792  Align = Target->getShortAlign();
1793  break;
1794  case BuiltinType::UInt:
1795  case BuiltinType::Int:
1796  Width = Target->getIntWidth();
1797  Align = Target->getIntAlign();
1798  break;
1799  case BuiltinType::ULong:
1800  case BuiltinType::Long:
1801  Width = Target->getLongWidth();
1802  Align = Target->getLongAlign();
1803  break;
1804  case BuiltinType::ULongLong:
1805  case BuiltinType::LongLong:
1806  Width = Target->getLongLongWidth();
1807  Align = Target->getLongLongAlign();
1808  break;
1809  case BuiltinType::Int128:
1810  case BuiltinType::UInt128:
1811  Width = 128;
1812  Align = 128; // int128_t is 128-bit aligned on all targets.
1813  break;
1814  case BuiltinType::ShortAccum:
1815  case BuiltinType::UShortAccum:
1816  case BuiltinType::SatShortAccum:
1817  case BuiltinType::SatUShortAccum:
1818  Width = Target->getShortAccumWidth();
1819  Align = Target->getShortAccumAlign();
1820  break;
1821  case BuiltinType::Accum:
1822  case BuiltinType::UAccum:
1823  case BuiltinType::SatAccum:
1824  case BuiltinType::SatUAccum:
1825  Width = Target->getAccumWidth();
1826  Align = Target->getAccumAlign();
1827  break;
1828  case BuiltinType::LongAccum:
1829  case BuiltinType::ULongAccum:
1830  case BuiltinType::SatLongAccum:
1831  case BuiltinType::SatULongAccum:
1832  Width = Target->getLongAccumWidth();
1833  Align = Target->getLongAccumAlign();
1834  break;
1835  case BuiltinType::ShortFract:
1836  case BuiltinType::UShortFract:
1837  case BuiltinType::SatShortFract:
1838  case BuiltinType::SatUShortFract:
1839  Width = Target->getShortFractWidth();
1840  Align = Target->getShortFractAlign();
1841  break;
1842  case BuiltinType::Fract:
1843  case BuiltinType::UFract:
1844  case BuiltinType::SatFract:
1845  case BuiltinType::SatUFract:
1846  Width = Target->getFractWidth();
1847  Align = Target->getFractAlign();
1848  break;
1849  case BuiltinType::LongFract:
1850  case BuiltinType::ULongFract:
1851  case BuiltinType::SatLongFract:
1852  case BuiltinType::SatULongFract:
1853  Width = Target->getLongFractWidth();
1854  Align = Target->getLongFractAlign();
1855  break;
1856  case BuiltinType::Float16:
1857  case BuiltinType::Half:
1858  Width = Target->getHalfWidth();
1859  Align = Target->getHalfAlign();
1860  break;
1861  case BuiltinType::Float:
1862  Width = Target->getFloatWidth();
1863  Align = Target->getFloatAlign();
1864  break;
1865  case BuiltinType::Double:
1866  Width = Target->getDoubleWidth();
1867  Align = Target->getDoubleAlign();
1868  break;
1869  case BuiltinType::LongDouble:
1870  Width = Target->getLongDoubleWidth();
1871  Align = Target->getLongDoubleAlign();
1872  break;
1873  case BuiltinType::Float128:
1874  Width = Target->getFloat128Width();
1875  Align = Target->getFloat128Align();
1876  break;
1877  case BuiltinType::NullPtr:
1878  Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
1879  Align = Target->getPointerAlign(0); // == sizeof(void*)
1880  break;
1881  case BuiltinType::ObjCId:
1882  case BuiltinType::ObjCClass:
1883  case BuiltinType::ObjCSel:
1884  Width = Target->getPointerWidth(0);
1885  Align = Target->getPointerAlign(0);
1886  break;
1887  case BuiltinType::OCLSampler:
1888  case BuiltinType::OCLEvent:
1889  case BuiltinType::OCLClkEvent:
1890  case BuiltinType::OCLQueue:
1891  case BuiltinType::OCLReserveID:
1892 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
1893  case BuiltinType::Id:
1894 #include "clang/Basic/OpenCLImageTypes.def"
1895  AS = getTargetAddressSpace(
1897  Width = Target->getPointerWidth(AS);
1898  Align = Target->getPointerAlign(AS);
1899  break;
1900  }
1901  break;
1902  case Type::ObjCObjectPointer:
1903  Width = Target->getPointerWidth(0);
1904  Align = Target->getPointerAlign(0);
1905  break;
1906  case Type::BlockPointer:
1907  AS = getTargetAddressSpace(cast<BlockPointerType>(T)->getPointeeType());
1908  Width = Target->getPointerWidth(AS);
1909  Align = Target->getPointerAlign(AS);
1910  break;
1911  case Type::LValueReference:
1912  case Type::RValueReference:
1913  // alignof and sizeof should never enter this code path here, so we go
1914  // the pointer route.
1915  AS = getTargetAddressSpace(cast<ReferenceType>(T)->getPointeeType());
1916  Width = Target->getPointerWidth(AS);
1917  Align = Target->getPointerAlign(AS);
1918  break;
1919  case Type::Pointer:
1920  AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType());
1921  Width = Target->getPointerWidth(AS);
1922  Align = Target->getPointerAlign(AS);
1923  break;
1924  case Type::MemberPointer: {
1925  const auto *MPT = cast<MemberPointerType>(T);
1926  CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
1927  Width = MPI.Width;
1928  Align = MPI.Align;
1929  break;
1930  }
1931  case Type::Complex: {
1932  // Complex types have the same alignment as their elements, but twice the
1933  // size.
1934  TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
1935  Width = EltInfo.Width * 2;
1936  Align = EltInfo.Align;
1937  break;
1938  }
1939  case Type::ObjCObject:
1940  return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
1941  case Type::Adjusted:
1942  case Type::Decayed:
1943  return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
1944  case Type::ObjCInterface: {
1945  const auto *ObjCI = cast<ObjCInterfaceType>(T);
1946  const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
1947  Width = toBits(Layout.getSize());
1948  Align = toBits(Layout.getAlignment());
1949  break;
1950  }
1951  case Type::Record:
1952  case Type::Enum: {
1953  const auto *TT = cast<TagType>(T);
1954 
1955  if (TT->getDecl()->isInvalidDecl()) {
1956  Width = 8;
1957  Align = 8;
1958  break;
1959  }
1960 
1961  if (const auto *ET = dyn_cast<EnumType>(TT)) {
1962  const EnumDecl *ED = ET->getDecl();
1963  TypeInfo Info =
1964  getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType());
1965  if (unsigned AttrAlign = ED->getMaxAlignment()) {
1966  Info.Align = AttrAlign;
1967  Info.AlignIsRequired = true;
1968  }
1969  return Info;
1970  }
1971 
1972  const auto *RT = cast<RecordType>(TT);
1973  const RecordDecl *RD = RT->getDecl();
1974  const ASTRecordLayout &Layout = getASTRecordLayout(RD);
1975  Width = toBits(Layout.getSize());
1976  Align = toBits(Layout.getAlignment());
1977  AlignIsRequired = RD->hasAttr<AlignedAttr>();
1978  break;
1979  }
1980 
1981  case Type::SubstTemplateTypeParm:
1982  return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
1983  getReplacementType().getTypePtr());
1984 
1985  case Type::Auto:
1986  case Type::DeducedTemplateSpecialization: {
1987  const auto *A = cast<DeducedType>(T);
1988  assert(!A->getDeducedType().isNull() &&
1989  "cannot request the size of an undeduced or dependent auto type");
1990  return getTypeInfo(A->getDeducedType().getTypePtr());
1991  }
1992 
1993  case Type::Paren:
1994  return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
1995 
1996  case Type::ObjCTypeParam:
1997  return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());
1998 
1999  case Type::Typedef: {
2000  const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl();
2001  TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
2002  // If the typedef has an aligned attribute on it, it overrides any computed
2003  // alignment we have. This violates the GCC documentation (which says that
2004  // attribute(aligned) can only round up) but matches its implementation.
2005  if (unsigned AttrAlign = Typedef->getMaxAlignment()) {
2006  Align = AttrAlign;
2007  AlignIsRequired = true;
2008  } else {
2009  Align = Info.Align;
2010  AlignIsRequired = Info.AlignIsRequired;
2011  }
2012  Width = Info.Width;
2013  break;
2014  }
2015 
2016  case Type::Elaborated:
2017  return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
2018 
2019  case Type::Attributed:
2020  return getTypeInfo(
2021  cast<AttributedType>(T)->getEquivalentType().getTypePtr());
2022 
2023  case Type::Atomic: {
2024  // Start with the base type information.
2025  TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
2026  Width = Info.Width;
2027  Align = Info.Align;
2028 
2029  if (!Width) {
2030  // An otherwise zero-sized type should still generate an
2031  // atomic operation.
2032  Width = Target->getCharWidth();
2033  assert(Align);
2034  } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2035  // If the size of the type doesn't exceed the platform's max
2036  // atomic promotion width, make the size and alignment more
2037  // favorable to atomic operations:
2038 
2039  // Round the size up to a power of 2.
2040  if (!llvm::isPowerOf2_64(Width))
2041  Width = llvm::NextPowerOf2(Width);
2042 
2043  // Set the alignment equal to the size.
2044  Align = static_cast<unsigned>(Width);
2045  }
2046  }
2047  break;
2048 
2049  case Type::Pipe:
2050  Width = Target->getPointerWidth(getTargetAddressSpace(LangAS::opencl_global));
2051  Align = Target->getPointerAlign(getTargetAddressSpace(LangAS::opencl_global));
2052  break;
2053  }
2054 
2055  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2056  return TypeInfo(Width, Align, AlignIsRequired);
2057 }
2058 
2059 unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
2060  unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
2061  // Target ppc64 with QPX: simd default alignment for pointer to double is 32.
2062  if ((getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64 ||
2063  getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64le) &&
2064  getTargetInfo().getABI() == "elfv1-qpx" &&
2065  T->isSpecificBuiltinType(BuiltinType::Double))
2066  SimdAlign = 256;
2067  return SimdAlign;
2068 }
2069 
2070 /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2071 CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
2072  return CharUnits::fromQuantity(BitSize / getCharWidth());
2073 }
2074 
2075 /// toBits - Convert a size in characters to a size in bits.
2076 int64_t ASTContext::toBits(CharUnits CharSize) const {
2077  return CharSize.getQuantity() * getCharWidth();
2078 }
2079 
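// Illustrative usage sketch (hypothetical helper): the two conversion helpers
// above are inverses for any width that is a whole number of chars, so a type
// size expressed in bits survives a round trip through CharUnits. "Ctx" and
// "T" are assumed inputs.
static bool exampleBitsRoundTripThroughCharUnits(const ASTContext &Ctx,
                                                 QualType T) {
  uint64_t Bits = Ctx.getTypeSize(T);              // size in bits
  CharUnits Chars = Ctx.toCharUnitsFromBits(Bits); // bits -> chars
  return Ctx.toBits(Chars) == (int64_t)Bits;       // chars -> bits again
}
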
2080 /// getTypeSizeInChars - Return the size of the specified type, in characters.
2081 /// This method does not work on incomplete types.
2082 CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
2083  return getTypeInfoInChars(T).first;
2084 }
2085 CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
2086  return getTypeInfoInChars(T).first;
2087 }
2088 
2089 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2090 /// characters. This method does not work on incomplete types.
2091 CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
2092  return toCharUnitsFromBits(getTypeAlign(T));
2093 }
2094 CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
2095  return toCharUnitsFromBits(getTypeAlign(T));
2096 }
2097 
2098 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified
2099 /// type for the current target in bits. This can be different from the ABI
2100 /// alignment in cases where it is beneficial for performance to overalign
2101 /// a data type.
2102 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
2103  TypeInfo TI = getTypeInfo(T);
2104  unsigned ABIAlign = TI.Align;
2105 
2106  T = T->getBaseElementTypeUnsafe();
2107 
2108  // The preferred alignment of member pointers is that of a pointer.
2109  if (T->isMemberPointerType())
2110  return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
2111 
2112  if (!Target->allowsLargerPreferedTypeAlignment())
2113  return ABIAlign;
2114 
2115  // Double and long long should be naturally aligned if possible.
2116  if (const auto *CT = T->getAs<ComplexType>())
2117  T = CT->getElementType().getTypePtr();
2118  if (const auto *ET = T->getAs<EnumType>())
2119  T = ET->getDecl()->getIntegerType().getTypePtr();
2120  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
2121  T->isSpecificBuiltinType(BuiltinType::LongLong) ||
2122  T->isSpecificBuiltinType(BuiltinType::ULongLong))
2123  // Don't increase the alignment if an alignment attribute was specified on a
2124  // typedef declaration.
2125  if (!TI.AlignIsRequired)
2126  return std::max(ABIAlign, (unsigned)getTypeSize(T));
2127 
2128  return ABIAlign;
2129 }
2130 
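// Illustrative usage sketch (hypothetical helper): on targets such as 32-bit
// x86 Linux the ABI alignment of 'double' is 32 bits while the preferred
// alignment is 64, which is exactly the gap this function models. "Ctx" is an
// assumed ASTContext reference.
static bool examplePreferredIsAtLeastABIAlign(const ASTContext &Ctx) {
  unsigned ABIAlignBits = Ctx.getTypeAlign(Ctx.DoubleTy);
  unsigned PreferredBits = Ctx.getPreferredTypeAlign(Ctx.DoubleTy.getTypePtr());
  return PreferredBits >= ABIAlignBits;
}
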
2131 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2132 /// for __attribute__((aligned)) on this target, to be used if no alignment
2133 /// value is specified.
2134 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
2135  return getTargetInfo().getDefaultAlignForAttributeAligned();
2136 }
2137 
2138 /// getAlignOfGlobalVar - Return the alignment in bits that should be given
2139 /// to a global variable of the specified type.
2140 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
2141  return std::max(getTypeAlign(T), getTargetInfo().getMinGlobalAlign());
2142 }
2143 
2144 /// getAlignOfGlobalVarInChars - Return the alignment in characters that
2145 /// should be given to a global variable of the specified type.
2146 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const {
2147  return toCharUnitsFromBits(getAlignOfGlobalVar(T));
2148 }
2149 
2152  const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
2153  while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2154  Offset += Layout->getBaseClassOffset(Base);
2155  Layout = &getASTRecordLayout(Base);
2156  }
2157  return Offset;
2158 }
2159 
2160 /// DeepCollectObjCIvars -
2161 /// This routine first collects all declared, but not synthesized, ivars in
2162 /// the superclass and then collects all ivars, including those synthesized,
2163 /// for the current class. It is used when implementing the current class,
2164 /// once all ivars, declared and synthesized, are known.
2165 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
2166  bool leafClass,
2167  SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
2168  if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2169  DeepCollectObjCIvars(SuperClass, false, Ivars);
2170  if (!leafClass) {
2171  for (const auto *I : OI->ivars())
2172  Ivars.push_back(I);
2173  } else {
2174  auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2175  for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2176  Iv= Iv->getNextIvar())
2177  Ivars.push_back(Iv);
2178  }
2179 }
2180 
2181 /// CollectInheritedProtocols - Collect all protocols in current class and
2182 /// those inherited by it.
2183 void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
2184  llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
2185  if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
2186  // We can use protocol_iterator here instead of
2187  // all_referenced_protocol_iterator since we are walking all categories.
2188  for (auto *Proto : OI->all_referenced_protocols()) {
2189  CollectInheritedProtocols(Proto, Protocols);
2190  }
2191 
2192  // Categories of this Interface.
2193  for (const auto *Cat : OI->visible_categories())
2194  CollectInheritedProtocols(Cat, Protocols);
2195 
2196  if (ObjCInterfaceDecl *SD = OI->getSuperClass())
2197  while (SD) {
2198  CollectInheritedProtocols(SD, Protocols);
2199  SD = SD->getSuperClass();
2200  }
2201  } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
2202  for (auto *Proto : OC->protocols()) {
2203  CollectInheritedProtocols(Proto, Protocols);
2204  }
2205  } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
2206  // Insert the protocol.
2207  if (!Protocols.insert(
2208  const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
2209  return;
2210 
2211  for (auto *Proto : OP->protocols())
2212  CollectInheritedProtocols(Proto, Protocols);
2213  }
2214 }
2215 
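// Illustrative usage sketch (hypothetical helper): collecting the transitive
// protocol set for an interface, including protocols picked up via categories
// and superclasses. "Ctx" and "IFace" are assumed inputs.
static void exampleCollectAllProtocols(
    ASTContext &Ctx, const ObjCInterfaceDecl *IFace,
    llvm::SmallPtrSet<ObjCProtocolDecl *, 8> &Protocols) {
  Ctx.CollectInheritedProtocols(IFace, Protocols);
}
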
2216 static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
2217  const RecordDecl *RD) {
2218  assert(RD->isUnion() && "Must be union type");
2219  CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());
2220 
2221  for (const auto *Field : RD->fields()) {
2222  if (!Context.hasUniqueObjectRepresentations(Field->getType()))
2223  return false;
2224  CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType());
2225  if (FieldSize != UnionSize)
2226  return false;
2227  }
2228  return !RD->field_empty();
2229 }
2230 
2231 static bool isStructEmpty(QualType Ty) {
2232  const RecordDecl *RD = Ty->castAs<RecordType>()->getDecl();
2233 
2234  if (!RD->field_empty())
2235  return false;
2236 
2237  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD))
2238  return ClassDecl->isEmpty();
2239 
2240  return true;
2241 }
2242 
2243 static llvm::Optional<int64_t>
2244 structHasUniqueObjectRepresentations(const ASTContext &Context,
2245  const RecordDecl *RD) {
2246  assert(!RD->isUnion() && "Must be struct/class type");
2247  const auto &Layout = Context.getASTRecordLayout(RD);
2248 
2249  int64_t CurOffsetInBits = 0;
2250  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) {
2251  if (ClassDecl->isDynamicClass())
2252  return llvm::None;
2253 
2255  for (const auto Base : ClassDecl->bases()) {
2256  // Empty types can be inherited from, and non-empty types can potentially
2257  // have tail padding, so just make sure there isn't an error.
2258  if (!isStructEmpty(Base.getType())) {
2260  Context, Base.getType()->getAs<RecordType>()->getDecl());
2261  if (!Size)
2262  return llvm::None;
2263  Bases.emplace_back(Base.getType(), Size.getValue());
2264  }
2265  }
2266 
2267  llvm::sort(
2268  Bases.begin(), Bases.end(), [&](const std::pair<QualType, int64_t> &L,
2269  const std::pair<QualType, int64_t> &R) {
2270  return Layout.getBaseClassOffset(L.first->getAsCXXRecordDecl()) <
2271  Layout.getBaseClassOffset(R.first->getAsCXXRecordDecl());
2272  });
2273 
2274  for (const auto Base : Bases) {
2275  int64_t BaseOffset = Context.toBits(
2276  Layout.getBaseClassOffset(Base.first->getAsCXXRecordDecl()));
2277  int64_t BaseSize = Base.second;
2278  if (BaseOffset != CurOffsetInBits)
2279  return llvm::None;
2280  CurOffsetInBits = BaseOffset + BaseSize;
2281  }
2282  }
2283 
2284  for (const auto *Field : RD->fields()) {
2285  if (!Field->getType()->isReferenceType() &&
2286  !Context.hasUniqueObjectRepresentations(Field->getType()))
2287  return llvm::None;
2288 
2289  int64_t FieldSizeInBits =
2290  Context.toBits(Context.getTypeSizeInChars(Field->getType()));
2291  if (Field->isBitField()) {
2292  int64_t BitfieldSize = Field->getBitWidthValue(Context);
2293 
2294  if (BitfieldSize > FieldSizeInBits)
2295  return llvm::None;
2296  FieldSizeInBits = BitfieldSize;
2297  }
2298 
2299  int64_t FieldOffsetInBits = Context.getFieldOffset(Field);
2300 
2301  if (FieldOffsetInBits != CurOffsetInBits)
2302  return llvm::None;
2303 
2304  CurOffsetInBits = FieldSizeInBits + FieldOffsetInBits;
2305  }
2306 
2307  return CurOffsetInBits;
2308 }
2309 
2310 bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const {
2311  // C++17 [meta.unary.prop]:
2312  // The predicate condition for a template specialization
2313  // has_unique_object_representations<T> shall be
2314  // satisfied if and only if:
2315  // (9.1) - T is trivially copyable, and
2316  // (9.2) - any two objects of type T with the same value have the same
2317  // object representation, where two objects
2318  // of array or non-union class type are considered to have the same value
2319  // if their respective sequences of
2320  // direct subobjects have the same values, and two objects of union type
2321  // are considered to have the same
2322  // value if they have the same active member and the corresponding members
2323  // have the same value.
2324  // The set of scalar types for which this condition holds is
2325  // implementation-defined. [ Note: If a type has padding
2326  // bits, the condition does not hold; otherwise, the condition holds true
2327  // for unsigned integral types. -- end note ]
2328  assert(!Ty.isNull() && "Null QualType sent to unique object rep check");
2329 
2330  // Arrays are unique only if their element type is unique.
2331  if (Ty->isArrayType())
2332  return hasUniqueObjectRepresentations(getBaseElementType(Ty));
2333 
2334  // (9.1) - T is trivially copyable...
2335  if (!Ty.isTriviallyCopyableType(*this))
2336  return false;
2337 
2338  // All integrals and enums are unique.
2339  if (Ty->isIntegralOrEnumerationType())
2340  return true;
2341 
2342  // All other pointers are unique.
2343  if (Ty->isPointerType())
2344  return true;
2345 
2346  if (Ty->isMemberPointerType()) {
2347  const auto *MPT = Ty->getAs<MemberPointerType>();
2348  return !ABI->getMemberPointerInfo(MPT).HasPadding;
2349  }
2350 
2351  if (Ty->isRecordType()) {
2352  const RecordDecl *Record = Ty->getAs<RecordType>()->getDecl();
2353 
2354  if (Record->isInvalidDecl())
2355  return false;
2356 
2357  if (Record->isUnion())
2358  return unionHasUniqueObjectRepresentations(*this, Record);
2359 
2360  Optional<int64_t> StructSize =
2361  structHasUniqueObjectRepresentations(*this, Record);
2362 
2363  return StructSize &&
2364  StructSize.getValue() == static_cast<int64_t>(getTypeSize(Ty));
2365  }
2366 
2367  // FIXME: More cases to handle here (list by rsmith):
2368  // vectors (careful about, eg, vector of 3 foo)
2369  // _Complex int and friends
2370  // _Atomic T
2371  // Obj-C block pointers
2372  // Obj-C object pointers
2373  // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
2374  // clk_event_t, queue_t, reserve_id_t)
2375  // There're also Obj-C class types and the Obj-C selector type, but I think it
2376  // makes sense for those to return false here.
2377 
2378  return false;
2379 }
2380 
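// Illustrative usage sketch (hypothetical helper): unsigned integer types have
// no padding bits on the supported targets, so the trait holds for them; a
// record such as 'struct { char c; int i; }' normally has interior padding and
// would not satisfy it. "Ctx" is an assumed ASTContext reference.
static bool exampleUnsignedIntHasUniqueRepresentation(const ASTContext &Ctx) {
  return Ctx.hasUniqueObjectRepresentations(Ctx.UnsignedIntTy);
}
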
2381 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
2382  unsigned count = 0;
2383  // Count ivars declared in class extension.
2384  for (const auto *Ext : OI->known_extensions())
2385  count += Ext->ivar_size();
2386 
2387  // Count ivar defined in this class's implementation. This
2388  // includes synthesized ivars.
2389  if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
2390  count += ImplDecl->ivar_size();
2391 
2392  return count;
2393 }
2394 
2395 bool ASTContext::isSentinelNullExpr(const Expr *E) {
2396  if (!E)
2397  return false;
2398 
2399  // nullptr_t is always treated as null.
2400  if (E->getType()->isNullPtrType()) return true;
2401 
2402  if (E->getType()->isAnyPointerType() &&
2405  return true;
2406 
2407  // Unfortunately, __null has type 'int'.
2408  if (isa<GNUNullExpr>(E)) return true;
2409 
2410  return false;
2411 }
2412 
2413 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none
2414 /// exists.
2415 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
2416  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2417  I = ObjCImpls.find(D);
2418  if (I != ObjCImpls.end())
2419  return cast<ObjCImplementationDecl>(I->second);
2420  return nullptr;
2421 }
2422 
2423 /// Get the implementation of ObjCCategoryDecl, or nullptr if none
2424 /// exists.
2425 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
2426  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2427  I = ObjCImpls.find(D);
2428  if (I != ObjCImpls.end())
2429  return cast<ObjCCategoryImplDecl>(I->second);
2430  return nullptr;
2431 }
2432 
2433 /// Set the implementation of ObjCInterfaceDecl.
2435  ObjCImplementationDecl *ImplD) {
2436  assert(IFaceD && ImplD && "Passed null params");
2437  ObjCImpls[IFaceD] = ImplD;
2438 }
2439 
2440 /// Set the implementation of ObjCCategoryDecl.
2442  ObjCCategoryImplDecl *ImplD) {
2443  assert(CatD && ImplD && "Passed null params");
2444  ObjCImpls[CatD] = ImplD;
2445 }
2446 
2447 const ObjCMethodDecl *
2449  return ObjCMethodRedecls.lookup(MD);
2450 }
2451 
2453  const ObjCMethodDecl *Redecl) {
2454  assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
2455  ObjCMethodRedecls[MD] = Redecl;
2456 }
2457 
2459  const NamedDecl *ND) const {
2460  if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
2461  return ID;
2462  if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
2463  return CD->getClassInterface();
2464  if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
2465  return IMD->getClassInterface();
2466 
2467  return nullptr;
2468 }
2469 
2470 /// Get the copy initialization expression of VarDecl, or nullptr if
2471 /// none exists.
2473  assert(VD && "Passed null params");
2474  assert(VD->hasAttr<BlocksAttr>() &&
2475  "getBlockVarCopyInits - not __block var");
2476  llvm::DenseMap<const VarDecl*, Expr*>::iterator
2477  I = BlockVarCopyInits.find(VD);
2478  return (I != BlockVarCopyInits.end()) ? I->second : nullptr;
2479 }
2480 
2481 /// Set the copy initialization expression of a block var decl.
2483  assert(VD && Init && "Passed null params");
2484  assert(VD->hasAttr<BlocksAttr>() &&
2485  "setBlockVarCopyInits - not __block var");
2486  BlockVarCopyInits[VD] = Init;
2487 }
2488 
2490  unsigned DataSize) const {
2491  if (!DataSize)
2492  DataSize = TypeLoc::getFullDataSizeForType(T);
2493  else
2494  assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
2495  "incorrect data size provided to CreateTypeSourceInfo!");
2496 
2497  auto *TInfo =
2498  (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
2499  new (TInfo) TypeSourceInfo(T);
2500  return TInfo;
2501 }
2502 
2504  SourceLocation L) const {
2506  DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
2507  return DI;
2508 }
2509 
2510 const ASTRecordLayout &
2512  return getObjCLayout(D, nullptr);
2513 }
2514 
2515 const ASTRecordLayout &
2517  const ObjCImplementationDecl *D) const {
2518  return getObjCLayout(D->getClassInterface(), D);
2519 }
2520 
2521 //===----------------------------------------------------------------------===//
2522 // Type creation/memoization methods
2523 //===----------------------------------------------------------------------===//
2524 
2525 QualType
2526 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
2527  unsigned fastQuals = quals.getFastQualifiers();
2528  quals.removeFastQualifiers();
2529 
2530  // Check if we've already instantiated this type.
2531  llvm::FoldingSetNodeID ID;
2532  ExtQuals::Profile(ID, baseType, quals);
2533  void *insertPos = nullptr;
2534  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
2535  assert(eq->getQualifiers() == quals);
2536  return QualType(eq, fastQuals);
2537  }
2538 
2539  // If the base type is not canonical, make the appropriate canonical type.
2540  QualType canon;
2541  if (!baseType->isCanonicalUnqualified()) {
2542  SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
2543  canonSplit.Quals.addConsistentQualifiers(quals);
2544  canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);
2545 
2546  // Re-find the insert position.
2547  (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
2548  }
2549 
2550  auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
2551  ExtQualNodes.InsertNode(eq, insertPos);
2552  return QualType(eq, fastQuals);
2553 }
2554 
2555 QualType ASTContext::getAddrSpaceQualType(QualType T,
2556  LangAS AddressSpace) const {
2557  QualType CanT = getCanonicalType(T);
2558  if (CanT.getAddressSpace() == AddressSpace)
2559  return T;
2560 
2561  // If we are composing extended qualifiers together, merge together
2562  // into one ExtQuals node.
2563  QualifierCollector Quals;
2564  const Type *TypeNode = Quals.strip(T);
2565 
2566  // If this type already has an address space specified, it cannot get
2567  // another one.
2568  assert(!Quals.hasAddressSpace() &&
2569  "Type cannot be in multiple addr spaces!");
2570  Quals.addAddressSpace(AddressSpace);
2571 
2572  return getExtQualType(TypeNode, Quals);
2573 }
2574 
2575 QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
2576  // If we are composing extended qualifiers together, merge together
2577  // into one ExtQuals node.
2578  QualifierCollector Quals;
2579  const Type *TypeNode = Quals.strip(T);
2580 
2581  // If the qualifier doesn't have an address space just return it.
2582  if (!Quals.hasAddressSpace())
2583  return T;
2584 
2585  Quals.removeAddressSpace();
2586 
2587  // Removal of the address space can mean there are no longer any
2588  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
2589  // or required.
2590  if (Quals.hasNonFastQualifiers())
2591  return getExtQualType(TypeNode, Quals);
2592  else
2593  return QualType(TypeNode, Quals.getFastQualifiers());
2594 }
2595 
2596 QualType ASTContext::getObjCGCQualType(QualType T,
2597  Qualifiers::GC GCAttr) const {
2598  QualType CanT = getCanonicalType(T);
2599  if (CanT.getObjCGCAttr() == GCAttr)
2600  return T;
2601 
2602  if (const auto *ptr = T->getAs<PointerType>()) {
2603  QualType Pointee = ptr->getPointeeType();
2604  if (Pointee->isAnyPointerType()) {
2605  QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
2606  return getPointerType(ResultType);
2607  }
2608  }
2609 
2610  // If we are composing extended qualifiers together, merge together
2611  // into one ExtQuals node.
2612  QualifierCollector Quals;
2613  const Type *TypeNode = Quals.strip(T);
2614 
2615  // If this type already has an ObjCGC specified, it cannot get
2616  // another one.
2617  assert(!Quals.hasObjCGCAttr() &&
2618  "Type cannot have multiple ObjCGCs!");
2619  Quals.addObjCGCAttr(GCAttr);
2620 
2621  return getExtQualType(TypeNode, Quals);
2622 }
2623 
2624 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
2625  FunctionType::ExtInfo Info) {
2626  if (T->getExtInfo() == Info)
2627  return T;
2628 
2629  QualType Result;
2630  if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
2631  Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
2632  } else {
2633  const auto *FPT = cast<FunctionProtoType>(T);
2634  FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
2635  EPI.ExtInfo = Info;
2636  Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
2637  }
2638 
2639  return cast<FunctionType>(Result.getTypePtr());
2640 }
2641 
2642 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
2643  QualType ResultType) {
2644  FD = FD->getMostRecentDecl();
2645  while (true) {
2646  const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
2647  FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
2648  FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
2649  if (FunctionDecl *Next = FD->getPreviousDecl())
2650  FD = Next;
2651  else
2652  break;
2653  }
2654  if (ASTMutationListener *L = getASTMutationListener())
2655  L->DeducedReturnType(FD, ResultType);
2656 }
2657 
2658 /// Get a function type and produce the equivalent function type with the
2659 /// specified exception specification. Type sugar that can be present on a
2660 /// declaration of a function with an exception specification is permitted
2661 /// and preserved. Other type sugar (for instance, typedefs) is not.
2662 QualType ASTContext::getFunctionTypeWithExceptionSpec(
2663  QualType Orig, FunctionProtoType::ExceptionSpecInfo ESI) {
2664  // Might have some parens.
2665  if (const auto *PT = dyn_cast<ParenType>(Orig))
2666  return getParenType(
2667  getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI));
2668 
2669  // Might have a calling-convention attribute.
2670  if (const auto *AT = dyn_cast<AttributedType>(Orig))
2671  return getAttributedType(
2672  AT->getAttrKind(),
2673  getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI),
2674  getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI));
2675 
2676  // Anything else must be a function type. Rebuild it with the new exception
2677  // specification.
2678  const auto *Proto = cast<FunctionProtoType>(Orig);
2679  return getFunctionType(
2680  Proto->getReturnType(), Proto->getParamTypes(),
2681  Proto->getExtProtoInfo().withExceptionSpec(ESI));
2682 }
2683 
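// Illustrative usage sketch (hypothetical helper): rebuild a prototyped
// function type "Fn" with a plain 'noexcept' specification while preserving
// paren and attribute sugar, as described above. "Ctx" and "Fn" are assumed
// inputs, with "Fn" (after sugar) a FunctionProtoType.
static QualType exampleMakeNoexcept(ASTContext &Ctx, QualType Fn) {
  FunctionProtoType::ExceptionSpecInfo ESI(EST_BasicNoexcept);
  return Ctx.getFunctionTypeWithExceptionSpec(Fn, ESI);
}
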
2684 bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
2685  QualType U) {
2686  return hasSameType(T, U) ||
2687  (getLangOpts().CPlusPlus17 &&
2688  hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None),
2689  getFunctionTypeWithExceptionSpec(U, EST_None)));
2690 }
2691 
2694  bool AsWritten) {
2695  // Update the type.
2696  QualType Updated =
2698  FD->setType(Updated);
2699 
2700  if (!AsWritten)
2701  return;
2702 
2703  // Update the type in the type source information too.
2704  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
2705  // If the type and the type-as-written differ, we may need to update
2706  // the type-as-written too.
2707  if (TSInfo->getType() != FD->getType())
2708  Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);
2709 
2710  // FIXME: When we get proper type location information for exceptions,
2711  // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
2712  // up the TypeSourceInfo;
2713  assert(TypeLoc::getFullDataSizeForType(Updated) ==
2714  TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
2715  "TypeLoc size mismatch from updating exception specification");
2716  TSInfo->overrideType(Updated);
2717  }
2718 }
2719 
2720 /// getComplexType - Return the uniqued reference to the type for a complex
2721 /// number with the specified element type.
2722 QualType ASTContext::getComplexType(QualType T) const {
2723  // Unique pointers, to guarantee there is only one pointer of a particular
2724  // structure.
2725  llvm::FoldingSetNodeID ID;
2726  ComplexType::Profile(ID, T);
2727 
2728  void *InsertPos = nullptr;
2729  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
2730  return QualType(CT, 0);
2731 
2732  // If the pointee type isn't canonical, this won't be a canonical type either,
2733  // so fill in the canonical type field.
2734  QualType Canonical;
2735  if (!T.isCanonical()) {
2736  Canonical = getComplexType(getCanonicalType(T));
2737 
2738  // Get the new insert position for the node we care about.
2739  ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
2740  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2741  }
2742  auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
2743  Types.push_back(New);
2744  ComplexTypes.InsertNode(New, InsertPos);
2745  return QualType(New, 0);
2746 }
2747 
2748 /// getPointerType - Return the uniqued reference to the type for a pointer to
2749 /// the specified type.
2750 QualType ASTContext::getPointerType(QualType T) const {
2751  // Unique pointers, to guarantee there is only one pointer of a particular
2752  // structure.
2753  llvm::FoldingSetNodeID ID;
2754  PointerType::Profile(ID, T);
2755 
2756  void *InsertPos = nullptr;
2757  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
2758  return QualType(PT, 0);
2759 
2760  // If the pointee type isn't canonical, this won't be a canonical type either,
2761  // so fill in the canonical type field.
2762  QualType Canonical;
2763  if (!T.isCanonical()) {
2764  Canonical = getPointerType(getCanonicalType(T));
2765 
2766  // Get the new insert position for the node we care about.
2767  PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
2768  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2769  }
2770  auto *New = new (*this, TypeAlignment) PointerType(T, Canonical);
2771  Types.push_back(New);
2772  PointerTypes.InsertNode(New, InsertPos);
2773  return QualType(New, 0);
2774 }
2775 
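// Illustrative usage sketch (hypothetical helper): because pointer types are
// uniqued through the folding set above, asking for the same pointer type
// twice yields the very same Type node, not merely an equivalent one. "Ctx"
// is an assumed ASTContext reference.
static bool examplePointerTypesAreUniqued(const ASTContext &Ctx) {
  QualType P1 = Ctx.getPointerType(Ctx.IntTy);
  QualType P2 = Ctx.getPointerType(Ctx.IntTy);
  return P1.getTypePtr() == P2.getTypePtr(); // same uniqued node
}
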
2776 QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
2777  llvm::FoldingSetNodeID ID;
2778  AdjustedType::Profile(ID, Orig, New);
2779  void *InsertPos = nullptr;
2780  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2781  if (AT)
2782  return QualType(AT, 0);
2783 
2784  QualType Canonical = getCanonicalType(New);
2785 
2786  // Get the new insert position for the node we care about.
2787  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2788  assert(!AT && "Shouldn't be in the map!");
2789 
2790  AT = new (*this, TypeAlignment)
2791  AdjustedType(Type::Adjusted, Orig, New, Canonical);
2792  Types.push_back(AT);
2793  AdjustedTypes.InsertNode(AT, InsertPos);
2794  return QualType(AT, 0);
2795 }
2796 
2797 QualType ASTContext::getDecayedType(QualType T) const {
2798  assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
2799 
2800  QualType Decayed;
2801 
2802  // C99 6.7.5.3p7:
2803  // A declaration of a parameter as "array of type" shall be
2804  // adjusted to "qualified pointer to type", where the type
2805  // qualifiers (if any) are those specified within the [ and ] of
2806  // the array type derivation.
2807  if (T->isArrayType())
2808  Decayed = getArrayDecayedType(T);
2809 
2810  // C99 6.7.5.3p8:
2811  // A declaration of a parameter as "function returning type"
2812  // shall be adjusted to "pointer to function returning type", as
2813  // in 6.3.2.1.
2814  if (T->isFunctionType())
2815  Decayed = getPointerType(T);
2816 
2817  llvm::FoldingSetNodeID ID;
2818  AdjustedType::Profile(ID, T, Decayed);
2819  void *InsertPos = nullptr;
2820  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2821  if (AT)
2822  return QualType(AT, 0);
2823 
2824  QualType Canonical = getCanonicalType(Decayed);
2825 
2826  // Get the new insert position for the node we care about.
2827  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2828  assert(!AT && "Shouldn't be in the map!");
2829 
2830  AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical);
2831  Types.push_back(AT);
2832  AdjustedTypes.InsertNode(AT, InsertPos);
2833  return QualType(AT, 0);
2834 }
2835 
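// Illustrative usage sketch (hypothetical helper): decaying a parameter type
// such as 'int[4]' produces sugar whose canonical type is 'int *', matching
// the C99 adjustments quoted above. "Ctx" is an assumed ASTContext reference.
static bool exampleArrayDecaysToPointer(const ASTContext &Ctx) {
  QualType Arr = Ctx.getConstantArrayType(Ctx.IntTy, llvm::APInt(32, 4),
                                          ArrayType::Normal,
                                          /*IndexTypeQuals=*/0);
  QualType Decayed = Ctx.getDecayedType(Arr);
  return Ctx.hasSameType(Decayed, Ctx.getPointerType(Ctx.IntTy));
}
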
2836 /// getBlockPointerType - Return the uniqued reference to the type for
2837 /// a pointer to the specified block.
2838 QualType ASTContext::getBlockPointerType(QualType T) const {
2839  assert(T->isFunctionType() && "block of function types only");
2840  // Unique pointers, to guarantee there is only one block of a particular
2841  // structure.
2842  llvm::FoldingSetNodeID ID;
2843  BlockPointerType::Profile(ID, T);
2844 
2845  void *InsertPos = nullptr;
2846  if (BlockPointerType *PT =
2847  BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
2848  return QualType(PT, 0);
2849 
2850  // If the block pointee type isn't canonical, this won't be a canonical
2851  // type either so fill in the canonical type field.
2852  QualType Canonical;
2853  if (!T.isCanonical()) {
2854  Canonical = getBlockPointerType(getCanonicalType(T));
2855 
2856  // Get the new insert position for the node we care about.
2857  BlockPointerType *NewIP =
2858  BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
2859  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2860  }
2861  auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
2862  Types.push_back(New);
2863  BlockPointerTypes.InsertNode(New, InsertPos);
2864  return QualType(New, 0);
2865 }
2866 
2867 /// getLValueReferenceType - Return the uniqued reference to the type for an
2868 /// lvalue reference to the specified type.
2869 QualType
2870 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
2871  assert(getCanonicalType(T) != OverloadTy &&
2872  "Unresolved overloaded function type");
2873 
2874  // Unique pointers, to guarantee there is only one pointer of a particular
2875  // structure.
2876  llvm::FoldingSetNodeID ID;
2877  ReferenceType::Profile(ID, T, SpelledAsLValue);
2878 
2879  void *InsertPos = nullptr;
2880  if (LValueReferenceType *RT =
2881  LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
2882  return QualType(RT, 0);
2883 
2884  const auto *InnerRef = T->getAs<ReferenceType>();
2885 
2886  // If the referencee type isn't canonical, this won't be a canonical type
2887  // either, so fill in the canonical type field.
2888  QualType Canonical;
2889  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
2890  QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
2891  Canonical = getLValueReferenceType(getCanonicalType(PointeeType));
2892 
2893  // Get the new insert position for the node we care about.
2894  LValueReferenceType *NewIP =
2895  LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
2896  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2897  }
2898 
2899  auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
2900  SpelledAsLValue);
2901  Types.push_back(New);
2902  LValueReferenceTypes.InsertNode(New, InsertPos);
2903 
2904  return QualType(New, 0);
2905 }
2906 
2907 /// getRValueReferenceType - Return the uniqued reference to the type for an
2908 /// rvalue reference to the specified type.
2909 QualType ASTContext::getRValueReferenceType(QualType T) const {
2910  // Unique pointers, to guarantee there is only one pointer of a particular
2911  // structure.
2912  llvm::FoldingSetNodeID ID;
2913  ReferenceType::Profile(ID, T, false);
2914 
2915  void *InsertPos = nullptr;
2916  if (RValueReferenceType *RT =
2917  RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
2918  return QualType(RT, 0);
2919 
2920  const auto *InnerRef = T->getAs<ReferenceType>();
2921 
2922  // If the referencee type isn't canonical, this won't be a canonical type
2923  // either, so fill in the canonical type field.
2924  QualType Canonical;
2925  if (InnerRef || !T.isCanonical()) {
2926  QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
2927  Canonical = getRValueReferenceType(getCanonicalType(PointeeType));
2928 
2929  // Get the new insert position for the node we care about.
2930  RValueReferenceType *NewIP =
2931  RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
2932  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2933  }
2934 
2935  auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
2936  Types.push_back(New);
2937  RValueReferenceTypes.InsertNode(New, InsertPos);
2938  return QualType(New, 0);
2939 }
2940 
2941 /// getMemberPointerType - Return the uniqued reference to the type for a
2942 /// member pointer to the specified type, in the specified class.
2943 QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
2944  // Unique pointers, to guarantee there is only one pointer of a particular
2945  // structure.
2946  llvm::FoldingSetNodeID ID;
2947  MemberPointerType::Profile(ID, T, Cls);
2948 
2949  void *InsertPos = nullptr;
2950  if (MemberPointerType *PT =
2951  MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
2952  return QualType(PT, 0);
2953 
2954  // If the pointee or class type isn't canonical, this won't be a canonical
2955  // type either, so fill in the canonical type field.
2956  QualType Canonical;
2957  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
2959 
2960  // Get the new insert position for the node we care about.
2961  MemberPointerType *NewIP =
2962  MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
2963  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2964  }
2965  auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
2966  Types.push_back(New);
2967  MemberPointerTypes.InsertNode(New, InsertPos);
2968  return QualType(New, 0);
2969 }
2970 
2971 /// getConstantArrayType - Return the unique reference to the type for an
2972 /// array of the specified element type.
2973 QualType ASTContext::getConstantArrayType(QualType EltTy,
2974  const llvm::APInt &ArySizeIn,
2976  unsigned IndexTypeQuals) const {
2977  assert((EltTy->isDependentType() ||
2978  EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
2979  "Constant array of VLAs is illegal!");
2980 
2981  // Convert the array size into a canonical width matching the pointer size for
2982  // the target.
2983  llvm::APInt ArySize(ArySizeIn);
2984  ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());
2985 
2986  llvm::FoldingSetNodeID ID;
2987  ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, IndexTypeQuals);
2988 
2989  void *InsertPos = nullptr;
2990  if (ConstantArrayType *ATP =
2991  ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
2992  return QualType(ATP, 0);
2993 
2994  // If the element type isn't canonical or has qualifiers, this won't
2995  // be a canonical type either, so fill in the canonical type field.
2996  QualType Canon;
2997  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
2998  SplitQualType canonSplit = getCanonicalType(EltTy).split();
2999  Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize,
3000  ASM, IndexTypeQuals);
3001  Canon = getQualifiedType(Canon, canonSplit.Quals);
3002 
3003  // Get the new insert position for the node we care about.
3004  ConstantArrayType *NewIP =
3005  ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
3006  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3007  }
3008 
3009  auto *New = new (*this,TypeAlignment)
3010  ConstantArrayType(EltTy, Canon, ArySize, ASM, IndexTypeQuals);
3011  ConstantArrayTypes.InsertNode(New, InsertPos);
3012  Types.push_back(New);
3013  return QualType(New, 0);
3014 }
3015 
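// Illustrative usage sketch (hypothetical helper): the size is zero-extended
// or truncated to the target's maximum pointer width before uniquing, so
// APInts of different bit widths that denote the same extent map to a single
// array type. "Ctx" is an assumed ASTContext reference.
static bool exampleArraySizeWidthIsCanonicalized(const ASTContext &Ctx) {
  QualType A = Ctx.getConstantArrayType(Ctx.CharTy, llvm::APInt(16, 10),
                                        ArrayType::Normal, 0);
  QualType B = Ctx.getConstantArrayType(Ctx.CharTy, llvm::APInt(64, 10),
                                        ArrayType::Normal, 0);
  return A == B; // same uniqued ConstantArrayType
}
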
3016 /// getVariableArrayDecayedType - Turns the given type, which may be
3017 /// variably-modified, into the corresponding type with all the known
3018 /// sizes replaced with [*].
3019 QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
3020  // Vastly most common case.
3021  if (!type->isVariablyModifiedType()) return type;
3022 
3023  QualType result;
3024 
3025  SplitQualType split = type.getSplitDesugaredType();
3026  const Type *ty = split.Ty;
3027  switch (ty->getTypeClass()) {
3028 #define TYPE(Class, Base)
3029 #define ABSTRACT_TYPE(Class, Base)
3030 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3031 #include "clang/AST/TypeNodes.def"
3032  llvm_unreachable("didn't desugar past all non-canonical types?");
3033 
3034  // These types should never be variably-modified.
3035  case Type::Builtin:
3036  case Type::Complex:
3037  case Type::Vector:
3038  case Type::ExtVector:
3039  case Type::DependentSizedExtVector:
3040  case Type::DependentAddressSpace:
3041  case Type::ObjCObject:
3042  case Type::ObjCInterface:
3043  case Type::ObjCObjectPointer:
3044  case Type::Record:
3045  case Type::Enum:
3046  case Type::UnresolvedUsing:
3047  case Type::TypeOfExpr:
3048  case Type::TypeOf:
3049  case Type::Decltype:
3050  case Type::UnaryTransform:
3051  case Type::DependentName:
3052  case Type::InjectedClassName:
3053  case Type::TemplateSpecialization:
3054  case Type::DependentTemplateSpecialization:
3055  case Type::TemplateTypeParm:
3056  case Type::SubstTemplateTypeParmPack:
3057  case Type::Auto:
3058  case Type::DeducedTemplateSpecialization:
3059  case Type::PackExpansion:
3060  llvm_unreachable("type should never be variably-modified");
3061 
3062  // These types can be variably-modified but should never need to
3063  // further decay.
3064  case Type::FunctionNoProto:
3065  case Type::FunctionProto:
3066  case Type::BlockPointer:
3067  case Type::MemberPointer:
3068  case Type::Pipe:
3069  return type;
3070 
3071  // These types can be variably-modified. All these modifications
3072  // preserve structure except as noted by comments.
3073  // TODO: if we ever care about optimizing VLAs, there are no-op
3074  // optimizations available here.
3075  case Type::Pointer:
3077  cast<PointerType>(ty)->getPointeeType()));
3078  break;
3079 
3080  case Type::LValueReference: {
3081  const auto *lv = cast<LValueReferenceType>(ty);
3082  result = getLValueReferenceType(
3083  getVariableArrayDecayedType(lv->getPointeeType()),
3084  lv->isSpelledAsLValue());
3085  break;
3086  }
3087 
3088  case Type::RValueReference: {
3089  const auto *lv = cast<RValueReferenceType>(ty);
3090  result = getRValueReferenceType(
3091  getVariableArrayDecayedType(lv->getPointeeType()));
3092  break;
3093  }
3094 
3095  case Type::Atomic: {
3096  const auto *at = cast<AtomicType>(ty);
3097  result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
3098  break;
3099  }
3100 
3101  case Type::ConstantArray: {
3102  const auto *cat = cast<ConstantArrayType>(ty);
3103  result = getConstantArrayType(
3104  getVariableArrayDecayedType(cat->getElementType()),
3105  cat->getSize(),
3106  cat->getSizeModifier(),
3107  cat->getIndexTypeCVRQualifiers());
3108  break;
3109  }
3110 
3111  case Type::DependentSizedArray: {
3112  const auto *dat = cast<DependentSizedArrayType>(ty);
3113  result = getDependentSizedArrayType(
3114  getVariableArrayDecayedType(dat->getElementType()),
3115  dat->getSizeExpr(),
3116  dat->getSizeModifier(),
3117  dat->getIndexTypeCVRQualifiers(),
3118  dat->getBracketsRange());
3119  break;
3120  }
3121 
3122  // Turn incomplete types into [*] types.
3123  case Type::IncompleteArray: {
3124  const auto *iat = cast<IncompleteArrayType>(ty);
3125  result = getVariableArrayType(
3126  getVariableArrayDecayedType(iat->getElementType()),
3127  /*size*/ nullptr,
3129  iat->getIndexTypeCVRQualifiers(),
3130  SourceRange());
3131  break;
3132  }
3133 
3134  // Turn VLA types into [*] types.
3135  case Type::VariableArray: {
3136  const auto *vat = cast<VariableArrayType>(ty);
3137  result = getVariableArrayType(
3138  getVariableArrayDecayedType(vat->getElementType()),
3139  /*size*/ nullptr,
3141  vat->getIndexTypeCVRQualifiers(),
3142  vat->getBracketsRange());
3143  break;
3144  }
3145  }
3146 
3147  // Apply the top-level qualifiers from the original.
3148  return getQualifiedType(result, split.Quals);
3149 }
3150 
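// Illustrative usage sketch (hypothetical helper): the early return above
// means a type that is not variably modified comes back untouched; only VLA
// and dependent-size components are rewritten to the star-sized form. "Ctx"
// is an assumed ASTContext reference.
static bool exampleNonVLATypeIsUnchanged(const ASTContext &Ctx) {
  QualType PtrToInt = Ctx.getPointerType(Ctx.IntTy);
  return Ctx.getVariableArrayDecayedType(PtrToInt) == PtrToInt;
}
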
3151 /// getVariableArrayType - Returns a non-unique reference to the type for a
3152 /// variable array of the specified element type.
3154  Expr *NumElts,
3156  unsigned IndexTypeQuals,
3157  SourceRange Brackets) const {
3158  // Since we don't unique expressions, it isn't possible to unique VLA's
3159  // that have an expression provided for their size.
3160  QualType Canon;
3161 
3162  // Be sure to pull qualifiers off the element type.
3163  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
3164  SplitQualType canonSplit = getCanonicalType(EltTy).split();
3165  Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
3166  IndexTypeQuals, Brackets);
3167  Canon = getQualifiedType(Canon, canonSplit.Quals);
3168  }
3169 
3170  auto *New = new (*this, TypeAlignment)
3171  VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
3172 
3173  VariableArrayTypes.push_back(New);
3174  Types.push_back(New);
3175  return QualType(New, 0);
3176 }
3177 
3178 /// getDependentSizedArrayType - Returns a non-unique reference to
3179 /// the type for a dependently-sized array of the specified element
3180 /// type.
3182  Expr *numElements,
3184  unsigned elementTypeQuals,
3185  SourceRange brackets) const {
3186  assert((!numElements || numElements->isTypeDependent() ||
3187  numElements->isValueDependent()) &&
3188  "Size must be type- or value-dependent!");
3189 
3190  // Dependently-sized array types that do not have a specified number
3191  // of elements will have their sizes deduced from a dependent
3192  // initializer. We do no canonicalization here at all, which is okay
3193  // because they can't be used in most locations.
3194  if (!numElements) {
3195  auto *newType
3196  = new (*this, TypeAlignment)
3197  DependentSizedArrayType(*this, elementType, QualType(),
3198  numElements, ASM, elementTypeQuals,
3199  brackets);
3200  Types.push_back(newType);
3201  return QualType(newType, 0);
3202  }
3203 
3204  // Otherwise, we actually build a new type every time, but we
3205  // also build a canonical type.
3206 
3207  SplitQualType canonElementType = getCanonicalType(elementType).split();
3208 
3209  void *insertPos = nullptr;
3210  llvm::FoldingSetNodeID ID;
3212  QualType(canonElementType.Ty, 0),
3213  ASM, elementTypeQuals, numElements);
3214 
3215  // Look for an existing type with these properties.
3216  DependentSizedArrayType *canonTy =
3217  DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);
3218 
3219  // If we don't have one, build one.
3220  if (!canonTy) {
3221  canonTy = new (*this, TypeAlignment)
3222  DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
3223  QualType(), numElements, ASM, elementTypeQuals,
3224  brackets);
3225  DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
3226  Types.push_back(canonTy);
3227  }
3228 
3229  // Apply qualifiers from the element type to the array.
3230  QualType canon = getQualifiedType(QualType(canonTy,0),
3231  canonElementType.Quals);
3232 
3233  // If we didn't need extra canonicalization for the element type or the size
3234  // expression, then just use that as our result.
3235  if (QualType(canonElementType.Ty, 0) == elementType &&
3236  canonTy->getSizeExpr() == numElements)
3237  return canon;
3238 
3239  // Otherwise, we need to build a type which follows the spelling
3240  // of the element type.
3241  auto *sugaredType
3242  = new (*this, TypeAlignment)
3243  DependentSizedArrayType(*this, elementType, canon, numElements,
3244  ASM, elementTypeQuals, brackets);
3245  Types.push_back(sugaredType);
3246  return QualType(sugaredType, 0);
3247 }
3248 
3251  unsigned elementTypeQuals) const {
3252  llvm::FoldingSetNodeID ID;
3253  IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);
3254 
3255  void *insertPos = nullptr;
3256  if (IncompleteArrayType *iat =
3257  IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
3258  return QualType(iat, 0);
3259 
3260  // If the element type isn't canonical, this won't be a canonical type
3261  // either, so fill in the canonical type field. We also have to pull
3262  // qualifiers off the element type.
3263  QualType canon;
3264 
3265  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
3266  SplitQualType canonSplit = getCanonicalType(elementType).split();
3267  canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
3268  ASM, elementTypeQuals);
3269  canon = getQualifiedType(canon, canonSplit.Quals);
3270 
3271  // Get the new insert position for the node we care about.
3272  IncompleteArrayType *existing =
3273  IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
3274  assert(!existing && "Shouldn't be in the map!"); (void) existing;
3275  }
3276 
3277  auto *newType = new (*this, TypeAlignment)
3278  IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
3279 
3280  IncompleteArrayTypes.InsertNode(newType, insertPos);
3281  Types.push_back(newType);
3282  return QualType(newType, 0);
3283 }
3284 
3285 /// getVectorType - Return the unique reference to a vector type of
3286 /// the specified element type and size. VectorType must be a built-in type.
3287 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
3288  VectorType::VectorKind VecKind) const {
3289  assert(vecType->isBuiltinType());
3290 
3291  // Check if we've already instantiated a vector of this type.
3292  llvm::FoldingSetNodeID ID;
3293  VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
3294 
3295  void *InsertPos = nullptr;
3296  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
3297  return QualType(VTP, 0);
3298 
3299  // If the element type isn't canonical, this won't be a canonical type either,
3300  // so fill in the canonical type field.
3301  QualType Canonical;
3302  if (!vecType.isCanonical()) {
3303  Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);
3304 
3305  // Get the new insert position for the node we care about.
3306  VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3307  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3308  }
3309  auto *New = new (*this, TypeAlignment)
3310  VectorType(vecType, NumElts, Canonical, VecKind);
3311  VectorTypes.InsertNode(New, InsertPos);
3312  Types.push_back(New);
3313  return QualType(New, 0);
3314 }
3315 
3316 /// getExtVectorType - Return the unique reference to an extended vector type of
3317 /// the specified element type and size. VectorType must be a built-in type.
3318 QualType
3319 ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
3320  assert(vecType->isBuiltinType() || vecType->isDependentType());
3321 
3322  // Check if we've already instantiated a vector of this type.
3323  llvm::FoldingSetNodeID ID;
3324  VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
3326  void *InsertPos = nullptr;
3327  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
3328  return QualType(VTP, 0);
3329 
3330  // If the element type isn't canonical, this won't be a canonical type either,
3331  // so fill in the canonical type field.
3332  QualType Canonical;
3333  if (!vecType.isCanonical()) {
3334  Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);
3335 
3336  // Get the new insert position for the node we care about.
3337  VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3338  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3339  }
3340  auto *New = new (*this, TypeAlignment)
3341  ExtVectorType(vecType, NumElts, Canonical);
3342  VectorTypes.InsertNode(New, InsertPos);
3343  Types.push_back(New);
3344  return QualType(New, 0);
3345 }
3346 
3347 QualType
3349  Expr *SizeExpr,
3350  SourceLocation AttrLoc) const {
3351  llvm::FoldingSetNodeID ID;
3353  SizeExpr);
3354 
3355  void *InsertPos = nullptr;
3357  = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3359  if (Canon) {
3360  // We already have a canonical version of this array type; use it as
3361  // the canonical type for a newly-built type.
3362  New = new (*this, TypeAlignment)
3363  DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0),
3364  SizeExpr, AttrLoc);
3365  } else {
3366  QualType CanonVecTy = getCanonicalType(vecType);
3367  if (CanonVecTy == vecType) {
3368  New = new (*this, TypeAlignment)
3369  DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr,
3370  AttrLoc);
3371 
3372  DependentSizedExtVectorType *CanonCheck
3373  = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3374  assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
3375  (void)CanonCheck;
3376  DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
3377  } else {
3378  QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
3379  SourceLocation());
3380  New = new (*this, TypeAlignment)
3381  DependentSizedExtVectorType(*this, vecType, Canon, SizeExpr, AttrLoc);
3382  }
3383  }
3384 
3385  Types.push_back(New);
3386  return QualType(New, 0);
3387 }
3388 
3389 QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
3390  Expr *AddrSpaceExpr,
3391  SourceLocation AttrLoc) const {
3392  assert(AddrSpaceExpr->isInstantiationDependent());
3393 
3394  QualType canonPointeeType = getCanonicalType(PointeeType);
3395 
3396  void *insertPos = nullptr;
3397  llvm::FoldingSetNodeID ID;
3398  DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
3399  AddrSpaceExpr);
3400 
3401  DependentAddressSpaceType *canonTy =
3402  DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);
3403 
3404  if (!canonTy) {
3405  canonTy = new (*this, TypeAlignment)
3406  DependentAddressSpaceType(*this, canonPointeeType,
3407  QualType(), AddrSpaceExpr, AttrLoc);
3408  DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
3409  Types.push_back(canonTy);
3410  }
3411 
3412  if (canonPointeeType == PointeeType &&
3413  canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
3414  return QualType(canonTy, 0);
3415 
3416  auto *sugaredType
3417  = new (*this, TypeAlignment)
3418  DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0),
3419  AddrSpaceExpr, AttrLoc);
3420  Types.push_back(sugaredType);
3421  return QualType(sugaredType, 0);
3422 }
3423 
3424 /// Determine whether \p T is canonical as the result type of a function.
3425 static bool isCanonicalResultType(QualType T) {
3426  return T.isCanonical() &&
3427  (T.getObjCLifetime() == Qualifiers::OCL_None ||
3428  T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
3429 }
3430 
3431 /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
3432 QualType
3433 ASTContext::getFunctionNoProtoType(QualType ResultTy,
3434  const FunctionType::ExtInfo &Info) const {
3435  // Unique functions, to guarantee there is only one function of a particular
3436  // structure.
3437  llvm::FoldingSetNodeID ID;
3438  FunctionNoProtoType::Profile(ID, ResultTy, Info);
3439 
3440  void *InsertPos = nullptr;
3441  if (FunctionNoProtoType *FT =
3442  FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
3443  return QualType(FT, 0);
3444 
3445  QualType Canonical;
3446  if (!isCanonicalResultType(ResultTy)) {
3447  Canonical =
3448  getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info);
3449 
3450  // Get the new insert position for the node we care about.
3451  FunctionNoProtoType *NewIP =
3452  FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
3453  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3454  }
3455 
3456  auto *New = new (*this, TypeAlignment)
3457  FunctionNoProtoType(ResultTy, Canonical, Info);
3458  Types.push_back(New);
3459  FunctionNoProtoTypes.InsertNode(New, InsertPos);
3460  return QualType(New, 0);
3461 }
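// [Editorial usage sketch - not part of the original ASTContext.cpp.] The
// K&R type 'int()' can be requested with a default-constructed ExtInfo;
// ASTContext.h also declares a one-argument overload that supplies it for you.
#include "clang/AST/ASTContext.h"

static clang::QualType buildKandRIntFunction(clang::ASTContext &Ctx) {
  using namespace clang;
  return Ctx.getFunctionNoProtoType(Ctx.IntTy, FunctionType::ExtInfo());
}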
3462 
3463 CanQualType
3464 ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
3465  CanQualType CanResultType = getCanonicalType(ResultType);
3466 
3467  // Canonical result types do not have ARC lifetime qualifiers.
3468  if (CanResultType.getQualifiers().hasObjCLifetime()) {
3469  Qualifiers Qs = CanResultType.getQualifiers();
3470  Qs.removeObjCLifetime();
3471  return CanQualType::CreateUnsafe(
3472  getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
3473  }
3474 
3475  return CanResultType;
3476 }
3477 
3478 static bool isCanonicalExceptionSpecification(
3479  const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
3480  if (ESI.Type == EST_None)
3481  return true;
3482  if (!NoexceptInType)
3483  return false;
3484 
3485  // C++17 onwards: exception specification is part of the type, as a simple
3486  // boolean "can this function type throw".
3487  if (ESI.Type == EST_BasicNoexcept)
3488  return true;
3489 
3490  // A noexcept(expr) specification is (possibly) canonical if expr is
3491  // value-dependent.
3492  if (ESI.Type == EST_DependentNoexcept)
3493  return true;
3494 
3495  // A dynamic exception specification is canonical if it only contains pack
3496  // expansions (so we can't tell whether it's non-throwing) and all its
3497  // contained types are canonical.
3498  if (ESI.Type == EST_Dynamic) {
3499  bool AnyPackExpansions = false;
3500  for (QualType ET : ESI.Exceptions) {
3501  if (!ET.isCanonical())
3502  return false;
3503  if (ET->getAs<PackExpansionType>())
3504  AnyPackExpansions = true;
3505  }
3506  return AnyPackExpansions;
3507  }
3508 
3509  return false;
3510 }
3511 
3512 QualType ASTContext::getFunctionTypeInternal(
3513  QualType ResultTy, ArrayRef<QualType> ArgArray,
3514  const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
3515  size_t NumArgs = ArgArray.size();
3516 
3517  // Unique functions, to guarantee there is only one function of a particular
3518  // structure.
3519  llvm::FoldingSetNodeID ID;
3520  FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
3521  *this, true);
3522 
3523  QualType Canonical;
3524  bool Unique = false;
3525 
3526  void *InsertPos = nullptr;
3527  if (FunctionProtoType *FPT =
3528  FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
3529  QualType Existing = QualType(FPT, 0);
3530 
3531  // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
3532  // it so long as our exception specification doesn't contain a dependent
3533  // noexcept expression, or we're just looking for a canonical type.
3534  // Otherwise, we're going to need to create a type
3535  // sugar node to hold the concrete expression.
3536  if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
3537  EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
3538  return Existing;
3539 
3540  // We need a new type sugar node for this one, to hold the new noexcept
3541  // expression. We do no canonicalization here, but that's OK since we don't
3542  // expect to see the same noexcept expression much more than once.
3543  Canonical = getCanonicalType(Existing);
3544  Unique = true;
3545  }
3546 
3547  bool NoexceptInType = getLangOpts().CPlusPlus17;
3548  bool IsCanonicalExceptionSpec =
3549  isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);
3550 
3551  // Determine whether the type being created is already canonical or not.
3552  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
3553  isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
3554  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
3555  if (!ArgArray[i].isCanonicalAsParam())
3556  isCanonical = false;
3557 
3558  if (OnlyWantCanonical)
3559  assert(isCanonical &&
3560  "given non-canonical parameters constructing canonical type");
3561 
3562  // If this type isn't canonical, get the canonical version of it if we don't
3563  // already have it. The exception spec is only partially part of the
3564  // canonical type, and only in C++17 onwards.
3565  if (!isCanonical && Canonical.isNull()) {
3566  SmallVector<QualType, 16> CanonicalArgs;
3567  CanonicalArgs.reserve(NumArgs);
3568  for (unsigned i = 0; i != NumArgs; ++i)
3569  CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
3570 
3571  llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
3572  FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
3573  CanonicalEPI.HasTrailingReturn = false;
3574 
3575  if (IsCanonicalExceptionSpec) {
3576  // Exception spec is already OK.
3577  } else if (NoexceptInType) {
3578  switch (EPI.ExceptionSpec.Type) {
3579  case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
3580  // We don't know yet. It shouldn't matter what we pick here; no-one
3581  // should ever look at this.
3582  LLVM_FALLTHROUGH;
3583  case EST_None: case EST_MSAny: case EST_NoexceptFalse:
3584  CanonicalEPI.ExceptionSpec.Type = EST_None;
3585  break;
3586 
3587  // A dynamic exception specification is almost always "not noexcept",
3588  // with the exception that a pack expansion might expand to no types.
3589  case EST_Dynamic: {
3590  bool AnyPacks = false;
3591  for (QualType ET : EPI.ExceptionSpec.Exceptions) {
3592  if (ET->getAs<PackExpansionType>())
3593  AnyPacks = true;
3594  ExceptionTypeStorage.push_back(getCanonicalType(ET));
3595  }
3596  if (!AnyPacks)
3597  CanonicalEPI.ExceptionSpec.Type = EST_None;
3598  else {
3599  CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
3600  CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
3601  }
3602  break;
3603  }
3604 
3605  case EST_DynamicNone: case EST_BasicNoexcept: case EST_NoexceptTrue:
3606  CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
3607  break;
3608 
3609  case EST_DependentNoexcept:
3610  llvm_unreachable("dependent noexcept is already canonical");
3611  }
3612  } else {
3613  CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
3614  }
3615 
3616  // Adjust the canonical function result type.
3617  CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
3618  Canonical =
3619  getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);
3620 
3621  // Get the new insert position for the node we care about.
3622  FunctionProtoType *NewIP =
3623  FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
3624  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3625  }
3626 
3627  // FunctionProtoType objects are allocated with extra bytes after
3628  // them for three variable size arrays at the end:
3629  // - parameter types
3630  // - exception types
3631  // - extended parameter information
3632  // Instead of the exception types, there could be a noexcept
3633  // expression, or information used to resolve the exception
3634  // specification.
3635  size_t Size =
3636  sizeof(FunctionProtoType) + NumArgs * sizeof(QualType) +
3637  FunctionProtoType::getExceptionSpecSize(
3638  EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
3639 
3640  // Put the ExtParameterInfos last. If all were equal, it would make
3641  // more sense to put these before the exception specification, because
3642  // it's much easier to skip past them compared to the elaborate switch
3643  // required to skip the exception specification. However, all is not
3644  // equal; ExtParameterInfos are used to model very uncommon features,
3645  // and it's better not to burden the more common paths.
3646  if (EPI.ExtParameterInfos) {
3647  Size += NumArgs * sizeof(FunctionProtoType::ExtParameterInfo);
3648  }
3649 
3650  auto *FTP = (FunctionProtoType *) Allocate(Size, TypeAlignment);
3651  FunctionProtoType::ExtProtoInfo newEPI = EPI;
3652  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
3653  Types.push_back(FTP);
3654  if (!Unique)
3655  FunctionProtoTypes.InsertNode(FTP, InsertPos);
3656  return QualType(FTP, 0);
3657 }
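// [Editorial usage sketch - not part of the original ASTContext.cpp.]
// Clients normally reach getFunctionTypeInternal through the public
// ASTContext::getFunctionType wrapper declared in ASTContext.h, which passes
// OnlyWantCanonical = false. Building 'void (int, float)' with a default
// ExtProtoInfo (non-variadic, no exception specification):
#include "clang/AST/ASTContext.h"

static clang::QualType buildVoidIntFloat(clang::ASTContext &Ctx) {
  using namespace clang;
  QualType Params[] = {Ctx.IntTy, Ctx.FloatTy};
  FunctionProtoType::ExtProtoInfo EPI;
  return Ctx.getFunctionType(Ctx.VoidTy, Params, EPI);
}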
3658 
3659 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
3660  llvm::FoldingSetNodeID ID;
3661  PipeType::Profile(ID, T, ReadOnly);
3662 
3663  void *InsertPos = nullptr;
3664  if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
3665  return QualType(PT, 0);
3666 
3667  // If the pipe element type isn't canonical, this won't be a canonical type
3668  // either, so fill in the canonical type field.
3669  QualType Canonical;
3670  if (!T.isCanonical()) {
3671  Canonical = getPipeType(getCanonicalType(T), ReadOnly);
3672 
3673  // Get the new insert position for the node we care about.
3674  PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
3675  assert(!NewIP && "Shouldn't be in the map!");
3676  (void)NewIP;
3677  }
3678  auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly);
3679  Types.push_back(New);
3680  PipeTypes.InsertNode(New, InsertPos);
3681  return QualType(New, 0);
3682 }
3683 
3684 QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const {
3685  // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
3686  return LangOpts.OpenCL ? getAddrSpaceQualType(Ty, LangAS::opencl_constant)
3687  : Ty;
3688 }
3689 
3690 QualType ASTContext::getReadPipeType(QualType T) const {
3691  return getPipeType(T, true);
3692 }
3693 
3694 QualType ASTContext::getWritePipeType(QualType T) const {
3695  return getPipeType(T, false);
3696 }
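// [Editorial usage sketch - not part of the original ASTContext.cpp.] The two
// helpers above model OpenCL 'read_only pipe T' and 'write_only pipe T':
#include "clang/AST/ASTContext.h"

static void buildPipeTypes(clang::ASTContext &Ctx) {
  clang::QualType RO = Ctx.getReadPipeType(Ctx.IntTy);  // read_only pipe int
  clang::QualType WO = Ctx.getWritePipeType(Ctx.IntTy); // write_only pipe int
  (void)RO;
  (void)WO;
}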
3697 
3698 #ifndef NDEBUG
3699 static bool NeedsInjectedClassNameType(const RecordDecl *D) {
3700  if (!isa<CXXRecordDecl>(D)) return false;
3701  const auto *RD = cast<CXXRecordDecl>(D);
3702  if (isa<ClassTemplatePartialSpecializationDecl>(RD))
3703  return true;
3704  if (RD->getDescribedClassTemplate() &&
3705  !isa<ClassTemplateSpecializationDecl>(RD))
3706  return true;
3707  return false;
3708 }
3709 #endif
3710 
3711 /// getInjectedClassNameType - Return the unique reference to the
3712 /// injected class name type for the specified templated declaration.
3713 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
3714  QualType TST) const {
3715  assert(NeedsInjectedClassNameType(Decl));
3716  if (Decl->TypeForDecl) {
3717  assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
3718  } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
3719  assert(PrevDecl->TypeForDecl && "previous declaration has no type");
3720  Decl->TypeForDecl = PrevDecl->TypeForDecl;
3721  assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
3722  } else {
3723  Type *newType =
3724  new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
3725  Decl->TypeForDecl = newType;
3726  Types.push_back(newType);
3727  }
3728  return QualType(Decl->TypeForDecl, 0);
3729 }
3730 
3731 /// getTypeDeclType - Return the unique reference to the type for the
3732 /// specified type declaration.
3733 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
3734  assert(Decl && "Passed null for Decl param");
3735  assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
3736 
3737  if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
3738  return getTypedefType(Typedef);
3739 
3740  assert(!isa<TemplateTypeParmDecl>(Decl) &&
3741  "Template type parameter types are always available.");
3742 
3743  if (const auto *Record = dyn_cast<RecordDecl>(Decl)) {
3744  assert(Record->isFirstDecl() && "struct/union has previous declaration");
3745  assert(!NeedsInjectedClassNameType(Record));
3746  return getRecordType(Record);
3747  } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) {
3748  assert(Enum->isFirstDecl() && "enum has previous declaration");
3749  return getEnumType(Enum);
3750  } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
3751  Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
3752  Decl->TypeForDecl = newType;
3753  Types.push_back(newType);
3754  } else
3755  llvm_unreachable("TypeDecl without a type?");
3756 
3757  return QualType(Decl->TypeForDecl, 0);
3758 }
3759 
3760 /// getTypedefType - Return the unique reference to the type for the
3761 /// specified typedef name decl.
3762 QualType
3763 ASTContext::getTypedefType(const TypedefNameDecl *Decl,
3764  QualType Canonical) const {
3765  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
3766 
3767  if (Canonical.isNull())
3768  Canonical = getCanonicalType(Decl->getUnderlyingType());
3769  auto *newType = new (*this, TypeAlignment)
3770  TypedefType(Type::Typedef, Decl, Canonical);
3771  Decl->TypeForDecl = newType;
3772  Types.push_back(newType);
3773  return QualType(newType, 0);
3774 }
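// [Editorial usage sketch - not part of the original ASTContext.cpp.] A
// TypedefType is pure sugar: it remembers the declaration, while its
// canonical type is the canonical form of the underlying type. Sketch,
// assuming the caller already has a TypedefNameDecl in hand:
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include <cassert>

static clang::QualType typeForTypedef(clang::ASTContext &Ctx,
                                      const clang::TypedefNameDecl *TD) {
  clang::QualType T = Ctx.getTypedefType(TD, clang::QualType());
  assert(Ctx.getCanonicalType(T) ==
         Ctx.getCanonicalType(TD->getUnderlyingType()));
  return T;
}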
3775 
3776 QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
3777  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
3778 
3779  if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
3780  if (PrevDecl->TypeForDecl)
3781  return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
3782 
3783  auto *newType = new (*this, TypeAlignment) RecordType(Decl);
3784  Decl->TypeForDecl = newType;
3785  Types.push_back(newType);
3786  return QualType(newType, 0);
3787 }
3788 
3789 QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
3790  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
3791 
3792  if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
3793  if (PrevDecl->TypeForDecl)
3794  return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
3795 
3796  auto *newType = new (*this, TypeAlignment) EnumType(Decl);
3797  Decl->TypeForDecl = newType;
3798  Types.push_back(newType);
3799  return QualType(newType, 0);
3800 }
3801 
3802 QualType ASTContext::getAttributedType(AttributedType::Kind attrKind,
3803  QualType modifiedType,
3804  QualType equivalentType) {
3805  llvm::FoldingSetNodeID id;
3806  AttributedType::Profile(id, attrKind, modifiedType, equivalentType);
3807 
3808  void *insertPos = nullptr;
3809  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
3810  if (type) return QualType(type, 0);
3811 
3812  QualType canon = getCanonicalType(equivalentType);
3813  type = new (*this, TypeAlignment)
3814  AttributedType(canon, attrKind, modifiedType, equivalentType);
3815 
3816  Types.push_back(type);
3817  AttributedTypes.InsertNode(type, insertPos);
3818 
3819  return QualType(type, 0);
3820 }
3821 
3822 /// Retrieve a substitution-result type.
3823 QualType
3824 ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
3825  QualType Replacement) const {
3826  assert(Replacement.isCanonical()
3827  && "replacement types must always be canonical");
3828 
3829  llvm::FoldingSetNodeID ID;
3830  SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
3831  void *InsertPos = nullptr;
3832  SubstTemplateTypeParmType *SubstParm
3833  = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
3834 
3835  if (!SubstParm) {
3836  SubstParm = new (*this, TypeAlignment)
3837  SubstTemplateTypeParmType(Parm, Replacement);
3838  Types.push_back(SubstParm);
3839  SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
3840  }
3841 
3842  return QualType(SubstParm, 0);
3843 }
3844 
3845 /// Retrieve a substitution-result type for a substituted template type
3846 QualType ASTContext::getSubstTemplateTypeParmPackType(
3847  const TemplateTypeParmType *Parm,
3848  const TemplateArgument &ArgPack) {
3849 #ifndef NDEBUG
3850  for (const auto &P : ArgPack.pack_elements()) {
3851  assert(P.getKind() == TemplateArgument::Type &&"Pack contains a non-type");
3852  assert(P.getAsType().isCanonical() && "Pack contains non-canonical type");
3853  }
3854 #endif
3855 
3856  llvm::FoldingSetNodeID ID;
3857  SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
3858  void *InsertPos = nullptr;
3859  if (SubstTemplateTypeParmPackType *SubstParm
3860  = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
3861  return QualType(SubstParm, 0);
3862 
3863  QualType Canon;
3864  if (!Parm->isCanonicalUnqualified()) {
3865  Canon = getCanonicalType(QualType(Parm, 0));
3866  Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
3867  ArgPack);
3868  SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
3869  }
3870 
3871  auto *SubstParm
3872  = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
3873  ArgPack);
3874  Types.push_back(SubstParm);
3875  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
3876  return QualType(SubstParm, 0);
3877 }
3878 
3879 /// Retrieve the template type parameter type for a template
3880 /// parameter or parameter pack with the given depth, index, and (optionally)
3881 /// name.
3882 QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
3883  bool ParameterPack,
3884  TemplateTypeParmDecl *TTPDecl) const {
3885  llvm::FoldingSetNodeID ID;
3886  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
3887  void *InsertPos = nullptr;
3888  TemplateTypeParmType *TypeParm
3889  = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
3890 
3891  if (TypeParm)
3892  return QualType(TypeParm, 0);
3893 
3894  if (TTPDecl) {
3895  QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
3896  TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);
3897 
3898  TemplateTypeParmType *TypeCheck
3899  = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
3900  assert(!TypeCheck && "Template type parameter canonical type broken");
3901  (void)TypeCheck;
3902  } else
3903  TypeParm = new (*this, TypeAlignment)
3904  TemplateTypeParmType(Depth, Index, ParameterPack);
3905 
3906  Types.push_back(TypeParm);
3907  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
3908 
3909  return QualType(TypeParm, 0);
3910 }
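// [Editorial usage sketch - not part of the original ASTContext.cpp.] With no
// TemplateTypeParmDecl the result is the canonical depth/index form (printed
// as 'type-parameter-0-0'); passing a declaration yields sugar whose
// canonical type is that same node.
#include "clang/AST/ASTContext.h"
#include <cassert>

static clang::QualType canonicalTemplateParam(clang::ASTContext &Ctx) {
  clang::QualType P = Ctx.getTemplateTypeParmType(
      /*Depth=*/0, /*Index=*/0, /*ParameterPack=*/false, /*TTPDecl=*/nullptr);
  assert(P.isCanonical());
  return P;
}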
3911 
3912 TypeSourceInfo *
3913 ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
3914  SourceLocation NameLoc,
3915  const TemplateArgumentListInfo &Args,
3916  QualType Underlying) const {
3917  assert(!Name.getAsDependentTemplateName() &&
3918  "No dependent template names here!");
3919  QualType TST = getTemplateSpecializationType(Name, Args, Underlying);
3920 
3921  TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
3922  TemplateSpecializationTypeLoc TL =
3923  DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
3924  TL.setTemplateKeywordLoc(SourceLocation());
3925  TL.setTemplateNameLoc(NameLoc);
3926  TL.setLAngleLoc(Args.getLAngleLoc());
3927  TL.setRAngleLoc(Args.getRAngleLoc());
3928  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
3929  TL.setArgLocInfo(i, Args[i].getLocInfo());
3930  return DI;
3931 }
3932 
3933 QualType
3934 ASTContext::getTemplateSpecializationType(TemplateName Template,
3935  const TemplateArgumentListInfo &Args,
3936  QualType Underlying) const {
3937  assert(!Template.getAsDependentTemplateName() &&
3938  "No dependent template names here!");
3939 
3940  SmallVector<TemplateArgument, 4> ArgVec;
3941  ArgVec.reserve(Args.size());
3942  for (const TemplateArgumentLoc &Arg : Args.arguments())
3943  ArgVec.push_back(Arg.getArgument());
3944 
3945  return getTemplateSpecializationType(Template, ArgVec, Underlying);
3946 }
3947 
3948 #ifndef NDEBUG
3949 static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
3950  for (const TemplateArgument &Arg : Args)
3951  if (Arg.isPackExpansion())
3952  return true;
3953 
3954  return false;
3955 }
3956 #endif
3957 
3958 QualType
3959 ASTContext::getTemplateSpecializationType(TemplateName Template,
3960  ArrayRef<TemplateArgument> Args,
3961  QualType Underlying) const {
3962  assert(!Template.getAsDependentTemplateName() &&
3963  "No dependent template names here!");
3964  // Look through qualified template names.
3965  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
3966  Template = TemplateName(QTN->getTemplateDecl());
3967 
3968  bool IsTypeAlias =
3969  Template.getAsTemplateDecl() &&
3970  isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
3971  QualType CanonType;
3972  if (!Underlying.isNull())
3973  CanonType = getCanonicalType(Underlying);
3974  else {
3975  // We can get here with an alias template when the specialization contains
3976  // a pack expansion that does not match up with a parameter pack.
3977  assert((!IsTypeAlias || hasAnyPackExpansions(Args)) &&
3978  "Caller must compute aliased type");
3979  IsTypeAlias = false;
3980  CanonType = getCanonicalTemplateSpecializationType(Template, Args);
3981  }
3982 
3983  // Allocate the (non-canonical) template specialization type, but don't
3984  // try to unique it: these types typically have location information that
3985  // we don't unique and don't want to lose.
3986  void *Mem = Allocate(sizeof(TemplateSpecializationType) +
3987  sizeof(TemplateArgument) * Args.size() +
3988  (IsTypeAlias? sizeof(QualType) : 0),
3989  TypeAlignment);
3990  auto *Spec
3991  = new (Mem) TemplateSpecializationType(Template, Args, CanonType,
3992  IsTypeAlias ? Underlying : QualType());
3993 
3994  Types.push_back(Spec);
3995  return QualType(Spec, 0);
3996 }
3997 
3998 QualType ASTContext::getCanonicalTemplateSpecializationType(
3999  TemplateName Template, ArrayRef<TemplateArgument> Args) const {
4000  assert(!Template.getAsDependentTemplateName() &&
4001  "No dependent template names here!");
4002 
4003  // Look through qualified template names.
4004  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4005  Template = TemplateName(QTN->getTemplateDecl());
4006 
4007  // Build the canonical template specialization type.
4008  TemplateName CanonTemplate = getCanonicalTemplateName(Template);
4009  SmallVector<TemplateArgument, 4> CanonArgs;
4010  unsigned NumArgs = Args.size();
4011  CanonArgs.reserve(NumArgs);
4012  for (const TemplateArgument &Arg : Args)
4013  CanonArgs.push_back(getCanonicalTemplateArgument(Arg));
4014 
4015  // Determine whether this canonical template specialization type already
4016  // exists.
4017  llvm::FoldingSetNodeID ID;
4018  TemplateSpecializationType::Profile(ID, CanonTemplate,
4019  CanonArgs, *this);
4020 
4021  void *InsertPos = nullptr;
4022  TemplateSpecializationType *Spec
4023  = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4024 
4025  if (!Spec) {
4026  // Allocate a new canonical template specialization type.
4027  void *Mem = Allocate((sizeof(TemplateSpecializationType) +
4028  sizeof(TemplateArgument) * NumArgs),
4029  TypeAlignment);
4030  Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
4031  CanonArgs,
4032  QualType(), QualType());
4033  Types.push_back(Spec);
4034  TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
4035  }
4036 
4037  assert(Spec->isDependentType() &&
4038  "Non-dependent template-id type must have a canonical type");
4039  return QualType(Spec, 0);
4040 }
4041 
4042 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
4043  NestedNameSpecifier *NNS,
4044  QualType NamedType,
4045  TagDecl *OwnedTagDecl) const {
4046  llvm::FoldingSetNodeID ID;
4047  ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);
4048 
4049  void *InsertPos = nullptr;
4050  ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
4051  if (T)
4052  return QualType(T, 0);
4053 
4054  QualType Canon = NamedType;
4055  if (!Canon.isCanonical()) {
4056  Canon = getCanonicalType(NamedType);
4057  ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
4058  assert(!CheckT && "Elaborated canonical type broken");
4059  (void)CheckT;
4060  }
4061 
4062  T = new (*this, TypeAlignment)
4063  ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);
4064  Types.push_back(T);
4065  ElaboratedTypes.InsertNode(T, InsertPos);
4066  return QualType(T, 0);
4067 }
4068 
4069 QualType
4070 ASTContext::getParenType(QualType InnerType) const {
4071  llvm::FoldingSetNodeID ID;
4072  ParenType::Profile(ID, InnerType);
4073 
4074  void *InsertPos = nullptr;
4075  ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
4076  if (T)
4077  return QualType(T, 0);
4078 
4079  QualType Canon = InnerType;
4080  if (!Canon.isCanonical()) {
4081  Canon = getCanonicalType(InnerType);
4082  ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
4083  assert(!CheckT && "Paren canonical type broken");
4084  (void)CheckT;
4085  }
4086 
4087  T = new (*this, TypeAlignment) ParenType(InnerType, Canon);
4088  Types.push_back(T);
4089  ParenTypes.InsertNode(T, InsertPos);
4090  return QualType(T, 0);
4091 }
4092 
4093 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
4094  NestedNameSpecifier *NNS,
4095  const IdentifierInfo *Name,
4096  QualType Canon) const {
4097  if (Canon.isNull()) {
4098  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
4099  if (CanonNNS != NNS)
4100  Canon = getDependentNameType(Keyword, CanonNNS, Name);
4101  }
4102 
4103  llvm::FoldingSetNodeID ID;
4104  DependentNameType::Profile(ID, Keyword, NNS, Name);
4105 
4106  void *InsertPos = nullptr;
4107  DependentNameType *T
4108  = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
4109  if (T)
4110  return QualType(T, 0);
4111 
4112  T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon);
4113  Types.push_back(T);
4114  DependentNameTypes.InsertNode(T, InsertPos);
4115  return QualType(T, 0);
4116 }
4117 
4118 QualType
4119 ASTContext::getDependentTemplateSpecializationType(
4120  ElaboratedTypeKeyword Keyword,
4121  NestedNameSpecifier *NNS,
4122  const IdentifierInfo *Name,
4123  const TemplateArgumentListInfo &Args) const {
4124  // TODO: avoid this copy
4125  SmallVector<TemplateArgument, 16> ArgCopy;
4126  for (unsigned I = 0, E = Args.size(); I != E; ++I)
4127  ArgCopy.push_back(Args[I].getArgument());
4128  return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy);
4129 }
4130 
4131 QualType
4132 ASTContext::getDependentTemplateSpecializationType(
4133  ElaboratedTypeKeyword Keyword,
4134  NestedNameSpecifier *NNS,
4135  const IdentifierInfo *Name,
4136  ArrayRef<TemplateArgument> Args) const {
4137  assert((!NNS || NNS->isDependent()) &&
4138  "nested-name-specifier must be dependent");
4139 
4140  llvm::FoldingSetNodeID ID;
4141  DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
4142  Name, Args);
4143 
4144  void *InsertPos = nullptr;
4145  DependentTemplateSpecializationType *T
4146  = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4147  if (T)
4148  return QualType(T, 0);
4149 
4150  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
4151 
4152  ElaboratedTypeKeyword CanonKeyword = Keyword;
4153  if (Keyword == ETK_None) CanonKeyword = ETK_Typename;
4154 
4155  bool AnyNonCanonArgs = false;
4156  unsigned NumArgs = Args.size();
4157  SmallVector<TemplateArgument, 16> CanonArgs(NumArgs);
4158  for (unsigned I = 0; I != NumArgs; ++I) {
4159  CanonArgs[I] = getCanonicalTemplateArgument(Args[I]);
4160  if (!CanonArgs[I].structurallyEquals(Args[I]))
4161  AnyNonCanonArgs = true;
4162  }
4163 
4164  QualType Canon;
4165  if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
4166  Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
4167  Name,
4168  CanonArgs);
4169 
4170  // Find the insert position again.
4171  DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4172  }
4173 
4174  void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
4175  sizeof(TemplateArgument) * NumArgs),
4176  TypeAlignment);
4177  T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
4178  Name, Args, Canon);
4179  Types.push_back(T);
4180  DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
4181  return QualType(T, 0);
4182 }
4183 
4184 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
4185  TemplateArgument Arg;
4186  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
4187  QualType ArgType = getTypeDeclType(TTP);
4188  if (TTP->isParameterPack())
4189  ArgType = getPackExpansionType(ArgType, None);
4190 
4191  Arg = TemplateArgument(ArgType);
4192  } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
4193  Expr *E = new (*this) DeclRefExpr(
4194  NTTP, /*enclosing*/false,
4195  NTTP->getType().getNonLValueExprType(*this),
4196  Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());
4197 
4198  if (NTTP->isParameterPack())
4199  E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(),
4200  None);
4201  Arg = TemplateArgument(E);
4202  } else {
4203  auto *TTP = cast<TemplateTemplateParmDecl>(Param);
4204  if (TTP->isParameterPack())
4205  Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>());
4206  else
4207  Arg = TemplateArgument(TemplateName(TTP));
4208  }
4209 
4210  if (Param->isTemplateParameterPack())
4211  Arg = TemplateArgument::CreatePackCopy(*this, Arg);
4212 
4213  return Arg;
4214 }
4215 
4216 void
4217 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params,
4218  SmallVectorImpl<TemplateArgument> &Args) {
4219  Args.reserve(Args.size() + Params->size());
4220 
4221  for (NamedDecl *Param : *Params)
4222  Args.push_back(getInjectedTemplateArg(Param));
4223 }
4224 
4225 QualType ASTContext::getPackExpansionType(QualType Pattern,
4226  Optional<unsigned> NumExpansions) {
4227  llvm::FoldingSetNodeID ID;
4228  PackExpansionType::Profile(ID, Pattern, NumExpansions);
4229 
4230  assert(Pattern->containsUnexpandedParameterPack() &&
4231  "Pack expansions must expand one or more parameter packs");
4232  void *InsertPos = nullptr;
4233  PackExpansionType *T
4234  = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
4235  if (T)
4236  return QualType(T, 0);
4237 
4238  QualType Canon;
4239  if (!Pattern.isCanonical()) {
4240  Canon = getCanonicalType(Pattern);
4241  // The canonical type might not contain an unexpanded parameter pack, if it
4242  // contains an alias template specialization which ignores one of its
4243  // parameters.
4244  if (Canon->containsUnexpandedParameterPack()) {
4245  Canon = getPackExpansionType(Canon, NumExpansions);
4246 
4247  // Find the insert position again, in case we inserted an element into
4248  // PackExpansionTypes and invalidated our insert position.
4249  PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
4250  }
4251  }
4252 
4253  T = new (*this, TypeAlignment)
4254  PackExpansionType(Pattern, Canon, NumExpansions);
4255  Types.push_back(T);
4256  PackExpansionTypes.InsertNode(T, InsertPos);
4257  return QualType(T, 0);
4258 }
4259 
4260 /// CmpProtocolNames - Comparison predicate for sorting protocols
4261 /// alphabetically.
4262 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
4263  ObjCProtocolDecl *const *RHS) {
4264  return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName());
4265 }
4266 
4267 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
4268  if (Protocols.empty()) return true;
4269 
4270  if (Protocols[0]->getCanonicalDecl() != Protocols[0])
4271  return false;
4272 
4273  for (unsigned i = 1; i != Protocols.size(); ++i)
4274  if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
4275  Protocols[i]->getCanonicalDecl() != Protocols[i])
4276  return false;
4277  return true;
4278 }
4279 
4280 static void
4281 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
4282  // Sort protocols, keyed by name.
4283  llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames);
4284 
4285  // Canonicalize.
4286  for (ObjCProtocolDecl *&P : Protocols)
4287  P = P->getCanonicalDecl();
4288 
4289  // Remove duplicates.
4290  auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end());
4291  Protocols.erase(ProtocolsEnd, Protocols.end());
4292 }
4293 
4294 QualType ASTContext::getObjCObjectType(QualType BaseType,
4295  ObjCProtocolDecl * const *Protocols,
4296  unsigned NumProtocols) const {
4297  return getObjCObjectType(BaseType, {},
4298  llvm::makeArrayRef(Protocols, NumProtocols),
4299  /*isKindOf=*/false);
4300 }
4301 
4302 QualType ASTContext::getObjCObjectType(
4303  QualType baseType,
4304  ArrayRef<QualType> typeArgs,
4305  ArrayRef<ObjCProtocolDecl *> protocols,
4306  bool isKindOf) const {
4307  // If the base type is an interface and there aren't any protocols or
4308  // type arguments to add, then the interface type will do just fine.
4309  if (typeArgs.empty() && protocols.empty() && !isKindOf &&
4310  isa<ObjCInterfaceType>(baseType))
4311  return baseType;
4312 
4313  // Look in the folding set for an existing type.
4314  llvm::FoldingSetNodeID ID;
4315  ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf);
4316  void *InsertPos = nullptr;
4317  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
4318  return QualType(QT, 0);
4319 
4320  // Determine the type arguments to be used for canonicalization,
4321  // which may be explicitly specified here or written on the base
4322  // type.
4323  ArrayRef<QualType> effectiveTypeArgs = typeArgs;
4324  if (effectiveTypeArgs.empty()) {
4325  if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
4326  effectiveTypeArgs = baseObject->getTypeArgs();
4327  }
4328 
4329  // Build the canonical type, which has the canonical base type and a
4330  // sorted-and-uniqued list of protocols and the type arguments
4331  // canonicalized.
4332  QualType canonical;
4333  bool typeArgsAreCanonical = std::all_of(effectiveTypeArgs.begin(),
4334  effectiveTypeArgs.end(),
4335  [&](QualType type) {
4336  return type.isCanonical();
4337  });
4338  bool protocolsSorted = areSortedAndUniqued(protocols);
4339  if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
4340  // Determine the canonical type arguments.
4341  ArrayRef<QualType> canonTypeArgs;
4342  SmallVector<QualType, 4> canonTypeArgsVec;
4343  if (!typeArgsAreCanonical) {
4344  canonTypeArgsVec.reserve(effectiveTypeArgs.size());
4345  for (auto typeArg : effectiveTypeArgs)
4346  canonTypeArgsVec.push_back(getCanonicalType(typeArg));
4347  canonTypeArgs = canonTypeArgsVec;
4348  } else {
4349  canonTypeArgs = effectiveTypeArgs;
4350  }
4351 
4352  ArrayRef<ObjCProtocolDecl *> canonProtocols;
4353  SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
4354  if (!protocolsSorted) {
4355  canonProtocolsVec.append(protocols.begin(), protocols.end());
4356  SortAndUniqueProtocols(canonProtocolsVec);
4357  canonProtocols = canonProtocolsVec;
4358  } else {
4359  canonProtocols = protocols;
4360  }
4361 
4362  canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs,
4363  canonProtocols, isKindOf);
4364 
4365  // Regenerate InsertPos.
4366  ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
4367  }
4368 
4369  unsigned size = sizeof(ObjCObjectTypeImpl);
4370  size += typeArgs.size() * sizeof(QualType);
4371  size += protocols.size() * sizeof(ObjCProtocolDecl *);
4372  void *mem = Allocate(size, TypeAlignment);
4373  auto *T =
4374  new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
4375  isKindOf);
4376 
4377  Types.push_back(T);
4378  ObjCObjectTypes.InsertNode(T, InsertPos);
4379  return QualType(T, 0);
4380 }
4381 
4382 /// Apply Objective-C protocol qualifiers to the given type.
4383 /// If this is for the canonical type of a type parameter, we can apply
4384 /// protocol qualifiers on the ObjCObjectPointerType.
4385 QualType
4386 ASTContext::applyObjCProtocolQualifiers(QualType type,
4387  ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
4388  bool allowOnPointerType) const {
4389  hasError = false;
4390 
4391  if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) {
4392  return getObjCTypeParamType(objT->getDecl(), protocols);
4393  }
4394 
4395  // Apply protocol qualifiers to ObjCObjectPointerType.
4396  if (allowOnPointerType) {
4397  if (const auto *objPtr =
4398  dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) {
4399  const ObjCObjectType *objT = objPtr->getObjectType();
4400  // Merge protocol lists and construct ObjCObjectType.
4401  SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
4402  protocolsVec.append(objT->qual_begin(),
4403  objT->qual_end());
4404  protocolsVec.append(protocols.begin(), protocols.end());
4405  ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
4406  type = getObjCObjectType(
4407  objT->getBaseType(),
4408  objT->getTypeArgsAsWritten(),
4409  protocols,
4410  objT->isKindOfTypeAsWritten());
4411  return getObjCObjectPointerType(type);
4412  }
4413  }
4414 
4415  // Apply protocol qualifiers to ObjCObjectType.
4416  if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){
4417  // FIXME: Check for protocols to which the class type is already
4418  // known to conform.
4419 
4420  return getObjCObjectType(objT->getBaseType(),
4421  objT->getTypeArgsAsWritten(),
4422  protocols,
4423  objT->isKindOfTypeAsWritten());
4424  }
4425 
4426  // If the canonical type is ObjCObjectType, ...
4427  if (type->isObjCObjectType()) {
4428  // Silently overwrite any existing protocol qualifiers.
4429  // TODO: determine whether that's the right thing to do.
4430 
4431  // FIXME: Check for protocols to which the class type is already
4432  // known to conform.
4433  return getObjCObjectType(type, {}, protocols, false);
4434  }
4435 
4436  // id<protocol-list>
4437  if (type->isObjCIdType()) {
4438  const auto *objPtr = type->castAs<ObjCObjectPointerType>();
4439  type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols,
4440  objPtr->isKindOfType());
4441  return getObjCObjectPointerType(type);
4442  }
4443 
4444  // Class<protocol-list>
4445  if (type->isObjCClassType()) {
4446  const auto *objPtr = type->castAs<ObjCObjectPointerType>();
4447  type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols,
4448  objPtr->isKindOfType());
4449  return getObjCObjectPointerType(type);
4450  }
4451 
4452  hasError = true;
4453  return type;
4454 }
4455 
4456 QualType
4457 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
4458  ArrayRef<ObjCProtocolDecl *> protocols,
4459  QualType Canonical) const {
4460  // Look in the folding set for an existing type.
4461  llvm::FoldingSetNodeID ID;
4462  ObjCTypeParamType::Profile(ID, Decl, protocols);
4463  void *InsertPos = nullptr;
4464  if (ObjCTypeParamType *TypeParam =
4465  ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
4466  return QualType(TypeParam, 0);
4467 
4468  if (Canonical.isNull()) {
4469  // We canonicalize to the underlying type.
4470  Canonical = getCanonicalType(Decl->getUnderlyingType());
4471  if (!protocols.empty()) {
4472  // Apply the protocol qualifiers.
4473  bool hasError;
4474  Canonical = applyObjCProtocolQualifiers(Canonical, protocols, hasError,
4475  true/*allowOnPointerType*/);
4476  assert(!hasError && "Error when applying protocol qualifiers to bound type");
4477  }
4478  }
4479 
4480  unsigned size = sizeof(ObjCTypeParamType);
4481  size += protocols.size() * sizeof(ObjCProtocolDecl *);
4482  void *mem = Allocate(size, TypeAlignment);
4483  auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);
4484 
4485  Types.push_back(newType);
4486  ObjCTypeParamTypes.InsertNode(newType, InsertPos);
4487  return QualType(newType, 0);
4488 }
4489 
4490 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
4491 /// protocol list adopt all protocols in QT's qualified-id protocol
4492 /// list.
4493 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
4494  ObjCInterfaceDecl *IC) {
4495  if (!QT->isObjCQualifiedIdType())
4496  return false;
4497 
4498  if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) {
4499  // If both the right and left sides have qualifiers.
4500  for (auto *Proto : OPT->quals()) {
4501  if (!IC->ClassImplementsProtocol(Proto, false))
4502  return false;
4503  }
4504  return true;
4505  }
4506  return false;
4507 }
4508 
4509 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
4510 /// QT's qualified-id protocol list adopt all protocols in IDecl's list
4511 /// of protocols.
4512 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
4513  ObjCInterfaceDecl *IDecl) {
4514  if (!QT->isObjCQualifiedIdType())
4515  return false;
4516  const auto *OPT = QT->getAs<ObjCObjectPointerType>();
4517  if (!OPT)
4518  return false;
4519  if (!IDecl->hasDefinition())
4520  return false;
4521  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
4522  CollectInheritedProtocols(IDecl, InheritedProtocols);
4523  if (InheritedProtocols.empty())
4524  return false;
4525  // Check that if every protocol in list of id<plist> conforms to a protocol
4526  // of IDecl's, then bridge casting is ok.
4527  bool Conforms = false;
4528  for (auto *Proto : OPT->quals()) {
4529  Conforms = false;
4530  for (auto *PI : InheritedProtocols) {
4531  if (ProtocolCompatibleWithProtocol(Proto, PI)) {
4532  Conforms = true;
4533  break;
4534  }
4535  }
4536  if (!Conforms)
4537  break;
4538  }
4539  if (Conforms)
4540  return true;
4541 
4542  for (auto *PI : InheritedProtocols) {
4543  // If both the right and left sides have qualifiers.
4544  bool Adopts = false;
4545  for (auto *Proto : OPT->quals()) {
4546  // return 'true' if 'PI' is in the inheritance hierarchy of Proto
4547  if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto)))
4548  break;
4549  }
4550  if (!Adopts)
4551  return false;
4552  }
4553  return true;
4554 }
4555 
4556 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
4557 /// the given object type.
4558 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
4559  llvm::FoldingSetNodeID ID;
4560  ObjCObjectPointerType::Profile(ID, ObjectT);
4561 
4562  void *InsertPos = nullptr;
4563  if (ObjCObjectPointerType *QT =
4564  ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
4565  return QualType(QT, 0);
4566 
4567  // Find the canonical object type.
4568  QualType Canonical;
4569  if (!ObjectT.isCanonical()) {
4570  Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));
4571 
4572  // Regenerate InsertPos.
4573  ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
4574  }
4575 
4576  // No match.
4577  void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
4578  auto *QType =
4579  new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
4580 
4581  Types.push_back(QType);
4582  ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
4583  return QualType(QType, 0);
4584 }
4585 
4586 /// getObjCInterfaceType - Return the unique reference to the type for the
4587 /// specified ObjC interface decl. The list of protocols is optional.
4588 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
4589  ObjCInterfaceDecl *PrevDecl) const {
4590  if (Decl->TypeForDecl)
4591  return QualType(Decl->TypeForDecl, 0);
4592 
4593  if (PrevDecl) {
4594  assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
4595  Decl->TypeForDecl = PrevDecl->TypeForDecl;
4596  return QualType(PrevDecl->TypeForDecl, 0);
4597  }
4598 
4599  // Prefer the definition, if there is one.
4600  if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
4601  Decl = Def;
4602 
4603  void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
4604  auto *T = new (Mem) ObjCInterfaceType(Decl);
4605  Decl->TypeForDecl = T;
4606  Types.push_back(T);
4607  return QualType(T, 0);
4608 }
4609 
4610 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
4611 /// TypeOfExprType ASTs (since expressions are never shared). For example,
4612 /// multiple declarations that refer to "typeof(x)" all contain different
4613 /// DeclRefExprs. This doesn't affect the type checker, since it operates
4614 /// on canonical types (which are always unique).
4615 QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
4616  TypeOfExprType *toe;
4617  if (tofExpr->isTypeDependent()) {
4618  llvm::FoldingSetNodeID ID;
4619  DependentTypeOfExprType::Profile(ID, *this, tofExpr);
4620 
4621  void *InsertPos = nullptr;
4622  DependentTypeOfExprType *Canon
4623  = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
4624  if (Canon) {
4625  // We already have a "canonical" version of an identical, dependent
4626  // typeof(expr) type. Use that as our canonical type.
4627  toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr,
4628  QualType((TypeOfExprType*)Canon, 0));
4629  } else {
4630  // Build a new, canonical typeof(expr) type.
4631  Canon
4632  = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr);
4633  DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
4634  toe = Canon;
4635  }
4636  } else {
4637  QualType Canonical = getCanonicalType(tofExpr->getType());
4638  toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical);
4639  }
4640  Types.push_back(toe);
4641  return QualType(toe, 0);
4642 }
4643 
4644 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
4645 /// TypeOfType nodes. The only motivation to unique these nodes would be
4646 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
4647 /// an issue. This doesn't affect the type checker, since it operates
4648 /// on canonical types (which are always unique).
4649 QualType ASTContext::getTypeOfType(QualType tofType) const {
4650  QualType Canonical = getCanonicalType(tofType);
4651  auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical);
4652  Types.push_back(tot);
4653  return QualType(tot, 0);
4654 }
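// [Editorial usage sketch - not part of the original ASTContext.cpp.] GNU
// 'typeof(int)' keeps the spelling as written but canonicalizes to 'int':
#include "clang/AST/ASTContext.h"
#include <cassert>

static clang::QualType typeofInt(clang::ASTContext &Ctx) {
  clang::QualType T = Ctx.getTypeOfType(Ctx.IntTy);
  assert(Ctx.getCanonicalType(T) == Ctx.IntTy);
  return T;
}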
4655 
4656 /// Unlike many "get<Type>" functions, we don't unique DecltypeType
4657 /// nodes. This would never be helpful, since each such type has its own
4658 /// expression, and would not give a significant memory saving, since there
4659 /// is an Expr tree under each such type.
4660 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
4661  DecltypeType *dt;
4662 
4663  // C++11 [temp.type]p2:
4664  // If an expression e involves a template parameter, decltype(e) denotes a
4665  // unique dependent type. Two such decltype-specifiers refer to the same
4666  // type only if their expressions are equivalent (14.5.6.1).
4667  if (e->isInstantiationDependent()) {
4668  llvm::FoldingSetNodeID ID;
4669  DependentDecltypeType::Profile(ID, *this, e);
4670 
4671  void *InsertPos = nullptr;
4672  DependentDecltypeType *Canon
4673  = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
4674  if (!Canon) {
4675  // Build a new, canonical decltype(expr) type.
4676  Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e);
4677  DependentDecltypeTypes.InsertNode(Canon, InsertPos);
4678  }
4679  dt = new (*this, TypeAlignment)
4680  DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0));
4681  } else {
4682  dt = new (*this, TypeAlignment)
4683  DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType));
4684  }
4685  Types.push_back(dt);
4686  return QualType(dt, 0);
4687 }
4688 
4689 /// getUnaryTransformationType - We don't unique these, since the memory
4690 /// savings are minimal and these are rare.
4691 QualType ASTContext::getUnaryTransformType(QualType BaseType,
4692  QualType UnderlyingType,
4693  UnaryTransformType::UTTKind Kind)
4694  const {
4695  UnaryTransformType *ut = nullptr;
4696 
4697  if (BaseType->isDependentType()) {
4698  // Look in the folding set for an existing type.
4699  llvm::FoldingSetNodeID ID;
4700  DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind);
4701 
4702  void *InsertPos = nullptr;
4703  DependentUnaryTransformType *Canon
4704  = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);
4705 
4706  if (!Canon) {
4707  // Build a new, canonical __underlying_type(type) type.
4708  Canon = new (*this, TypeAlignment)
4709  DependentUnaryTransformType(*this, getCanonicalType(BaseType),
4710  Kind);
4711  DependentUnaryTransformTypes.InsertNode(Canon, InsertPos);
4712  }
4713  ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
4714  QualType(), Kind,
4715  QualType(Canon, 0));
4716  } else {
4717  QualType CanonType = getCanonicalType(UnderlyingType);
4718  ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
4719  UnderlyingType, Kind,
4720  CanonType);
4721  }
4722  Types.push_back(ut);
4723  return QualType(ut, 0);
4724 }
4725 
4726 /// getAutoType - Return the uniqued reference to the 'auto' type which has been
4727 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the
4728 /// canonical deduced-but-dependent 'auto' type.
4729 QualType ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
4730  bool IsDependent) const {
4731  if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && !IsDependent)
4732  return getAutoDeductType();
4733 
4734  // Look in the folding set for an existing type.
4735  void *InsertPos = nullptr;
4736  llvm::FoldingSetNodeID ID;
4737  AutoType::Profile(ID, DeducedType, Keyword, IsDependent);
4738  if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
4739  return QualType(AT, 0);
4740 
4741  auto *AT = new (*this, TypeAlignment)
4742  AutoType(DeducedType, Keyword, IsDependent);
4743  Types.push_back(AT);
4744  if (InsertPos)
4745  AutoTypes.InsertNode(AT, InsertPos);
4746  return QualType(AT, 0);
4747 }
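// [Editorial usage sketch - not part of the original ASTContext.cpp.] The
// undeduced 'auto' pattern is a singleton: requesting it through getAutoType
// hands back the lazily created AutoDeductTy.
#include "clang/AST/ASTContext.h"
#include <cassert>

static void checkUndeducedAuto(clang::ASTContext &Ctx) {
  using namespace clang;
  QualType A = Ctx.getAutoType(QualType(), AutoTypeKeyword::Auto,
                               /*IsDependent=*/false);
  assert(A == Ctx.getAutoDeductType());
}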
4748 
4749 /// Return the uniqued reference to the deduced template specialization type
4750 /// which has been deduced to the given type, or to the canonical undeduced
4751 /// such type, or the canonical deduced-but-dependent such type.
4752 QualType ASTContext::getDeducedTemplateSpecializationType(
4753  TemplateName Template, QualType DeducedType, bool IsDependent) const {
4754  // Look in the folding set for an existing type.
4755  void *InsertPos = nullptr;
4756  llvm::FoldingSetNodeID ID;
4757  DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType,
4758  IsDependent);
4759  if (DeducedTemplateSpecializationType *DTST =
4760  DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
4761  return QualType(DTST, 0);
4762 
4763  auto *DTST = new (*this, TypeAlignment)
4764  DeducedTemplateSpecializationType(Template, DeducedType, IsDependent);
4765  Types.push_back(DTST);
4766  if (InsertPos)
4767  DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos);
4768  return QualType(DTST, 0);
4769 }
4770 
4771 /// getAtomicType - Return the uniqued reference to the atomic type for
4772 /// the given value type.
4774  // Unique pointers, to guarantee there is only one pointer of a particular
4775  // structure.
4776  llvm::FoldingSetNodeID ID;
4777  AtomicType::Profile(ID, T);
4778 
4779  void *InsertPos = nullptr;
4780  if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
4781  return QualType(AT, 0);
4782 
4783  // If the atomic value type isn't canonical, this won't be a canonical type
4784  // either, so fill in the canonical type field.
4785  QualType Canonical;
4786  if (!T.isCanonical()) {
4787  Canonical = getAtomicType(getCanonicalType(T));
4788 
4789  // Get the new insert position for the node we care about.
4790  AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
4791  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4792  }
4793  auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical);
4794  Types.push_back(New);
4795  AtomicTypes.InsertNode(New, InsertPos);
4796  return QualType(New, 0);
4797 }
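// [Editorial usage sketch - not part of the original ASTContext.cpp.]
// '_Atomic(int)' built twice yields the same uniqued node:
#include "clang/AST/ASTContext.h"
#include <cassert>

static clang::QualType atomicInt(clang::ASTContext &Ctx) {
  clang::QualType A = Ctx.getAtomicType(Ctx.IntTy);
  assert(A == Ctx.getAtomicType(Ctx.IntTy) && "AtomicTypes is a FoldingSet");
  return A;
}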
4798 
4799 /// getAutoDeductType - Get type pattern for deducing against 'auto'.
4800 QualType ASTContext::getAutoDeductType() const {
4801  if (AutoDeductTy.isNull())
4802  AutoDeductTy = QualType(
4803  new (*this, TypeAlignment) AutoType(QualType(), AutoTypeKeyword::Auto,
4804  /*dependent*/false),
4805  0);
4806  return AutoDeductTy;
4807 }
4808 
4809 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
4811  if (AutoRRefDeductTy.isNull())
4813  assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
4814  return AutoRRefDeductTy;
4815 }
4816 
4817 /// getTagDeclType - Return the unique reference to the type for the
4818 /// specified TagDecl (struct/union/class/enum) decl.
4819 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
4820  assert(Decl);
4821  // FIXME: What is the design on getTagDeclType when it requires casting
4822  // away const? mutable?
4823  return getTypeDeclType(const_cast<TagDecl*>(Decl));
4824 }
4825 
4826 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
4827 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
4828 /// needs to agree with the definition in <stddef.h>.
4829 CanQualType ASTContext::getSizeType() const {
4830  return getFromTargetType(Target->getSizeType());
4831 }
4832 
4833 /// Return the unique signed counterpart of the integer type
4834 /// corresponding to size_t.
4835 CanQualType ASTContext::getSignedSizeType() const {
4836  return getFromTargetType(Target->getSignedSizeType());
4837 }
4838 
4839 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
4840 CanQualType ASTContext::getIntMaxType() const {
4841  return getFromTargetType(Target->getIntMaxType());
4842 }
4843 
4844 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
4845 CanQualType ASTContext::getUIntMaxType() const {
4846  return getFromTargetType(Target->getUIntMaxType());
4847 }
4848 
4849 /// getSignedWCharType - Return the type of "signed wchar_t".
4850 /// Used when in C++, as a GCC extension.
4851 QualType ASTContext::getSignedWCharType() const {
4852  // FIXME: derive from "Target" ?
4853  return WCharTy;
4854 }
4855 
4856 /// getUnsignedWCharType - Return the type of "unsigned wchar_t".
4857 /// Used when in C++, as a GCC extension.
4858 QualType ASTContext::getUnsignedWCharType() const {
4859  // FIXME: derive from "Target" ?
4860  return UnsignedIntTy;
4861 }
4862 
4863 QualType ASTContext::getIntPtrType() const {
4864  return getFromTargetType(Target->getIntPtrType());
4865 }
4866 
4867 QualType ASTContext::getUIntPtrType() const {
4868  return getCorrespondingUnsignedType(getIntPtrType());
4869 }
4870 
4871 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
4872 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
4873 QualType ASTContext::getPointerDiffType() const {
4874  return getFromTargetType(Target->getPtrDiffType(0));
4875 }
4876 
4877 /// Return the unique unsigned counterpart of "ptrdiff_t"
4878 /// integer type. The standard (C11 7.21.6.1p7) refers to this type
4879 /// in the definition of %tu format specifier.
4880 QualType ASTContext::getUnsignedPointerDiffType() const {
4881  return getFromTargetType(Target->getUnsignedPtrDiffType(0));
4882 }
4883 
4884 /// Return the unique type for "pid_t" defined in
4885 /// <sys/types.h>. We need this to compute the correct type for vfork().
4886 QualType ASTContext::getProcessIDType() const {
4887  return getFromTargetType(Target->getProcessIDType());
4888 }
4889 
4890 //===----------------------------------------------------------------------===//
4891 // Type Operators
4892 //===----------------------------------------------------------------------===//
4893 
4894 CanQualType ASTContext::getCanonicalParamType(QualType T) const {
4895  // Push qualifiers into arrays, and then discard any remaining
4896  // qualifiers.
4897  T = getCanonicalType(T);
4898  T = getVariableArrayDecayedType(T);
4899  const Type *Ty = T.getTypePtr();
4900  QualType Result;
4901  if (isa<ArrayType>(Ty)) {
4902  Result = getArrayDecayedType(QualType(Ty,0));
4903  } else if (isa<FunctionType>(Ty)) {
4904  Result = getPointerType(QualType(Ty, 0));
4905  } else {
4906  Result = QualType(Ty, 0);
4907  }
4908 
4909  return CanQualType::CreateUnsafe(Result);
4910 }
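// [Editorial usage sketch - not part of the original ASTContext.cpp.]
// Parameter canonicalization follows C's adjustment rules: arrays decay to
// pointers and function types to function pointers, with top-level
// qualifiers dropped. For example, 'int[10]' becomes 'int *':
#include "clang/AST/ASTContext.h"
#include "llvm/ADT/APInt.h"
#include <cassert>

static void checkParamDecay(clang::ASTContext &Ctx) {
  using namespace clang;
  QualType Arr = Ctx.getConstantArrayType(Ctx.IntTy, llvm::APInt(32, 10),
                                          ArrayType::Normal,
                                          /*IndexTypeQuals=*/0);
  CanQualType P = Ctx.getCanonicalParamType(Arr);
  assert(Ctx.hasSameType(P, Ctx.getPointerType(Ctx.IntTy)));
}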
4911 
4912 QualType ASTContext::getUnqualifiedArrayType(QualType type,
4913  Qualifiers &quals) {
4914  SplitQualType splitType = type.getSplitUnqualifiedType();
4915 
4916  // FIXME: getSplitUnqualifiedType() actually walks all the way to
4917  // the unqualified desugared type and then drops it on the floor.
4918  // We then have to strip that sugar back off with
4919  // getUnqualifiedDesugaredType(), which is silly.
4920  const auto *AT =
4921  dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());
4922 
4923  // If we don't have an array, just use the results in splitType.
4924  if (!AT) {
4925  quals = splitType.Quals;
4926  return QualType(splitType.Ty, 0);
4927  }
4928 
4929  // Otherwise, recurse on the array's element type.
4930  QualType elementType = AT->getElementType();
4931  QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);
4932 
4933  // If that didn't change the element type, AT has no qualifiers, so we
4934  // can just use the results in splitType.
4935  if (elementType == unqualElementType) {
4936  assert(quals.empty()); // from the recursive call
4937  quals = splitType.Quals;
4938  return QualType(splitType.Ty, 0);
4939  }
4940 
4941  // Otherwise, add in the qualifiers from the outermost type, then
4942  // build the type back up.
4943  quals.addConsistentQualifiers(splitType.Quals);
4944 
4945  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
4946  return getConstantArrayType(unqualElementType, CAT->getSize(),
4947  CAT->getSizeModifier(), 0);
4948  }
4949 
4950  if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) {
4951  return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
4952  }
4953 
4954  if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) {
4955  return getVariableArrayType(unqualElementType,
4956  VAT->getSizeExpr(),
4957  VAT->getSizeModifier(),
4958  VAT->getIndexTypeCVRQualifiers(),
4959  VAT->getBracketsRange());
4960  }
4961 
4962  const auto *DSAT = cast<DependentSizedArrayType>(AT);
4963  return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
4964  DSAT->getSizeModifier(), 0,
4965  SourceRange());
4966 }
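// [Editorial usage sketch - not part of the original ASTContext.cpp.]
// Qualifiers buried in the element type are hoisted into 'quals': for
// 'const int[10]' the helper reports 'const' and hands back plain 'int[10]'.
#include "clang/AST/ASTContext.h"
#include "llvm/ADT/APInt.h"
#include <cassert>

static void splitArrayQuals(clang::ASTContext &Ctx) {
  using namespace clang;
  QualType ConstInt = Ctx.IntTy;
  ConstInt.addConst();
  QualType ConstArr = Ctx.getConstantArrayType(ConstInt, llvm::APInt(32, 10),
                                               ArrayType::Normal, 0);
  Qualifiers Quals;
  QualType Unqual = Ctx.getUnqualifiedArrayType(ConstArr, Quals);
  assert(Quals.hasConst());
  assert(Ctx.hasSameType(Ctx.getAsConstantArrayType(Unqual)->getElementType(),
                         Ctx.IntTy));
}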
4967 
4968 /// UnwrapSimilarPointerTypes - If T1 and T2 are pointer types that
4969 /// may be similar (C++ 4.4), replaces T1 and T2 with the type that
4970 /// they point to and return true. If T1 and T2 aren't pointer types
4971 /// or pointer-to-member types, or if they are not similar at this
4972 /// level, returns false and leaves T1 and T2 unchanged. Top-level
4973 /// qualifiers on T1 and T2 are ignored. This function will typically
4974 /// be called in a loop that successively "unwraps" pointer and
4975 /// pointer-to-member types to compare them at each level.
4976 bool ASTContext::UnwrapSimilarPointerTypes(QualType &T1, QualType &T2) {
4977  const auto *T1PtrType = T1->getAs<PointerType>();
4978  const auto *T2PtrType = T2->getAs<PointerType>();
4979  if (T1PtrType && T2PtrType) {
4980  T1 = T1PtrType->getPointeeType();
4981  T2 = T2PtrType->getPointeeType();
4982  return true;
4983  }
4984 
4985  const auto *T1MPType = T1->getAs<MemberPointerType>();
4986  const auto *T2MPType = T2->getAs<MemberPointerType>();
4987  if (T1MPType && T2MPType &&
4988  hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
4989  QualType(T2MPType->getClass(), 0))) {
4990  T1 = T1MPType->getPointeeType();
4991  T2 = T2MPType->getPointeeType();
4992  return true;
4993  }
4994 
4995  if (getLangOpts().ObjC1) {
4996  const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
4997  const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
4998  if (T1OPType && T2OPType) {
4999  T1 = T1OPType->getPointeeType();
5000  T2 = T2OPType->getPointeeType();
5001  return true;
5002  }
5003  }
5004 
5005  // FIXME: Block pointers, too?
5006 
5007  return false;
5008 }
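// Illustrative sketch, not part of the upstream file: the typical "unwrap in
// a loop" pattern the comment above describes, assuming 'Ctx' is an
// ASTContext and T1/T2 are the two types being compared.
static void unwrapPointerLevelsExample(ASTContext &Ctx, QualType T1,
                                       QualType T2) {
  while (Ctx.UnwrapSimilarPointerTypes(T1, T2)) {
    // T1 and T2 now denote the pointee types; a caller would compare
    // qualifiers at this level before unwrapping further.
  }
}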
5009 
5010 DeclarationNameInfo
5011 ASTContext::getNameForTemplate(TemplateName Name,
5012  SourceLocation NameLoc) const {
5013  switch (Name.getKind()) {
5014  case TemplateName::QualifiedTemplate:
5015  case TemplateName::Template:
5016  // DNInfo work in progress: CHECKME: what about DNLoc?
5017  return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
5018  NameLoc);
5019 
5020  case TemplateName::OverloadedTemplate: {
5021  OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
5022  // DNInfo work in progress: CHECKME: what about DNLoc?
5023  return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
5024  }
5025 
5026  case TemplateName::DependentTemplate: {
5027  DependentTemplateName *DTN = Name.getAsDependentTemplateName();
5028  DeclarationName DName;
5029  if (DTN->isIdentifier()) {
5030  DName = DeclarationNames.getIdentifier(DTN->getIdentifier());
5031  return DeclarationNameInfo(DName, NameLoc);
5032  } else {
5033  DName = DeclarationNames.getCXXOperatorName(DTN->getOperator());
5034  // DNInfo work in progress: FIXME: source locations?
5035  DeclarationNameLoc DNLoc;
5036  DNLoc.CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding();
5037  DNLoc.CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding();
5038  return DeclarationNameInfo(DName, NameLoc, DNLoc);
5039  }
5040  }
5041 
5042  case TemplateName::SubstTemplateTemplateParm: {
5043  SubstTemplateTemplateParmStorage *subst
5044  = Name.getAsSubstTemplateTemplateParm();
5045  return DeclarationNameInfo(subst->getParameter()->getDeclName(),
5046  NameLoc);
5047  }
5048 
5049  case TemplateName::SubstTemplateTemplateParmPack: {
5050  SubstTemplateTemplateParmPackStorage *subst
5051  = Name.getAsSubstTemplateTemplateParmPack();
5052  return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
5053  NameLoc);
5054  }
5055  }
5056 
5057  llvm_unreachable("bad template name kind!");
5058 }
5059 
5060 TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
5061  switch (Name.getKind()) {
5062  case TemplateName::QualifiedTemplate:
5063  case TemplateName::Template: {
5064  TemplateDecl *Template = Name.getAsTemplateDecl();
5065  if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template))
5066  Template = getCanonicalTemplateTemplateParmDecl(TTP);
5067 
5068  // The canonical template name is the canonical template declaration.
5069  return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
5070  }
5071 
5072  case TemplateName::OverloadedTemplate:
5073  llvm_unreachable("cannot canonicalize overloaded template");
5074 
5075  case TemplateName::DependentTemplate: {
5076  DependentTemplateName *DTN = Name.getAsDependentTemplateName();
5077  assert(DTN && "Non-dependent template names must refer to template decls.");
5078  return DTN->CanonicalTemplateName;
5079  }
5080 
5081  case TemplateName::SubstTemplateTemplateParm: {
5082  SubstTemplateTemplateParmStorage *subst
5083  = Name.getAsSubstTemplateTemplateParm();
5084  return getCanonicalTemplateName(subst->getReplacement());
5085  }
5086 
5087  case TemplateName::SubstTemplateTemplateParmPack: {
5088  SubstTemplateTemplateParmPackStorage *subst
5089  = Name.getAsSubstTemplateTemplateParmPack();
5090  TemplateTemplateParmDecl *canonParameter
5091  = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack());
5092  TemplateArgument canonArgPack
5093  = getCanonicalTemplateArgument(subst->getArgumentPack());
5094  return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack);
5095  }
5096  }
5097 
5098  llvm_unreachable("bad template name!");
5099 }
5100 
5101 bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) {
5102  X = getCanonicalTemplateName(X);
5103  Y = getCanonicalTemplateName(Y);
5104  return X.getAsVoidPointer() == Y.getAsVoidPointer();
5105 }
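// Worked note, not part of the upstream file: because canonical
// TemplateNames are uniqued, hasSameTemplateName reduces "do X and Y name
// the same template?" to a pointer comparison; e.g. a qualified spelling
// 'ns::Vec' and a plain 'Vec' that refer to the same template compare
// equal here, since both canonicalize to the same TemplateDecl.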
5106 
5107 TemplateArgument
5108 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
5109  switch (Arg.getKind()) {
5110  case TemplateArgument::Null:
5111  return Arg;
5112 
5113  case TemplateArgument::Expression:
5114  return Arg;
5115 
5116  case TemplateArgument::Declaration: {
5117  auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
5118  return TemplateArgument(D, Arg.getParamTypeForDecl());
5119  }
5120 
5121  case TemplateArgument::NullPtr:
5122  return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
5123  /*isNullPtr*/true);
5124 
5125  case TemplateArgument::Template:
5126  return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));
5127 
5128  case TemplateArgument::TemplateExpansion:
5129  return TemplateArgument(getCanonicalTemplateName(
5130  Arg.getAsTemplateOrTemplatePattern()),
5131  Arg.getNumTemplateExpansions());
5132 
5133  case TemplateArgument::Integral:
5134  return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));
5135 
5136  case TemplateArgument::Type:
5137  return TemplateArgument(getCanonicalType(Arg.getAsType()));
5138 
5139  case TemplateArgument::Pack: {
5140  if (Arg.pack_size() == 0)
5141  return Arg;
5142 
5143  auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()];
5144  unsigned Idx = 0;
5145  for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
5146  AEnd = Arg.pack_end();
5147  A != AEnd; (void)++A, ++Idx)
5148  CanonArgs[Idx] = getCanonicalTemplateArgument(*A);
5149 
5150  return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size()));
5151  }
5152  }
5153 
5154  // Silence GCC warning
5155  llvm_unreachable("Unhandled template argument kind");
5156 }
5157 
5158 NestedNameSpecifier *
5159 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
5160  if (!NNS)
5161  return nullptr;
5162 
5163  switch (NNS->getKind()) {
5164  case NestedNameSpecifier::Identifier:
5165  // Canonicalize the prefix but keep the identifier the same.
5166  return NestedNameSpecifier::Create(*this,
5167  getCanonicalNestedNameSpecifier(NNS->getPrefix()),
5168  NNS->getAsIdentifier());
5169 
5170  case NestedNameSpecifier::Namespace:
5171  // A namespace is canonical; build a nested-name-specifier with
5172  // this namespace and no prefix.
5173  return NestedNameSpecifier::Create(*this, nullptr,
5174  NNS->getAsNamespace()->getOriginalNamespace());
5175 
5176  case NestedNameSpecifier::NamespaceAlias:
5177  // A namespace is canonical; build a nested-name-specifier with
5178  // this namespace and no prefix.
5179  return NestedNameSpecifier::Create(*this, nullptr,
5180  NNS->getAsNamespaceAlias()->getNamespace()
5181  ->getOriginalNamespace());
5182 
5183  case NestedNameSpecifier::TypeSpec:
5184  case NestedNameSpecifier::TypeSpecWithTemplate: {
5185  QualType T = getCanonicalType(QualType(NNS->getAsType(), 0));
5186 
5187  // If we have some kind of dependent-named type (e.g., "typename T::type"),
5188  // break it apart into its prefix and identifier, then reconstitute those
5189  // as the canonical nested-name-specifier. This is required to canonicalize
5190  // a dependent nested-name-specifier involving typedefs of dependent-name
5191  // types, e.g.,
5192  // typedef typename T::type T1;
5193  // typedef typename T1::type T2;
5194  if (const auto *DNT = T->getAs<DependentNameType>())
5195  return NestedNameSpecifier::Create(*this, DNT->getQualifier(),
5196  const_cast<IdentifierInfo *>(DNT->getIdentifier()));
5197 
5198  // Otherwise, just canonicalize the type, and force it to be a TypeSpec.
5199  // FIXME: Why are TypeSpec and TypeSpecWithTemplate distinct in the
5200  // first place?
5201  return NestedNameSpecifier::Create(*this, nullptr, false,
5202  const_cast<Type *>(T.getTypePtr()));
5203  }
5204 
5205  case NestedNameSpecifier::Global:
5206  case NestedNameSpecifier::Super:
5207  // The global specifier and __super specifier are canonical and unique.
5208  return NNS;
5209  }
5210 
5211  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
5212 }
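// Worked example, not part of the upstream file, continuing the typedefs in
// the comment above: the specifier 'T1::' desugars to the dependent name
// 'T::type', so it is re-split into the prefix 'T::' plus the identifier
// 'type'. Both 'T1::' and a directly written 'T::type::' therefore
// canonicalize to the same nested-name-specifier.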
5213 
5214 const ArrayType *ASTContext::getAsArrayType(QualType T) const {
5215  // Handle the non-qualified case efficiently.
5216  if (!T.hasLocalQualifiers()) {
5217  // Handle the common positive case fast.
5218  if (const auto *AT = dyn_cast<ArrayType>(T))
5219  return AT;
5220  }
5221 
5222  // Handle the common negative case fast.
5223  if (!isa<ArrayType>(T.getCanonicalType()))
5224  return nullptr;
5225 
5226  // Apply any qualifiers from the array type to the element type. This
5227  // implements C99 6.7.3p8: "If the specification of an array type includes
5228  // any type qualifiers, the element type is so qualified, not the array type."
5229 
5230  // If we get here, we either have type qualifiers on the type, or we have
5231  // sugar such as a typedef in the way. If we have type qualifiers on the type
5232  // we must propagate them down into the element type.
5233 
5234  SplitQualType split = T.getSplitDesugaredType();
5235  Qualifiers qs = split.Quals;
5236 
5237  // If we have a simple case, just return now.
5238  const auto *ATy = dyn_cast<ArrayType>(split.Ty);
5239  if (!ATy || qs.empty())
5240  return ATy;
5241 
5242  // Otherwise, we have an array and we have qualifiers on it. Push the
5243  // qualifiers into the array element type and return a new array type.
5244  QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);
5245 
5246  if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy))
5247  return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
5248  CAT->getSizeModifier(),
5249  CAT->getIndexTypeCVRQualifiers()));
5250  if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy))
5251  return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
5252  IAT->getSizeModifier(),
5253  IAT->getIndexTypeCVRQualifiers()));
5254 
5255  if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy))
5256  return cast<ArrayType>(
5257  getDependentSizedArrayType(NewEltTy,
5258  DSAT->getSizeExpr(),
5259  DSAT->getSizeModifier(),
5260  DSAT->getIndexTypeCVRQualifiers(),
5261  DSAT->getBracketsRange()));
5262 
5263  const auto *VAT = cast<VariableArrayType>(ATy);
5264  return cast<ArrayType>(getVariableArrayType(NewEltTy,
5265  VAT->getSizeExpr(),
5266  VAT->getSizeModifier(),
5267  VAT->getIndexTypeCVRQualifiers(),
5268  VAT->getBracketsRange()));
5269 }
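// Illustrative sketch, not part of the upstream file: the C99 6.7.3p8
// behaviour above in action, assuming 'Ctx' is an ASTContext and 'T' was
// written as 'const A' where 'typedef int A[5];'.
static void qualifiedArrayExample(ASTContext &Ctx, QualType T) {
  if (const ArrayType *AT = Ctx.getAsArrayType(T)) {
    // AT's element type is 'const int': the qualifier was pushed from the
    // array type down into the element type.
    assert(AT->getElementType().isConstQualified());
  }
}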
5270 
5271 QualType ASTContext::getAdjustedParameterType(QualType T) const {
5272  if (T->isArrayType() || T->isFunctionType())
5273  return getDecayedType(T);
5274  return T;
5275 }
5276 
5277 QualType ASTContext::getSignatureParameterType(QualType T) const {
5278  T = getVariableArrayDecayedType(T);
5279  T = getAdjustedParameterType(T);
5280  return T.getUnqualifiedType();
5281 }
5282 
5283 QualType ASTContext::getExceptionObjectType(QualType T) const {
5284  // C++ [except.throw]p3:
5285  // A throw-expression initializes a temporary object, called the exception
5286  // object, the type of which is determined by removing any top-level
5287  // cv-qualifiers from the static type of the operand of throw and adjusting
5288  // the type from "array of T" or "function returning T" to "pointer to T"
5289  // or "pointer to function returning T", [...]
5290  T = getVariableArrayDecayedType(T);
5291  if (T->isArrayType() || T->isFunctionType())
5292  T = getDecayedType(T);
5293  return T.getUnqualifiedType();
5294 }
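// Worked example, not part of the upstream file: for 'throw "oops"' the
// operand has type 'const char[5]'; getExceptionObjectType adjusts it to
// 'const char *' (array-to-pointer), dropping any top-level cv-qualifiers
// while keeping the pointee's 'const'.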
5295 
5296 /// getArrayDecayedType - Return the properly qualified result of decaying the
5297 /// specified array type to a pointer. This operation is non-trivial when
5298 /// handling typedefs etc. The canonical type of "T" must be an array type;
5299 /// this returns a pointer to a properly qualified element of the array.
5300 ///
5301 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
5302 QualType ASTContext::getArrayDecayedType(QualType Ty) const {
5303  // Get the element type with 'getAsArrayType' so that we don't lose any
5304  // typedefs in the element type of the array. This also handles propagation
5305  // of type qualifiers from the array type into the element type if present
5306  // (C99 6.7.3p8).
5307  const ArrayType *PrettyArrayType = getAsArrayType(Ty);
5308  assert(PrettyArrayType && "Not an array type!");
5309 
5310  QualType PtrTy = getPointerType(PrettyArrayType->getElementType());
5311 
5312  // int x[restrict 4] -> int *restrict
5313  QualType Result = getQualifiedType(PtrTy,
5314  PrettyArrayType->getIndexTypeQualifiers());
5315 
5316  // int x[_Nullable] -> int * _Nullable
5317  if (auto Nullability = Ty->getNullability(*this)) {
5318  Result = const_cast<ASTContext *>(this)->getAttributedType(
5319  AttributedType::getNullabilityAttrKind(*Nullability), Result, Result);
5320  }
5321  return Result;
5322 }
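// Usage sketch, not part of the upstream file, assuming 'Ctx' is an
// ASTContext and 'ArrTy' has canonical type 'int [restrict 4]':
//   QualType Decayed = Ctx.getArrayDecayedType(ArrTy);
// yields 'int *restrict', matching the comment above: index-type qualifiers
// migrate onto the resulting pointer type.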
5323 
5324 QualType ASTContext::getBaseElementType(const ArrayType *array) const {
5325  return getBaseElementType(array->getElementType());
5326 }
5327 
5328 QualType ASTContext::getBaseElementType(QualType type) const {
5329  Qualifiers qs;
5330  while (true) {
5331  SplitQualType split = type.getSplitDesugaredType();
5332  const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
5333  if (!array) break;
5334 
5335  type = array->getElementType();
5336  qs.addConsistentQualifiers(split.Quals);
5337  }
5338 
5339  return getQualifiedType(type, qs);
5340 }
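// Usage sketch, not part of the upstream file, assuming 'Ctx' is an
// ASTContext and 'T' is 'const int[3][4]':
//   QualType Elt = Ctx.getBaseElementType(T);
// yields 'const int': every array layer is stripped and the qualifiers
// gathered along the way are re-applied to the element type.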
5341 
5342 /// getConstantArrayElementCount - Returns number of constant array elements.
5343 uint64_t
5344 ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
5345  uint64_t ElementCount = 1;
5346  do {
5347  ElementCount *= CA->getSize().getZExtValue();
5348  CA = dyn_cast_or_null<ConstantArrayType>(
5349  CA->getElementType()->getAsArrayTypeUnsafe());
5350  } while (CA);
5351  return ElementCount;
5352 }
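// Worked example, not part of the upstream file: for a ConstantArrayType
// written as 'int[3][4]', the loop above multiplies the bounds of every
// nested constant array, so getConstantArrayElementCount returns 3 * 4 = 12.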
5353 
5354 /// getFloatingRank - Return a relative rank for floating point types.
5355 /// This routine will assert if passed a built-in type that isn't a float.
5356 static FloatingRank getFloatingRank(QualType T) {
5357  if (const auto *CT = T->getAs<ComplexType>())
5358  return getFloatingRank(CT->getElementType());
5359 
5360  assert(T->getAs<BuiltinType>() && "getFloatingRank(): not a floating type");
5361  switch (T->getAs<BuiltinType>()->getKind()) {
5362  default: llvm_unreachable("getFloatingRank(): not a floating type");
5363  case BuiltinType::Float16: return Float16Rank;
5364  case BuiltinType::Half: return HalfRank;
5365  case BuiltinType::Float: return FloatRank;
5366  case BuiltinType::Double: return DoubleRank;
5367  case BuiltinType::LongDouble: return LongDoubleRank;
5368  case BuiltinType::Float128: return Float128Rank;
5369  }
5370 }
5371 
5372 /// getFloatingTypeOfSizeWithinDomain - Returns a real floating
5373 /// point or a complex type (based on typeDomain/typeSize).
5374 /// 'typeDomain' is a real floating point or complex type.
5375 /// 'typeSize' is a real floating point or complex type.
5376 QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
5377  QualType Domain) const {
5378  FloatingRank EltRank = getFloatingRank(Size);
5379  if (Domain->isComplexType()) {
5380  switch (EltRank) {
5381  case Float16Rank:
5382  case HalfRank: llvm_unreachable("Complex half is not supported");
5383  case FloatRank: return FloatComplexTy;
5384  case DoubleRank: return DoubleComplexTy;
5385  case LongDoubleRank: return LongDoubleComplexTy;
5386  case Float128Rank: return Float128ComplexTy;
5387  }
5388  }
5389 
5390  assert(Domain->isRealFloatingType() && "Unknown domain!");
5391  switch (EltRank) {
5392  case Float16Rank: return HalfTy;
5393  case HalfRank: return HalfTy;
5394  case FloatRank: return FloatTy;
5395  case DoubleRank: return DoubleTy;
5396  case LongDoubleRank: return LongDoubleTy;
5397  case Float128Rank: return Float128Ty;
5398  }
5399  llvm_unreachable("getFloatingRank(): illegal value for rank");
5400 }
5401 
5402 /// getFloatingTypeOrder - Compare the rank of the two specified floating
5403 /// point types, ignoring the domain of the type (i.e. 'double' ==
5404 /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
5405 /// LHS < RHS, return -1.
5406 int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
5407  FloatingRank LHSR = getFloatingRank(LHS);
5408  FloatingRank RHSR = getFloatingRank(RHS);
5409 
5410  if (LHSR == RHSR)
5411  return 0;
5412  if (LHSR > RHSR)
5413  return 1;
5414  return -1;
5415 }
5416 
5417 /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
5418 /// routine will assert if passed a built-in type that isn't an integer or enum,
5419 /// or if it is not canonicalized.
5420 unsigned ASTContext::getIntegerRank(const Type *T) const {
5421  assert(T->isCanonicalUnqualified() && "T should be canonicalized");
5422 
5423  switch (cast<BuiltinType>(T)->getKind()) {
5424  default: llvm_unreachable("getIntegerRank(): not a built-in integer");
5425  case BuiltinType::Bool:
5426  return 1 + (getIntWidth(BoolTy) << 3);
5427  case BuiltinType::Char_S:
5428  case BuiltinType::Char_U:
5429  case BuiltinType::SChar:
5430  case BuiltinType::UChar:
5431  return 2 + (getIntWidth(CharTy) << 3);
5432  case BuiltinType::Short:
5433  case BuiltinType::UShort:
5434  return 3 + (getIntWidth(ShortTy) << 3);
5435  case BuiltinType::Int:
5436  case BuiltinType::UInt:
5437  return 4 + (getIntWidth(IntTy) << 3);
5438  case BuiltinType::Long:
5439  case BuiltinType::ULong:
5440  return 5 + (getIntWidth(LongTy) << 3);
5441  case BuiltinType::LongLong:
5442  case BuiltinType::ULongLong:
5443  return 6 + (getIntWidth(LongLongTy) << 3);
5444  case BuiltinType::Int128:
5445  case BuiltinType::UInt128:
5446  return 7 + (getIntWidth(Int128Ty) << 3);
5447  }
5448 }
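// Worked example, not part of the upstream file: in the encoding above the
// bit width dominates via '<< 3' and the small additive constant only breaks
// ties between same-width types. Assuming 1-bit bool, 32-bit int and 64-bit
// long:
//   Bool -> 1 + (1 << 3)  = 9
//   Int  -> 4 + (32 << 3) = 260
//   Long -> 5 + (64 << 3) = 517
// so a wider type always outranks a narrower one.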
5449 
5450 /// Whether this is a promotable bitfield reference according
5451 /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
5452 ///
5453 /// \returns the type this bit-field will promote to, or NULL if no
5454 /// promotion occurs.
5455 QualType ASTContext::isPromotableBitField(Expr *E) const {
5456  if (E->isTypeDependent() || E->isValueDependent())
5457  return {};
5458 
5459  // FIXME: We should not do this unless E->refersToBitField() is true. This
5460  // matters in C where getSourceBitField() will find bit-fields for various
5461  // cases where the source expression is not a bit-field designator.
5462 
5463  FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
5464  if (!Field)
5465  return {};
5466 
5467  QualType FT = Field->getType();
5468 
5469  uint64_t BitWidth = Field->getBitWidthValue(*this);
5470  uint64_t IntSize = getTypeSize(IntTy);
5471  // C++ [conv.prom]p5:
5472  // A prvalue for an integral bit-field can be converted to a prvalue of type
5473  // int if int can represent all the values of the bit-field; otherwise, it
5474  // can be converted to unsigned int if unsigned int can represent all the
5475  // values of the bit-field. If the bit-field is larger yet, no integral
5476  // promotion applies to it.
5477  // C11 6.3.1.1/2:
5478  // [For a bit-field of type _Bool, int, signed int, or unsigned int:]
5479  // If an int can represent all values of the original type (as restricted by
5480  // the width, for a bit-field), the value is converted to an int; otherwise,
5481  // it is converted to an unsigned int.
5482  //
5483  // FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
5484  // We perform that promotion here to match GCC and C++.
5485  if (BitWidth < IntSize)
5486  return IntTy;
5487 
5488  if (BitWidth == IntSize)
5489  return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;
5490 
5491  // Types bigger than int are not subject to promotions, and therefore act
5492  // like the base type. GCC has some weird bugs in this area that we
5493  // deliberately do not follow (GCC follows a pre-standard resolution to
5494  // C's DR315 which treats bit-width as being part of the type, and this leaks
5495  // into their semantics in some cases).
5496  return {};
5497 }
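// Worked example, not part of the upstream file, assuming a 32-bit 'int':
//   struct S { unsigned a : 7; unsigned b : 32; long c : 3; };
// a bit-field access 's.a' promotes to 'int' (7 < 32), 's.b' promotes to
// 'unsigned int' (width equals int's and the field is unsigned), and 's.c'
// promotes to 'int' here, matching GCC and C++ as the FIXME above notes.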
5498 
5499 /// getPromotedIntegerType - Returns the type that Promotable will
5500 /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
5501 /// integer type.
5502 QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
5503  assert(!Promotable.isNull());
5504  assert(Promotable->isPromotableIntegerType());
5505  if (const auto *ET = Promotable->getAs<EnumType>())
5506  return ET->getDecl()->getPromotionType();
5507 
5508  if (const auto *BT = Promotable->getAs<BuiltinType>()) {
5509  // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
5510  // (3.9.1) can be converted to a prvalue of the first of the following
5511  // types that can represent all the values of its underlying type:
5512  // int,