ASTContext.cpp
1 //===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the ASTContext interface.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "clang/AST/ASTContext.h"
15 #include "CXXABI.h"
16 #include "clang/AST/APValue.h"
19 #include "clang/AST/Attr.h"
20 #include "clang/AST/AttrIterator.h"
21 #include "clang/AST/CharUnits.h"
22 #include "clang/AST/Comment.h"
23 #include "clang/AST/Decl.h"
24 #include "clang/AST/DeclBase.h"
25 #include "clang/AST/DeclCXX.h"
27 #include "clang/AST/DeclObjC.h"
28 #include "clang/AST/DeclOpenMP.h"
29 #include "clang/AST/DeclTemplate.h"
31 #include "clang/AST/Expr.h"
32 #include "clang/AST/ExprCXX.h"
34 #include "clang/AST/Mangle.h"
38 #include "clang/AST/RecordLayout.h"
40 #include "clang/AST/Stmt.h"
41 #include "clang/AST/TemplateBase.h"
42 #include "clang/AST/TemplateName.h"
43 #include "clang/AST/Type.h"
44 #include "clang/AST/TypeLoc.h"
48 #include "clang/Basic/Builtins.h"
51 #include "clang/Basic/FixedPoint.h"
53 #include "clang/Basic/LLVM.h"
55 #include "clang/Basic/Linkage.h"
60 #include "clang/Basic/Specifiers.h"
62 #include "clang/Basic/TargetInfo.h"
63 #include "clang/Basic/XRayLists.h"
64 #include "llvm/ADT/APInt.h"
65 #include "llvm/ADT/APSInt.h"
66 #include "llvm/ADT/ArrayRef.h"
67 #include "llvm/ADT/DenseMap.h"
68 #include "llvm/ADT/DenseSet.h"
69 #include "llvm/ADT/FoldingSet.h"
70 #include "llvm/ADT/None.h"
71 #include "llvm/ADT/Optional.h"
72 #include "llvm/ADT/PointerUnion.h"
73 #include "llvm/ADT/STLExtras.h"
74 #include "llvm/ADT/SmallPtrSet.h"
75 #include "llvm/ADT/SmallVector.h"
76 #include "llvm/ADT/StringExtras.h"
77 #include "llvm/ADT/StringRef.h"
78 #include "llvm/ADT/Triple.h"
79 #include "llvm/Support/Capacity.h"
80 #include "llvm/Support/Casting.h"
81 #include "llvm/Support/Compiler.h"
82 #include "llvm/Support/ErrorHandling.h"
83 #include "llvm/Support/MathExtras.h"
84 #include "llvm/Support/raw_ostream.h"
85 #include <algorithm>
86 #include <cassert>
87 #include <cstddef>
88 #include <cstdint>
89 #include <cstdlib>
90 #include <map>
91 #include <memory>
92 #include <string>
93 #include <tuple>
94 #include <utility>
95 
96 using namespace clang;
97 
110 
111 enum FloatingRank {
112   Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank
113 };
114 
115 RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
116  if (!CommentsLoaded && ExternalSource) {
117  ExternalSource->ReadComments();
118 
119 #ifndef NDEBUG
120  ArrayRef<RawComment *> RawComments = Comments.getComments();
121  assert(std::is_sorted(RawComments.begin(), RawComments.end(),
122  BeforeThanCompare<RawComment>(SourceMgr)));
123 #endif
124 
125  CommentsLoaded = true;
126  }
127 
128  assert(D);
129 
130  // User can not attach documentation to implicit declarations.
131  if (D->isImplicit())
132  return nullptr;
133 
134  // User can not attach documentation to implicit instantiations.
135  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
136  if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
137  return nullptr;
138  }
139 
140  if (const auto *VD = dyn_cast<VarDecl>(D)) {
141  if (VD->isStaticDataMember() &&
142  VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
143  return nullptr;
144  }
145 
146  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
147  if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
148  return nullptr;
149  }
150 
151  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
152  TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
153  if (TSK == TSK_ImplicitInstantiation ||
154  TSK == TSK_Undeclared)
155  return nullptr;
156  }
157 
158  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
159  if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
160  return nullptr;
161  }
162  if (const auto *TD = dyn_cast<TagDecl>(D)) {
163  // When a tag declaration (but not its definition!) is part of the
164  // decl-specifier-seq of some other declaration, it doesn't get a comment.
165  if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
166  return nullptr;
167  }
168  // TODO: handle comments for function parameters properly.
169  if (isa<ParmVarDecl>(D))
170  return nullptr;
171 
172  // TODO: we could look up template parameter documentation in the template
173  // documentation.
174  if (isa<TemplateTypeParmDecl>(D) ||
175  isa<NonTypeTemplateParmDecl>(D) ||
176  isa<TemplateTemplateParmDecl>(D))
177  return nullptr;
178 
179  ArrayRef<RawComment *> RawComments = Comments.getComments();
180 
181  // If there are no comments anywhere, we won't find anything.
182  if (RawComments.empty())
183  return nullptr;
184 
185  // Find declaration location.
186  // For Objective-C declarations we generally don't expect to have multiple
187  // declarators, thus use declaration starting location as the "declaration
188  // location".
189  // For all other declarations multiple declarators are used quite frequently,
190  // so we use the location of the identifier as the "declaration location".
191  SourceLocation DeclLoc;
192  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
193  isa<ObjCPropertyDecl>(D) ||
194  isa<RedeclarableTemplateDecl>(D) ||
195  isa<ClassTemplateSpecializationDecl>(D))
196  DeclLoc = D->getBeginLoc();
197  else {
198  DeclLoc = D->getLocation();
199  if (DeclLoc.isMacroID()) {
200  if (isa<TypedefDecl>(D)) {
201  // If the location of the typedef name is in a macro, it is because it is
202  // being declared via a macro. Try using the declaration's starting location
203  // as the "declaration location".
204  DeclLoc = D->getBeginLoc();
205  } else if (const auto *TD = dyn_cast<TagDecl>(D)) {
206  // If location of the tag decl is inside a macro, but the spelling of
207  // the tag name comes from a macro argument, it looks like a special
208  // macro like NS_ENUM is being used to define the tag decl. In that
209  // case, adjust the source location to the expansion loc so that we can
210  // attach the comment to the tag decl.
211  if (SourceMgr.isMacroArgExpansion(DeclLoc) &&
212  TD->isCompleteDefinition())
213  DeclLoc = SourceMgr.getExpansionLoc(DeclLoc);
214  }
215  }
216  }
217 
218  // If the declaration doesn't map directly to a location in a file, we
219  // can't find the comment.
220  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
221  return nullptr;
222 
223  // Find the comment that occurs just after this declaration.
224  ArrayRef<RawComment *>::iterator Comment;
225  {
226  // When searching for comments during parsing, the comment we are looking
227  // for is usually among the last two comments we parsed -- check them
228  // first.
229  RawComment CommentAtDeclLoc(
230  SourceMgr, SourceRange(DeclLoc), LangOpts.CommentOpts, false);
231  BeforeThanCompare<RawComment> Compare(SourceMgr);
232  ArrayRef<RawComment *>::iterator MaybeBeforeDecl = RawComments.end() - 1;
233  bool Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
234  if (!Found && RawComments.size() >= 2) {
235  MaybeBeforeDecl--;
236  Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
237  }
238 
239  if (Found) {
240  Comment = MaybeBeforeDecl + 1;
241  assert(Comment == std::lower_bound(RawComments.begin(), RawComments.end(),
242  &CommentAtDeclLoc, Compare));
243  } else {
244  // Slow path.
245  Comment = std::lower_bound(RawComments.begin(), RawComments.end(),
246  &CommentAtDeclLoc, Compare);
247  }
248  }
249 
250  // Decompose the location for the declaration and find the beginning of the
251  // file buffer.
252  std::pair<FileID, unsigned> DeclLocDecomp = SourceMgr.getDecomposedLoc(DeclLoc);
253 
254  // First check whether we have a trailing comment.
255  if (Comment != RawComments.end() &&
256  ((*Comment)->isDocumentation() || LangOpts.CommentOpts.ParseAllComments)
257  && (*Comment)->isTrailingComment() &&
258  (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
259  isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {
260  std::pair<FileID, unsigned> CommentBeginDecomp
261  = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getBegin());
262  // Check that Doxygen trailing comment comes after the declaration, starts
263  // on the same line and in the same file as the declaration.
264  if (DeclLocDecomp.first == CommentBeginDecomp.first &&
265  SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second)
266  == SourceMgr.getLineNumber(CommentBeginDecomp.first,
267  CommentBeginDecomp.second)) {
268  return *Comment;
269  }
270  }
271 
272  // The comment just after the declaration was not a trailing comment.
273  // Let's look at the previous comment.
274  if (Comment == RawComments.begin())
275  return nullptr;
276  --Comment;
277 
278  // Check that we actually have a non-member Doxygen comment.
279  if (!((*Comment)->isDocumentation() ||
280  LangOpts.CommentOpts.ParseAllComments) ||
281  (*Comment)->isTrailingComment())
282  return nullptr;
283 
284  // Decompose the end of the comment.
285  std::pair<FileID, unsigned> CommentEndDecomp
286  = SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getEnd());
287 
288  // If the comment and the declaration aren't in the same file, then they
289  // aren't related.
290  if (DeclLocDecomp.first != CommentEndDecomp.first)
291  return nullptr;
292 
293  // Get the corresponding buffer.
294  bool Invalid = false;
295  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
296  &Invalid).data();
297  if (Invalid)
298  return nullptr;
299 
300  // Extract text between the comment and declaration.
301  StringRef Text(Buffer + CommentEndDecomp.second,
302  DeclLocDecomp.second - CommentEndDecomp.second);
303 
304  // There should be no other declarations or preprocessor directives between
305  // comment and declaration.
306  if (Text.find_first_of(";{}#@") != StringRef::npos)
307  return nullptr;
308 
309  return *Comment;
310 }
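Editorial note: a minimal illustration (not part of this file) of the attachment rules implemented by the search above. A trailing comment is only used if it starts on the same line as the declaration, and a preceding comment is rejected if any ';', '{', '}', '#' or '@' appears between its end and the declaration.

/// Attached to f: preceding doc comment with only whitespace in between.
void f();

int g();  ///< Attached to g: trailing comment starting on the same line.

/// Not attached to h: the '}' closing namespace N intervenes.
namespace N {}
void h();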
311 
312 /// If we have a 'templated' declaration for a template, adjust 'D' to
313 /// refer to the actual template.
314 /// If we have an implicit instantiation, adjust 'D' to refer to template.
315 static const Decl *adjustDeclToTemplate(const Decl *D) {
316  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
317  // Is this function declaration part of a function template?
318  if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
319  return FTD;
320 
321  // Nothing to do if function is not an implicit instantiation.
322  if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
323  return D;
324 
325  // Function is an implicit instantiation of a function template?
326  if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
327  return FTD;
328 
329  // Function is instantiated from a member definition of a class template?
330  if (const FunctionDecl *MemberDecl =
331          FD->getInstantiatedFromMemberFunction())
332  return MemberDecl;
333 
334  return D;
335  }
336  if (const auto *VD = dyn_cast<VarDecl>(D)) {
337  // Static data member is instantiated from a member definition of a class
338  // template?
339  if (VD->isStaticDataMember())
340  if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
341  return MemberDecl;
342 
343  return D;
344  }
345  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
346  // Is this class declaration part of a class template?
347  if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
348  return CTD;
349 
350  // Class is an implicit instantiation of a class template or partial
351  // specialization?
352  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
353  if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
354  return D;
355  llvm::PointerUnion<ClassTemplateDecl *,
356                     ClassTemplatePartialSpecializationDecl *>
357  PU = CTSD->getSpecializedTemplateOrPartial();
358  return PU.is<ClassTemplateDecl*>() ?
359  static_cast<const Decl*>(PU.get<ClassTemplateDecl *>()) :
360  static_cast<const Decl*>(
361      PU.get<ClassTemplatePartialSpecializationDecl*>());
362  }
363 
364  // Class is instantiated from a member definition of a class template?
365  if (const MemberSpecializationInfo *Info =
366  CRD->getMemberSpecializationInfo())
367  return Info->getInstantiatedFrom();
368 
369  return D;
370  }
371  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
372  // Enum is instantiated from a member definition of a class template?
373  if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
374  return MemberDecl;
375 
376  return D;
377  }
378  // FIXME: Adjust alias templates?
379  return D;
380 }
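Editorial sketch of what adjustDeclToTemplate does, using a hypothetical translation unit:

template <typename T> void f(T);  // The FunctionDecl for f(T) is the "templated"
                                  // declaration; it is adjusted to the enclosing
                                  // FunctionTemplateDecl.
void use() { f(42); }             // The implicit instantiation f<int> is adjusted
                                  // back to the primary template, so a doc
                                  // comment written on the template is found.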
381 
382 const RawComment *ASTContext::getRawCommentForAnyRedecl(
383                                                 const Decl *D,
384  const Decl **OriginalDecl) const {
385  D = adjustDeclToTemplate(D);
386 
387  // Check whether we have cached a comment for this declaration already.
388  {
389  llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
390  RedeclComments.find(D);
391  if (Pos != RedeclComments.end()) {
392  const RawCommentAndCacheFlags &Raw = Pos->second;
393  if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
394  if (OriginalDecl)
395  *OriginalDecl = Raw.getOriginalDecl();
396  return Raw.getRaw();
397  }
398  }
399  }
400 
401  // Search for comments attached to declarations in the redeclaration chain.
402  const RawComment *RC = nullptr;
403  const Decl *OriginalDeclForRC = nullptr;
404  for (auto I : D->redecls()) {
405  llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
406  RedeclComments.find(I);
407  if (Pos != RedeclComments.end()) {
408  const RawCommentAndCacheFlags &Raw = Pos->second;
409  if (Raw.getKind() != RawCommentAndCacheFlags::FromRedecl) {
410  RC = Raw.getRaw();
411  OriginalDeclForRC = Raw.getOriginalDecl();
412  break;
413  }
414  } else {
415  RC = getRawCommentForDeclNoCache(I);
416  OriginalDeclForRC = I;
417  RawCommentAndCacheFlags Raw;
418  if (RC) {
419  // Call order swapped to work around ICE in VS2015 RTM (Release Win32)
420  // https://connect.microsoft.com/VisualStudio/feedback/details/1741530
421  Raw.setKind(RawCommentAndCacheFlags::FromDecl);
422  Raw.setRaw(RC);
423  } else
424  Raw.setKind(RawCommentAndCacheFlags::NoCommentInDecl);
425  Raw.setOriginalDecl(I);
426  RedeclComments[I] = Raw;
427  if (RC)
428  break;
429  }
430  }
431 
432  // If we found a comment, it should be a documentation comment.
433  assert(!RC || RC->isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
434 
435  if (OriginalDecl)
436  *OriginalDecl = OriginalDeclForRC;
437 
438  // Update cache for every declaration in the redeclaration chain.
439  RawCommentAndCacheFlags Raw;
440  Raw.setRaw(RC);
441  Raw.setKind(RawCommentAndCacheFlags::FromRedecl);
442  Raw.setOriginalDecl(OriginalDeclForRC);
443 
444  for (auto I : D->redecls()) {
445  RawCommentAndCacheFlags &R = RedeclComments[I];
446  if (R.getKind() == RawCommentAndCacheFlags::NoCommentInDecl)
447  R = Raw;
448  }
449 
450  return RC;
451 }
452 
453 static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
454                                  SmallVectorImpl<const NamedDecl *> &Redeclared) {
455  const DeclContext *DC = ObjCMethod->getDeclContext();
456  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
457  const ObjCInterfaceDecl *ID = IMD->getClassInterface();
458  if (!ID)
459  return;
460  // Add redeclared method here.
461  for (const auto *Ext : ID->known_extensions()) {
462  if (ObjCMethodDecl *RedeclaredMethod =
463  Ext->getMethod(ObjCMethod->getSelector(),
464  ObjCMethod->isInstanceMethod()))
465  Redeclared.push_back(RedeclaredMethod);
466  }
467  }
468 }
469 
470 comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
471                                                     const Decl *D) const {
472  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
473  ThisDeclInfo->CommentDecl = D;
474  ThisDeclInfo->IsFilled = false;
475  ThisDeclInfo->fill();
476  ThisDeclInfo->CommentDecl = FC->getDecl();
477  if (!ThisDeclInfo->TemplateParameters)
478  ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
479  comments::FullComment *CFC =
480  new (*this) comments::FullComment(FC->getBlocks(),
481  ThisDeclInfo);
482  return CFC;
483 }
484 
485 comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
486  const RawComment *RC = getRawCommentForDeclNoCache(D);
487  return RC ? RC->parse(*this, nullptr, D) : nullptr;
488 }
489 
490 comments::FullComment *ASTContext::getCommentForDecl(
491                                               const Decl *D,
492  const Preprocessor *PP) const {
493  if (D->isInvalidDecl())
494  return nullptr;
495  D = adjustDeclToTemplate(D);
496 
497  const Decl *Canonical = D->getCanonicalDecl();
498  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
499  ParsedComments.find(Canonical);
500 
501  if (Pos != ParsedComments.end()) {
502  if (Canonical != D) {
503  comments::FullComment *FC = Pos->second;
504  comments::FullComment *CFC = cloneFullComment(FC, D);
505  return CFC;
506  }
507  return Pos->second;
508  }
509 
510  const Decl *OriginalDecl;
511 
512  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
513  if (!RC) {
514  if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
515  SmallVector<const NamedDecl*, 8> Overridden;
516  const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
517  if (OMD && OMD->isPropertyAccessor())
518  if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
519  if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
520  return cloneFullComment(FC, D);
521  if (OMD)
522  addRedeclaredMethods(OMD, Overridden);
523  getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
524  for (unsigned i = 0, e = Overridden.size(); i < e; i++)
525  if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
526  return cloneFullComment(FC, D);
527  }
528  else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
529  // Attach any tag type's documentation to its typedef if the latter
530  // does not have one of its own.
531  QualType QT = TD->getUnderlyingType();
532  if (const auto *TT = QT->getAs<TagType>())
533  if (const Decl *TD = TT->getDecl())
534  if (comments::FullComment *FC = getCommentForDecl(TD, PP))
535  return cloneFullComment(FC, D);
536  }
537  else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
538  while (IC->getSuperClass()) {
539  IC = IC->getSuperClass();
540  if (comments::FullComment *FC = getCommentForDecl(IC, PP))
541  return cloneFullComment(FC, D);
542  }
543  }
544  else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
545  if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
546  if (comments::FullComment *FC = getCommentForDecl(IC, PP))
547  return cloneFullComment(FC, D);
548  }
549  else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
550  if (!(RD = RD->getDefinition()))
551  return nullptr;
552  // Check non-virtual bases.
553  for (const auto &I : RD->bases()) {
554  if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
555  continue;
556  QualType Ty = I.getType();
557  if (Ty.isNull())
558  continue;
559  if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
560  if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
561  continue;
562 
563  if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
564  return cloneFullComment(FC, D);
565  }
566  }
567  // Check virtual bases.
568  for (const auto &I : RD->vbases()) {
569  if (I.getAccessSpecifier() != AS_public)
570  continue;
571  QualType Ty = I.getType();
572  if (Ty.isNull())
573  continue;
574  if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
575  if (!(VirtualBase= VirtualBase->getDefinition()))
576  continue;
577  if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
578  return cloneFullComment(FC, D);
579  }
580  }
581  }
582  return nullptr;
583  }
584 
585  // If the RawComment was attached to other redeclaration of this Decl, we
586  // should parse the comment in context of that other Decl. This is important
587  // because comments can contain references to parameter names which can be
588  // different across redeclarations.
589  if (D != OriginalDecl)
590  return getCommentForDecl(OriginalDecl, PP);
591 
592  comments::FullComment *FC = RC->parse(*this, PP, D);
593  ParsedComments[Canonical] = FC;
594  return FC;
595 }
596 
597 void
598 ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
599  TemplateTemplateParmDecl *Parm) {
600  ID.AddInteger(Parm->getDepth());
601  ID.AddInteger(Parm->getPosition());
602  ID.AddBoolean(Parm->isParameterPack());
603 
604  TemplateParameterList *Params = Parm->getTemplateParameters();
605  ID.AddInteger(Params->size());
606  for (TemplateParameterList::const_iterator P = Params->begin(),
607                                              PEnd = Params->end();
608  P != PEnd; ++P) {
609  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
610  ID.AddInteger(0);
611  ID.AddBoolean(TTP->isParameterPack());
612  continue;
613  }
614 
615  if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
616  ID.AddInteger(1);
617  ID.AddBoolean(NTTP->isParameterPack());
618  ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
619  if (NTTP->isExpandedParameterPack()) {
620  ID.AddBoolean(true);
621  ID.AddInteger(NTTP->getNumExpansionTypes());
622  for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
623  QualType T = NTTP->getExpansionType(I);
624  ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
625  }
626  } else
627  ID.AddBoolean(false);
628  continue;
629  }
630 
631  auto *TTP = cast<TemplateTemplateParmDecl>(*P);
632  ID.AddInteger(2);
633  Profile(ID, TTP);
634  }
635 }
636 
637 TemplateTemplateParmDecl *
638 ASTContext::getCanonicalTemplateTemplateParmDecl(
639  TemplateTemplateParmDecl *TTP) const {
640  // Check if we already have a canonical template template parameter.
641  llvm::FoldingSetNodeID ID;
642  CanonicalTemplateTemplateParm::Profile(ID, TTP);
643  void *InsertPos = nullptr;
644  CanonicalTemplateTemplateParm *Canonical
645  = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
646  if (Canonical)
647  return Canonical->getParam();
648 
649  // Build a canonical template parameter list.
650  TemplateParameterList *Params = TTP->getTemplateParameters();
651  SmallVector<NamedDecl *, 4> CanonParams;
652  CanonParams.reserve(Params->size());
653  for (TemplateParameterList::const_iterator P = Params->begin(),
654                                              PEnd = Params->end();
655  P != PEnd; ++P) {
656  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
657  CanonParams.push_back(
658                   TemplateTypeParmDecl::Create(*this, getTranslationUnitDecl(),
659  SourceLocation(),
660  SourceLocation(),
661  TTP->getDepth(),
662  TTP->getIndex(), nullptr, false,
663  TTP->isParameterPack()));
664  else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
665  QualType T = getCanonicalType(NTTP->getType());
666  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
667  NonTypeTemplateParmDecl *Param;
668  if (NTTP->isExpandedParameterPack()) {
669  SmallVector<QualType, 2> ExpandedTypes;
670  SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
671  for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
672  ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
673  ExpandedTInfos.push_back(
674  getTrivialTypeSourceInfo(ExpandedTypes.back()));
675  }
676 
677  Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
678  SourceLocation(),
679  SourceLocation(),
680  NTTP->getDepth(),
681  NTTP->getPosition(), nullptr,
682  T,
683  TInfo,
684  ExpandedTypes,
685  ExpandedTInfos);
686  } else {
687  Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
688  SourceLocation(),
689  SourceLocation(),
690  NTTP->getDepth(),
691  NTTP->getPosition(), nullptr,
692  T,
693  NTTP->isParameterPack(),
694  TInfo);
695  }
696  CanonParams.push_back(Param);
697 
698  } else
699  CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
700  cast<TemplateTemplateParmDecl>(*P)));
701  }
702 
703  assert(!TTP->getRequiresClause() &&
704  "Unexpected requires-clause on template template-parameter");
705  Expr *const CanonRequiresClause = nullptr;
706 
707  TemplateTemplateParmDecl *CanonTTP
708    = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
709  SourceLocation(), TTP->getDepth(),
710  TTP->getPosition(),
711  TTP->isParameterPack(),
712  nullptr,
713                         TemplateParameterList::Create(*this, SourceLocation(),
714  SourceLocation(),
715  CanonParams,
716  SourceLocation(),
717  CanonRequiresClause));
718 
719  // Get the new insert position for the node we care about.
720  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
721  assert(!Canonical && "Shouldn't be in the map!");
722  (void)Canonical;
723 
724  // Create the canonical template template parameter entry.
725  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
726  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
727  return CanonTTP;
728 }
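Editorial sketch: the canonicalization above means that template template parameters differing only in parameter names share one canonical declaration, e.g.:

template <template <typename T> class Container> struct A;  // hypothetical
template <template <typename U> class C>         struct B;  // same canonical
                                                             // template template
                                                             // parameter as A's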
729 
730 CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
731  if (!LangOpts.CPlusPlus) return nullptr;
732 
733  switch (T.getCXXABI().getKind()) {
734  case TargetCXXABI::GenericARM: // Same as Itanium at this level
735  case TargetCXXABI::iOS:
736  case TargetCXXABI::iOS64:
737  case TargetCXXABI::WatchOS:
738  case TargetCXXABI::GenericAArch64:
739  case TargetCXXABI::GenericMIPS:
740  case TargetCXXABI::GenericItanium:
741  case TargetCXXABI::WebAssembly:
742  return CreateItaniumCXXABI(*this);
743  case TargetCXXABI::Microsoft:
744  return CreateMicrosoftCXXABI(*this);
745  }
746  llvm_unreachable("Invalid CXXABI type!");
747 }
748 
749 static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
750  const LangOptions &LOpts) {
751  if (LOpts.FakeAddressSpaceMap) {
752  // The fake address space map must have a distinct entry for each
753  // language-specific address space.
754  static const unsigned FakeAddrSpaceMap[] = {
755  0, // Default
756  1, // opencl_global
757  3, // opencl_local
758  2, // opencl_constant
759  0, // opencl_private
760  4, // opencl_generic
761  5, // cuda_device
762  6, // cuda_constant
763  7 // cuda_shared
764  };
765  return &FakeAddrSpaceMap;
766  } else {
767  return &T.getAddressSpaceMap();
768  }
769 }
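Editorial note: the map returned above is indexed by the language-level address space, so under -ffake-address-space-map an opencl_local pointer, for example, ends up in target address space 3 per the table above. A hedged sketch (LangAS is the enumeration from clang/Basic/AddressSpaces.h; the exact lookup helper used elsewhere may differ):

unsigned TargetAS =
    (*getAddressSpaceMap(T, LOpts))[(unsigned)LangAS::opencl_local]; // 3 with the fake map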
770 
771 static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
772                                           const LangOptions &LangOpts) {
773  switch (LangOpts.getAddressSpaceMapMangling()) {
774  case LangOptions::ASMM_Target:
775  return TI.useAddressSpaceMapMangling();
776  case LangOptions::ASMM_On:
777  return true;
778  case LangOptions::ASMM_Off:
779  return false;
780  }
781  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
782 }
783 
784 ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
785  IdentifierTable &idents, SelectorTable &sels,
786  Builtin::Context &builtins)
787  : FunctionProtoTypes(this_()), TemplateSpecializationTypes(this_()),
788  DependentTemplateSpecializationTypes(this_()),
789  SubstTemplateTemplateParmPacks(this_()), SourceMgr(SM), LangOpts(LOpts),
790  SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFiles, SM)),
791  XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
792  LangOpts.XRayNeverInstrumentFiles,
793  LangOpts.XRayAttrListFiles, SM)),
794  PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
795  BuiltinInfo(builtins), DeclarationNames(*this), Comments(SM),
796  CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
797  CompCategories(this_()), LastSDM(nullptr, 0) {
798  TUDecl = TranslationUnitDecl::Create(*this);
799 }
800 
801 ASTContext::~ASTContext() {
802  ReleaseParentMapEntries();
803 
804  // Release the DenseMaps associated with DeclContext objects.
805  // FIXME: Is this the ideal solution?
806  ReleaseDeclContextMaps();
807 
808  // Call all of the deallocation functions on all of their targets.
809  for (auto &Pair : Deallocations)
810  (Pair.first)(Pair.second);
811 
812  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
813  // because they can contain DenseMaps.
814  for (llvm::DenseMap<const ObjCContainerDecl*,
815  const ASTRecordLayout*>::iterator
816  I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
817  // Increment in loop to prevent using deallocated memory.
818  if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
819  R->Destroy(*this);
820 
821  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
822  I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
823  // Increment in loop to prevent using deallocated memory.
824  if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
825  R->Destroy(*this);
826  }
827 
828  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
829  AEnd = DeclAttrs.end();
830  A != AEnd; ++A)
831  A->second->~AttrVec();
832 
833  for (std::pair<const MaterializeTemporaryExpr *, APValue *> &MTVPair :
834  MaterializedTemporaryValues)
835  MTVPair.second->~APValue();
836 
837  for (const auto &Value : ModuleInitializers)
838  Value.second->~PerModuleInitializers();
839 }
840 
841 void ASTContext::ReleaseParentMapEntries() {
842  if (!PointerParents) return;
843  for (const auto &Entry : *PointerParents) {
844  if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
845  delete Entry.second.get<ast_type_traits::DynTypedNode *>();
846  } else if (Entry.second.is<ParentVector *>()) {
847  delete Entry.second.get<ParentVector *>();
848  }
849  }
850  for (const auto &Entry : *OtherParents) {
851  if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
852  delete Entry.second.get<ast_type_traits::DynTypedNode *>();
853  } else if (Entry.second.is<ParentVector *>()) {
854  delete Entry.second.get<ParentVector *>();
855  }
856  }
857 }
858 
859 void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
860  Deallocations.push_back({Callback, Data});
861 }
862 
863 void
864 ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
865  ExternalSource = std::move(Source);
866 }
867 
868 void ASTContext::PrintStats() const {
869  llvm::errs() << "\n*** AST Context Stats:\n";
870  llvm::errs() << " " << Types.size() << " types total.\n";
871 
872  unsigned counts[] = {
873 #define TYPE(Name, Parent) 0,
874 #define ABSTRACT_TYPE(Name, Parent)
875 #include "clang/AST/TypeNodes.def"
876  0 // Extra
877  };
878 
879  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
880  Type *T = Types[i];
881  counts[(unsigned)T->getTypeClass()]++;
882  }
883 
884  unsigned Idx = 0;
885  unsigned TotalBytes = 0;
886 #define TYPE(Name, Parent) \
887  if (counts[Idx]) \
888  llvm::errs() << " " << counts[Idx] << " " << #Name \
889  << " types, " << sizeof(Name##Type) << " each " \
890  << "(" << counts[Idx] * sizeof(Name##Type) \
891  << " bytes)\n"; \
892  TotalBytes += counts[Idx] * sizeof(Name##Type); \
893  ++Idx;
894 #define ABSTRACT_TYPE(Name, Parent)
895 #include "clang/AST/TypeNodes.def"
896 
897  llvm::errs() << "Total bytes = " << TotalBytes << "\n";
898 
899  // Implicit special member functions.
900  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
901               << NumImplicitDefaultConstructors
902               << " implicit default constructors created\n";
903  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
904               << NumImplicitCopyConstructors
905               << " implicit copy constructors created\n";
906  if (getLangOpts().CPlusPlus)
907  llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
908               << NumImplicitMoveConstructors
909               << " implicit move constructors created\n";
910  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
911               << NumImplicitCopyAssignmentOperators
912               << " implicit copy assignment operators created\n";
913  if (getLangOpts().CPlusPlus)
914  llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
915               << NumImplicitMoveAssignmentOperators
916               << " implicit move assignment operators created\n";
917  llvm::errs() << NumImplicitDestructorsDeclared << "/"
918               << NumImplicitDestructors
919               << " implicit destructors created\n";
920 
921  if (ExternalSource) {
922  llvm::errs() << "\n";
923  ExternalSource->PrintStats();
924  }
925 
926  BumpAlloc.PrintStats();
927 }
928 
929 void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
930                                            bool NotifyListeners) {
931  if (NotifyListeners)
932  if (auto *Listener = getASTMutationListener())
933  Listener->RedefinedHiddenDefinition(ND, M);
934 
935  if (getLangOpts().ModulesLocalVisibility)
936  MergedDefModules[ND].push_back(M);
937  else
939 }
940 
941 void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
942  auto It = MergedDefModules.find(ND);
943  if (It == MergedDefModules.end())
944  return;
945 
946  auto &Merged = It->second;
947  llvm::DenseSet<Module*> Found;
948  for (Module *&M : Merged)
949  if (!Found.insert(M).second)
950  M = nullptr;
951  Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end());
952 }
953 
954 void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
955  if (LazyInitializers.empty())
956  return;
957 
958  auto *Source = Ctx.getExternalSource();
959  assert(Source && "lazy initializers but no external source");
960 
961  auto LazyInits = std::move(LazyInitializers);
962  LazyInitializers.clear();
963 
964  for (auto ID : LazyInits)
965  Initializers.push_back(Source->GetExternalDecl(ID));
966 
967  assert(LazyInitializers.empty() &&
968  "GetExternalDecl for lazy module initializer added more inits");
969 }
970 
971 void ASTContext::addModuleInitializer(Module *M, Decl *D) {
972  // One special case: if we add a module initializer that imports another
973  // module, and that module's only initializer is an ImportDecl, simplify.
974  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
975  auto It = ModuleInitializers.find(ID->getImportedModule());
976 
977  // Maybe the ImportDecl does nothing at all. (Common case.)
978  if (It == ModuleInitializers.end())
979  return;
980 
981  // Maybe the ImportDecl only imports another ImportDecl.
982  auto &Imported = *It->second;
983  if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
984  Imported.resolve(*this);
985  auto *OnlyDecl = Imported.Initializers.front();
986  if (isa<ImportDecl>(OnlyDecl))
987  D = OnlyDecl;
988  }
989  }
990 
991  auto *&Inits = ModuleInitializers[M];
992  if (!Inits)
993  Inits = new (*this) PerModuleInitializers;
994  Inits->Initializers.push_back(D);
995 }
996 
997 void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
998  auto *&Inits = ModuleInitializers[M];
999  if (!Inits)
1000  Inits = new (*this) PerModuleInitializers;
1001  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
1002  IDs.begin(), IDs.end());
1003 }
1004 
1005 ArrayRef<Decl*> ASTContext::getModuleInitializers(Module *M) {
1006  auto It = ModuleInitializers.find(M);
1007  if (It == ModuleInitializers.end())
1008  return None;
1009 
1010  auto *Inits = It->second;
1011  Inits->resolve(*this);
1012  return Inits->Initializers;
1013 }
1014 
1015 ExternCContextDecl *ASTContext::getExternCContextDecl() const {
1016  if (!ExternCContext)
1017  ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());
1018 
1019  return ExternCContext;
1020 }
1021 
1022 BuiltinTemplateDecl *
1023 ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
1024                                      const IdentifierInfo *II) const {
1025  auto *BuiltinTemplate = BuiltinTemplateDecl::Create(*this, TUDecl, II, BTK);
1026  BuiltinTemplate->setImplicit();
1027  TUDecl->addDecl(BuiltinTemplate);
1028 
1029  return BuiltinTemplate;
1030 }
1031 
1032 BuiltinTemplateDecl *
1033 ASTContext::getMakeIntegerSeqDecl() const {
1034  if (!MakeIntegerSeqDecl)
1035  MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
1036                                                getMakeIntegerSeqName());
1037  return MakeIntegerSeqDecl;
1038 }
1039 
1040 BuiltinTemplateDecl *
1041 ASTContext::getTypePackElementDecl() const {
1042  if (!TypePackElementDecl)
1043  TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
1044                                                 getTypePackElementName());
1045  return TypePackElementDecl;
1046 }
1047 
1048 RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
1049                                             RecordDecl::TagKind TK) const {
1050  SourceLocation Loc;
1051  RecordDecl *NewDecl;
1052  if (getLangOpts().CPlusPlus)
1053  NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
1054  Loc, &Idents.get(Name));
1055  else
1056  NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
1057  &Idents.get(Name));
1058  NewDecl->setImplicit();
1059  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
1060  const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
1061  return NewDecl;
1062 }
1063 
1064 TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
1065                                               StringRef Name) const {
1066  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
1067  TypedefDecl *NewDecl = TypedefDecl::Create(
1068  const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
1069  SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
1070  NewDecl->setImplicit();
1071  return NewDecl;
1072 }
1073 
1074 TypedefDecl *ASTContext::getInt128Decl() const {
1075  if (!Int128Decl)
1076  Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
1077  return Int128Decl;
1078 }
1079 
1080 TypedefDecl *ASTContext::getUInt128Decl() const {
1081  if (!UInt128Decl)
1082  UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
1083  return UInt128Decl;
1084 }
1085 
1086 void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
1087  auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
1088  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
1089  Types.push_back(Ty);
1090 }
1091 
1092 void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
1093  const TargetInfo *AuxTarget) {
1094  assert((!this->Target || this->Target == &Target) &&
1095  "Incorrect target reinitialization");
1096  assert(VoidTy.isNull() && "Context reinitialized?");
1097 
1098  this->Target = &Target;
1099  this->AuxTarget = AuxTarget;
1100 
1101  ABI.reset(createCXXABI(Target));
1102  AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
1103  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);
1104 
1105  // C99 6.2.5p19.
1106  InitBuiltinType(VoidTy, BuiltinType::Void);
1107 
1108  // C99 6.2.5p2.
1109  InitBuiltinType(BoolTy, BuiltinType::Bool);
1110  // C99 6.2.5p3.
1111  if (LangOpts.CharIsSigned)
1112  InitBuiltinType(CharTy, BuiltinType::Char_S);
1113  else
1114  InitBuiltinType(CharTy, BuiltinType::Char_U);
1115  // C99 6.2.5p4.
1116  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
1117  InitBuiltinType(ShortTy, BuiltinType::Short);
1118  InitBuiltinType(IntTy, BuiltinType::Int);
1119  InitBuiltinType(LongTy, BuiltinType::Long);
1120  InitBuiltinType(LongLongTy, BuiltinType::LongLong);
1121 
1122  // C99 6.2.5p6.
1123  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
1124  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
1125  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
1126  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
1127  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);
1128 
1129  // C99 6.2.5p10.
1130  InitBuiltinType(FloatTy, BuiltinType::Float);
1131  InitBuiltinType(DoubleTy, BuiltinType::Double);
1132  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);
1133 
1134  // GNU extension, __float128 for IEEE quadruple precision
1135  InitBuiltinType(Float128Ty, BuiltinType::Float128);
1136 
1137  // C11 extension ISO/IEC TS 18661-3
1138  InitBuiltinType(Float16Ty, BuiltinType::Float16);
1139 
1140  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
1141  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
1142  InitBuiltinType(AccumTy, BuiltinType::Accum);
1143  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
1144  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
1145  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
1146  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
1147  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
1148  InitBuiltinType(FractTy, BuiltinType::Fract);
1149  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
1150  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
1151  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
1152  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
1153  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
1154  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
1155  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
1156  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
1157  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
1158  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
1159  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
1160  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
1161  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
1162  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
1163  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
1164  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);
1165 
1166  // GNU extension, 128-bit integers.
1167  InitBuiltinType(Int128Ty, BuiltinType::Int128);
1168  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
1169 
1170  // C++ 3.9.1p5
1171  if (TargetInfo::isTypeSigned(Target.getWCharType()))
1172  InitBuiltinType(WCharTy, BuiltinType::WChar_S);
1173  else // -fshort-wchar makes wchar_t be unsigned.
1174  InitBuiltinType(WCharTy, BuiltinType::WChar_U);
1175  if (LangOpts.CPlusPlus && LangOpts.WChar)
1176  WideCharTy = WCharTy;
1177  else {
1178  // C99 (or C++ using -fno-wchar).
1179  WideCharTy = getFromTargetType(Target.getWCharType());
1180  }
1181 
1182  WIntTy = getFromTargetType(Target.getWIntType());
1183 
1184  // C++20 (proposed)
1185  InitBuiltinType(Char8Ty, BuiltinType::Char8);
1186 
1187  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1188  InitBuiltinType(Char16Ty, BuiltinType::Char16);
1189  else // C99
1190  Char16Ty = getFromTargetType(Target.getChar16Type());
1191 
1192  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1193  InitBuiltinType(Char32Ty, BuiltinType::Char32);
1194  else // C99
1195  Char32Ty = getFromTargetType(Target.getChar32Type());
1196 
1197  // Placeholder type for type-dependent expressions whose type is
1198  // completely unknown. No code should ever check a type against
1199  // DependentTy and users should never see it; however, it is here to
1200  // help diagnose failures to properly check for type-dependent
1201  // expressions.
1202  InitBuiltinType(DependentTy, BuiltinType::Dependent);
1203 
1204  // Placeholder type for functions.
1205  InitBuiltinType(OverloadTy, BuiltinType::Overload);
1206 
1207  // Placeholder type for bound members.
1208  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);
1209 
1210  // Placeholder type for pseudo-objects.
1211  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);
1212 
1213  // "any" type; useful for debugger-like clients.
1214  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);
1215 
1216  // Placeholder type for unbridged ARC casts.
1217  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);
1218 
1219  // Placeholder type for builtin functions.
1220  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
1221 
1222  // Placeholder type for OMP array sections.
1223  if (LangOpts.OpenMP)
1224  InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
1225 
1226  // C99 6.2.5p11.
1227  FloatComplexTy      = getComplexType(FloatTy);
1228  DoubleComplexTy     = getComplexType(DoubleTy);
1229  LongDoubleComplexTy = getComplexType(LongDoubleTy);
1230  Float128ComplexTy   = getComplexType(Float128Ty);
1231 
1232  // Builtin types for 'id', 'Class', and 'SEL'.
1233  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
1234  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
1235  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);
1236 
1237  if (LangOpts.OpenCL) {
1238 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
1239  InitBuiltinType(SingletonId, BuiltinType::Id);
1240 #include "clang/Basic/OpenCLImageTypes.def"
1241 
1242  InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
1243  InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
1244  InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
1245  InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
1246  InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);
1247  }
1248 
1249  // Builtin type for __objc_yes and __objc_no
1250  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
1251  SignedCharTy : BoolTy);
1252 
1253  ObjCConstantStringType = QualType();
1254 
1255  ObjCSuperType = QualType();
1256 
1257  // void * type
1258  if (LangOpts.OpenCLVersion >= 200) {
1259  auto Q = VoidTy.getQualifiers();
1260  Q.setAddressSpace(LangAS::opencl_generic);
1261  VoidPtrTy = getPointerType(getCanonicalType(
1262      getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
1263  } else {
1264  VoidPtrTy = getPointerType(VoidTy);
1265  }
1266 
1267  // nullptr type (C++0x 2.14.7)
1268  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
1269 
1270  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
1271  InitBuiltinType(HalfTy, BuiltinType::Half);
1272 
1273  // Builtin type used to help define __builtin_va_list.
1274  VaListTagDecl = nullptr;
1275 }
1276 
1277 DiagnosticsEngine &ASTContext::getDiagnostics() const {
1278  return SourceMgr.getDiagnostics();
1279 }
1280 
1281 AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
1282  AttrVec *&Result = DeclAttrs[D];
1283  if (!Result) {
1284  void *Mem = Allocate(sizeof(AttrVec));
1285  Result = new (Mem) AttrVec;
1286  }
1287 
1288  return *Result;
1289 }
1290 
1291 /// Erase the attributes corresponding to the given declaration.
1292 void ASTContext::eraseDeclAttrs(const Decl *D) {
1293  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
1294  if (Pos != DeclAttrs.end()) {
1295  Pos->second->~AttrVec();
1296  DeclAttrs.erase(Pos);
1297  }
1298 }
1299 
1300 // FIXME: Remove ?
1301 MemberSpecializationInfo *
1302 ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
1303  assert(Var->isStaticDataMember() && "Not a static data member");
1304  return getTemplateOrSpecializationInfo(Var)
1305      .dyn_cast<MemberSpecializationInfo *>();
1306 }
1307 
1308 ASTContext::TemplateOrSpecializationInfo
1309 ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
1310  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
1311  TemplateOrInstantiation.find(Var);
1312  if (Pos == TemplateOrInstantiation.end())
1313  return {};
1314 
1315  return Pos->second;
1316 }
1317 
1318 void
1319 ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
1320                                                 TemplateSpecializationKind TSK,
1321                                           SourceLocation PointOfInstantiation) {
1322  assert(Inst->isStaticDataMember() && "Not a static data member");
1323  assert(Tmpl->isStaticDataMember() && "Not a static data member");
1324  setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
1325                                            Tmpl, TSK, PointOfInstantiation));
1326 }
1327 
1328 void
1329 ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
1330                                             TemplateOrSpecializationInfo TSI) {
1331  assert(!TemplateOrInstantiation[Inst] &&
1332  "Already noted what the variable was instantiated from");
1333  TemplateOrInstantiation[Inst] = TSI;
1334 }
1335 
1336 FunctionDecl *ASTContext::getClassScopeSpecializationPattern(
1337                                                     const FunctionDecl *FD){
1338  assert(FD && "Specialization is 0");
1339  llvm::DenseMap<const FunctionDecl*, FunctionDecl *>::const_iterator Pos
1340  = ClassScopeSpecializationPattern.find(FD);
1341  if (Pos == ClassScopeSpecializationPattern.end())
1342  return nullptr;
1343 
1344  return Pos->second;
1345 }
1346 
1347 void ASTContext::setClassScopeSpecializationPattern(FunctionDecl *FD,
1348                                                     FunctionDecl *Pattern) {
1349  assert(FD && "Specialization is 0");
1350  assert(Pattern && "Class scope specialization pattern is 0");
1351  ClassScopeSpecializationPattern[FD] = Pattern;
1352 }
1353 
1354 NamedDecl *
1355 ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
1356  auto Pos = InstantiatedFromUsingDecl.find(UUD);
1357  if (Pos == InstantiatedFromUsingDecl.end())
1358  return nullptr;
1359 
1360  return Pos->second;
1361 }
1362 
1363 void
1364 ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
1365  assert((isa<UsingDecl>(Pattern) ||
1366  isa<UnresolvedUsingValueDecl>(Pattern) ||
1367  isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
1368  "pattern decl is not a using decl");
1369  assert((isa<UsingDecl>(Inst) ||
1370  isa<UnresolvedUsingValueDecl>(Inst) ||
1371  isa<UnresolvedUsingTypenameDecl>(Inst)) &&
1372  "instantiation did not produce a using decl");
1373  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
1374  InstantiatedFromUsingDecl[Inst] = Pattern;
1375 }
1376 
1377 UsingShadowDecl *
1378 ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
1379  llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
1380  = InstantiatedFromUsingShadowDecl.find(Inst);
1381  if (Pos == InstantiatedFromUsingShadowDecl.end())
1382  return nullptr;
1383 
1384  return Pos->second;
1385 }
1386 
1387 void
1388 ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
1389                                                UsingShadowDecl *Pattern) {
1390  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
1391  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
1392 }
1393 
1394 FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
1395  llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
1396  = InstantiatedFromUnnamedFieldDecl.find(Field);
1397  if (Pos == InstantiatedFromUnnamedFieldDecl.end())
1398  return nullptr;
1399 
1400  return Pos->second;
1401 }
1402 
1403 void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
1404                                                      FieldDecl *Tmpl) {
1405  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
1406  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
1407  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1408  "Already noted what unnamed field was instantiated from");
1409 
1410  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1411 }
1412 
1413 ASTContext::overridden_cxx_method_iterator
1414 ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
1415  return overridden_methods(Method).begin();
1416 }
1417 
1418 ASTContext::overridden_cxx_method_iterator
1419 ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
1420  return overridden_methods(Method).end();
1421 }
1422 
1423 unsigned
1424 ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
1425  auto Range = overridden_methods(Method);
1426  return Range.end() - Range.begin();
1427 }
1428 
1429 ASTContext::overridden_method_range
1430 ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
1431  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
1432  OverriddenMethods.find(Method->getCanonicalDecl());
1433  if (Pos == OverriddenMethods.end())
1434  return overridden_method_range(nullptr, nullptr);
1435  return overridden_method_range(Pos->second.begin(), Pos->second.end());
1436 }
1437 
1438 void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
1439                                      const CXXMethodDecl *Overridden) {
1440  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
1441  OverriddenMethods[Method].push_back(Overridden);
1442 }
1443 
1444 void ASTContext::getOverriddenMethods(
1445                       const NamedDecl *D,
1446  SmallVectorImpl<const NamedDecl *> &Overridden) const {
1447  assert(D);
1448 
1449  if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
1450  Overridden.append(overridden_methods_begin(CXXMethod),
1451  overridden_methods_end(CXXMethod));
1452  return;
1453  }
1454 
1455  const auto *Method = dyn_cast<ObjCMethodDecl>(D);
1456  if (!Method)
1457  return;
1458 
1459  SmallVector<const ObjCMethodDecl *, 8> OverDecls;
1460  Method->getOverriddenMethods(OverDecls);
1461  Overridden.append(OverDecls.begin(), OverDecls.end());
1462 }
1463 
1464 void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
1465  assert(!Import->NextLocalImport && "Import declaration already in the chain");
1466  assert(!Import->isFromASTFile() && "Non-local import declaration");
1467  if (!FirstLocalImport) {
1468  FirstLocalImport = Import;
1469  LastLocalImport = Import;
1470  return;
1471  }
1472 
1473  LastLocalImport->NextLocalImport = Import;
1474  LastLocalImport = Import;
1475 }
1476 
1477 //===----------------------------------------------------------------------===//
1478 // Type Sizing and Analysis
1479 //===----------------------------------------------------------------------===//
1480 
1481 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1482 /// scalar floating point type.
1483 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1484  const auto *BT = T->getAs<BuiltinType>();
1485  assert(BT && "Not a floating point type!");
1486  switch (BT->getKind()) {
1487  default: llvm_unreachable("Not a floating point type!");
1488  case BuiltinType::Float16:
1489  case BuiltinType::Half:
1490  return Target->getHalfFormat();
1491  case BuiltinType::Float: return Target->getFloatFormat();
1492  case BuiltinType::Double: return Target->getDoubleFormat();
1493  case BuiltinType::LongDouble: return Target->getLongDoubleFormat();
1494  case BuiltinType::Float128: return Target->getFloat128Format();
1495  }
1496 }
1497 
1498 CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
1499  unsigned Align = Target->getCharWidth();
1500 
1501  bool UseAlignAttrOnly = false;
1502  if (unsigned AlignFromAttr = D->getMaxAlignment()) {
1503  Align = AlignFromAttr;
1504 
1505  // __attribute__((aligned)) can increase or decrease alignment
1506  // *except* on a struct or struct member, where it only increases
1507  // alignment unless 'packed' is also specified.
1508  //
1509  // It is an error for alignas to decrease alignment, so we can
1510  // ignore that possibility; Sema should diagnose it.
1511  if (isa<FieldDecl>(D)) {
1512  UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
1513  cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
1514  } else {
1515  UseAlignAttrOnly = true;
1516  }
1517  }
1518  else if (isa<FieldDecl>(D))
1519  UseAlignAttrOnly =
1520  D->hasAttr<PackedAttr>() ||
1521  cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
1522 
1523  // If we're using the align attribute only, just ignore everything
1524  // else about the declaration and its type.
1525  if (UseAlignAttrOnly) {
1526  // do nothing
1527  } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
1528  QualType T = VD->getType();
1529  if (const auto *RT = T->getAs<ReferenceType>()) {
1530  if (ForAlignof)
1531  T = RT->getPointeeType();
1532  else
1533  T = getPointerType(RT->getPointeeType());
1534  }
1535  QualType BaseT = getBaseElementType(T);
1536  if (T->isFunctionType())
1537  Align = getTypeInfoImpl(T.getTypePtr()).Align;
1538  else if (!BaseT->isIncompleteType()) {
1539  // Adjust alignments of declarations with array type by the
1540  // large-array alignment on the target.
1541  if (const ArrayType *arrayType = getAsArrayType(T)) {
1542  unsigned MinWidth = Target->getLargeArrayMinWidth();
1543  if (!ForAlignof && MinWidth) {
1544  if (isa<VariableArrayType>(arrayType))
1545  Align = std::max(Align, Target->getLargeArrayAlign());
1546  else if (isa<ConstantArrayType>(arrayType) &&
1547  MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
1548  Align = std::max(Align, Target->getLargeArrayAlign());
1549  }
1550  }
1551  Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
1552  if (BaseT.getQualifiers().hasUnaligned())
1553  Align = Target->getCharWidth();
1554  if (const auto *VD = dyn_cast<VarDecl>(D)) {
1555  if (VD->hasGlobalStorage() && !ForAlignof)
1556  Align = std::max(Align, getTargetInfo().getMinGlobalAlign());
1557  }
1558  }
1559 
1560  // Fields can be subject to extra alignment constraints, like if
1561  // the field is packed, the struct is packed, or the struct has a
1562  // max-field-alignment constraint (#pragma pack). So calculate
1563  // the actual alignment of the field within the struct, and then
1564  // (as we're expected to) constrain that by the alignment of the type.
1565  if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
1566  const RecordDecl *Parent = Field->getParent();
1567  // We can only produce a sensible answer if the record is valid.
1568  if (!Parent->isInvalidDecl()) {
1569  const ASTRecordLayout &Layout = getASTRecordLayout(Parent);
1570 
1571  // Start with the record's overall alignment.
1572  unsigned FieldAlign = toBits(Layout.getAlignment());
1573 
1574  // Use the GCD of that and the offset within the record.
1575  uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
1576  if (Offset > 0) {
1577  // Alignment is always a power of 2, so the GCD will be a power of 2,
1578  // which means we get to do this crazy thing instead of Euclid's.
1579  uint64_t LowBitOfOffset = Offset & (~Offset + 1);
1580  if (LowBitOfOffset < FieldAlign)
1581  FieldAlign = static_cast<unsigned>(LowBitOfOffset);
1582  }
1583 
1584  Align = std::min(Align, FieldAlign);
1585  }
1586  }
1587  }
1588 
1589  return toCharUnitsFromBits(Align);
1590 }
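Editorial worked example of the power-of-two GCD trick used for field alignment above (hypothetical values):

uint64_t Offset = 40;                            // field at bit offset 40 (binary 101000)
uint64_t LowBit = Offset & (~Offset + 1);        // == 8, greatest power of two dividing 40
unsigned FieldAlign = std::min<uint64_t>(64, LowBit); // == 8 == gcd(64, 40)

Because the record alignment is a power of two, taking the minimum of it and the lowest set bit of the offset yields exactly their GCD, so the declared alignment is capped at 8 bits in this case.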
1591 
1592 // getTypeInfoDataSizeInChars - Return the size of a type, in
1593 // chars. If the type is a record, its data size is returned. This is
1594 // the size of the memcpy that's performed when assigning this type
1595 // using a trivial copy/move assignment operator.
1596 std::pair<CharUnits, CharUnits>
1597 ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
1598  std::pair<CharUnits, CharUnits> sizeAndAlign = getTypeInfoInChars(T);
1599 
1600  // In C++, objects can sometimes be allocated into the tail padding
1601  // of a base-class subobject. We decide whether that's possible
1602  // during class layout, so here we can just trust the layout results.
1603  if (getLangOpts().CPlusPlus) {
1604  if (const auto *RT = T->getAs<RecordType>()) {
1605  const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
1606  sizeAndAlign.first = layout.getDataSize();
1607  }
1608  }
1609 
1610  return sizeAndAlign;
1611 }
1612 
1613 /// getConstantArrayInfoInChars - Performing the computation in CharUnits
1614 /// instead of in bits prevents overflowing the uint64_t for some large arrays.
1615 std::pair<CharUnits, CharUnits>
1616 static getConstantArrayInfoInChars(const ASTContext &Context,
1617                                    const ConstantArrayType *CAT) {
1618  std::pair<CharUnits, CharUnits> EltInfo =
1619  Context.getTypeInfoInChars(CAT->getElementType());
1620  uint64_t Size = CAT->getSize().getZExtValue();
1621  assert((Size == 0 || static_cast<uint64_t>(EltInfo.first.getQuantity()) <=
1622  (uint64_t)(-1)/Size) &&
1623  "Overflow in array type char size evaluation");
1624  uint64_t Width = EltInfo.first.getQuantity() * Size;
1625  unsigned Align = EltInfo.second.getQuantity();
1626  if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
1627  Context.getTargetInfo().getPointerWidth(0) == 64)
1628  Width = llvm::alignTo(Width, Align);
1629  return std::make_pair(CharUnits::fromQuantity(Width),
1630  CharUnits::fromQuantity(Align));
1631 }
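Editorial example of the overflow the CharUnits computation above avoids (hypothetical sizes): an array of 2^61 one-byte elements has a byte size of 2^61, which fits in a uint64_t, but its size in bits would be 2^61 * 8 = 2^64, which does not. Working in CharUnits therefore keeps very large constant arrays representable where a bit count would wrap.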
1632 
1633 std::pair<CharUnits, CharUnits>
1634 ASTContext::getTypeInfoInChars(const Type *T) const {
1635  if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
1636  return getConstantArrayInfoInChars(*this, CAT);
1637  TypeInfo Info = getTypeInfo(T);
1638  return std::make_pair(toCharUnitsFromBits(Info.Width),
1639  toCharUnitsFromBits(Info.Align));
1640 }
1641 
1642 std::pair<CharUnits, CharUnits>
1643 ASTContext::getTypeInfoInChars(QualType T) const {
1644  return getTypeInfoInChars(T.getTypePtr());
1645 }
1646 
1647 bool ASTContext::isAlignmentRequired(const Type *T) const {
1648  return getTypeInfo(T).AlignIsRequired;
1649 }
1650 
1651 bool ASTContext::isAlignmentRequired(QualType T) const {
1652  return isAlignmentRequired(T.getTypePtr());
1653 }
1654 
1655 unsigned ASTContext::getTypeAlignIfKnown(QualType T) const {
1656  // An alignment on a typedef overrides anything else.
1657  if (const auto *TT = T->getAs<TypedefType>())
1658  if (unsigned Align = TT->getDecl()->getMaxAlignment())
1659  return Align;
1660 
1661  // If we have an (array of) complete type, we're done.
1662  T = getBaseElementType(T);
1663  if (!T->isIncompleteType())
1664  return getTypeAlign(T);
1665 
1666  // If we had an array type, its element type might be a typedef
1667  // type with an alignment attribute.
1668  if (const auto *TT = T->getAs<TypedefType>())
1669  if (unsigned Align = TT->getDecl()->getMaxAlignment())
1670  return Align;
1671 
1672  // Otherwise, see if the declaration of the type had an attribute.
1673  if (const auto *TT = T->getAs<TagType>())
1674  return TT->getDecl()->getMaxAlignment();
1675 
1676  return 0;
1677 }
1678 
1679 TypeInfo ASTContext::getTypeInfo(const Type *T) const {
1680  TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
1681  if (I != MemoizedTypeInfo.end())
1682  return I->second;
1683 
1684  // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
1685  TypeInfo TI = getTypeInfoImpl(T);
1686  MemoizedTypeInfo[T] = TI;
1687  return TI;
1688 }
1689 
1690 /// getTypeInfoImpl - Return the size of the specified type, in bits. This
1691 /// method does not work on incomplete types.
1692 ///
1693 /// FIXME: Pointers into different addr spaces could have different sizes and
1694 /// alignment requirements: getPointerInfo should take an AddrSpace, this
1695 /// should take a QualType, &c.
1696 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
1697  uint64_t Width = 0;
1698  unsigned Align = 8;
1699  bool AlignIsRequired = false;
1700  unsigned AS = 0;
1701  switch (T->getTypeClass()) {
1702 #define TYPE(Class, Base)
1703 #define ABSTRACT_TYPE(Class, Base)
1704 #define NON_CANONICAL_TYPE(Class, Base)
1705 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
1706 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
1707  case Type::Class: \
1708  assert(!T->isDependentType() && "should not see dependent types here"); \
1709  return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
1710 #include "clang/AST/TypeNodes.def"
1711  llvm_unreachable("Should not see dependent types");
1712 
1713  case Type::FunctionNoProto:
1714  case Type::FunctionProto:
1715  // GCC extension: alignof(function) = 32 bits
1716  Width = 0;
1717  Align = 32;
1718  break;
1719 
1720  case Type::IncompleteArray:
1721  case Type::VariableArray:
1722  Width = 0;
1723  Align = getTypeAlign(cast<ArrayType>(T)->getElementType());
1724  break;
1725 
1726  case Type::ConstantArray: {
1727  const auto *CAT = cast<ConstantArrayType>(T);
1728 
1729  TypeInfo EltInfo = getTypeInfo(CAT->getElementType());
1730  uint64_t Size = CAT->getSize().getZExtValue();
1731  assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
1732  "Overflow in array type bit size evaluation");
1733  Width = EltInfo.Width * Size;
1734  Align = EltInfo.Align;
1735  if (!getTargetInfo().getCXXABI().isMicrosoft() ||
1736  getTargetInfo().getPointerWidth(0) == 64)
1737  Width = llvm::alignTo(Width, Align);
1738  break;
1739  }
1740  case Type::ExtVector:
1741  case Type::Vector: {
1742  const auto *VT = cast<VectorType>(T);
1743  TypeInfo EltInfo = getTypeInfo(VT->getElementType());
1744  Width = EltInfo.Width * VT->getNumElements();
1745  Align = Width;
1746  // If the alignment is not a power of 2, round up to the next power of 2.
1747  // This happens for non-power-of-2 length vectors.
1748  if (Align & (Align-1)) {
1749  Align = llvm::NextPowerOf2(Align);
1750  Width = llvm::alignTo(Width, Align);
1751  }
1752  // Adjust the alignment based on the target max.
1753  uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
1754  if (TargetVectorAlign && TargetVectorAlign < Align)
1755  Align = TargetVectorAlign;
1756  break;
1757  }
1758 
1759  case Type::Builtin:
1760  switch (cast<BuiltinType>(T)->getKind()) {
1761  default: llvm_unreachable("Unknown builtin type!");
1762  case BuiltinType::Void:
1763  // GCC extension: alignof(void) = 8 bits.
1764  Width = 0;
1765  Align = 8;
1766  break;
1767  case BuiltinType::Bool:
1768  Width = Target->getBoolWidth();
1769  Align = Target->getBoolAlign();
1770  break;
1771  case BuiltinType::Char_S:
1772  case BuiltinType::Char_U:
1773  case BuiltinType::UChar:
1774  case BuiltinType::SChar:
1775  case BuiltinType::Char8:
1776  Width = Target->getCharWidth();
1777  Align = Target->getCharAlign();
1778  break;
1779  case BuiltinType::WChar_S:
1780  case BuiltinType::WChar_U:
1781  Width = Target->getWCharWidth();
1782  Align = Target->getWCharAlign();
1783  break;
1784  case BuiltinType::Char16:
1785  Width = Target->getChar16Width();
1786  Align = Target->getChar16Align();
1787  break;
1788  case BuiltinType::Char32:
1789  Width = Target->getChar32Width();
1790  Align = Target->getChar32Align();
1791  break;
1792  case BuiltinType::UShort:
1793  case BuiltinType::Short:
1794  Width = Target->getShortWidth();
1795  Align = Target->getShortAlign();
1796  break;
1797  case BuiltinType::UInt:
1798  case BuiltinType::Int:
1799  Width = Target->getIntWidth();
1800  Align = Target->getIntAlign();
1801  break;
1802  case BuiltinType::ULong:
1803  case BuiltinType::Long:
1804  Width = Target->getLongWidth();
1805  Align = Target->getLongAlign();
1806  break;
1807  case BuiltinType::ULongLong:
1808  case BuiltinType::LongLong:
1809  Width = Target->getLongLongWidth();
1810  Align = Target->getLongLongAlign();
1811  break;
1812  case BuiltinType::Int128:
1813  case BuiltinType::UInt128:
1814  Width = 128;
1815  Align = 128; // int128_t is 128-bit aligned on all targets.
1816  break;
1817  case BuiltinType::ShortAccum:
1818  case BuiltinType::UShortAccum:
1819  case BuiltinType::SatShortAccum:
1820  case BuiltinType::SatUShortAccum:
1821  Width = Target->getShortAccumWidth();
1822  Align = Target->getShortAccumAlign();
1823  break;
1824  case BuiltinType::Accum:
1825  case BuiltinType::UAccum:
1826  case BuiltinType::SatAccum:
1827  case BuiltinType::SatUAccum:
1828  Width = Target->getAccumWidth();
1829  Align = Target->getAccumAlign();
1830  break;
1831  case BuiltinType::LongAccum:
1832  case BuiltinType::ULongAccum:
1833  case BuiltinType::SatLongAccum:
1834  case BuiltinType::SatULongAccum:
1835  Width = Target->getLongAccumWidth();
1836  Align = Target->getLongAccumAlign();
1837  break;
1838  case BuiltinType::ShortFract:
1839  case BuiltinType::UShortFract:
1840  case BuiltinType::SatShortFract:
1841  case BuiltinType::SatUShortFract:
1842  Width = Target->getShortFractWidth();
1843  Align = Target->getShortFractAlign();
1844  break;
1845  case BuiltinType::Fract:
1846  case BuiltinType::UFract:
1847  case BuiltinType::SatFract:
1848  case BuiltinType::SatUFract:
1849  Width = Target->getFractWidth();
1850  Align = Target->getFractAlign();
1851  break;
1852  case BuiltinType::LongFract:
1853  case BuiltinType::ULongFract:
1854  case BuiltinType::SatLongFract:
1855  case BuiltinType::SatULongFract:
1856  Width = Target->getLongFractWidth();
1857  Align = Target->getLongFractAlign();
1858  break;
1859  case BuiltinType::Float16:
1860  case BuiltinType::Half:
1861  Width = Target->getHalfWidth();
1862  Align = Target->getHalfAlign();
1863  break;
1864  case BuiltinType::Float:
1865  Width = Target->getFloatWidth();
1866  Align = Target->getFloatAlign();
1867  break;
1868  case BuiltinType::Double:
1869  Width = Target->getDoubleWidth();
1870  Align = Target->getDoubleAlign();
1871  break;
1872  case BuiltinType::LongDouble:
1873  Width = Target->getLongDoubleWidth();
1874  Align = Target->getLongDoubleAlign();
1875  break;
1876  case BuiltinType::Float128:
1877  Width = Target->getFloat128Width();
1878  Align = Target->getFloat128Align();
1879  break;
1880  case BuiltinType::NullPtr:
1881  Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
1882  Align = Target->getPointerAlign(0); // == sizeof(void*)
1883  break;
1884  case BuiltinType::ObjCId:
1885  case BuiltinType::ObjCClass:
1886  case BuiltinType::ObjCSel:
1887  Width = Target->getPointerWidth(0);
1888  Align = Target->getPointerAlign(0);
1889  break;
1890  case BuiltinType::OCLSampler:
1891  case BuiltinType::OCLEvent:
1892  case BuiltinType::OCLClkEvent:
1893  case BuiltinType::OCLQueue:
1894  case BuiltinType::OCLReserveID:
1895 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
1896  case BuiltinType::Id:
1897 #include "clang/Basic/OpenCLImageTypes.def"
1898  AS = getTargetAddressSpace(
1900  Width = Target->getPointerWidth(AS);
1901  Align = Target->getPointerAlign(AS);
1902  break;
1903  }
1904  break;
1905  case Type::ObjCObjectPointer:
1906  Width = Target->getPointerWidth(0);
1907  Align = Target->getPointerAlign(0);
1908  break;
1909  case Type::BlockPointer:
1910  AS = getTargetAddressSpace(cast<BlockPointerType>(T)->getPointeeType());
1911  Width = Target->getPointerWidth(AS);
1912  Align = Target->getPointerAlign(AS);
1913  break;
1914  case Type::LValueReference:
1915  case Type::RValueReference:
1916  // alignof and sizeof should never enter this code path here, so we go
1917  // the pointer route.
1918  AS = getTargetAddressSpace(cast<ReferenceType>(T)->getPointeeType());
1919  Width = Target->getPointerWidth(AS);
1920  Align = Target->getPointerAlign(AS);
1921  break;
1922  case Type::Pointer:
1923  AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType());
1924  Width = Target->getPointerWidth(AS);
1925  Align = Target->getPointerAlign(AS);
1926  break;
1927  case Type::MemberPointer: {
1928  const auto *MPT = cast<MemberPointerType>(T);
1929  CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
1930  Width = MPI.Width;
1931  Align = MPI.Align;
1932  break;
1933  }
1934  case Type::Complex: {
1935  // Complex types have the same alignment as their elements, but twice the
1936  // size.
1937  TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
1938  Width = EltInfo.Width * 2;
1939  Align = EltInfo.Align;
1940  break;
1941  }
1942  case Type::ObjCObject:
1943  return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
1944  case Type::Adjusted:
1945  case Type::Decayed:
1946  return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
1947  case Type::ObjCInterface: {
1948  const auto *ObjCI = cast<ObjCInterfaceType>(T);
1949  const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
1950  Width = toBits(Layout.getSize());
1951  Align = toBits(Layout.getAlignment());
1952  break;
1953  }
1954  case Type::Record:
1955  case Type::Enum: {
1956  const auto *TT = cast<TagType>(T);
1957 
1958  if (TT->getDecl()->isInvalidDecl()) {
1959  Width = 8;
1960  Align = 8;
1961  break;
1962  }
1963 
1964  if (const auto *ET = dyn_cast<EnumType>(TT)) {
1965  const EnumDecl *ED = ET->getDecl();
1966  TypeInfo Info =
1967  getTypeInfo(ED->getIntegerType()->getCanonicalTypeUnqualified());
1968  if (unsigned AttrAlign = ED->getMaxAlignment()) {
1969  Info.Align = AttrAlign;
1970  Info.AlignIsRequired = true;
1971  }
1972  return Info;
1973  }
1974 
1975  const auto *RT = cast<RecordType>(TT);
1976  const RecordDecl *RD = RT->getDecl();
1977  const ASTRecordLayout &Layout = getASTRecordLayout(RD);
1978  Width = toBits(Layout.getSize());
1979  Align = toBits(Layout.getAlignment());
1980  AlignIsRequired = RD->hasAttr<AlignedAttr>();
1981  break;
1982  }
1983 
1984  case Type::SubstTemplateTypeParm:
1985  return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
1986  getReplacementType().getTypePtr());
1987 
1988  case Type::Auto:
1989  case Type::DeducedTemplateSpecialization: {
1990  const auto *A = cast<DeducedType>(T);
1991  assert(!A->getDeducedType().isNull() &&
1992  "cannot request the size of an undeduced or dependent auto type");
1993  return getTypeInfo(A->getDeducedType().getTypePtr());
1994  }
1995 
1996  case Type::Paren:
1997  return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
1998 
1999  case Type::ObjCTypeParam:
2000  return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());
2001 
2002  case Type::Typedef: {
2003  const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl();
2004  TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
2005  // If the typedef has an aligned attribute on it, it overrides any computed
2006  // alignment we have. This violates the GCC documentation (which says that
2007  // attribute(aligned) can only round up) but matches its implementation.
2008  if (unsigned AttrAlign = Typedef->getMaxAlignment()) {
2009  Align = AttrAlign;
2010  AlignIsRequired = true;
2011  } else {
2012  Align = Info.Align;
2013  AlignIsRequired = Info.AlignIsRequired;
2014  }
2015  Width = Info.Width;
2016  break;
2017  }
2018 
2019  case Type::Elaborated:
2020  return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
2021 
2022  case Type::Attributed:
2023  return getTypeInfo(
2024  cast<AttributedType>(T)->getEquivalentType().getTypePtr());
2025 
2026  case Type::Atomic: {
2027  // Start with the base type information.
2028  TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
2029  Width = Info.Width;
2030  Align = Info.Align;
2031 
2032  if (!Width) {
2033  // An otherwise zero-sized type should still generate an
2034  // atomic operation.
2035  Width = Target->getCharWidth();
2036  assert(Align);
2037  } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2038  // If the size of the type doesn't exceed the platform's max
2039  // atomic promotion width, make the size and alignment more
2040  // favorable to atomic operations:
2041 
2042  // Round the size up to a power of 2.
2043  if (!llvm::isPowerOf2_64(Width))
2044  Width = llvm::NextPowerOf2(Width);
2045 
2046  // Set the alignment equal to the size.
2047  Align = static_cast<unsigned>(Width);
2048  }
2049  }
2050  break;
2051 
2052  case Type::Pipe:
2053  Width = Target->getPointerWidth(getTargetAddressSpace(LangAS::opencl_global));
2054  Align = Target->getPointerAlign(getTargetAddressSpace(LangAS::opencl_global));
2055  break;
2056  }
2057 
2058  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2059  return TypeInfo(Width, Align, AlignIsRequired);
2060 }
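// Worked example (illustrative): for an OpenCL-style vector of three floats,
//
//   typedef float float3 __attribute__((ext_vector_type(3)));
//
// the Vector/ExtVector case above first computes Width = 3 * 32 = 96 bits;
// 96 is not a power of two, so Align is rounded up to 128 and Width is then
// padded to 128 bits, before any target-specific max-vector-align cap applies.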
2061 
2062 unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2063  UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
2064  if (I != MemoizedUnadjustedAlign.end())
2065  return I->second;
2066 
2067  unsigned UnadjustedAlign;
2068  if (const auto *RT = T->getAs<RecordType>()) {
2069  const RecordDecl *RD = RT->getDecl();
2070  const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2071  UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2072  } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
2073  const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2074  UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2075  } else {
2076  UnadjustedAlign = getTypeAlign(T);
2077  }
2078 
2079  MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2080  return UnadjustedAlign;
2081 }
2082 
2084  unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
2085  // Target ppc64 with QPX: simd default alignment for pointer to double is 32.
2086  if ((getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64 ||
2087  getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64le) &&
2088  getTargetInfo().getABI() == "elfv1-qpx" &&
2089  T->isSpecificBuiltinType(BuiltinType::Double))
2090  SimdAlign = 256;
2091  return SimdAlign;
2092 }
2093 
2094 /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2096  return CharUnits::fromQuantity(BitSize / getCharWidth());
2097 }
2098 
2099 /// toBits - Convert a size in characters to a size in bits.
2100 int64_t ASTContext::toBits(CharUnits CharSize) const {
2101  return CharSize.getQuantity() * getCharWidth();
2102 }
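// Example (illustrative): on a target where getCharWidth() == 8,
// toCharUnitsFromBits(64) yields CharUnits::fromQuantity(8), and
// toBits(CharUnits::fromQuantity(8)) converts that back to 64 bits.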
2103 
2104 /// getTypeSizeInChars - Return the size of the specified type, in characters.
2105 /// This method does not work on incomplete types.
2106 CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
2107  return getTypeInfoInChars(T).first;
2108 }
2109 CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
2110  return getTypeInfoInChars(T).first;
2111 }
2112 
2113 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2114 /// characters. This method does not work on incomplete types.
2115 CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
2116  return toCharUnitsFromBits(getTypeAlign(T));
2117 }
2118 CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
2119  return toCharUnitsFromBits(getTypeAlign(T));
2120 }
2121 
2122 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
2123 /// type, in characters, before alignment adjustments. This method does
2124 /// not work on incomplete types.
2125 CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
2126  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
2127 }
2128 CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
2129  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
2130 }
2131 
2132 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified
2133 /// type for the current target in bits. This can be different than the ABI
2134 /// alignment in cases where it is beneficial for performance to overalign
2135 /// a data type.
2136 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
2137  TypeInfo TI = getTypeInfo(T);
2138  unsigned ABIAlign = TI.Align;
2139 
2140  T = T->getBaseElementTypeUnsafe();
2141 
2142  // The preferred alignment of member pointers is that of a pointer.
2143  if (T->isMemberPointerType())
2144  return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
2145 
2146  if (!Target->allowsLargerPreferedTypeAlignment())
2147  return ABIAlign;
2148 
2149  // Double and long long should be naturally aligned if possible.
2150  if (const auto *CT = T->getAs<ComplexType>())
2151  T = CT->getElementType().getTypePtr();
2152  if (const auto *ET = T->getAs<EnumType>())
2153  T = ET->getDecl()->getIntegerType().getTypePtr();
2154  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
2155  T->isSpecificBuiltinType(BuiltinType::LongLong) ||
2156  T->isSpecificBuiltinType(BuiltinType::ULongLong))
2157  // Don't increase the alignment if an alignment attribute was specified on a
2158  // typedef declaration.
2159  if (!TI.AlignIsRequired)
2160  return std::max(ABIAlign, (unsigned)getTypeSize(T));
2161 
2162  return ABIAlign;
2163 }
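// Example (illustrative): on 32-bit x86 System V, a plain `double` has a
// 32-bit ABI alignment but is 64 bits wide, so getPreferredTypeAlign reports
// 64 bits.  If the same double is named through a typedef carrying
// __attribute__((aligned(4))), TI.AlignIsRequired is set above and the ABI
// value is returned unchanged.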
2164 
2165 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2166 /// for __attribute__((aligned)) on this target, to be used if no alignment
2167 /// value is specified.
2168 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
2169  return getTargetInfo().getDefaultAlignForAttributeAligned();
2170 }
2171 
2172 /// getAlignOfGlobalVar - Return the alignment in bits that should be given
2173 /// to a global variable of the specified type.
2174 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
2175  return std::max(getTypeAlign(T), getTargetInfo().getMinGlobalAlign());
2176 }
2177 
2178 /// getAlignOfGlobalVarInChars - Return the alignment in characters that
2179 /// should be given to a global variable of the specified type.
2180 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const {
2181  return toCharUnitsFromBits(getAlignOfGlobalVar(T));
2182 }
2183 
2184 CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
2185  CharUnits Offset = CharUnits::Zero();
2186  const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
2187  while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2188  Offset += Layout->getBaseClassOffset(Base);
2189  Layout = &getASTRecordLayout(Base);
2190  }
2191  return Offset;
2192 }
2193 
2194 /// DeepCollectObjCIvars -
2195 /// This routine first collects all declared, but not synthesized, ivars in
2196 /// the superclass hierarchy, and then collects all ivars, including those
2197 /// synthesized, of the current class. It is used when implementing the
2198 /// current class, at which point all ivars, declared and synthesized, are known.
2199 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
2200  bool leafClass,
2201  SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
2202  if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2203  DeepCollectObjCIvars(SuperClass, false, Ivars);
2204  if (!leafClass) {
2205  for (const auto *I : OI->ivars())
2206  Ivars.push_back(I);
2207  } else {
2208  auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2209  for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2210  Iv= Iv->getNextIvar())
2211  Ivars.push_back(Iv);
2212  }
2213 }
2214 
2215 /// CollectInheritedProtocols - Collect all protocols in current class and
2216 /// those inherited by it.
2217 void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
2218  llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
2219  if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
2220  // We can use protocol_iterator here instead of
2221  // all_referenced_protocol_iterator since we are walking all categories.
2222  for (auto *Proto : OI->all_referenced_protocols()) {
2223  CollectInheritedProtocols(Proto, Protocols);
2224  }
2225 
2226  // Categories of this Interface.
2227  for (const auto *Cat : OI->visible_categories())
2228  CollectInheritedProtocols(Cat, Protocols);
2229 
2230  if (ObjCInterfaceDecl *SD = OI->getSuperClass())
2231  while (SD) {
2232  CollectInheritedProtocols(SD, Protocols);
2233  SD = SD->getSuperClass();
2234  }
2235  } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
2236  for (auto *Proto : OC->protocols()) {
2237  CollectInheritedProtocols(Proto, Protocols);
2238  }
2239  } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
2240  // Insert the protocol.
2241  if (!Protocols.insert(
2242  const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
2243  return;
2244 
2245  for (auto *Proto : OP->protocols())
2246  CollectInheritedProtocols(Proto, Protocols);
2247  }
2248 }
2249 
2250 static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
2251  const RecordDecl *RD) {
2252  assert(RD->isUnion() && "Must be union type");
2253  CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());
2254 
2255  for (const auto *Field : RD->fields()) {
2256  if (!Context.hasUniqueObjectRepresentations(Field->getType()))
2257  return false;
2258  CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType());
2259  if (FieldSize != UnionSize)
2260  return false;
2261  }
2262  return !RD->field_empty();
2263 }
2264 
2265 static bool isStructEmpty(QualType Ty) {
2266  const RecordDecl *RD = Ty->castAs<RecordType>()->getDecl();
2267 
2268  if (!RD->field_empty())
2269  return false;
2270 
2271  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD))
2272  return ClassDecl->isEmpty();
2273 
2274  return true;
2275 }
2276 
2277 static llvm::Optional<int64_t>
2278 structHasUniqueObjectRepresentations(const ASTContext &Context,
2279  const RecordDecl *RD) {
2280  assert(!RD->isUnion() && "Must be struct/class type");
2281  const auto &Layout = Context.getASTRecordLayout(RD);
2282 
2283  int64_t CurOffsetInBits = 0;
2284  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) {
2285  if (ClassDecl->isDynamicClass())
2286  return llvm::None;
2287 
2288  SmallVector<std::pair<QualType, int64_t>, 4> Bases;
2289  for (const auto Base : ClassDecl->bases()) {
2290  // Empty types can be inherited from, and non-empty types can potentially
2291  // have tail padding, so just make sure there isn't an error.
2292  if (!isStructEmpty(Base.getType())) {
2293  llvm::Optional<int64_t> Size = structHasUniqueObjectRepresentations(
2294  Context, Base.getType()->getAs<RecordType>()->getDecl());
2295  if (!Size)
2296  return llvm::None;
2297  Bases.emplace_back(Base.getType(), Size.getValue());
2298  }
2299  }
2300 
2301  llvm::sort(
2302  Bases.begin(), Bases.end(), [&](const std::pair<QualType, int64_t> &L,
2303  const std::pair<QualType, int64_t> &R) {
2304  return Layout.getBaseClassOffset(L.first->getAsCXXRecordDecl()) <
2305  Layout.getBaseClassOffset(R.first->getAsCXXRecordDecl());
2306  });
2307 
2308  for (const auto Base : Bases) {
2309  int64_t BaseOffset = Context.toBits(
2310  Layout.getBaseClassOffset(Base.first->getAsCXXRecordDecl()));
2311  int64_t BaseSize = Base.second;
2312  if (BaseOffset != CurOffsetInBits)
2313  return llvm::None;
2314  CurOffsetInBits = BaseOffset + BaseSize;
2315  }
2316  }
2317 
2318  for (const auto *Field : RD->fields()) {
2319  if (!Field->getType()->isReferenceType() &&
2320  !Context.hasUniqueObjectRepresentations(Field->getType()))
2321  return llvm::None;
2322 
2323  int64_t FieldSizeInBits =
2324  Context.toBits(Context.getTypeSizeInChars(Field->getType()));
2325  if (Field->isBitField()) {
2326  int64_t BitfieldSize = Field->getBitWidthValue(Context);
2327 
2328  if (BitfieldSize > FieldSizeInBits)
2329  return llvm::None;
2330  FieldSizeInBits = BitfieldSize;
2331  }
2332 
2333  int64_t FieldOffsetInBits = Context.getFieldOffset(Field);
2334 
2335  if (FieldOffsetInBits != CurOffsetInBits)
2336  return llvm::None;
2337 
2338  CurOffsetInBits = FieldSizeInBits + FieldOffsetInBits;
2339  }
2340 
2341  return CurOffsetInBits;
2342 }
2343 
2344 bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const {
2345  // C++17 [meta.unary.prop]:
2346  // The predicate condition for a template specialization
2347  // has_unique_object_representations<T> shall be
2348  // satisfied if and only if:
2349  // (9.1) - T is trivially copyable, and
2350  // (9.2) - any two objects of type T with the same value have the same
2351  // object representation, where two objects
2352  // of array or non-union class type are considered to have the same value
2353  // if their respective sequences of
2354  // direct subobjects have the same values, and two objects of union type
2355  // are considered to have the same
2356  // value if they have the same active member and the corresponding members
2357  // have the same value.
2358  // The set of scalar types for which this condition holds is
2359  // implementation-defined. [ Note: If a type has padding
2360  // bits, the condition does not hold; otherwise, the condition holds true
2361  // for unsigned integral types. -- end note ]
2362  assert(!Ty.isNull() && "Null QualType sent to unique object rep check");
2363 
2364  // Arrays are unique only if their element type is unique.
2365  if (Ty->isArrayType())
2366  return hasUniqueObjectRepresentations(getBaseElementType(Ty));
2367 
2368  // (9.1) - T is trivially copyable...
2369  if (!Ty.isTriviallyCopyableType(*this))
2370  return false;
2371 
2372  // All integrals and enums are unique.
2373  if (Ty->isIntegralOrEnumerationType())
2374  return true;
2375 
2376  // All other pointers are unique.
2377  if (Ty->isPointerType())
2378  return true;
2379 
2380  if (Ty->isMemberPointerType()) {
2381  const auto *MPT = Ty->getAs<MemberPointerType>();
2382  return !ABI->getMemberPointerInfo(MPT).HasPadding;
2383  }
2384 
2385  if (Ty->isRecordType()) {
2386  const RecordDecl *Record = Ty->getAs<RecordType>()->getDecl();
2387 
2388  if (Record->isInvalidDecl())
2389  return false;
2390 
2391  if (Record->isUnion())
2392  return unionHasUniqueObjectRepresentations(*this, Record);
2393 
2394  Optional<int64_t> StructSize =
2395  structHasUniqueObjectRepresentations(*this, Record);
2396 
2397  return StructSize &&
2398  StructSize.getValue() == static_cast<int64_t>(getTypeSize(Ty));
2399  }
2400 
2401  // FIXME: More cases to handle here (list by rsmith):
2402  // vectors (careful about, eg, vector of 3 foo)
2403  // _Complex int and friends
2404  // _Atomic T
2405  // Obj-C block pointers
2406  // Obj-C object pointers
2407  // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
2408  // clk_event_t, queue_t, reserve_id_t)
2409  // There're also Obj-C class types and the Obj-C selector type, but I think it
2410  // makes sense for those to return false here.
2411 
2412  return false;
2413 }
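// Example (illustrative, hypothetical structs): padding defeats the check.
//
//   struct Padded { char c; int i; };   // 3 padding bytes after `c`
//   struct Packed { int a; int b; };    // no padding
//
// For Padded, the running offset after `c` is 8 bits but `i` starts at bit 32,
// so the struct helper above bails out and the query returns false; for Packed
// the offsets line up and the computed 64 bits matches getTypeSize(), so the
// query returns true.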
2414 
2415 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
2416  unsigned count = 0;
2417  // Count ivars declared in class extension.
2418  for (const auto *Ext : OI->known_extensions())
2419  count += Ext->ivar_size();
2420 
2421  // Count ivars defined in this class's implementation. This
2422  // includes synthesized ivars.
2423  if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
2424  count += ImplDecl->ivar_size();
2425 
2426  return count;
2427 }
2428 
2429 bool ASTContext::isSentinelNullExpr(const Expr *E) {
2430  if (!E)
2431  return false;
2432 
2433  // nullptr_t is always treated as null.
2434  if (E->getType()->isNullPtrType()) return true;
2435 
2436  if (E->getType()->isAnyPointerType() &&
2437  E->IgnoreParenCasts()->isNullPointerConstant(*this,
2438  Expr::NPC_ValueDependentIsNull))
2439  return true;
2440 
2441  // Unfortunately, __null has type 'int'.
2442  if (isa<GNUNullExpr>(E)) return true;
2443 
2444  return false;
2445 }
2446 
2447 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none
2448 /// exists.
2449 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
2450  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2451  I = ObjCImpls.find(D);
2452  if (I != ObjCImpls.end())
2453  return cast<ObjCImplementationDecl>(I->second);
2454  return nullptr;
2455 }
2456 
2457 /// Get the implementation of ObjCCategoryDecl, or nullptr if none
2458 /// exists.
2459 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
2460  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2461  I = ObjCImpls.find(D);
2462  if (I != ObjCImpls.end())
2463  return cast<ObjCCategoryImplDecl>(I->second);
2464  return nullptr;
2465 }
2466 
2467 /// Set the implementation of ObjCInterfaceDecl.
2468 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
2469  ObjCImplementationDecl *ImplD) {
2470  assert(IFaceD && ImplD && "Passed null params");
2471  ObjCImpls[IFaceD] = ImplD;
2472 }
2473 
2474 /// Set the implementation of ObjCCategoryDecl.
2475 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
2476  ObjCCategoryImplDecl *ImplD) {
2477  assert(CatD && ImplD && "Passed null params");
2478  ObjCImpls[CatD] = ImplD;
2479 }
2480 
2481 const ObjCMethodDecl *
2482 ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
2483  return ObjCMethodRedecls.lookup(MD);
2484 }
2485 
2486 void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
2487  const ObjCMethodDecl *Redecl) {
2488  assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
2489  ObjCMethodRedecls[MD] = Redecl;
2490 }
2491 
2492 const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
2493  const NamedDecl *ND) const {
2494  if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
2495  return ID;
2496  if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
2497  return CD->getClassInterface();
2498  if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
2499  return IMD->getClassInterface();
2500 
2501  return nullptr;
2502 }
2503 
2504 /// Get the copy initialization expression of VarDecl, or nullptr if
2505 /// none exists.
2506 BlockVarCopyInit
2507 ASTContext::getBlockVarCopyInit(const VarDecl *VD) const {
2508  assert(VD && "Passed null params");
2509  assert(VD->hasAttr<BlocksAttr>() &&
2510  "getBlockVarCopyInits - not __block var");
2511  auto I = BlockVarCopyInits.find(VD);
2512  if (I != BlockVarCopyInits.end())
2513  return I->second;
2514  return {nullptr, false};
2515 }
2516 
2517 /// Set the copy initialization expression of a block var decl.
2518 void ASTContext::setBlockVarCopyInit(const VarDecl *VD, Expr *CopyExpr,
2519  bool CanThrow) {
2520  assert(VD && CopyExpr && "Passed null params");
2521  assert(VD->hasAttr<BlocksAttr>() &&
2522  "setBlockVarCopyInits - not __block var");
2523  BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
2524 }
2525 
2526 TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
2527  unsigned DataSize) const {
2528  if (!DataSize)
2529  DataSize = TypeLoc::getFullDataSizeForType(T);
2530  else
2531  assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
2532  "incorrect data size provided to CreateTypeSourceInfo!");
2533 
2534  auto *TInfo =
2535  (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
2536  new (TInfo) TypeSourceInfo(T);
2537  return TInfo;
2538 }
2539 
2540 TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
2541  SourceLocation L) const {
2542  TypeSourceInfo *DI = CreateTypeSourceInfo(T);
2543  DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
2544  return DI;
2545 }
2546 
2547 const ASTRecordLayout &
2548 ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
2549  return getObjCLayout(D, nullptr);
2550 }
2551 
2552 const ASTRecordLayout &
2553 ASTContext::getASTObjCImplementationLayout(
2554  const ObjCImplementationDecl *D) const {
2555  return getObjCLayout(D->getClassInterface(), D);
2556 }
2557 
2558 //===----------------------------------------------------------------------===//
2559 // Type creation/memoization methods
2560 //===----------------------------------------------------------------------===//
2561 
2562 QualType
2563 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
2564  unsigned fastQuals = quals.getFastQualifiers();
2565  quals.removeFastQualifiers();
2566 
2567  // Check if we've already instantiated this type.
2568  llvm::FoldingSetNodeID ID;
2569  ExtQuals::Profile(ID, baseType, quals);
2570  void *insertPos = nullptr;
2571  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
2572  assert(eq->getQualifiers() == quals);
2573  return QualType(eq, fastQuals);
2574  }
2575 
2576  // If the base type is not canonical, make the appropriate canonical type.
2577  QualType canon;
2578  if (!baseType->isCanonicalUnqualified()) {
2579  SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
2580  canonSplit.Quals.addConsistentQualifiers(quals);
2581  canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);
2582 
2583  // Re-find the insert position.
2584  (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
2585  }
2586 
2587  auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
2588  ExtQualNodes.InsertNode(eq, insertPos);
2589  return QualType(eq, fastQuals);
2590 }
2591 
2592 QualType ASTContext::getAddrSpaceQualType(QualType T,
2593  LangAS AddressSpace) const {
2594  QualType CanT = getCanonicalType(T);
2595  if (CanT.getAddressSpace() == AddressSpace)
2596  return T;
2597 
2598  // If we are composing extended qualifiers together, merge together
2599  // into one ExtQuals node.
2600  QualifierCollector Quals;
2601  const Type *TypeNode = Quals.strip(T);
2602 
2603  // If this type already has an address space specified, it cannot get
2604  // another one.
2605  assert(!Quals.hasAddressSpace() &&
2606  "Type cannot be in multiple addr spaces!");
2607  Quals.addAddressSpace(AddressSpace);
2608 
2609  return getExtQualType(TypeNode, Quals);
2610 }
2611 
2612 QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
2613  // If we are composing extended qualifiers together, merge together
2614  // into one ExtQuals node.
2615  QualifierCollector Quals;
2616  const Type *TypeNode = Quals.strip(T);
2617 
2618  // If the qualifier doesn't have an address space just return it.
2619  if (!Quals.hasAddressSpace())
2620  return T;
2621 
2622  Quals.removeAddressSpace();
2623 
2624  // Removal of the address space can mean there are no longer any
2625  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
2626  // or required.
2627  if (Quals.hasNonFastQualifiers())
2628  return getExtQualType(TypeNode, Quals);
2629  else
2630  return QualType(TypeNode, Quals.getFastQualifiers());
2631 }
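// Example (illustrative, assuming an ASTContext `Ctx` with OpenCL-style
// address spaces): adding and removing an address-space qualifier
// round-trips back to the original type.
//
//   QualType GlobalInt = Ctx.getAddrSpaceQualType(Ctx.IntTy,
//                                                 LangAS::opencl_global);
//   QualType Stripped  = Ctx.removeAddrSpaceQualType(GlobalInt);
//   // Ctx.hasSameType(Stripped, Ctx.IntTy) holds.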
2632 
2633 QualType ASTContext::getObjCGCQualType(QualType T,
2634  Qualifiers::GC GCAttr) const {
2635  QualType CanT = getCanonicalType(T);
2636  if (CanT.getObjCGCAttr() == GCAttr)
2637  return T;
2638 
2639  if (const auto *ptr = T->getAs<PointerType>()) {
2640  QualType Pointee = ptr->getPointeeType();
2641  if (Pointee->isAnyPointerType()) {
2642  QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
2643  return getPointerType(ResultType);
2644  }
2645  }
2646 
2647  // If we are composing extended qualifiers together, merge together
2648  // into one ExtQuals node.
2649  QualifierCollector Quals;
2650  const Type *TypeNode = Quals.strip(T);
2651 
2652  // If this type already has an ObjCGC specified, it cannot get
2653  // another one.
2654  assert(!Quals.hasObjCGCAttr() &&
2655  "Type cannot have multiple ObjCGCs!");
2656  Quals.addObjCGCAttr(GCAttr);
2657 
2658  return getExtQualType(TypeNode, Quals);
2659 }
2660 
2661 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
2662  FunctionType::ExtInfo Info) {
2663  if (T->getExtInfo() == Info)
2664  return T;
2665 
2666  QualType Result;
2667  if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
2668  Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
2669  } else {
2670  const auto *FPT = cast<FunctionProtoType>(T);
2671  FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
2672  EPI.ExtInfo = Info;
2673  Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
2674  }
2675 
2676  return cast<FunctionType>(Result.getTypePtr());
2677 }
2678 
2679 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
2680  QualType ResultType) {
2681  FD = FD->getMostRecentDecl();
2682  while (true) {
2683  const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
2684  FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
2685  FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
2686  if (FunctionDecl *Next = FD->getPreviousDecl())
2687  FD = Next;
2688  else
2689  break;
2690  }
2691  if (ASTMutationListener *L = getASTMutationListener())
2692  L->DeducedReturnType(FD, ResultType);
2693 }
2694 
2695 /// Get a function type and produce the equivalent function type with the
2696 /// specified exception specification. Type sugar that can be present on a
2697 /// declaration of a function with an exception specification is permitted
2698 /// and preserved. Other type sugar (for instance, typedefs) is not.
2701  // Might have some parens.
2702  if (const auto *PT = dyn_cast<ParenType>(Orig))
2703  return getParenType(
2704  getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI));
2705 
2706  // Might have a calling-convention attribute.
2707  if (const auto *AT = dyn_cast<AttributedType>(Orig))
2708  return getAttributedType(
2709  AT->getAttrKind(),
2710  getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI),
2711  getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI));
2712 
2713  // Anything else must be a function type. Rebuild it with the new exception
2714  // specification.
2715  const auto *Proto = cast<FunctionProtoType>(Orig);
2716  return getFunctionType(
2717  Proto->getReturnType(), Proto->getParamTypes(),
2718  Proto->getExtProtoInfo().withExceptionSpec(ESI));
2719 }
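// Example (illustrative): rebuilding an existing function type `FnTy` as
// noexcept while preserving ParenType/AttributedType sugar, assuming an
// ASTContext `Ctx`.
//
//   FunctionProtoType::ExceptionSpecInfo ESI(EST_BasicNoexcept);
//   QualType NoexceptFnTy = Ctx.getFunctionTypeWithExceptionSpec(FnTy, ESI);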
2720 
2721 bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
2722  QualType U) {
2723  return hasSameType(T, U) ||
2724  (getLangOpts().CPlusPlus17 &&
2725  hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None),
2726  getFunctionTypeWithExceptionSpec(U, EST_None)));
2727 }
2728 
2729 void ASTContext::adjustExceptionSpec(
2730  FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
2731  bool AsWritten) {
2732  // Update the type.
2733  QualType Updated =
2734  getFunctionTypeWithExceptionSpec(FD->getType(), ESI);
2735  FD->setType(Updated);
2736 
2737  if (!AsWritten)
2738  return;
2739 
2740  // Update the type in the type source information too.
2741  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
2742  // If the type and the type-as-written differ, we may need to update
2743  // the type-as-written too.
2744  if (TSInfo->getType() != FD->getType())
2745  Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);
2746 
2747  // FIXME: When we get proper type location information for exceptions,
2748  // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
2749  // up the TypeSourceInfo;
2750  assert(TypeLoc::getFullDataSizeForType(Updated) ==
2751  TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
2752  "TypeLoc size mismatch from updating exception specification");
2753  TSInfo->overrideType(Updated);
2754  }
2755 }
2756 
2757 /// getComplexType - Return the uniqued reference to the type for a complex
2758 /// number with the specified element type.
2759 QualType ASTContext::getComplexType(QualType T) const {
2760  // Unique pointers, to guarantee there is only one pointer of a particular
2761  // structure.
2762  llvm::FoldingSetNodeID ID;
2763  ComplexType::Profile(ID, T);
2764 
2765  void *InsertPos = nullptr;
2766  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
2767  return QualType(CT, 0);
2768 
2769  // If the pointee type isn't canonical, this won't be a canonical type either,
2770  // so fill in the canonical type field.
2771  QualType Canonical;
2772  if (!T.isCanonical()) {
2773  Canonical = getComplexType(getCanonicalType(T));
2774 
2775  // Get the new insert position for the node we care about.
2776  ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
2777  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2778  }
2779  auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
2780  Types.push_back(New);
2781  ComplexTypes.InsertNode(New, InsertPos);
2782  return QualType(New, 0);
2783 }
2784 
2785 /// getPointerType - Return the uniqued reference to the type for a pointer to
2786 /// the specified type.
2787 QualType ASTContext::getPointerType(QualType T) const {
2788  // Unique pointers, to guarantee there is only one pointer of a particular
2789  // structure.
2790  llvm::FoldingSetNodeID ID;
2791  PointerType::Profile(ID, T);
2792 
2793  void *InsertPos = nullptr;
2794  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
2795  return QualType(PT, 0);
2796 
2797  // If the pointee type isn't canonical, this won't be a canonical type either,
2798  // so fill in the canonical type field.
2799  QualType Canonical;
2800  if (!T.isCanonical()) {
2801  Canonical = getPointerType(getCanonicalType(T));
2802 
2803  // Get the new insert position for the node we care about.
2804  PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
2805  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2806  }
2807  auto *New = new (*this, TypeAlignment) PointerType(T, Canonical);
2808  Types.push_back(New);
2809  PointerTypes.InsertNode(New, InsertPos);
2810  return QualType(New, 0);
2811 }
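// Example (illustrative): because pointer types are uniqued through the
// FoldingSet above, building the same pointer type twice yields the same
// Type node, so pointer identity is a valid equality check.
//
//   QualType P1 = Ctx.getPointerType(Ctx.IntTy);
//   QualType P2 = Ctx.getPointerType(Ctx.IntTy);
//   assert(P1.getTypePtr() == P2.getTypePtr());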
2812 
2813 QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
2814  llvm::FoldingSetNodeID ID;
2815  AdjustedType::Profile(ID, Orig, New);
2816  void *InsertPos = nullptr;
2817  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2818  if (AT)
2819  return QualType(AT, 0);
2820 
2821  QualType Canonical = getCanonicalType(New);
2822 
2823  // Get the new insert position for the node we care about.
2824  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2825  assert(!AT && "Shouldn't be in the map!");
2826 
2827  AT = new (*this, TypeAlignment)
2828  AdjustedType(Type::Adjusted, Orig, New, Canonical);
2829  Types.push_back(AT);
2830  AdjustedTypes.InsertNode(AT, InsertPos);
2831  return QualType(AT, 0);
2832 }
2833 
2834 QualType ASTContext::getDecayedType(QualType T) const {
2835  assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
2836 
2837  QualType Decayed;
2838 
2839  // C99 6.7.5.3p7:
2840  // A declaration of a parameter as "array of type" shall be
2841  // adjusted to "qualified pointer to type", where the type
2842  // qualifiers (if any) are those specified within the [ and ] of
2843  // the array type derivation.
2844  if (T->isArrayType())
2845  Decayed = getArrayDecayedType(T);
2846 
2847  // C99 6.7.5.3p8:
2848  // A declaration of a parameter as "function returning type"
2849  // shall be adjusted to "pointer to function returning type", as
2850  // in 6.3.2.1.
2851  if (T->isFunctionType())
2852  Decayed = getPointerType(T);
2853 
2854  llvm::FoldingSetNodeID ID;
2855  AdjustedType::Profile(ID, T, Decayed);
2856  void *InsertPos = nullptr;
2857  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2858  if (AT)
2859  return QualType(AT, 0);
2860 
2861  QualType Canonical = getCanonicalType(Decayed);
2862 
2863  // Get the new insert position for the node we care about.
2864  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2865  assert(!AT && "Shouldn't be in the map!");
2866 
2867  AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical);
2868  Types.push_back(AT);
2869  AdjustedTypes.InsertNode(AT, InsertPos);
2870  return QualType(AT, 0);
2871 }
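// Example (illustrative): the parameter adjustments of C99 6.7.5.3p7/p8.
//
//   llvm::APInt Four(32, 4);
//   QualType ArrTy = Ctx.getConstantArrayType(Ctx.IntTy, Four,
//                                             ArrayType::Normal, 0);
//   QualType Param = Ctx.getDecayedType(ArrTy);
//   // Param is a DecayedType whose adjusted (decayed) type is `int *`.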
2872 
2873 /// getBlockPointerType - Return the uniqued reference to the type for
2874 /// a pointer to the specified block.
2875 QualType ASTContext::getBlockPointerType(QualType T) const {
2876  assert(T->isFunctionType() && "block of function types only");
2877  // Unique pointers, to guarantee there is only one block of a particular
2878  // structure.
2879  llvm::FoldingSetNodeID ID;
2880  BlockPointerType::Profile(ID, T);
2881 
2882  void *InsertPos = nullptr;
2883  if (BlockPointerType *PT =
2884  BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
2885  return QualType(PT, 0);
2886 
2887  // If the block pointee type isn't canonical, this won't be a canonical
2888  // type either so fill in the canonical type field.
2889  QualType Canonical;
2890  if (!T.isCanonical()) {
2891  Canonical = getBlockPointerType(getCanonicalType(T));
2892 
2893  // Get the new insert position for the node we care about.
2894  BlockPointerType *NewIP =
2895  BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
2896  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2897  }
2898  auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
2899  Types.push_back(New);
2900  BlockPointerTypes.InsertNode(New, InsertPos);
2901  return QualType(New, 0);
2902 }
2903 
2904 /// getLValueReferenceType - Return the uniqued reference to the type for an
2905 /// lvalue reference to the specified type.
2906 QualType
2907 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
2908  assert(getCanonicalType(T) != OverloadTy &&
2909  "Unresolved overloaded function type");
2910 
2911  // Unique pointers, to guarantee there is only one pointer of a particular
2912  // structure.
2913  llvm::FoldingSetNodeID ID;
2914  ReferenceType::Profile(ID, T, SpelledAsLValue);
2915 
2916  void *InsertPos = nullptr;
2917  if (LValueReferenceType *RT =
2918  LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
2919  return QualType(RT, 0);
2920 
2921  const auto *InnerRef = T->getAs<ReferenceType>();
2922 
2923  // If the referencee type isn't canonical, this won't be a canonical type
2924  // either, so fill in the canonical type field.
2925  QualType Canonical;
2926  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
2927  QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
2928  Canonical = getLValueReferenceType(getCanonicalType(PointeeType));
2929 
2930  // Get the new insert position for the node we care about.
2931  LValueReferenceType *NewIP =
2932  LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
2933  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2934  }
2935 
2936  auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
2937  SpelledAsLValue);
2938  Types.push_back(New);
2939  LValueReferenceTypes.InsertNode(New, InsertPos);
2940 
2941  return QualType(New, 0);
2942 }
2943 
2944 /// getRValueReferenceType - Return the uniqued reference to the type for an
2945 /// rvalue reference to the specified type.
2946 QualType ASTContext::getRValueReferenceType(QualType T) const {
2947  // Unique pointers, to guarantee there is only one pointer of a particular
2948  // structure.
2949  llvm::FoldingSetNodeID ID;
2950  ReferenceType::Profile(ID, T, false);
2951 
2952  void *InsertPos = nullptr;
2953  if (RValueReferenceType *RT =
2954  RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
2955  return QualType(RT, 0);
2956 
2957  const auto *InnerRef = T->getAs<ReferenceType>();
2958 
2959  // If the referencee type isn't canonical, this won't be a canonical type
2960  // either, so fill in the canonical type field.
2961  QualType Canonical;
2962  if (InnerRef || !T.isCanonical()) {
2963  QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
2964  Canonical = getRValueReferenceType(getCanonicalType(PointeeType));
2965 
2966  // Get the new insert position for the node we care about.
2967  RValueReferenceType *NewIP =
2968  RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
2969  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2970  }
2971 
2972  auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
2973  Types.push_back(New);
2974  RValueReferenceTypes.InsertNode(New, InsertPos);
2975  return QualType(New, 0);
2976 }
2977 
2978 /// getMemberPointerType - Return the uniqued reference to the type for a
2979 /// member pointer to the specified type, in the specified class.
2980 QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
2981  // Unique pointers, to guarantee there is only one pointer of a particular
2982  // structure.
2983  llvm::FoldingSetNodeID ID;
2984  MemberPointerType::Profile(ID, T, Cls);
2985 
2986  void *InsertPos = nullptr;
2987  if (MemberPointerType *PT =
2988  MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
2989  return QualType(PT, 0);
2990 
2991  // If the pointee or class type isn't canonical, this won't be a canonical
2992  // type either, so fill in the canonical type field.
2993  QualType Canonical;
2994  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
2995  Canonical = getMemberPointerType(getCanonicalType(T), getCanonicalType(Cls));
2996 
2997  // Get the new insert position for the node we care about.
2998  MemberPointerType *NewIP =
2999  MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3000  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3001  }
3002  auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
3003  Types.push_back(New);
3004  MemberPointerTypes.InsertNode(New, InsertPos);
3005  return QualType(New, 0);
3006 }
3007 
3008 /// getConstantArrayType - Return the unique reference to the type for an
3009 /// array of the specified element type.
3010 QualType ASTContext::getConstantArrayType(QualType EltTy,
3011  const llvm::APInt &ArySizeIn,
3012  ArrayType::ArraySizeModifier ASM,
3013  unsigned IndexTypeQuals) const {
3014  assert((EltTy->isDependentType() ||
3015  EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
3016  "Constant array of VLAs is illegal!");
3017 
3018  // Convert the array size into a canonical width matching the pointer size for
3019  // the target.
3020  llvm::APInt ArySize(ArySizeIn);
3021  ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());
3022 
3023  llvm::FoldingSetNodeID ID;
3024  ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, IndexTypeQuals);
3025 
3026  void *InsertPos = nullptr;
3027  if (ConstantArrayType *ATP =
3028  ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
3029  return QualType(ATP, 0);
3030 
3031  // If the element type isn't canonical or has qualifiers, this won't
3032  // be a canonical type either, so fill in the canonical type field.
3033  QualType Canon;
3034  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
3035  SplitQualType canonSplit = getCanonicalType(EltTy).split();
3036  Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize,
3037  ASM, IndexTypeQuals);
3038  Canon = getQualifiedType(Canon, canonSplit.Quals);
3039 
3040  // Get the new insert position for the node we care about.
3041  ConstantArrayType *NewIP =
3042  ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
3043  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3044  }
3045 
3046  auto *New = new (*this,TypeAlignment)
3047  ConstantArrayType(EltTy, Canon, ArySize, ASM, IndexTypeQuals);
3048  ConstantArrayTypes.InsertNode(New, InsertPos);
3049  Types.push_back(New);
3050  return QualType(New, 0);
3051 }
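// Example (illustrative): building the type `int[16]`.  The APInt size is
// widened or truncated above to the target's maximum pointer width before
// the type is uniqued.
//
//   llvm::APInt Size(32, 16);
//   QualType IntArr = Ctx.getConstantArrayType(Ctx.IntTy, Size,
//                                              ArrayType::Normal,
//                                              /*IndexTypeQuals=*/0);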
3052 
3053 /// getVariableArrayDecayedType - Turns the given type, which may be
3054 /// variably-modified, into the corresponding type with all the known
3055 /// sizes replaced with [*].
3056 QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
3057  // Vastly most common case.
3058  if (!type->isVariablyModifiedType()) return type;
3059 
3060  QualType result;
3061 
3062  SplitQualType split = type.getSplitDesugaredType();
3063  const Type *ty = split.Ty;
3064  switch (ty->getTypeClass()) {
3065 #define TYPE(Class, Base)
3066 #define ABSTRACT_TYPE(Class, Base)
3067 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3068 #include "clang/AST/TypeNodes.def"
3069  llvm_unreachable("didn't desugar past all non-canonical types?");
3070 
3071  // These types should never be variably-modified.
3072  case Type::Builtin:
3073  case Type::Complex:
3074  case Type::Vector:
3075  case Type::DependentVector:
3076  case Type::ExtVector:
3077  case Type::DependentSizedExtVector:
3078  case Type::DependentAddressSpace:
3079  case Type::ObjCObject:
3080  case Type::ObjCInterface:
3081  case Type::ObjCObjectPointer:
3082  case Type::Record:
3083  case Type::Enum:
3084  case Type::UnresolvedUsing:
3085  case Type::TypeOfExpr:
3086  case Type::TypeOf:
3087  case Type::Decltype:
3088  case Type::UnaryTransform:
3089  case Type::DependentName:
3090  case Type::InjectedClassName:
3091  case Type::TemplateSpecialization:
3092  case Type::DependentTemplateSpecialization:
3093  case Type::TemplateTypeParm:
3094  case Type::SubstTemplateTypeParmPack:
3095  case Type::Auto:
3096  case Type::DeducedTemplateSpecialization:
3097  case Type::PackExpansion:
3098  llvm_unreachable("type should never be variably-modified");
3099 
3100  // These types can be variably-modified but should never need to
3101  // further decay.
3102  case Type::FunctionNoProto:
3103  case Type::FunctionProto:
3104  case Type::BlockPointer:
3105  case Type::MemberPointer:
3106  case Type::Pipe:
3107  return type;
3108 
3109  // These types can be variably-modified. All these modifications
3110  // preserve structure except as noted by comments.
3111  // TODO: if we ever care about optimizing VLAs, there are no-op
3112  // optimizations available here.
3113  case Type::Pointer:
3114  result = getPointerType(getVariableArrayDecayedType(
3115  cast<PointerType>(ty)->getPointeeType()));
3116  break;
3117 
3118  case Type::LValueReference: {
3119  const auto *lv = cast<LValueReferenceType>(ty);
3120  result = getLValueReferenceType(
3121  getVariableArrayDecayedType(lv->getPointeeType()),
3122  lv->isSpelledAsLValue());
3123  break;
3124  }
3125 
3126  case Type::RValueReference: {
3127  const auto *lv = cast<RValueReferenceType>(ty);
3128  result = getRValueReferenceType(
3129  getVariableArrayDecayedType(lv->getPointeeType()));
3130  break;
3131  }
3132 
3133  case Type::Atomic: {
3134  const auto *at = cast<AtomicType>(ty);
3135  result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
3136  break;
3137  }
3138 
3139  case Type::ConstantArray: {
3140  const auto *cat = cast<ConstantArrayType>(ty);
3141  result = getConstantArrayType(
3142  getVariableArrayDecayedType(cat->getElementType()),
3143  cat->getSize(),
3144  cat->getSizeModifier(),
3145  cat->getIndexTypeCVRQualifiers());
3146  break;
3147  }
3148 
3149  case Type::DependentSizedArray: {
3150  const auto *dat = cast<DependentSizedArrayType>(ty);
3151  result = getDependentSizedArrayType(
3152  getVariableArrayDecayedType(dat->getElementType()),
3153  dat->getSizeExpr(),
3154  dat->getSizeModifier(),
3155  dat->getIndexTypeCVRQualifiers(),
3156  dat->getBracketsRange());
3157  break;
3158  }
3159 
3160  // Turn incomplete types into [*] types.
3161  case Type::IncompleteArray: {
3162  const auto *iat = cast<IncompleteArrayType>(ty);
3163  result = getVariableArrayType(
3164  getVariableArrayDecayedType(iat->getElementType()),
3165  /*size*/ nullptr,
3166  ArrayType::Normal,
3167  iat->getIndexTypeCVRQualifiers(),
3168  SourceRange());
3169  break;
3170  }
3171 
3172  // Turn VLA types into [*] types.
3173  case Type::VariableArray: {
3174  const auto *vat = cast<VariableArrayType>(ty);
3175  result = getVariableArrayType(
3176  getVariableArrayDecayedType(vat->getElementType()),
3177  /*size*/ nullptr,
3178  ArrayType::Star,
3179  vat->getIndexTypeCVRQualifiers(),
3180  vat->getBracketsRange());
3181  break;
3182  }
3183  }
3184 
3185  // Apply the top-level qualifiers from the original.
3186  return getQualifiedType(result, split.Quals);
3187 }
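// Example (illustrative): for a variably modified parameter type such as
// `int (*)[n]`, this rewrites the VLA bound to the unspecified `[*]` form,
// giving `int (*)[*]`; pointers and references are rebuilt around the
// decayed element type, so the surrounding structure is preserved.
//
//   // hypothetical: Param is the QualType of a parameter typed `int [m][n]`
//   QualType Decayed = Ctx.getVariableArrayDecayedType(Param);
//   // Decayed now prints as `int [*][*]`, with top-level qualifiers kept.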
3188 
3189 /// getVariableArrayType - Returns a non-unique reference to the type for a
3190 /// variable array of the specified element type.
3191 QualType ASTContext::getVariableArrayType(QualType EltTy,
3192  Expr *NumElts,
3193  ArrayType::ArraySizeModifier ASM,
3194  unsigned IndexTypeQuals,
3195  SourceRange Brackets) const {
3196  // Since we don't unique expressions, it isn't possible to unique VLA's
3197  // that have an expression provided for their size.
3198  QualType Canon;
3199 
3200  // Be sure to pull qualifiers off the element type.
3201  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
3202  SplitQualType canonSplit = getCanonicalType(EltTy).split();
3203  Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
3204  IndexTypeQuals, Brackets);
3205  Canon = getQualifiedType(Canon, canonSplit.Quals);
3206  }
3207 
3208  auto *New = new (*this, TypeAlignment)
3209  VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
3210 
3211  VariableArrayTypes.push_back(New);
3212  Types.push_back(New);
3213  return QualType(New, 0);
3214 }
3215 
3216 /// getDependentSizedArrayType - Returns a non-unique reference to
3217 /// the type for a dependently-sized array of the specified element
3218 /// type.
3219 QualType ASTContext::getDependentSizedArrayType(QualType elementType,
3220  Expr *numElements,
3221  ArrayType::ArraySizeModifier ASM,
3222  unsigned elementTypeQuals,
3223  SourceRange brackets) const {
3224  assert((!numElements || numElements->isTypeDependent() ||
3225  numElements->isValueDependent()) &&
3226  "Size must be type- or value-dependent!");
3227 
3228  // Dependently-sized array types that do not have a specified number
3229  // of elements will have their sizes deduced from a dependent
3230  // initializer. We do no canonicalization here at all, which is okay
3231  // because they can't be used in most locations.
3232  if (!numElements) {
3233  auto *newType
3234  = new (*this, TypeAlignment)
3235  DependentSizedArrayType(*this, elementType, QualType(),
3236  numElements, ASM, elementTypeQuals,
3237  brackets);
3238  Types.push_back(newType);
3239  return QualType(newType, 0);
3240  }
3241 
3242  // Otherwise, we actually build a new type every time, but we
3243  // also build a canonical type.
3244 
3245  SplitQualType canonElementType = getCanonicalType(elementType).split();
3246 
3247  void *insertPos = nullptr;
3248  llvm::FoldingSetNodeID ID;
3249  DependentSizedArrayType::Profile(ID, *this,
3250  QualType(canonElementType.Ty, 0),
3251  ASM, elementTypeQuals, numElements);
3252 
3253  // Look for an existing type with these properties.
3254  DependentSizedArrayType *canonTy =
3255  DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);
3256 
3257  // If we don't have one, build one.
3258  if (!canonTy) {
3259  canonTy = new (*this, TypeAlignment)
3260  DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
3261  QualType(), numElements, ASM, elementTypeQuals,
3262  brackets);
3263  DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
3264  Types.push_back(canonTy);
3265  }
3266 
3267  // Apply qualifiers from the element type to the array.
3268  QualType canon = getQualifiedType(QualType(canonTy,0),
3269  canonElementType.Quals);
3270 
3271  // If we didn't need extra canonicalization for the element type or the size
3272  // expression, then just use that as our result.
3273  if (QualType(canonElementType.Ty, 0) == elementType &&
3274  canonTy->getSizeExpr() == numElements)
3275  return canon;
3276 
3277  // Otherwise, we need to build a type which follows the spelling
3278  // of the element type.
3279  auto *sugaredType
3280  = new (*this, TypeAlignment)
3281  DependentSizedArrayType(*this, elementType, canon, numElements,
3282  ASM, elementTypeQuals, brackets);
3283  Types.push_back(sugaredType);
3284  return QualType(sugaredType, 0);
3285 }
3286 
3287 QualType ASTContext::getIncompleteArrayType(QualType elementType,
3288  ArrayType::ArraySizeModifier ASM,
3289  unsigned elementTypeQuals) const {
3290  llvm::FoldingSetNodeID ID;
3291  IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);
3292 
3293  void *insertPos = nullptr;
3294  if (IncompleteArrayType *iat =
3295  IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
3296  return QualType(iat, 0);
3297 
3298  // If the element type isn't canonical, this won't be a canonical type
3299  // either, so fill in the canonical type field. We also have to pull
3300  // qualifiers off the element type.
3301  QualType canon;
3302 
3303  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
3304  SplitQualType canonSplit = getCanonicalType(elementType).split();
3305  canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
3306  ASM, elementTypeQuals);
3307  canon = getQualifiedType(canon, canonSplit.Quals);
3308 
3309  // Get the new insert position for the node we care about.
3310  IncompleteArrayType *existing =
3311  IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
3312  assert(!existing && "Shouldn't be in the map!"); (void) existing;
3313  }
3314 
3315  auto *newType = new (*this, TypeAlignment)
3316  IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
3317 
3318  IncompleteArrayTypes.InsertNode(newType, insertPos);
3319  Types.push_back(newType);
3320  return QualType(newType, 0);
3321 }
3322 
3323 /// getVectorType - Return the unique reference to a vector type of
3324 /// the specified element type and size. VectorType must be a built-in type.
3325 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
3326  VectorType::VectorKind VecKind) const {
3327  assert(vecType->isBuiltinType());
3328 
3329  // Check if we've already instantiated a vector of this type.
3330  llvm::FoldingSetNodeID ID;
3331  VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
3332 
3333  void *InsertPos = nullptr;
3334  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
3335  return QualType(VTP, 0);
3336 
3337  // If the element type isn't canonical, this won't be a canonical type either,
3338  // so fill in the canonical type field.
3339  QualType Canonical;
3340  if (!vecType.isCanonical()) {
3341  Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);
3342 
3343  // Get the new insert position for the node we care about.
3344  VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3345  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3346  }
3347  auto *New = new (*this, TypeAlignment)
3348  VectorType(vecType, NumElts, Canonical, VecKind);
3349  VectorTypes.InsertNode(New, InsertPos);
3350  Types.push_back(New);
3351  return QualType(New, 0);
3352 }
3353 
3354 QualType
3355 ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
3356  SourceLocation AttrLoc,
3357  VectorType::VectorKind VecKind) const {
3358  llvm::FoldingSetNodeID ID;
3359  DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr,
3360  VecKind);
3361  void *InsertPos = nullptr;
3362  DependentVectorType *Canon =
3363  DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3364  DependentVectorType *New;
3365 
3366  if (Canon) {
3367  New = new (*this, TypeAlignment) DependentVectorType(
3368  *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
3369  } else {
3370  QualType CanonVecTy = getCanonicalType(VecType);
3371  if (CanonVecTy == VecType) {
3372  New = new (*this, TypeAlignment) DependentVectorType(
3373  *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind);
3374 
3375  DependentVectorType *CanonCheck =
3376  DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3377  assert(!CanonCheck &&
3378  "Dependent-sized vector_size canonical type broken");
3379  (void)CanonCheck;
3380  DependentVectorTypes.InsertNode(New, InsertPos);
3381  } else {
3382  QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
3383  SourceLocation());
3384  New = new (*this, TypeAlignment) DependentVectorType(
3385  *this, VecType, Canon, SizeExpr, AttrLoc, VecKind);
3386  }
3387  }
3388 
3389  Types.push_back(New);
3390  return QualType(New, 0);
3391 }
3392 
3393 /// getExtVectorType - Return the unique reference to an extended vector type of
3394 /// the specified element type and size. VectorType must be a built-in type.
3395 QualType
3396 ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
3397  assert(vecType->isBuiltinType() || vecType->isDependentType());
3398 
3399  // Check if we've already instantiated a vector of this type.
3400  llvm::FoldingSetNodeID ID;
3401  VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
3402  VectorType::GenericVector);
3403  void *InsertPos = nullptr;
3404  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
3405  return QualType(VTP, 0);
3406 
3407  // If the element type isn't canonical, this won't be a canonical type either,
3408  // so fill in the canonical type field.
3409  QualType Canonical;
3410  if (!vecType.isCanonical()) {
3411  Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);
3412 
3413  // Get the new insert position for the node we care about.
3414  VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3415  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3416  }
3417  auto *New = new (*this, TypeAlignment)
3418  ExtVectorType(vecType, NumElts, Canonical);
3419  VectorTypes.InsertNode(New, InsertPos);
3420  Types.push_back(New);
3421  return QualType(New, 0);
3422 }
3423 
3424 QualType
3425 ASTContext::getDependentSizedExtVectorType(QualType vecType,
3426  Expr *SizeExpr,
3427  SourceLocation AttrLoc) const {
3428  llvm::FoldingSetNodeID ID;
3429  DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType),
3430  SizeExpr);
3431 
3432  void *InsertPos = nullptr;
3433  DependentSizedExtVectorType *Canon
3434  = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3435  DependentSizedExtVectorType *New;
3436  if (Canon) {
3437  // We already have a canonical version of this array type; use it as
3438  // the canonical type for a newly-built type.
3439  New = new (*this, TypeAlignment)
3440  DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0),
3441  SizeExpr, AttrLoc);
3442  } else {
3443  QualType CanonVecTy = getCanonicalType(vecType);
3444  if (CanonVecTy == vecType) {
3445  New = new (*this, TypeAlignment)
3446  DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr,
3447  AttrLoc);
3448 
3449  DependentSizedExtVectorType *CanonCheck
3450  = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3451  assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
3452  (void)CanonCheck;
3453  DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
3454  } else {
3455  QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
3456  SourceLocation());
3457  New = new (*this, TypeAlignment)
3458  DependentSizedExtVectorType(*this, vecType, Canon, SizeExpr, AttrLoc);
3459  }
3460  }
3461 
3462  Types.push_back(New);
3463  return QualType(New, 0);
3464 }
3465 
3466 QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
3467  Expr *AddrSpaceExpr,
3468  SourceLocation AttrLoc) const {
3469  assert(AddrSpaceExpr->isInstantiationDependent());
3470 
3471  QualType canonPointeeType = getCanonicalType(PointeeType);
3472 
3473  void *insertPos = nullptr;
3474  llvm::FoldingSetNodeID ID;
3475  DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
3476  AddrSpaceExpr);
3477 
3478  DependentAddressSpaceType *canonTy =
3479  DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);
3480 
3481  if (!canonTy) {
3482  canonTy = new (*this, TypeAlignment)
3483  DependentAddressSpaceType(*this, canonPointeeType,
3484  QualType(), AddrSpaceExpr, AttrLoc);
3485  DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
3486  Types.push_back(canonTy);
3487  }
3488 
3489  if (canonPointeeType == PointeeType &&
3490  canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
3491  return QualType(canonTy, 0);
3492 
3493  auto *sugaredType
3494  = new (*this, TypeAlignment)
3495  DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0),
3496  AddrSpaceExpr, AttrLoc);
3497  Types.push_back(sugaredType);
3498  return QualType(sugaredType, 0);
3499 }
3500 
3501 /// Determine whether \p T is canonical as the result type of a function.
3502 static bool isCanonicalResultType(QualType T) {
3503  return T.isCanonical() &&
3504  (T.getObjCLifetime() == Qualifiers::OCL_None ||
3505  T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
3506 }
3507 
3508 /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
3509 QualType
3510 ASTContext::getFunctionNoProtoType(QualType ResultTy,
3511  const FunctionType::ExtInfo &Info) const {
3512  // Unique functions, to guarantee there is only one function of a particular
3513  // structure.
3514  llvm::FoldingSetNodeID ID;
3515  FunctionNoProtoType::Profile(ID, ResultTy, Info);
3516 
3517  void *InsertPos = nullptr;
3518  if (FunctionNoProtoType *FT =
3519  FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
3520  return QualType(FT, 0);
3521 
3522  QualType Canonical;
3523  if (!isCanonicalResultType(ResultTy)) {
3524  Canonical =
3525  getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info);
3526 
3527  // Get the new insert position for the node we care about.
3528  FunctionNoProtoType *NewIP =
3529  FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
3530  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3531  }
3532 
3533  auto *New = new (*this, TypeAlignment)
3534  FunctionNoProtoType(ResultTy, Canonical, Info);
3535  Types.push_back(New);
3536  FunctionNoProtoTypes.InsertNode(New, InsertPos);
3537  return QualType(New, 0);
3538 }
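
// [Editor's illustration - not part of ASTContext.cpp] Sketch of requesting
// the K&R-style type 'int ()' with default calling-convention info; the
// helper name is hypothetical.
static QualType makeKAndRIntFunction(ASTContext &Ctx) {
  return Ctx.getFunctionNoProtoType(Ctx.IntTy, FunctionType::ExtInfo());
}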
3539 
3540 CanQualType
3541 ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
3542  CanQualType CanResultType = getCanonicalType(ResultType);
3543 
3544  // Canonical result types do not have ARC lifetime qualifiers.
3545  if (CanResultType.getQualifiers().hasObjCLifetime()) {
3546  Qualifiers Qs = CanResultType.getQualifiers();
3547  Qs.removeObjCLifetime();
3548  return CanQualType::CreateUnsafe(
3549  getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
3550  }
3551 
3552  return CanResultType;
3553 }
3554 
3555 static bool isCanonicalExceptionSpecification(
3556  const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
3557  if (ESI.Type == EST_None)
3558  return true;
3559  if (!NoexceptInType)
3560  return false;
3561 
3562  // C++17 onwards: exception specification is part of the type, as a simple
3563  // boolean "can this function type throw".
3564  if (ESI.Type == EST_BasicNoexcept)
3565  return true;
3566 
3567  // A noexcept(expr) specification is (possibly) canonical if expr is
3568  // value-dependent.
3569  if (ESI.Type == EST_DependentNoexcept)
3570  return true;
3571 
3572  // A dynamic exception specification is canonical if it only contains pack
3573  // expansions (so we can't tell whether it's non-throwing) and all its
3574  // contained types are canonical.
3575  if (ESI.Type == EST_Dynamic) {
3576  bool AnyPackExpansions = false;
3577  for (QualType ET : ESI.Exceptions) {
3578  if (!ET.isCanonical())
3579  return false;
3580  if (ET->getAs<PackExpansionType>())
3581  AnyPackExpansions = true;
3582  }
3583  return AnyPackExpansions;
3584  }
3585 
3586  return false;
3587 }
3588 
3589 QualType ASTContext::getFunctionTypeInternal(
3590  QualType ResultTy, ArrayRef<QualType> ArgArray,
3591  const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
3592  size_t NumArgs = ArgArray.size();
3593 
3594  // Unique functions, to guarantee there is only one function of a particular
3595  // structure.
3596  llvm::FoldingSetNodeID ID;
3597  FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
3598  *this, true);
3599 
3600  QualType Canonical;
3601  bool Unique = false;
3602 
3603  void *InsertPos = nullptr;
3604  if (FunctionProtoType *FPT =
3605  FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
3606  QualType Existing = QualType(FPT, 0);
3607 
3608  // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
3609  // it so long as our exception specification doesn't contain a dependent
3610  // noexcept expression, or we're just looking for a canonical type.
3611  // Otherwise, we're going to need to create a type
3612  // sugar node to hold the concrete expression.
3613  if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
3614  EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
3615  return Existing;
3616 
3617  // We need a new type sugar node for this one, to hold the new noexcept
3618  // expression. We do no canonicalization here, but that's OK since we don't
3619  // expect to see the same noexcept expression much more than once.
3620  Canonical = getCanonicalType(Existing);
3621  Unique = true;
3622  }
3623 
3624  bool NoexceptInType = getLangOpts().CPlusPlus17;
3625  bool IsCanonicalExceptionSpec =
3626  isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);
3627 
3628  // Determine whether the type being created is already canonical or not.
3629  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
3630  isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
3631  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
3632  if (!ArgArray[i].isCanonicalAsParam())
3633  isCanonical = false;
3634 
3635  if (OnlyWantCanonical)
3636  assert(isCanonical &&
3637  "given non-canonical parameters constructing canonical type");
3638 
3639  // If this type isn't canonical, get the canonical version of it if we don't
3640  // already have it. The exception spec is only partially part of the
3641  // canonical type, and only in C++17 onwards.
3642  if (!isCanonical && Canonical.isNull()) {
3643  SmallVector<QualType, 16> CanonicalArgs;
3644  CanonicalArgs.reserve(NumArgs);
3645  for (unsigned i = 0; i != NumArgs; ++i)
3646  CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
3647 
3648  llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
3649  FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
3650  CanonicalEPI.HasTrailingReturn = false;
3651 
3652  if (IsCanonicalExceptionSpec) {
3653  // Exception spec is already OK.
3654  } else if (NoexceptInType) {
3655  switch (EPI.ExceptionSpec.Type) {
3656  case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
3657  // We don't know yet. It shouldn't matter what we pick here; no-one
3658  // should ever look at this.
3659  LLVM_FALLTHROUGH;
3660  case EST_None: case EST_MSAny: case EST_NoexceptFalse:
3661  CanonicalEPI.ExceptionSpec.Type = EST_None;
3662  break;
3663 
3664  // A dynamic exception specification is almost always "not noexcept",
3665  // with the exception that a pack expansion might expand to no types.
3666  case EST_Dynamic: {
3667  bool AnyPacks = false;
3668  for (QualType ET : EPI.ExceptionSpec.Exceptions) {
3669  if (ET->getAs<PackExpansionType>())
3670  AnyPacks = true;
3671  ExceptionTypeStorage.push_back(getCanonicalType(ET));
3672  }
3673  if (!AnyPacks)
3674  CanonicalEPI.ExceptionSpec.Type = EST_None;
3675  else {
3676  CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
3677  CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
3678  }
3679  break;
3680  }
3681 
3682  case EST_DynamicNone: case EST_BasicNoexcept: case EST_NoexceptTrue:
3683  CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
3684  break;
3685 
3686  case EST_DependentNoexcept:
3687  llvm_unreachable("dependent noexcept is already canonical");
3688  }
3689  } else {
3690  CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
3691  }
3692 
3693  // Adjust the canonical function result type.
3694  CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
3695  Canonical =
3696  getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);
3697 
3698  // Get the new insert position for the node we care about.
3699  FunctionProtoType *NewIP =
3700  FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
3701  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3702  }
3703 
3704  // FunctionProtoType objects are allocated with extra bytes after
3705  // them for three variable size arrays at the end:
3706  // - parameter types
3707  // - exception types
3708  // - extended parameter information
3709  // Instead of the exception types, there could be a noexcept
3710  // expression, or information used to resolve the exception
3711  // specification.
3712  size_t Size =
3713  sizeof(FunctionProtoType) + NumArgs * sizeof(QualType) +
3714  FunctionProtoType::getExceptionSpecSize(
3715  EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
3716 
3717  // Put the ExtParameterInfos last. If all were equal, it would make
3718  // more sense to put these before the exception specification, because
3719  // it's much easier to skip past them compared to the elaborate switch
3720  // required to skip the exception specification. However, all is not
3721  // equal; ExtParameterInfos are used to model very uncommon features,
3722  // and it's better not to burden the more common paths.
3723  if (EPI.ExtParameterInfos) {
3724  Size += NumArgs * sizeof(FunctionProtoType::ExtParameterInfo);
3725  }
3726 
3727  auto *FTP = (FunctionProtoType *) Allocate(Size, TypeAlignment);
3728  FunctionProtoType::ExtProtoInfo newEPI = EPI;
3729  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
3730  Types.push_back(FTP);
3731  if (!Unique)
3732  FunctionProtoTypes.InsertNode(FTP, InsertPos);
3733  return QualType(FTP, 0);
3734 }
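
// [Editor's illustration - not part of ASTContext.cpp] Callers normally reach
// getFunctionTypeInternal through the public getFunctionType wrapper; a sketch
// of 'int (float, char)' with a default ExtProtoInfo (helper is hypothetical).
static QualType makeIntOfFloatChar(ASTContext &Ctx) {
  QualType Params[] = {Ctx.FloatTy, Ctx.CharTy};
  return Ctx.getFunctionType(Ctx.IntTy, Params,
                             FunctionProtoType::ExtProtoInfo());
}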
3735 
3736 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
3737  llvm::FoldingSetNodeID ID;
3738  PipeType::Profile(ID, T, ReadOnly);
3739 
3740  void *InsertPos = nullptr;
3741  if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
3742  return QualType(PT, 0);
3743 
3744  // If the pipe element type isn't canonical, this won't be a canonical type
3745  // either, so fill in the canonical type field.
3746  QualType Canonical;
3747  if (!T.isCanonical()) {
3748  Canonical = getPipeType(getCanonicalType(T), ReadOnly);
3749 
3750  // Get the new insert position for the node we care about.
3751  PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
3752  assert(!NewIP && "Shouldn't be in the map!");
3753  (void)NewIP;
3754  }
3755  auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly);
3756  Types.push_back(New);
3757  PipeTypes.InsertNode(New, InsertPos);
3758  return QualType(New, 0);
3759 }
3760 
3761 QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const {
3762  // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
3763  return LangOpts.OpenCL ? getAddrSpaceQualType(Ty, LangAS::opencl_constant)
3764  : Ty;
3765 }
3766 
3767 QualType ASTContext::getReadPipeType(QualType T) const {
3768  return getPipeType(T, true);
3769 }
3770 
3771 QualType ASTContext::getWritePipeType(QualType T) const {
3772  return getPipeType(T, false);
3773 }
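
// [Editor's illustration - not part of ASTContext.cpp] Sketch of requesting
// the two OpenCL pipe flavours for an element type 'Elem' supplied by the
// caller; the helper name is hypothetical.
static void makePipeTypes(ASTContext &Ctx, QualType Elem) {
  QualType ReadPipe = Ctx.getReadPipeType(Elem);   // 'read_only pipe Elem'
  QualType WritePipe = Ctx.getWritePipeType(Elem); // 'write_only pipe Elem'
  (void)ReadPipe; (void)WritePipe;
}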
3774 
3775 #ifndef NDEBUG
3776 static bool NeedsInjectedClassNameType(const RecordDecl *D) {
3777  if (!isa<CXXRecordDecl>(D)) return false;
3778  const auto *RD = cast<CXXRecordDecl>(D);
3779  if (isa<ClassTemplatePartialSpecializationDecl>(RD))
3780  return true;
3781  if (RD->getDescribedClassTemplate() &&
3782  !isa<ClassTemplateSpecializationDecl>(RD))
3783  return true;
3784  return false;
3785 }
3786 #endif
3787 
3788 /// getInjectedClassNameType - Return the unique reference to the
3789 /// injected class name type for the specified templated declaration.
3790 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
3791  QualType TST) const {
3792  assert(NeedsInjectedClassNameType(Decl));
3793  if (Decl->TypeForDecl) {
3794  assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
3795  } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
3796  assert(PrevDecl->TypeForDecl && "previous declaration has no type");
3797  Decl->TypeForDecl = PrevDecl->TypeForDecl;
3798  assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
3799  } else {
3800  Type *newType =
3801  new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
3802  Decl->TypeForDecl = newType;
3803  Types.push_back(newType);
3804  }
3805  return QualType(Decl->TypeForDecl, 0);
3806 }
3807 
3808 /// getTypeDeclType - Return the unique reference to the type for the
3809 /// specified type declaration.
3810 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
3811  assert(Decl && "Passed null for Decl param");
3812  assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
3813 
3814  if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
3815  return getTypedefType(Typedef);
3816 
3817  assert(!isa<TemplateTypeParmDecl>(Decl) &&
3818  "Template type parameter types are always available.");
3819 
3820  if (const auto *Record = dyn_cast<RecordDecl>(Decl)) {
3821  assert(Record->isFirstDecl() && "struct/union has previous declaration");
3822  assert(!NeedsInjectedClassNameType(Record));
3823  return getRecordType(Record);
3824  } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) {
3825  assert(Enum->isFirstDecl() && "enum has previous declaration");
3826  return getEnumType(Enum);
3827  } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
3828  Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
3829  Decl->TypeForDecl = newType;
3830  Types.push_back(newType);
3831  } else
3832  llvm_unreachable("TypeDecl without a type?");
3833 
3834  return QualType(Decl->TypeForDecl, 0);
3835 }
3836 
3837 /// getTypedefType - Return the unique reference to the type for the
3838 /// specified typedef name decl.
3839 QualType
3840 ASTContext::getTypedefType(const TypedefNameDecl *Decl,
3841  QualType Canonical) const {
3842  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
3843 
3844  if (Canonical.isNull())
3845  Canonical = getCanonicalType(Decl->getUnderlyingType());
3846  auto *newType = new (*this, TypeAlignment)
3847  TypedefType(Type::Typedef, Decl, Canonical);
3848  Decl->TypeForDecl = newType;
3849  Types.push_back(newType);
3850  return QualType(newType, 0);
3851 }
3852 
3853 QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
3854  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
3855 
3856  if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
3857  if (PrevDecl->TypeForDecl)
3858  return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
3859 
3860  auto *newType = new (*this, TypeAlignment) RecordType(Decl);
3861  Decl->TypeForDecl = newType;
3862  Types.push_back(newType);
3863  return QualType(newType, 0);
3864 }
3865 
3866 QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
3867  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
3868 
3869  if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
3870  if (PrevDecl->TypeForDecl)
3871  return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
3872 
3873  auto *newType = new (*this, TypeAlignment) EnumType(Decl);
3874  Decl->TypeForDecl = newType;
3875  Types.push_back(newType);
3876  return QualType(newType, 0);
3877 }
3878 
3879 QualType ASTContext::getAttributedType(attr::Kind attrKind,
3880  QualType modifiedType,
3881  QualType equivalentType) {
3882  llvm::FoldingSetNodeID id;
3883  AttributedType::Profile(id, attrKind, modifiedType, equivalentType);
3884 
3885  void *insertPos = nullptr;
3886  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
3887  if (type) return QualType(type, 0);
3888 
3889  QualType canon = getCanonicalType(equivalentType);
3890  type = new (*this, TypeAlignment)
3891  AttributedType(canon, attrKind, modifiedType, equivalentType);
3892 
3893  Types.push_back(type);
3894  AttributedTypes.InsertNode(type, insertPos);
3895 
3896  return QualType(type, 0);
3897 }
3898 
3899 /// Retrieve a substitution-result type.
3900 QualType
3901 ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
3902  QualType Replacement) const {
3903  assert(Replacement.isCanonical()
3904  && "replacement types must always be canonical");
3905 
3906  llvm::FoldingSetNodeID ID;
3907  SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
3908  void *InsertPos = nullptr;
3909  SubstTemplateTypeParmType *SubstParm
3910  = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
3911 
3912  if (!SubstParm) {
3913  SubstParm = new (*this, TypeAlignment)
3914  SubstTemplateTypeParmType(Parm, Replacement);
3915  Types.push_back(SubstParm);
3916  SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
3917  }
3918 
3919  return QualType(SubstParm, 0);
3920 }
3921 
3922 /// Retrieve a substitution-result type for a template type parameter pack.
3923 QualType ASTContext::getSubstTemplateTypeParmPackType(
3924  const TemplateTypeParmType *Parm,
3925  const TemplateArgument &ArgPack) {
3926 #ifndef NDEBUG
3927  for (const auto &P : ArgPack.pack_elements()) {
3928  assert(P.getKind() == TemplateArgument::Type &&"Pack contains a non-type");
3929  assert(P.getAsType().isCanonical() && "Pack contains non-canonical type");
3930  }
3931 #endif
3932 
3933  llvm::FoldingSetNodeID ID;
3934  SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
3935  void *InsertPos = nullptr;
3936  if (SubstTemplateTypeParmPackType *SubstParm
3937  = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
3938  return QualType(SubstParm, 0);
3939 
3940  QualType Canon;
3941  if (!Parm->isCanonicalUnqualified()) {
3942  Canon = getCanonicalType(QualType(Parm, 0));
3943  Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
3944  ArgPack);
3945  SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
3946  }
3947 
3948  auto *SubstParm
3949  = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
3950  ArgPack);
3951  Types.push_back(SubstParm);
3952  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
3953  return QualType(SubstParm, 0);
3954 }
3955 
3956 /// Retrieve the template type parameter type for a template
3957 /// parameter or parameter pack with the given depth, index, and (optionally)
3958 /// name.
3959 QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
3960  bool ParameterPack,
3961  TemplateTypeParmDecl *TTPDecl) const {
3962  llvm::FoldingSetNodeID ID;
3963  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
3964  void *InsertPos = nullptr;
3965  TemplateTypeParmType *TypeParm
3966  = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
3967 
3968  if (TypeParm)
3969  return QualType(TypeParm, 0);
3970 
3971  if (TTPDecl) {
3972  QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
3973  TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);
3974 
3975  TemplateTypeParmType *TypeCheck
3976  = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
3977  assert(!TypeCheck && "Template type parameter canonical type broken");
3978  (void)TypeCheck;
3979  } else
3980  TypeParm = new (*this, TypeAlignment)
3981  TemplateTypeParmType(Depth, Index, ParameterPack);
3982 
3983  Types.push_back(TypeParm);
3984  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
3985 
3986  return QualType(TypeParm, 0);
3987 }
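
// [Editor's illustration - not part of ASTContext.cpp] Sketch: the canonical
// type of the first parameter of the innermost template, i.e. what 'T'
// canonicalizes to in 'template <class T>'; the helper name is hypothetical.
static QualType makeFirstTemplateParamType(ASTContext &Ctx) {
  return Ctx.getTemplateTypeParmType(/*Depth=*/0, /*Index=*/0,
                                     /*ParameterPack=*/false);
}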
3988 
3989 TypeSourceInfo *
3990 ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
3991  SourceLocation NameLoc,
3992  const TemplateArgumentListInfo &Args,
3993  QualType Underlying) const {
3994  assert(!Name.getAsDependentTemplateName() &&
3995  "No dependent template names here!");
3996  QualType TST = getTemplateSpecializationType(Name, Args, Underlying);
3997 
3998 
3999  TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
4000  TemplateSpecializationTypeLoc TL =
4001  DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
4002  TL.setTemplateNameLoc(NameLoc);
4003  TL.setLAngleLoc(Args.getLAngleLoc());
4004  TL.setRAngleLoc(Args.getRAngleLoc());
4005  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
4006  TL.setArgLocInfo(i, Args[i].getLocInfo());
4007  return DI;
4008 }
4009 
4010 QualType
4011 ASTContext::getTemplateSpecializationType(TemplateName Template,
4012  const TemplateArgumentListInfo &Args,
4013  QualType Underlying) const {
4014  assert(!Template.getAsDependentTemplateName() &&
4015  "No dependent template names here!");
4016 
4017  SmallVector<TemplateArgument, 4> ArgVec;
4018  ArgVec.reserve(Args.size());
4019  for (const TemplateArgumentLoc &Arg : Args.arguments())
4020  ArgVec.push_back(Arg.getArgument());
4021 
4022  return getTemplateSpecializationType(Template, ArgVec, Underlying);
4023 }
4024 
4025 #ifndef NDEBUG
4026 static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
4027  for (const TemplateArgument &Arg : Args)
4028  if (Arg.isPackExpansion())
4029  return true;
4030 
4031  return true;
4032 }
4033 #endif
4034 
4035 QualType
4036 ASTContext::getTemplateSpecializationType(TemplateName Template,
4037  ArrayRef<TemplateArgument> Args,
4038  QualType Underlying) const {
4039  assert(!Template.getAsDependentTemplateName() &&
4040  "No dependent template names here!");
4041  // Look through qualified template names.
4042  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4043  Template = TemplateName(QTN->getTemplateDecl());
4044 
4045  bool IsTypeAlias =
4046  Template.getAsTemplateDecl() &&
4047  isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
4048  QualType CanonType;
4049  if (!Underlying.isNull())
4050  CanonType = getCanonicalType(Underlying);
4051  else {
4052  // We can get here with an alias template when the specialization contains
4053  // a pack expansion that does not match up with a parameter pack.
4054  assert((!IsTypeAlias || hasAnyPackExpansions(Args)) &&
4055  "Caller must compute aliased type");
4056  IsTypeAlias = false;
4057  CanonType = getCanonicalTemplateSpecializationType(Template, Args);
4058  }
4059 
4060  // Allocate the (non-canonical) template specialization type, but don't
4061  // try to unique it: these types typically have location information that
4062  // we don't unique and don't want to lose.
4063  void *Mem = Allocate(sizeof(TemplateSpecializationType) +
4064  sizeof(TemplateArgument) * Args.size() +
4065  (IsTypeAlias? sizeof(QualType) : 0),
4066  TypeAlignment);
4067  auto *Spec
4068  = new (Mem) TemplateSpecializationType(Template, Args, CanonType,
4069  IsTypeAlias ? Underlying : QualType());
4070 
4071  Types.push_back(Spec);
4072  return QualType(Spec, 0);
4073 }
4074 
4075 QualType ASTContext::getCanonicalTemplateSpecializationType(
4076  TemplateName Template, ArrayRef<TemplateArgument> Args) const {
4077  assert(!Template.getAsDependentTemplateName() &&
4078  "No dependent template names here!");
4079 
4080  // Look through qualified template names.
4081  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4082  Template = TemplateName(QTN->getTemplateDecl());
4083 
4084  // Build the canonical template specialization type.
4085  TemplateName CanonTemplate = getCanonicalTemplateName(Template);
4086  SmallVector<TemplateArgument, 4> CanonArgs;
4087  unsigned NumArgs = Args.size();
4088  CanonArgs.reserve(NumArgs);
4089  for (const TemplateArgument &Arg : Args)
4090  CanonArgs.push_back(getCanonicalTemplateArgument(Arg));
4091 
4092  // Determine whether this canonical template specialization type already
4093  // exists.
4094  llvm::FoldingSetNodeID ID;
4095  TemplateSpecializationType::Profile(ID, CanonTemplate,
4096  CanonArgs, *this);
4097 
4098  void *InsertPos = nullptr;
4099  TemplateSpecializationType *Spec
4100  = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4101 
4102  if (!Spec) {
4103  // Allocate a new canonical template specialization type.
4104  void *Mem = Allocate((sizeof(TemplateSpecializationType) +
4105  sizeof(TemplateArgument) * NumArgs),
4106  TypeAlignment);
4107  Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
4108  CanonArgs,
4109  QualType(), QualType());
4110  Types.push_back(Spec);
4111  TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
4112  }
4113 
4114  assert(Spec->isDependentType() &&
4115  "Non-dependent template-id type must have a canonical type");
4116  return QualType(Spec, 0);
4117 }
4118 
4119 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
4120  NestedNameSpecifier *NNS,
4121  QualType NamedType,
4122  TagDecl *OwnedTagDecl) const {
4123  llvm::FoldingSetNodeID ID;
4124  ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);
4125 
4126  void *InsertPos = nullptr;
4127  ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
4128  if (T)
4129  return QualType(T, 0);
4130 
4131  QualType Canon = NamedType;
4132  if (!Canon.isCanonical()) {
4133  Canon = getCanonicalType(NamedType);
4134  ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
4135  assert(!CheckT && "Elaborated canonical type broken");
4136  (void)CheckT;
4137  }
4138 
4139  T = new (*this, TypeAlignment)
4140  ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);
4141  Types.push_back(T);
4142  ElaboratedTypes.InsertNode(T, InsertPos);
4143  return QualType(T, 0);
4144 }
4145 
4146 QualType
4147 ASTContext::getParenType(QualType InnerType) const {
4148  llvm::FoldingSetNodeID ID;
4149  ParenType::Profile(ID, InnerType);
4150 
4151  void *InsertPos = nullptr;
4152  ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
4153  if (T)
4154  return QualType(T, 0);
4155 
4156  QualType Canon = InnerType;
4157  if (!Canon.isCanonical()) {
4158  Canon = getCanonicalType(InnerType);
4159  ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
4160  assert(!CheckT && "Paren canonical type broken");
4161  (void)CheckT;
4162  }
4163 
4164  T = new (*this, TypeAlignment) ParenType(InnerType, Canon);
4165  Types.push_back(T);
4166  ParenTypes.InsertNode(T, InsertPos);
4167  return QualType(T, 0);
4168 }
4169 
4170 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
4171  NestedNameSpecifier *NNS,
4172  const IdentifierInfo *Name,
4173  QualType Canon) const {
4174  if (Canon.isNull()) {
4175  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
4176  if (CanonNNS != NNS)
4177  Canon = getDependentNameType(Keyword, CanonNNS, Name);
4178  }
4179 
4180  llvm::FoldingSetNodeID ID;
4181  DependentNameType::Profile(ID, Keyword, NNS, Name);
4182 
4183  void *InsertPos = nullptr;
4184  DependentNameType *T
4185  = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
4186  if (T)
4187  return QualType(T, 0);
4188 
4189  T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon);
4190  Types.push_back(T);
4191  DependentNameTypes.InsertNode(T, InsertPos);
4192  return QualType(T, 0);
4193 }
4194 
4195 QualType
4196 ASTContext::getDependentTemplateSpecializationType(
4197  ElaboratedTypeKeyword Keyword,
4198  NestedNameSpecifier *NNS,
4199  const IdentifierInfo *Name,
4200  const TemplateArgumentListInfo &Args) const {
4201  // TODO: avoid this copy
4202  SmallVector<TemplateArgument, 16> ArgCopy;
4203  for (unsigned I = 0, E = Args.size(); I != E; ++I)
4204  ArgCopy.push_back(Args[I].getArgument());
4205  return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy);
4206 }
4207 
4208 QualType
4209 ASTContext::getDependentTemplateSpecializationType(
4210  ElaboratedTypeKeyword Keyword,
4211  NestedNameSpecifier *NNS,
4212  const IdentifierInfo *Name,
4213  ArrayRef<TemplateArgument> Args) const {
4214  assert((!NNS || NNS->isDependent()) &&
4215  "nested-name-specifier must be dependent");
4216 
4217  llvm::FoldingSetNodeID ID;
4218  DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
4219  Name, Args);
4220 
4221  void *InsertPos = nullptr;
4222  DependentTemplateSpecializationType *T
4223  = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4224  if (T)
4225  return QualType(T, 0);
4226 
4227  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
4228 
4229  ElaboratedTypeKeyword CanonKeyword = Keyword;
4230  if (Keyword == ETK_None) CanonKeyword = ETK_Typename;
4231 
4232  bool AnyNonCanonArgs = false;
4233  unsigned NumArgs = Args.size();
4234  SmallVector<TemplateArgument, 16> CanonArgs(NumArgs);
4235  for (unsigned I = 0; I != NumArgs; ++I) {
4236  CanonArgs[I] = getCanonicalTemplateArgument(Args[I]);
4237  if (!CanonArgs[I].structurallyEquals(Args[I]))
4238  AnyNonCanonArgs = true;
4239  }
4240 
4241  QualType Canon;
4242  if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
4243  Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
4244  Name,
4245  CanonArgs);
4246 
4247  // Find the insert position again.
4248  DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4249  }
4250 
4251  void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
4252  sizeof(TemplateArgument) * NumArgs),
4253  TypeAlignment);
4254  T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
4255  Name, Args, Canon);
4256  Types.push_back(T);
4257  DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
4258  return QualType(T, 0);
4259 }
4260 
4261 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
4262  TemplateArgument Arg;
4263  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
4264  QualType ArgType = getTypeDeclType(TTP);
4265  if (TTP->isParameterPack())
4266  ArgType = getPackExpansionType(ArgType, None);
4267 
4268  Arg = TemplateArgument(ArgType);
4269  } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
4270  Expr *E = new (*this) DeclRefExpr(
4271  NTTP, /*enclosing*/false,
4272  NTTP->getType().getNonLValueExprType(*this),
4273  Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());
4274 
4275  if (NTTP->isParameterPack())
4276  E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(),
4277  None);
4278  Arg = TemplateArgument(E);
4279  } else {
4280  auto *TTP = cast<TemplateTemplateParmDecl>(Param);
4281  if (TTP->isParameterPack())
4282  Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>());
4283  else
4284  Arg = TemplateArgument(TemplateName(TTP));
4285  }
4286 
4287  if (Param->isTemplateParameterPack())
4288  Arg = TemplateArgument::CreatePackCopy(*this, Arg);
4289 
4290  return Arg;
4291 }
4292 
4293 void
4294 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params,
4295  SmallVectorImpl<TemplateArgument> &Args) {
4296  Args.reserve(Args.size() + Params->size());
4297 
4298  for (NamedDecl *Param : *Params)
4299  Args.push_back(getInjectedTemplateArg(Param));
4300 }
4301 
4302 QualType ASTContext::getPackExpansionType(QualType Pattern,
4303  Optional<unsigned> NumExpansions) {
4304  llvm::FoldingSetNodeID ID;
4305  PackExpansionType::Profile(ID, Pattern, NumExpansions);
4306 
4307  assert(Pattern->containsUnexpandedParameterPack() &&
4308  "Pack expansions must expand one or more parameter packs");
4309  void *InsertPos = nullptr;
4310  PackExpansionType *T
4311  = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
4312  if (T)
4313  return QualType(T, 0);
4314 
4315  QualType Canon;
4316  if (!Pattern.isCanonical()) {
4317  Canon = getCanonicalType(Pattern);
4318  // The canonical type might not contain an unexpanded parameter pack, if it
4319  // contains an alias template specialization which ignores one of its
4320  // parameters.
4321  if (Canon->containsUnexpandedParameterPack()) {
4322  Canon = getPackExpansionType(Canon, NumExpansions);
4323 
4324  // Find the insert position again, in case we inserted an element into
4325  // PackExpansionTypes and invalidated our insert position.
4326  PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
4327  }
4328  }
4329 
4330  T = new (*this, TypeAlignment)
4331  PackExpansionType(Pattern, Canon, NumExpansions);
4332  Types.push_back(T);
4333  PackExpansionTypes.InsertNode(T, InsertPos);
4334  return QualType(T, 0);
4335 }
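
// [Editor's illustration - not part of ASTContext.cpp] Sketch of forming
// 'Ts...' for a depth-0, index-0 parameter pack with an unknown number of
// expansions; the helper name is hypothetical.
static QualType makePackExpansion(ASTContext &Ctx) {
  QualType Pack = Ctx.getTemplateTypeParmType(/*Depth=*/0, /*Index=*/0,
                                              /*ParameterPack=*/true);
  // The pattern contains an unexpanded pack, so the assertion above holds.
  return Ctx.getPackExpansionType(Pack, /*NumExpansions=*/None);
}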
4336 
4337 /// CmpProtocolNames - Comparison predicate for sorting protocols
4338 /// alphabetically.
4339 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
4340  ObjCProtocolDecl *const *RHS) {
4341  return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName());
4342 }
4343 
4344 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
4345  if (Protocols.empty()) return true;
4346 
4347  if (Protocols[0]->getCanonicalDecl() != Protocols[0])
4348  return false;
4349 
4350  for (unsigned i = 1; i != Protocols.size(); ++i)
4351  if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
4352  Protocols[i]->getCanonicalDecl() != Protocols[i])
4353  return false;
4354  return true;
4355 }
4356 
4357 static void
4358 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
4359  // Sort protocols, keyed by name.
4360  llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames);
4361 
4362  // Canonicalize.
4363  for (ObjCProtocolDecl *&P : Protocols)
4364  P = P->getCanonicalDecl();
4365 
4366  // Remove duplicates.
4367  auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end());
4368  Protocols.erase(ProtocolsEnd, Protocols.end());
4369 }
4370 
4371 QualType ASTContext::getObjCObjectType(QualType BaseType,
4372  ObjCProtocolDecl * const *Protocols,
4373  unsigned NumProtocols) const {
4374  return getObjCObjectType(BaseType, {},
4375  llvm::makeArrayRef(Protocols, NumProtocols),
4376  /*isKindOf=*/false);
4377 }
4378 
4379 QualType ASTContext::getObjCObjectType(
4380  QualType baseType,
4381  ArrayRef<QualType> typeArgs,
4382  ArrayRef<ObjCProtocolDecl *> protocols,
4383  bool isKindOf) const {
4384  // If the base type is an interface and there aren't any protocols or
4385  // type arguments to add, then the interface type will do just fine.
4386  if (typeArgs.empty() && protocols.empty() && !isKindOf &&
4387  isa<ObjCInterfaceType>(baseType))
4388  return baseType;
4389 
4390  // Look in the folding set for an existing type.
4391  llvm::FoldingSetNodeID ID;
4392  ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf);
4393  void *InsertPos = nullptr;
4394  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
4395  return QualType(QT, 0);
4396 
4397  // Determine the type arguments to be used for canonicalization,
4398  // which may be explicitly specified here or written on the base
4399  // type.
4400  ArrayRef<QualType> effectiveTypeArgs = typeArgs;
4401  if (effectiveTypeArgs.empty()) {
4402  if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
4403  effectiveTypeArgs = baseObject->getTypeArgs();
4404  }
4405 
4406  // Build the canonical type, which has the canonical base type and a
4407  // sorted-and-uniqued list of protocols and the type arguments
4408  // canonicalized.
4409  QualType canonical;
4410  bool typeArgsAreCanonical = std::all_of(effectiveTypeArgs.begin(),
4411  effectiveTypeArgs.end(),
4412  [&](QualType type) {
4413  return type.isCanonical();
4414  });
4415  bool protocolsSorted = areSortedAndUniqued(protocols);
4416  if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
4417  // Determine the canonical type arguments.
4418  ArrayRef<QualType> canonTypeArgs;
4419  SmallVector<QualType, 4> canonTypeArgsVec;
4420  if (!typeArgsAreCanonical) {
4421  canonTypeArgsVec.reserve(effectiveTypeArgs.size());
4422  for (auto typeArg : effectiveTypeArgs)
4423  canonTypeArgsVec.push_back(getCanonicalType(typeArg));
4424  canonTypeArgs = canonTypeArgsVec;
4425  } else {
4426  canonTypeArgs = effectiveTypeArgs;
4427  }
4428 
4429  ArrayRef<ObjCProtocolDecl *> canonProtocols;
4430  SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
4431  if (!protocolsSorted) {
4432  canonProtocolsVec.append(protocols.begin(), protocols.end());
4433  SortAndUniqueProtocols(canonProtocolsVec);
4434  canonProtocols = canonProtocolsVec;
4435  } else {
4436  canonProtocols = protocols;
4437  }
4438 
4439  canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs,
4440  canonProtocols, isKindOf);
4441 
4442  // Regenerate InsertPos.
4443  ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
4444  }
4445 
4446  unsigned size = sizeof(ObjCObjectTypeImpl);
4447  size += typeArgs.size() * sizeof(QualType);
4448  size += protocols.size() * sizeof(ObjCProtocolDecl *);
4449  void *mem = Allocate(size, TypeAlignment);
4450  auto *T =
4451  new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
4452  isKindOf);
4453 
4454  Types.push_back(T);
4455  ObjCObjectTypes.InsertNode(T, InsertPos);
4456  return QualType(T, 0);
4457 }
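
// [Editor's illustration - not part of ASTContext.cpp] Sketch of building
// roughly 'Iface<Proto> *' from an interface and one protocol; 'Iface' and
// 'Proto' are assumed to be declarations the caller already has, and the
// helper name is hypothetical.
static QualType makeQualifiedObjCPointer(ASTContext &Ctx,
                                         ObjCInterfaceDecl *Iface,
                                         ObjCProtocolDecl *Proto) {
  QualType Base = Ctx.getObjCInterfaceType(Iface);
  QualType Object = Ctx.getObjCObjectType(Base, /*typeArgs=*/{},
                                          llvm::makeArrayRef(Proto),
                                          /*isKindOf=*/false);
  return Ctx.getObjCObjectPointerType(Object);
}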
4458 
4459 /// Apply Objective-C protocol qualifiers to the given type.
4460 /// If this is for the canonical type of a type parameter, we can apply
4461 /// protocol qualifiers on the ObjCObjectPointerType.
4462 QualType
4463 ASTContext::applyObjCProtocolQualifiers(QualType type,
4464  ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
4465  bool allowOnPointerType) const {
4466  hasError = false;
4467 
4468  if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) {
4469  return getObjCTypeParamType(objT->getDecl(), protocols);
4470  }
4471 
4472  // Apply protocol qualifiers to ObjCObjectPointerType.
4473  if (allowOnPointerType) {
4474  if (const auto *objPtr =
4475  dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) {
4476  const ObjCObjectType *objT = objPtr->getObjectType();
4477  // Merge protocol lists and construct ObjCObjectType.
4478  SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
4479  protocolsVec.append(objT->qual_begin(),
4480  objT->qual_end());
4481  protocolsVec.append(protocols.begin(), protocols.end());
4482  ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
4483  type = getObjCObjectType(
4484  objT->getBaseType(),
4485  objT->getTypeArgsAsWritten(),
4486  protocols,
4487  objT->isKindOfTypeAsWritten());
4488  return getObjCObjectPointerType(type);
4489  }
4490  }
4491 
4492  // Apply protocol qualifiers to ObjCObjectType.
4493  if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){
4494  // FIXME: Check for protocols to which the class type is already
4495  // known to conform.
4496 
4497  return getObjCObjectType(objT->getBaseType(),
4498  objT->getTypeArgsAsWritten(),
4499  protocols,
4500  objT->isKindOfTypeAsWritten());
4501  }
4502 
4503  // If the canonical type is ObjCObjectType, ...
4504  if (type->isObjCObjectType()) {
4505  // Silently overwrite any existing protocol qualifiers.
4506  // TODO: determine whether that's the right thing to do.
4507 
4508  // FIXME: Check for protocols to which the class type is already
4509  // known to conform.
4510  return getObjCObjectType(type, {}, protocols, false);
4511  }
4512 
4513  // id<protocol-list>
4514  if (type->isObjCIdType()) {
4515  const auto *objPtr = type->castAs<ObjCObjectPointerType>();
4516  type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols,
4517  objPtr->isKindOfType());
4518  return getObjCObjectPointerType(type);
4519  }
4520 
4521  // Class<protocol-list>
4522  if (type->isObjCClassType()) {
4523  const auto *objPtr = type->castAs<ObjCObjectPointerType>();
4524  type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols,
4525  objPtr->isKindOfType());
4526  return getObjCObjectPointerType(type);
4527  }
4528 
4529  hasError = true;
4530  return type;
4531 }
4532 
4533 QualType
4534 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
4535  ArrayRef<ObjCProtocolDecl *> protocols,
4536  QualType Canonical) const {
4537  // Look in the folding set for an existing type.
4538  llvm::FoldingSetNodeID ID;
4539  ObjCTypeParamType::Profile(ID, Decl, protocols);
4540  void *InsertPos = nullptr;
4541  if (ObjCTypeParamType *TypeParam =
4542  ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
4543  return QualType(TypeParam, 0);
4544 
4545  if (Canonical.isNull()) {
4546  // We canonicalize to the underlying type.
4547  Canonical = getCanonicalType(Decl->getUnderlyingType());
4548  if (!protocols.empty()) {
4549  // Apply the protocol qualifiers.
4550  bool hasError;
4551  Canonical = applyObjCProtocolQualifiers(Canonical, protocols, hasError,
4552  true/*allowOnPointerType*/);
4553  assert(!hasError && "Error when apply protocol qualifier to bound type");
4554  }
4555  }
4556 
4557  unsigned size = sizeof(ObjCTypeParamType);
4558  size += protocols.size() * sizeof(ObjCProtocolDecl *);
4559  void *mem = Allocate(size, TypeAlignment);
4560  auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);
4561 
4562  Types.push_back(newType);
4563  ObjCTypeParamTypes.InsertNode(newType, InsertPos);
4564  return QualType(newType, 0);
4565 }
4566 
4567 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
4568 /// protocol list adopt all protocols in QT's qualified-id protocol
4569 /// list.
4570 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
4571  ObjCInterfaceDecl *IC) {
4572  if (!QT->isObjCQualifiedIdType())
4573  return false;
4574 
4575  if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) {
4576  // If both the right and left sides have qualifiers.
4577  for (auto *Proto : OPT->quals()) {
4578  if (!IC->ClassImplementsProtocol(Proto, false))
4579  return false;
4580  }
4581  return true;
4582  }
4583  return false;
4584 }
4585 
4586 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
4587 /// QT's qualified-id protocol list adopt all protocols in IDecl's list
4588 /// of protocols.
4589 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
4590  ObjCInterfaceDecl *IDecl) {
4591  if (!QT->isObjCQualifiedIdType())
4592  return false;
4593  const auto *OPT = QT->getAs<ObjCObjectPointerType>();
4594  if (!OPT)
4595  return false;
4596  if (!IDecl->hasDefinition())
4597  return false;
4598  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
4599  CollectInheritedProtocols(IDecl, InheritedProtocols);
4600  if (InheritedProtocols.empty())
4601  return false;
4602  // Check that if every protocol in list of id<plist> conforms to a protocol
4603  // of IDecl's, then bridge casting is ok.
4604  bool Conforms = false;
4605  for (auto *Proto : OPT->quals()) {
4606  Conforms = false;
4607  for (auto *PI : InheritedProtocols) {
4608  if (ProtocolCompatibleWithProtocol(Proto, PI)) {
4609  Conforms = true;
4610  break;
4611  }
4612  }
4613  if (!Conforms)
4614  break;
4615  }
4616  if (Conforms)
4617  return true;
4618 
4619  for (auto *PI : InheritedProtocols) {
4620  // If both the right and left sides have qualifiers.
4621  bool Adopts = false;
4622  for (auto *Proto : OPT->quals()) {
4623  // return 'true' if 'PI' is in the inheritance hierarchy of Proto
4624  if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto)))
4625  break;
4626  }
4627  if (!Adopts)
4628  return false;
4629  }
4630  return true;
4631 }
4632 
4633 /// getObjCObjectPointerType - Return an ObjCObjectPointerType type for
4634 /// the given object type.
4635 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
4636  llvm::FoldingSetNodeID ID;
4637  ObjCObjectPointerType::Profile(ID, ObjectT);
4638 
4639  void *InsertPos = nullptr;
4640  if (ObjCObjectPointerType *QT =
4641  ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
4642  return QualType(QT, 0);
4643 
4644  // Find the canonical object type.
4645  QualType Canonical;
4646  if (!ObjectT.isCanonical()) {
4647  Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));
4648 
4649  // Regenerate InsertPos.
4650  ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
4651  }
4652 
4653  // No match.
4654  void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
4655  auto *QType =
4656  new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
4657 
4658  Types.push_back(QType);
4659  ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
4660  return QualType(QType, 0);
4661 }
4662 
4663 /// getObjCInterfaceType - Return the unique reference to the type for the
4664 /// specified ObjC interface decl. The list of protocols is optional.
4665 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
4666  ObjCInterfaceDecl *PrevDecl) const {
4667  if (Decl->TypeForDecl)
4668  return QualType(Decl->TypeForDecl, 0);
4669 
4670  if (PrevDecl) {
4671  assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
4672  Decl->TypeForDecl = PrevDecl->TypeForDecl;
4673  return QualType(PrevDecl->TypeForDecl, 0);
4674  }
4675 
4676  // Prefer the definition, if there is one.
4677  if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
4678  Decl = Def;
4679 
4680  void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
4681  auto *T = new (Mem) ObjCInterfaceType(Decl);
4682  Decl->TypeForDecl = T;
4683  Types.push_back(T);
4684  return QualType(T, 0);
4685 }
4686 
4687 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
4688 /// TypeOfExprType ASTs (since expressions are never shared). For example,
4689 /// multiple declarations that refer to "typeof(x)" all contain different
4690 /// DeclRefExprs. This doesn't affect the type checker, since it operates
4691 /// on canonical types (which are always unique).
4692 QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
4693  TypeOfExprType *toe;
4694  if (tofExpr->isTypeDependent()) {
4695  llvm::FoldingSetNodeID ID;
4696  DependentTypeOfExprType::Profile(ID, *this, tofExpr);
4697 
4698  void *InsertPos = nullptr;
4699  DependentTypeOfExprType *Canon
4700  = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
4701  if (Canon) {
4702  // We already have a "canonical" version of an identical, dependent
4703  // typeof(expr) type. Use that as our canonical type.
4704  toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr,
4705  QualType((TypeOfExprType*)Canon, 0));
4706  } else {
4707  // Build a new, canonical typeof(expr) type.
4708  Canon
4709  = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr);
4710  DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
4711  toe = Canon;
4712  }
4713  } else {
4714  QualType Canonical = getCanonicalType(tofExpr->getType());
4715  toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical);
4716  }
4717  Types.push_back(toe);
4718  return QualType(toe, 0);
4719 }
4720 
4721 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
4722 /// TypeOfType nodes. The only motivation to unique these nodes would be
4723 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
4724 /// an issue. This doesn't affect the type checker, since it operates
4725 /// on canonical types (which are always unique).
4726 QualType ASTContext::getTypeOfType(QualType tofType) const {
4727  QualType Canonical = getCanonicalType(tofType);
4728  auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical);
4729  Types.push_back(tot);
4730  return QualType(tot, 0);
4731 }
4732 
4733 /// Unlike many "get<Type>" functions, we don't unique DecltypeType
4734 /// nodes. This would never be helpful, since each such type has its own
4735 /// expression, and would not give a significant memory saving, since there
4736 /// is an Expr tree under each such type.
4737 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
4738  DecltypeType *dt;
4739 
4740  // C++11 [temp.type]p2:
4741  // If an expression e involves a template parameter, decltype(e) denotes a
4742  // unique dependent type. Two such decltype-specifiers refer to the same
4743  // type only if their expressions are equivalent (14.5.6.1).
4744  if (e->isInstantiationDependent()) {
4745  llvm::FoldingSetNodeID ID;
4746  DependentDecltypeType::Profile(ID, *this, e);
4747 
4748  void *InsertPos = nullptr;
4749  DependentDecltypeType *Canon
4750  = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
4751  if (!Canon) {
4752  // Build a new, canonical decltype(expr) type.
4753  Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e);
4754  DependentDecltypeTypes.InsertNode(Canon, InsertPos);
4755  }
4756  dt = new (*this, TypeAlignment)
4757  DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0));
4758  } else {
4759  dt = new (*this, TypeAlignment)
4760  DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType));
4761  }
4762  Types.push_back(dt);
4763  return QualType(dt, 0);
4764 }
4765 
4766 /// getUnaryTransformationType - We don't unique these, since the memory
4767 /// savings are minimal and these are rare.
4768 QualType ASTContext::getUnaryTransformType(QualType BaseType,
4769  QualType UnderlyingType,
4770  UnaryTransformType::UTTKind Kind)
4771  const {
4772  UnaryTransformType *ut = nullptr;
4773 
4774  if (BaseType->isDependentType()) {
4775  // Look in the folding set for an existing type.
4776  llvm::FoldingSetNodeID ID;
4777  DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind);
4778 
4779  void *InsertPos = nullptr;
4780  DependentUnaryTransformType *Canon
4781  = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);
4782 
4783  if (!Canon) {
4784  // Build a new, canonical __underlying_type(type) type.
4785  Canon = new (*this, TypeAlignment)
4786  DependentUnaryTransformType(*this, getCanonicalType(BaseType),
4787  Kind);
4788  DependentUnaryTransformTypes.InsertNode(Canon, InsertPos);
4789  }
4790  ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
4791  QualType(), Kind,
4792  QualType(Canon, 0));
4793  } else {
4794  QualType CanonType = getCanonicalType(UnderlyingType);
4795  ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
4796  UnderlyingType, Kind,
4797  CanonType);
4798  }
4799  Types.push_back(ut);
4800  return QualType(ut, 0);
4801 }
4802 
4803 /// getAutoType - Return the uniqued reference to the 'auto' type which has been
4804 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the
4805 /// canonical deduced-but-dependent 'auto' type.
4806 QualType ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
4807  bool IsDependent) const {
4808  if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && !IsDependent)
4809  return getAutoDeductType();
4810 
4811  // Look in the folding set for an existing type.
4812  void *InsertPos = nullptr;
4813  llvm::FoldingSetNodeID ID;
4814  AutoType::Profile(ID, DeducedType, Keyword, IsDependent);
4815  if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
4816  return QualType(AT, 0);
4817 
4818  auto *AT = new (*this, TypeAlignment)
4819  AutoType(DeducedType, Keyword, IsDependent);
4820  Types.push_back(AT);
4821  if (InsertPos)
4822  AutoTypes.InsertNode(AT, InsertPos);
4823  return QualType(AT, 0);
4824 }
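
// [Editor's illustration - not part of ASTContext.cpp] Sketch contrasting the
// canonical undeduced 'auto' (used as a deduction pattern) with an 'auto'
// already deduced to 'int'; the helper name is hypothetical.
static void autoTypeExamples(ASTContext &Ctx) {
  QualType Undeduced = Ctx.getAutoType(QualType(), AutoTypeKeyword::Auto,
                                       /*IsDependent=*/false);
  QualType DeducedInt = Ctx.getAutoType(Ctx.IntTy, AutoTypeKeyword::Auto,
                                        /*IsDependent=*/false);
  (void)Undeduced; (void)DeducedInt;
}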
4825 
4826 /// Return the uniqued reference to the deduced template specialization type
4827 /// which has been deduced to the given type, or to the canonical undeduced
4828 /// such type, or the canonical deduced-but-dependent such type.
4829 QualType ASTContext::getDeducedTemplateSpecializationType(
4830  TemplateName Template, QualType DeducedType, bool IsDependent) const {
4831  // Look in the folding set for an existing type.
4832  void *InsertPos = nullptr;
4833  llvm::FoldingSetNodeID ID;
4834  DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType,
4835  IsDependent);
4836  if (DeducedTemplateSpecializationType *DTST =
4837  DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
4838  return QualType(DTST, 0);
4839 
4840  auto *DTST = new (*this, TypeAlignment)
4841  DeducedTemplateSpecializationType(Template, DeducedType, IsDependent);
4842  Types.push_back(DTST);
4843  if (InsertPos)
4844  DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos);
4845  return QualType(DTST, 0);
4846 }
4847 
4848 /// getAtomicType - Return the uniqued reference to the atomic type for
4849 /// the given value type.
4850 QualType ASTContext::getAtomicType(QualType T) const {
4851  // Unique pointers, to guarantee there is only one pointer of a particular
4852  // structure.
4853  llvm::FoldingSetNodeID ID;
4854  AtomicType::Profile(ID, T);
4855 
4856  void *InsertPos = nullptr;
4857  if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
4858  return QualType(AT, 0);
4859 
4860  // If the atomic value type isn't canonical, this won't be a canonical type
4861  // either, so fill in the canonical type field.
4862  QualType Canonical;
4863  if (!T.isCanonical()) {
4864  Canonical = getAtomicType(getCanonicalType(T));
4865 
4866  // Get the new insert position for the node we care about.
4867  AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
4868  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4869  }
4870  auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical);
4871  Types.push_back(New);
4872  AtomicTypes.InsertNode(New, InsertPos);
4873  return QualType(New, 0);
4874 }
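
// [Editor's illustration - not part of ASTContext.cpp] Sketch: '_Atomic(int)';
// a second call with the same value type returns the same uniqued node. The
// helper name is hypothetical.
static QualType makeAtomicInt(ASTContext &Ctx) {
  return Ctx.getAtomicType(Ctx.IntTy);
}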
4875 
4876 /// getAutoDeductType - Get type pattern for deducing against 'auto'.
4877 QualType ASTContext::getAutoDeductType() const {
4878  if (AutoDeductTy.isNull())
4879  AutoDeductTy = QualType(
4880  new (*this, TypeAlignment) AutoType(QualType(), AutoTypeKeyword::Auto,
4881  /*dependent*/false),
4882  0);
4883  return AutoDeductTy;
4884 }
4885 
4886 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
4887 QualType ASTContext::getAutoRRefDeductType() const {
4888  if (AutoRRefDeductTy.isNull())
4889  AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType());
4890  assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
4891  return AutoRRefDeductTy;
4892 }
4893 
4894 /// getTagDeclType - Return the unique reference to the type for the
4895 /// specified TagDecl (struct/union/class/enum) decl.
4896 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
4897  assert(Decl);
4898  // FIXME: What is the design on getTagDeclType when it requires casting
4899  // away const? mutable?
4900  return getTypeDeclType(const_cast<TagDecl*>(Decl));
4901 }
4902 
4903 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
4904 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
4905 /// needs to agree with the definition in <stddef.h>.
4906 CanQualType ASTContext::getSizeType() const {
4907  return getFromTargetType(Target->getSizeType());
4908 }
4909 
4910 /// Return the unique signed counterpart of the integer type
4911 /// corresponding to size_t.
4912 CanQualType ASTContext::getSignedSizeType() const {
4913  return getFromTargetType(Target->getSignedSizeType());
4914 }
4915 
4916 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
4917 CanQualType ASTContext::getIntMaxType() const {
4918  return getFromTargetType(Target->getIntMaxType());
4919 }
4920 
4921 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
4922 CanQualType ASTContext::getUIntMaxType() const {
4923  return getFromTargetType(Target->getUIntMaxType());
4924 }
4925 
4926 /// getSignedWCharType - Return the type of "signed wchar_t".
4927 /// Used when in C++, as a GCC extension.
4928 QualType ASTContext::getSignedWCharType() const {
4929  // FIXME: derive from "Target" ?
4930  return WCharTy;
4931 }
4932 
4933 /// getUnsignedWCharType - Return the type of "unsigned wchar_t".
4934 /// Used when in C++, as a GCC extension.
4935 QualType ASTContext::getUnsignedWCharType() const {
4936  // FIXME: derive from "Target" ?
4937  return UnsignedIntTy;
4938 }
4939 
4940 QualType ASTContext::getIntPtrType() const {
4941  return getFromTargetType(Target->getIntPtrType());
4942 }
4943 
4944 QualType ASTContext::getUIntPtrType() const {
4945  return getCorrespondingUnsignedType(getIntPtrType());
4946 }
4947 
4948 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
4949 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
4950 QualType ASTContext::getPointerDiffType() const {
4951  return getFromTargetType(Target->getPtrDiffType(0));
4952 }
4953 
4954 /// Return the unique unsigned counterpart of "ptrdiff_t"
4955 /// integer type. The standard (C11 7.21.6.1p7) refers to this type
4956 /// in the definition of %tu format specifier.
4957 QualType ASTContext::getUnsignedPointerDiffType() const {
4958  return getFromTargetType(Target->getUnsignedPtrDiffType(0));
4959 }
4960 
4961 /// Return the unique type for "pid_t" defined in
4962 /// <sys/types.h>. We need this to compute the correct type for vfork().
4963 QualType ASTContext::getProcessIDType() const {
4964  return getFromTargetType(Target->getProcessIDType());
4965 }
4966 
4967 //===----------------------------------------------------------------------===//
4968 // Type Operators
4969 //===----------------------------------------------------------------------===//
4970 
4971 CanQualType ASTContext::getCanonicalParamType(QualType T) const {
4972  // Push qualifiers into arrays, and then discard any remaining
4973  // qualifiers.
4974  T = getCanonicalType(T);
4975  T = getVariableArrayDecayedType(T);
4976  const Type *Ty = T.getTypePtr();
4977  QualType Result;
4978  if (isa<ArrayType>(Ty)) {
4979  Result = getArrayDecayedType(QualType(Ty,0));
4980  } else if (isa<FunctionType>(Ty)) {
4981  Result = getPointerType(QualType(Ty, 0));
4982  } else {
4983  Result = QualType(Ty, 0);
4984  }
4985 
4986  return CanQualType::CreateUnsafe(Result);
4987 }
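
// [Editor's illustration - not part of ASTContext.cpp] Sketch: as a parameter
// type, 'int[8]' decays to 'int *'; the helper name is hypothetical.
static CanQualType decayedArrayParam(ASTContext &Ctx) {
  QualType Arr = Ctx.getConstantArrayType(Ctx.IntTy, llvm::APInt(32, 8),
                                          ArrayType::Normal,
                                          /*IndexTypeQuals=*/0);
  return Ctx.getCanonicalParamType(Arr); // canonically 'int *'
}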
4988 
4989 QualType ASTContext::getUnqualifiedArrayType(QualType type,
4990  Qualifiers &quals) {
4991  SplitQualType splitType = type.getSplitUnqualifiedType();
4992 
4993  // FIXME: getSplitUnqualifiedType() actually walks all the way to
4994  // the unqualified desugared type and then drops it on the floor.
4995  // We then have to strip that sugar back off with
4996  // getUnqualifiedDesugaredType(), which is silly.
4997  const auto *AT =
4998  dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());
4999 
5000  // If we don't have an array, just use the results in splitType.
5001  if (!AT) {
5002  quals = splitType.Quals;
5003  return QualType(splitType.Ty, 0);
5004  }
5005 
5006  // Otherwise, recurse on the array's element type.
5007  QualType elementType = AT->getElementType();
5008  QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);
5009 
5010  // If that didn't change the element type, AT has no qualifiers, so we
5011  // can just use the results in splitType.
5012  if (elementType == unqualElementType) {
5013  assert(quals.empty()); // from the recursive call
5014  quals = splitType.Quals;
5015  return QualType(splitType.Ty, 0);
5016  }
5017 
5018  // Otherwise, add in the qualifiers from the outermost type, then
5019  // build the type back up.
5020  quals.addConsistentQualifiers(splitType.Quals);
5021 
5022  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
5023  return getConstantArrayType(unqualElementType, CAT->getSize(),
5024  CAT->getSizeModifier(), 0);
5025  }
5026 
5027  if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) {
5028  return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
5029  }
5030 
5031  if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) {
5032  return getVariableArrayType(unqualElementType,
5033  VAT->getSizeExpr(),
5034  VAT->getSizeModifier(),
5035  VAT->getIndexTypeCVRQualifiers(),
5036  VAT->getBracketsRange());
5037  }
5038 
5039  const auto *DSAT = cast<DependentSizedArrayType>(AT);
5040  return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
5041  DSAT->getSizeModifier(), 0,
5042  SourceRange());
5043 }
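
In other words, getUnqualifiedArrayType rebuilds the array with an unqualified element type and reports everything it stripped through the out-parameter. A sketch under the same assumption of an existing ASTContext; the helper name is illustrative:

// Sketch only; assumes an already-constructed ASTContext &Ctx.
#include "clang/AST/ASTContext.h"
#include "llvm/ADT/APInt.h"

// For 'const volatile int[4]' this returns 'int[4]' and leaves 'const volatile'
// behind in StrippedQuals, mirroring the recursion above.
clang::QualType stripArrayQualifiers(clang::ASTContext &Ctx) {
  clang::QualType Elt = Ctx.IntTy;
  Elt.addConst();
  Elt.addVolatile();
  clang::QualType Arr = Ctx.getConstantArrayType(
      Elt, llvm::APInt(32, 4), clang::ArrayType::Normal, /*IndexTypeQuals=*/0);

  clang::Qualifiers StrippedQuals;
  return Ctx.getUnqualifiedArrayType(Arr, StrippedQuals); // 'int [4]'
}
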
5044 
5045 /// Attempt to unwrap two types that may both be array types with the same bound
5046 /// (or both be array types of unknown bound) for the purpose of comparing the
5047 /// cv-decomposition of two types per C++ [conv.qual].
5048 bool ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) {
5049  bool UnwrappedAny = false;
5050  while (true) {
5051  auto *AT1 = getAsArrayType(T1);
5052  if (!AT1) return UnwrappedAny;
5053 
5054  auto *AT2 = getAsArrayType(T2);
5055  if (!AT2) return UnwrappedAny;
5056 
5057  // If we don't have two array types with the same constant bound, or two
5058  // incomplete array types, we've unwrapped everything we can.
5059  if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) {
5060  auto *CAT2 = dyn_cast<ConstantArrayType>(AT2);
5061  if (!CAT2 || CAT1->getSize() != CAT2->getSize())
5062  return UnwrappedAny;
5063  } else if (!isa<IncompleteArrayType>(AT1) ||
5064  !isa<IncompleteArrayType>(AT2)) {
5065  return UnwrappedAny;
5066  }
5067 
5068  T1 = AT1->getElementType();
5069  T2 = AT2->getElementType();
5070  UnwrappedAny = true;
5071  }
5072 }
5073 
5074 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
5075 ///
5076 /// If T1 and T2 are both pointer types of the same kind, or both array types
5077 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is
5078 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
5079 ///
5080 /// This function will typically be called in a loop that successively
5081 /// "unwraps" pointer and pointer-to-member types to compare them at each
5082 /// level.
5083 ///
5084 /// \return \c true if a pointer type was unwrapped, \c false if we reached a
5085 /// pair of types that can't be unwrapped further.
5086 bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2) {
5087  UnwrapSimilarArrayTypes(T1, T2);
5088 
5089  const auto *T1PtrType = T1->getAs<PointerType>();
5090  const auto *T2PtrType = T2->getAs<PointerType>();
5091  if (T1PtrType && T2PtrType) {
5092  T1 = T1PtrType->getPointeeType();
5093  T2 = T2PtrType->getPointeeType();
5094  return true;
5095  }
5096 
5097  const auto *T1MPType = T1->getAs<MemberPointerType>();
5098  const auto *T2MPType = T2->getAs<MemberPointerType>();
5099  if (T1MPType && T2MPType &&
5100  hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
5101  QualType(T2MPType->getClass(), 0))) {
5102  T1 = T1MPType->getPointeeType();
5103  T2 = T2MPType->getPointeeType();
5104  return true;
5105  }
5106 
5107  if (getLangOpts().ObjC1) {
5108  const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
5109  const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
5110  if (T1OPType && T2OPType) {
5111  T1 = T1OPType->getPointeeType();
5112  T2 = T2OPType->getPointeeType();
5113  return true;
5114  }
5115  }
5116 
5117  // FIXME: Block pointers, too?
5118 
5119  return false;
5120 }
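
Callers typically drive UnwrapSimilarTypes in a loop, peeling one pointer (or pointer-to-member, or Objective-C object pointer) level per iteration. A sketch, assuming an existing ASTContext; the counting helper is invented for illustration:

// Sketch only; assumes an already-constructed ASTContext &Ctx.
#include "clang/AST/ASTContext.h"

// Counts how many pointer-like levels two types share, the way callers of
// UnwrapSimilarTypes walk the cv-decomposition one level at a time.
unsigned countSharedLevels(clang::ASTContext &Ctx,
                           clang::QualType T1, clang::QualType T2) {
  unsigned Levels = 0;
  while (Ctx.UnwrapSimilarTypes(T1, T2))
    ++Levels;
  return Levels;
}
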
5121 
5122 bool ASTContext::hasSimilarType(QualType T1, QualType T2) {
5123  while (true) {
5124  Qualifiers Quals;
5125  T1 = getUnqualifiedArrayType(T1, Quals);
5126  T2 = getUnqualifiedArrayType(T2, Quals);
5127  if (hasSameType(T1, T2))
5128  return true;
5129  if (!UnwrapSimilarTypes(T1, T2))
5130  return false;
5131  }
5132 }
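
So two types are similar when, after ignoring qualifiers at every level, the same sequence of pointer and array layers bottoms out on the same type. A sketch, again assuming an existing ASTContext; the helper and the particular pair of types are illustrative:

// Sketch only; assumes an already-constructed ASTContext &Ctx.
#include "clang/AST/ASTContext.h"

// 'const int *const *' and 'int **' have the same pointer structure, so they
// are similar in the C++ [conv.qual] sense even though their qualifiers differ.
bool pointerTypesAreSimilar(clang::ASTContext &Ctx) {
  clang::QualType ConstInt = Ctx.IntTy;
  ConstInt.addConst();
  clang::QualType Mid = Ctx.getPointerType(ConstInt);   // 'const int *'
  Mid.addConst();                                       // 'const int *const'
  clang::QualType A = Ctx.getPointerType(Mid);          // 'const int *const *'
  clang::QualType B = Ctx.getPointerType(Ctx.getPointerType(Ctx.IntTy)); // 'int **'
  return Ctx.hasSimilarType(A, B);                      // expected: true
}
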
5133 
5134 bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {
5135  while (true) {
5136  Qualifiers Quals1, Quals2;
5137  T1 = getUnqualifiedArrayType(T1, Quals1);
5138  T2 = getUnqualifiedArrayType(T2, Quals2);
5139 
5140  Quals1.removeCVRQualifiers();
5141  Quals2.removeCVRQualifiers();
5142  if (Quals1 != Quals2)
5143  return false;
5144 
5145  if (hasSameType(T1, T2))
5146  return true;
5147 
5148  if (!UnwrapSimilarTypes(T1, T2))
5149  return false;
5150  }
5151 }
5152 
5153 DeclarationNameInfo
5154 ASTContext::getNameForTemplate(TemplateName Name,
5155  SourceLocation NameLoc) const {
5156  switch (Name.getKind()) {
5157  case TemplateName::QualifiedTemplate:
5158  case TemplateName::Template:
5159  // DNInfo work in progress: CHECKME: what about DNLoc?
5160  return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
5161  NameLoc);
5162 
5163  case TemplateName::OverloadedTemplate: {
5164  OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
5165  // DNInfo work in progress: CHECKME: what about DNLoc?
5166  return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
5167  }
5168 
5169  case TemplateName::DependentTemplate: {
5170  DependentTemplateName *DTN = Name.getAsDependentTemplateName();
5171  DeclarationName DName;
5172  if (DTN->isIdentifier()) {
5173  DName = DeclarationNames.getIdentifier(DTN->getIdentifier());
5174  return DeclarationNameInfo(DName, NameLoc);
5175  } else {
5176  DName = DeclarationNames.getCXXOperatorName(DTN->getOperator());
5177  // DNInfo work in progress: FIXME: source locations?
5178  DeclarationNameLoc DNLoc;
5179  DNLoc.CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding();
5180  DNLoc.CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding();
5181  return DeclarationNameInfo(DName, NameLoc, DNLoc);
5182  }
5183  }
5184 
5185  case TemplateName::SubstTemplateTemplateParm: {
5186  SubstTemplateTemplateParmStorage *subst
5187  = Name.getAsSubstTemplateTemplateParm();
5188  return DeclarationNameInfo(subst->getParameter()->getDeclName(),
5189  NameLoc);
5190  }
5191 
5192  case TemplateName::SubstTemplateTemplateParmPack: {
5193  SubstTemplateTemplateParmPackStorage *subst
5194  = Name.getAsSubstTemplateTemplateParmPack();
5195  return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
5196  NameLoc);
5197  }
5198  }
5199 
5200  llvm_unreachable("bad template name kind!");
5201 }
5202 
5203 TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
5204  switch (Name.getKind()) {
5205  case TemplateName::QualifiedTemplate:
5206  case TemplateName::Template: {
5207  TemplateDecl *Template = Name.getAsTemplateDecl();
5208  if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template))
5209  Template = getCanonicalTemplateTemplateParmDecl(TTP);
5210 
5211  // The canonical template name is the canonical template declaration.
5212  return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
5213  }
5214 
5215  case TemplateName::OverloadedTemplate:
5216  llvm_unreachable("cannot canonicalize overloaded template");
5217 
5218  case TemplateName::DependentTemplate: {
5219  DependentTemplateName *DTN = Name.getAsDependentTemplateName();
5220  assert(DTN && "Non-dependent template names must refer to template decls.");
5221  return DTN->CanonicalTemplateName;
5222  }
5223 
5224  case TemplateName::SubstTemplateTemplateParm: {
5225  SubstTemplateTemplateParmStorage *subst
5226  = Name.getAsSubstTemplateTemplateParm();
5227  return getCanonicalTemplateName(subst->getReplacement());
5228  }
5229 
5230  case TemplateName::SubstTemplateTemplateParmPack: {
5231  SubstTemplateTemplateParmPackStorage *subst
5232  = Name.getAsSubstTemplateTemplateParmPack();
5233  TemplateTemplateParmDecl *canonParameter
5234  = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack());
5235  TemplateArgument canonArgPack
5236  = getCanonicalTemplateArgument(subst->getArgumentPack());
5237  return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack);
5238  }
5239  }
5240 
5241  llvm_unreachable("bad template name!");
5242 }
5243 
5244 bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) {
5245  X = getCanonicalTemplateName(X);
5246  Y = getCanonicalTemplateName(Y);
5247  return X.getAsVoidPointer() == Y.getAsVoidPointer();
5248 }
5249 
5250 TemplateArgument
5251 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
5252  switch (Arg.getKind()) {
5253  case TemplateArgument::Null:
5254  return Arg;
5255 
5256  case TemplateArgument::Expression:
5257  return Arg;
5258 
5259  case TemplateArgument::Declaration: {
5260  auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
5261  return TemplateArgument(D, Arg.getParamTypeForDecl());
5262  }
5263 
5264  case TemplateArgument::NullPtr:
5265  return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
5266  /*isNullPtr*/true);
5267 
5268  case TemplateArgument::Template:
5269  return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));
5270 
5271  case TemplateArgument::TemplateExpansion:
5272  return TemplateArgument(getCanonicalTemplateName(
5273  Arg.getAsTemplateOrTemplatePattern()),
5274  Arg.getNumTemplateExpansions());
5275 
5276  case TemplateArgument::Integral:
5277  return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));
5278 
5279  case TemplateArgument::Type:
5280  return TemplateArgument(getCanonicalType(Arg.getAsType()));
5281 
5282  case TemplateArgument::Pack: {
5283  if (Arg.pack_size() == 0)
5284  return Arg;
5285 
5286  auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()];
5287  unsigned Idx = 0;
5288  for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
5289  AEnd = Arg.pack_end();
5290  A != AEnd; (void)++A, ++Idx)
5291  CanonArgs[Idx] = getCanonicalTemplateArgument(*A);
5292 
5293  return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size()));
5294  }
5295  }
5296 
5297  // Silence GCC warning
5298  llvm_unreachable("Unhandled template argument kind");
5299 }
5300 
5301 NestedNameSpecifier *
5302 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
5303  if (!NNS)
5304  return nullptr;
5305 
5306  switch (NNS->getKind()) {
5307  case NestedNameSpecifier::Identifier:
5308  // Canonicalize the prefix but keep the identifier the same.
5309  return NestedNameSpecifier::Create(*this,
5310  getCanonicalNestedNameSpecifier(NNS->getPrefix()),
5311  NNS->getAsIdentifier());
5312 
5313  case NestedNameSpecifier::Namespace:
5314  // A namespace is canonical; build a nested-name-specifier with
5315  // this namespace and no prefix.
5316  return NestedNameSpecifier::Create(*this, nullptr,
5317  NNS->getAsNamespace()->getOriginalNamespace());
5318 
5319  case NestedNameSpecifier::NamespaceAlias:
5320  // A namespace is canonical; build a nested-name-specifier with
5321  // this namespace and no prefix.
5322  return NestedNameSpecifier::Create(*this, nullptr,
5323  NNS->getAsNamespaceAlias()->getNamespace()
5324  ->getOriginalNamespace());
5325 
5326  case NestedNameSpecifier::TypeSpec:
5327  case NestedNameSpecifier::TypeSpecWithTemplate: {
5328  QualType T = getCanonicalType(QualType(NNS->getAsType(), 0));
5329 
5330  // If we have some kind of dependent-named type (e.g., "typename T::type"),
5331  // break it apart into its prefix and identifier, then reconstitute those
5332  // as the canonical nested-name-specifier. This is required to canonicalize
5333  // a dependent nested-name-specifier involving typedefs of dependent-name
5334  // types, e.g.,
5335  // typedef typename T::type T1;
5336  // typedef typename T1::type T2;
5337  if (const auto *DNT = T->getAs<DependentNameType>())
5338  return NestedNameSpecifier::Create(*this, DNT->getQualifier(),
5339  const_cast<IdentifierInfo *>(DNT->getIdentifier()));
5340 
5341  // Otherwise, just canonicalize the type, and force it to be a TypeSpec.
5342  // FIXME: Why are TypeSpec and TypeSpecWithTemplate distinct in the
5343  // first place?
5344  return NestedNameSpecifier::Create(*this, nullptr, false,
5345  const_cast<Type *>(T.getTypePtr()));
5346  }
5347 
5348  case NestedNameSpecifier::Global:
5349  case NestedNameSpecifier::Super:
5350  // The global specifier and __super specifier are canonical and unique.
5351  return NNS;
5352  }
5353 
5354  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
5355 }
5356 
5357 const ArrayType *ASTContext::getAsArrayType(QualType T) const {
5358  // Handle the non-qualified case efficiently.
5359  if (!T.hasLocalQualifiers()) {
5360  // Handle the common positive case fast.
5361  if (const auto *AT = dyn_cast<ArrayType>(T))
5362  return AT;
5363  }
5364 
5365  // Handle the common negative case fast.
5366  if (!isa<ArrayType>(T.getCanonicalType()))
5367  return nullptr;
5368 
5369  // Apply any qualifiers from the array type to the element type. This
5370  // implements C99 6.7.3p8: "If the specification of an array type includes
5371  // any type qualifiers, the element type is so qualified, not the array type."
5372 
5373  // If we get here, we either have type qualifiers on the type, or we have
5374  // sugar such as a typedef in the way. If we have type qualifiers on the type
5375  // we must propagate them down into the element type.
5376 
5377  SplitQualType split = T.getSplitDesugaredType();
5378  Qualifiers qs = split.Quals;
5379 
5380  // If we have a simple case, just return now.
5381  const auto *ATy = dyn_cast<ArrayType>(split.Ty);
5382  if (!ATy || qs.empty())
5383  return ATy;
5384 
5385  // Otherwise, we have an array and we have qualifiers on it. Push the
5386  // qualifiers into the array element type and return a new array type.
5387  QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);
5388 
5389  if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy))
5390  return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
5391  CAT->getSizeModifier(),
5392  CAT->getIndexTypeCVRQualifiers()));
5393  if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy))
5394  return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
5395  IAT->getSizeModifier(),
5396  IAT->getIndexTypeCVRQualifiers()));
5397 
5398  if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy))
5399  return cast<ArrayType>(
5400  getDependentSizedArrayType(NewEltTy,
5401  DSAT->getSizeExpr(),
5402  DSAT->getSizeModifier(),
5403  DSAT->getIndexTypeCVRQualifiers(),
5404  DSAT->getBracketsRange()));
5405 
5406  const auto *VAT = cast<VariableArrayType>(ATy);
5407  return cast<ArrayType>(getVariableArrayType(NewEltTy,
5408  VAT->getSizeExpr(),
5409  VAT->getSizeModifier(),
5410  VAT->getIndexTypeCVRQualifiers(),
5411  VAT->getBracketsRange()));
5412 }
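
The practical effect is that a qualified array type comes back as an array whose element type carries the qualifiers. A sketch, assuming an existing ASTContext; the helper name is illustrative:

// Sketch only; assumes an already-constructed ASTContext &Ctx.
#include "clang/AST/ASTContext.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/APInt.h"

// A 'const'-qualified array comes back as an array of 'const int': the
// qualifier has been pushed into the element type, per C99 6.7.3p8.
const clang::ArrayType *qualifiedArrayView(clang::ASTContext &Ctx) {
  clang::QualType Arr = Ctx.getConstantArrayType(
      Ctx.IntTy, llvm::APInt(32, 4), clang::ArrayType::Normal, /*IndexTypeQuals=*/0);
  Arr.addConst();                   // 'int[4]' with a top-level const
  return Ctx.getAsArrayType(Arr);   // element type is 'const int'
}
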
5413 
5414 QualType ASTContext::getAdjustedParameterType(QualType T) const {
5415  if (T->isArrayType() || T->isFunctionType())
5416  return getDecayedType(T);
5417  return T;
5418 }
5419 
5420 QualType ASTContext::getSignatureParameterType(QualType T) const {
5421  T = getVariableArrayDecayedType(T);
5422  T = getAdjustedParameterType(T);
5423  return T.getUnqualifiedType();
5424 }
5425 
5426 QualType ASTContext::getExceptionObjectType(QualType T) const {
5427  // C++ [except.throw]p3:
5428  // A throw-expression initializes a temporary object, called the exception
5429  // object, the type of which is determined by removing any top-level
5430  // cv-qualifiers from the static type of the operand of throw and adjusting
5431  // the type from "array of T" or "function returning T" to "pointer to T"
5432  // or "pointer to function returning T", [...]
5433  T = getVariableArrayDecayedType(T);
5434  if (T->isArrayType() || T->isFunctionType())
5435  T = getDecayedType(T);
5436  return T.getUnqualifiedType();
5437 }
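
All three of the adjustments above reduce to the same decay rule: arrays and function types become pointers, and top-level qualifiers are dropped where the language says so. A sketch of the parameter adjustment, assuming an existing ASTContext; the helper is illustrative:

// Sketch only; assumes an already-constructed ASTContext &Ctx.
#include "clang/AST/ASTContext.h"
#include "llvm/ADT/APInt.h"

// A parameter declared as 'int[10]' is adjusted to 'int *', which is what the
// function's prototype actually records for it.
clang::QualType adjustedArrayParam(clang::ASTContext &Ctx) {
  clang::QualType Arr = Ctx.getConstantArrayType(
      Ctx.IntTy, llvm::APInt(32, 10), clang::ArrayType::Normal, /*IndexTypeQuals=*/0);
  return Ctx.getAdjustedParameterType(Arr); // 'int *'
}
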
5438 
5439 /// getArrayDecayedType - Return the properly qualified result of decaying the
5440 /// specified array type to a pointer. This operation is non-trivial when
5441 /// handling typedefs etc. The canonical type of "T" must be an array type,
5442 /// this returns a pointer to a properly qualified element of the array.
5443 ///
5444 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
5445 QualType ASTContext::getArrayDecayedType(QualType Ty) const {
5446  // Get the element type with 'getAsArrayType' so that we don't lose any
5447  // typedefs in the element type of the array. This also handles propagation
5448  // of type qualifiers from the array type into the element type if present
5449  // (C99 6.7.3p8).
5450  const ArrayType *PrettyArrayType = getAsArrayType(Ty);
5451  assert(PrettyArrayType && "Not an array type!");
5452 
5453  QualType PtrTy = getPointerType(PrettyArrayType->getElementType());
5454 
5455  // int x[restrict 4] -> int *restrict
5456  QualType Result = getQualifiedType(PtrTy,
5457  PrettyArrayType->getIndexTypeQualifiers());
5458 
5459  // int x[_Nullable] -> int * _Nullable
5460  if (auto Nullability = Ty->getNullability(*this)) {
5461  Result = const_cast<ASTContext *>(this)->getAttributedType(
5462  AttributedType::getNullabilityAttrKind(*Nullability), Result, Result);
5463  }
5464  return Result;
5465 }
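
Note that decay preserves the index-type qualifiers and any nullability attribute, as the comments above spell out. A sketch, assuming an existing ASTContext; the helper is illustrative:

// Sketch only; assumes an already-constructed ASTContext &Ctx.
#include "clang/AST/ASTContext.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/APInt.h"

// 'int x[restrict 4]' keeps 'restrict' as an index-type qualifier; decaying
// the array moves that qualifier onto the pointer, yielding 'int *restrict'.
clang::QualType decayRestrictArray(clang::ASTContext &Ctx) {
  clang::QualType Arr = Ctx.getConstantArrayType(
      Ctx.IntTy, llvm::APInt(32, 4), clang::ArrayType::Normal,
      /*IndexTypeQuals=*/clang::Qualifiers::Restrict);
  return Ctx.getArrayDecayedType(Arr);
}
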
5466 
5467 QualType ASTContext::getBaseElementType(const ArrayType *array) const {
5468  return getBaseElementType(array->getElementType());
5469 }
5470 
5471 QualType ASTContext::getBaseElementType(QualType type) const {
5472  Qualifiers qs;
5473  while (true) {
5474  SplitQualType split = type.getSplitDesugaredType();
5475  const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
5476  if (!array) break;
5477 
5478  type = array->getElementType();
5479  qs.addConsistentQualifiers(split.Quals);
5480  }
5481 
5482  return getQualifiedType(type, qs);
5483 }
5484 
5485 /// getConstantArrayElementCount - Returns number of constant array elements.
5486 uint64_t
5487 ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA)  const {
5488  uint64_t ElementCount = 1;
5489  do {
5490  ElementCount *= CA->getSize().getZExtValue();
5491  CA = dyn_cast_or_null<ConstantArrayType>(
5492  CA->getElementType()->getAsArrayTypeUnsafe());
5493  } while (CA);
5494  return ElementCount;
5495 }
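
Because the loop multiplies the bound of every nested constant array, the count for a multidimensional array is the product of its dimensions. A sketch, assuming an existing ASTContext; the helper is illustrative:

// Sketch only; assumes an already-constructed ASTContext &Ctx.
#include "clang/AST/ASTContext.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/APInt.h"
#include <cstdint>

// For 'int[2][3]', getBaseElementType yields 'int' and
// getConstantArrayElementCount yields 2 * 3 = 6.
uint64_t elementCountOfIntGrid(clang::ASTContext &Ctx) {
  clang::QualType Inner = Ctx.getConstantArrayType(
      Ctx.IntTy, llvm::APInt(32, 3), clang::ArrayType::Normal, /*IndexTypeQuals=*/0);
  clang::QualType Outer = Ctx.getConstantArrayType(
      Inner, llvm::APInt(32, 2), clang::ArrayType::Normal, /*IndexTypeQuals=*/0);
  const clang::ConstantArrayType *CAT = Ctx.getAsConstantArrayType(Outer);
  return Ctx.getConstantArrayElementCount(CAT); // 6
}
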
5496 
5497 /// getFloatingRank - Return a relative rank for floating point types.
5498 /// This routine will assert if passed a built-in type that isn't a float.
5499 static FloatingRank getFloatingRank(QualType T) {
5500  if (const auto *CT = T->getAs<ComplexType>())
5501  return getFloatingRank(CT->getElementType());
5502 
5503  assert(T->getAs<BuiltinType>() && "getFloatingRank(): not a floating type");
5504  switch (T->getAs<BuiltinType>()->getKind()) {
5505  default: llvm_unreachable("getFloatingRank(): not a floating type");
5506  case BuiltinType::Float16: return Float16Rank;
5507  case BuiltinType::Half: return HalfRank;
5508  case BuiltinType::Float: return FloatRank;
5509  case BuiltinType::Double: return DoubleRank;
5510  case BuiltinType::LongDouble: return LongDoubleRank;
5511  case BuiltinType::Float128: return Float128Rank;
5512  }
5513 }
5514 
5515 /// getFloatingTypeOfSizeWithinDomain - Returns a real floating
5516 /// point or a complex type (based on typeDomain/typeSize).
5517 /// 'typeDomain' is a real floating point or complex type.
5518 /// 'typeSize' is a real floating point or complex type.
5519 QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
5520  QualType Domain) const {
5521  FloatingRank EltRank = getFloatingRank(Size);
5522  if (Domain->isComplexType()) {
5523  switch (EltRank) {
5524  case Float16Rank:
5525  case HalfRank: llvm_unreachable("Complex half is not supported");
5526  case FloatRank: return FloatComplexTy;
5527  case DoubleRank: return DoubleComplexTy;
5528  case