clang  10.0.0svn
ASTContext.cpp
Go to the documentation of this file.
1 //===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the ASTContext interface.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "clang/AST/ASTContext.h"
14 #include "CXXABI.h"
15 #include "clang/AST/APValue.h"
18 #include "clang/AST/Attr.h"
19 #include "clang/AST/AttrIterator.h"
20 #include "clang/AST/CharUnits.h"
21 #include "clang/AST/Comment.h"
22 #include "clang/AST/Decl.h"
23 #include "clang/AST/DeclBase.h"
24 #include "clang/AST/DeclCXX.h"
26 #include "clang/AST/DeclObjC.h"
27 #include "clang/AST/DeclOpenMP.h"
28 #include "clang/AST/DeclTemplate.h"
30 #include "clang/AST/Expr.h"
31 #include "clang/AST/ExprCXX.h"
33 #include "clang/AST/Mangle.h"
37 #include "clang/AST/RecordLayout.h"
39 #include "clang/AST/Stmt.h"
40 #include "clang/AST/TemplateBase.h"
41 #include "clang/AST/TemplateName.h"
42 #include "clang/AST/Type.h"
43 #include "clang/AST/TypeLoc.h"
47 #include "clang/Basic/Builtins.h"
50 #include "clang/Basic/FixedPoint.h"
52 #include "clang/Basic/LLVM.h"
54 #include "clang/Basic/Linkage.h"
59 #include "clang/Basic/Specifiers.h"
61 #include "clang/Basic/TargetInfo.h"
62 #include "clang/Basic/XRayLists.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/APSInt.h"
65 #include "llvm/ADT/ArrayRef.h"
66 #include "llvm/ADT/DenseMap.h"
67 #include "llvm/ADT/DenseSet.h"
68 #include "llvm/ADT/FoldingSet.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/PointerUnion.h"
72 #include "llvm/ADT/STLExtras.h"
73 #include "llvm/ADT/SmallPtrSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/StringExtras.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Triple.h"
78 #include "llvm/Support/Capacity.h"
79 #include "llvm/Support/Casting.h"
80 #include "llvm/Support/Compiler.h"
81 #include "llvm/Support/ErrorHandling.h"
82 #include "llvm/Support/MathExtras.h"
83 #include "llvm/Support/raw_ostream.h"
84 #include <algorithm>
85 #include <cassert>
86 #include <cstddef>
87 #include <cstdint>
88 #include <cstdlib>
89 #include <map>
90 #include <memory>
91 #include <string>
92 #include <tuple>
93 #include <utility>
94 
95 using namespace clang;
96 
99 };
100 
101 /// \returns location that is relevant when searching for Doc comments related
102 /// to \p D.
// NOTE(review): the first line of this definition (original line 103, the
// return type and name) was lost in extraction. From the parameter tail below
// and the call sites later in this file, this is
//   static SourceLocation getDeclLocForCommentSearch(const Decl *D,
//                                                    SourceManager &SourceMgr)
// It returns the location at which to search for a doc comment for \p D, or
// an invalid (default-constructed) location when D cannot carry a comment.
104  SourceManager &SourceMgr) {
105  assert(D);
106 
107  // User can not attach documentation to implicit declarations.
108  if (D->isImplicit())
109  return {};
110 
111  // User can not attach documentation to implicit instantiations.
112  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
113  if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
114  return {};
115  }
116 
117  if (const auto *VD = dyn_cast<VarDecl>(D)) {
118  if (VD->isStaticDataMember() &&
119  VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
120  return {};
121  }
122 
123  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
124  if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
125  return {};
126  }
127 
// Explicit specializations are allowed comments; only implicit and
// not-yet-declared specializations are rejected here.
128  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
129  TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
130  if (TSK == TSK_ImplicitInstantiation ||
131  TSK == TSK_Undeclared)
132  return {};
133  }
134 
135  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
136  if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
137  return {};
138  }
139  if (const auto *TD = dyn_cast<TagDecl>(D)) {
140  // When tag declaration (but not definition!) is part of the
141  // decl-specifier-seq of some other declaration, it doesn't get comment
142  if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
143  return {};
144  }
145  // TODO: handle comments for function parameters properly.
146  if (isa<ParmVarDecl>(D))
147  return {};
148 
149  // TODO: we could look up template parameter documentation in the template
150  // documentation.
151  if (isa<TemplateTypeParmDecl>(D) ||
152  isa<NonTypeTemplateParmDecl>(D) ||
153  isa<TemplateTemplateParmDecl>(D))
154  return {};
155 
156  // Find declaration location.
157  // For Objective-C declarations we generally don't expect to have multiple
158  // declarators, thus use declaration starting location as the "declaration
159  // location".
160  // For all other declarations multiple declarators are used quite frequently,
161  // so we use the location of the identifier as the "declaration location".
162  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
163  isa<ObjCPropertyDecl>(D) ||
164  isa<RedeclarableTemplateDecl>(D) ||
165  isa<ClassTemplateSpecializationDecl>(D))
166  return D->getBeginLoc();
167  else {
168  const SourceLocation DeclLoc = D->getLocation();
169  if (DeclLoc.isMacroID()) {
170  if (isa<TypedefDecl>(D)) {
171  // If location of the typedef name is in a macro, it is because being
172  // declared via a macro. Try using declaration's starting location as
173  // the "declaration location".
174  return D->getBeginLoc();
175  } else if (const auto *TD = dyn_cast<TagDecl>(D)) {
176  // If location of the tag decl is inside a macro, but the spelling of
177  // the tag name comes from a macro argument, it looks like a special
178  // macro like NS_ENUM is being used to define the tag decl. In that
179  // case, adjust the source location to the expansion loc so that we can
180  // attach the comment to the tag decl.
181  if (SourceMgr.isMacroArgExpansion(DeclLoc) &&
182  TD->isCompleteDefinition())
183  return SourceMgr.getExpansionLoc(DeclLoc);
184  }
185  }
186  return DeclLoc;
187  }
188 
// NOTE(review): this final return is unreachable -- both branches of the
// if/else above return. Kept byte-identical; harmless.
189  return {};
190 }
191 
// NOTE(review): the signature line (original line 192) was lost in
// extraction. From the parameter tail below and the caller at
// attachCommentsToJustParsedDecls, this is presumably
//   RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(...) const
// -- TODO confirm against upstream. Given a declaration's representative
// location and the per-file comment map (keyed by file offset), it returns
// the raw comment attached to the declaration, or nullptr.
193  const Decl *D, const SourceLocation RepresentativeLocForDecl,
194  const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
195  // If the declaration doesn't map directly to a location in a file, we
196  // can't find the comment.
197  if (RepresentativeLocForDecl.isInvalid() ||
198  !RepresentativeLocForDecl.isFileID())
199  return nullptr;
200 
201  // If there are no comments anywhere, we won't find anything.
202  if (CommentsInTheFile.empty())
203  return nullptr;
204 
205  // Decompose the location for the declaration and find the beginning of the
206  // file buffer.
207  const std::pair<FileID, unsigned> DeclLocDecomp =
208  SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);
209 
210  // Slow path.
// lower_bound finds the first comment at or after the decl's file offset.
211  auto OffsetCommentBehindDecl =
212  CommentsInTheFile.lower_bound(DeclLocDecomp.second);
213 
214  // First check whether we have a trailing comment.
// Trailing comments (e.g. "int x; ///< doc") are only honored for the decl
// kinds listed below.
215  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
216  RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
217  if ((CommentBehindDecl->isDocumentation() ||
218  LangOpts.CommentOpts.ParseAllComments) &&
219  CommentBehindDecl->isTrailingComment() &&
220  (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
221  isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {
222 
223  // Check that Doxygen trailing comment comes after the declaration, starts
224  // on the same line and in the same file as the declaration.
225  if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
226  Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
227  OffsetCommentBehindDecl->first)) {
228  return CommentBehindDecl;
229  }
230  }
231  }
232 
233  // The comment just after the declaration was not a trailing comment.
234  // Let's look at the previous comment.
235  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
236  return nullptr;
237 
238  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
239  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;
240 
241  // Check that we actually have a non-member Doxygen comment.
242  if (!(CommentBeforeDecl->isDocumentation() ||
243  LangOpts.CommentOpts.ParseAllComments) ||
244  CommentBeforeDecl->isTrailingComment())
245  return nullptr;
246 
247  // Decompose the end of the comment.
248  const unsigned CommentEndOffset =
249  Comments.getCommentEndOffset(CommentBeforeDecl);
250 
251  // Get the corresponding buffer.
252  bool Invalid = false;
253  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
254  &Invalid).data();
255  if (Invalid)
256  return nullptr;
257 
258  // Extract text between the comment and declaration.
259  StringRef Text(Buffer + CommentEndOffset,
260  DeclLocDecomp.second - CommentEndOffset);
261 
262  // There should be no other declarations or preprocessor directives between
263  // comment and declaration.
// Any of ; { } # @ in between implies intervening code/directives, so the
// preceding comment does not belong to this declaration.
264  if (Text.find_first_of(";{}#@") != StringRef::npos)
265  return nullptr;
266 
267  return CommentBeforeDecl;
268 }
269 
// NOTE(review): the signature line (original line 270) was lost in
// extraction. From the call site in getRawCommentForAnyRedecl this is
// presumably
//   RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const
// -- TODO confirm against upstream. Looks up the raw comment for \p D without
// consulting or populating the per-decl comment caches.
271  const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);
272 
273  // If the declaration doesn't map directly to a location in a file, we
274  // can't find the comment.
275  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
276  return nullptr;
277 
// Lazily pull comments in from an external AST source (e.g. a PCH/module)
// exactly once.
278  if (ExternalSource && !CommentsLoaded) {
279  ExternalSource->ReadComments();
280  CommentsLoaded = true;
281  }
282 
283  if (Comments.empty())
284  return nullptr;
285 
286  const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
287  const auto CommentsInThisFile = Comments.getCommentsInFile(File);
288  if (!CommentsInThisFile || CommentsInThisFile->empty())
289  return nullptr;
290 
291  return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile);
292 }
293 
294 /// If we have a 'templated' declaration for a template, adjust 'D' to
295 /// refer to the actual template.
296 /// If we have an implicit instantiation, adjust 'D' to refer to template.
297 static const Decl &adjustDeclToTemplate(const Decl &D) {
298  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
299  // Is this function declaration part of a function template?
300  if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
301  return *FTD;
302 
303  // Nothing to do if function is not an implicit instantiation.
304  if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
305  return D;
306 
307  // Function is an implicit instantiation of a function template?
308  if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
309  return *FTD;
310 
311  // Function is instantiated from a member definition of a class template?
// NOTE(review): original line 313 (the initializer call, presumably
// FD->getInstantiatedFromMemberFunction()) was lost in extraction.
312  if (const FunctionDecl *MemberDecl =
314  return *MemberDecl;
315 
316  return D;
317  }
318  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
319  // Static data member is instantiated from a member definition of a class
320  // template?
321  if (VD->isStaticDataMember())
322  if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
323  return *MemberDecl;
324 
325  return D;
326  }
327  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
328  // Is this class declaration part of a class template?
329  if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
330  return *CTD;
331 
332  // Class is an implicit instantiation of a class template or partial
333  // specialization?
334  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
335  if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
336  return D;
// NOTE(review): original lines 338 and 343 were lost in extraction -- the
// second PointerUnion type argument (presumably
// ClassTemplatePartialSpecializationDecl *) and the matching get<...>() in
// the ':' branch below. TODO confirm against upstream.
337  llvm::PointerUnion<ClassTemplateDecl *,
339  PU = CTSD->getSpecializedTemplateOrPartial();
340  return PU.is<ClassTemplateDecl *>()
341  ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
342  : *static_cast<const Decl *>(
344  }
345 
346  // Class is instantiated from a member definition of a class template?
347  if (const MemberSpecializationInfo *Info =
348  CRD->getMemberSpecializationInfo())
349  return *Info->getInstantiatedFrom();
350 
351  return D;
352  }
353  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
354  // Enum is instantiated from a member definition of a class template?
355  if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
356  return *MemberDecl;
357 
358  return D;
359  }
360  // FIXME: Adjust alias templates?
361  return D;
362 }
363 
365  const Decl *D,
366  const Decl **OriginalDecl) const {
367  if (!D) {
368  if (OriginalDecl)
369  OriginalDecl = nullptr;
370  return nullptr;
371  }
372 
373  D = &adjustDeclToTemplate(*D);
374 
375  // Any comment directly attached to D?
376  {
377  auto DeclComment = DeclRawComments.find(D);
378  if (DeclComment != DeclRawComments.end()) {
379  if (OriginalDecl)
380  *OriginalDecl = D;
381  return DeclComment->second;
382  }
383  }
384 
385  // Any comment attached to any redeclaration of D?
386  const Decl *CanonicalD = D->getCanonicalDecl();
387  if (!CanonicalD)
388  return nullptr;
389 
390  {
391  auto RedeclComment = RedeclChainComments.find(CanonicalD);
392  if (RedeclComment != RedeclChainComments.end()) {
393  if (OriginalDecl)
394  *OriginalDecl = RedeclComment->second;
395  auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
396  assert(CommentAtRedecl != DeclRawComments.end() &&
397  "This decl is supposed to have comment attached.");
398  return CommentAtRedecl->second;
399  }
400  }
401 
402  // Any redeclarations of D that we haven't checked for comments yet?
403  // We can't use DenseMap::iterator directly since it'd get invalid.
404  auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
405  auto LookupRes = CommentlessRedeclChains.find(CanonicalD);
406  if (LookupRes != CommentlessRedeclChains.end())
407  return LookupRes->second;
408  return nullptr;
409  }();
410 
411  for (const auto Redecl : D->redecls()) {
412  assert(Redecl);
413  // Skip all redeclarations that have been checked previously.
414  if (LastCheckedRedecl) {
415  if (LastCheckedRedecl == Redecl) {
416  LastCheckedRedecl = nullptr;
417  }
418  continue;
419  }
420  const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
421  if (RedeclComment) {
422  cacheRawCommentForDecl(*Redecl, *RedeclComment);
423  if (OriginalDecl)
424  *OriginalDecl = Redecl;
425  return RedeclComment;
426  }
427  CommentlessRedeclChains[CanonicalD] = Redecl;
428  }
429 
430  if (OriginalDecl)
431  *OriginalDecl = nullptr;
432  return nullptr;
433 }
434 
// NOTE(review): the signature line (original line 435) was lost in
// extraction; from the uses of OriginalD below this is presumably
//   void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
//                                           const RawComment &Comment) const
// -- TODO confirm against upstream. Records the decl->comment association in
// both per-decl and per-redecl-chain caches, and drops the chain from the
// "known commentless" cache.
436  const RawComment &Comment) const {
437  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
// try_emplace: first comment attached to a decl/chain wins; later calls are
// no-ops for the same key.
438  DeclRawComments.try_emplace(&OriginalD, &Comment);
439  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
440  RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
441  CommentlessRedeclChains.erase(CanonicalDecl);
442 }
443 
// Collects methods redeclared in class extensions of the interface that
// implements \p ObjCMethod, appending them to \p Redeclared.
// NOTE(review): the second parameter line (original line 445, presumably
// "SmallVectorImpl<const NamedDecl *> &Redeclared") was lost in extraction
// -- TODO confirm against upstream.
444 static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
446  const DeclContext *DC = ObjCMethod->getDeclContext();
447  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
448  const ObjCInterfaceDecl *ID = IMD->getClassInterface();
449  if (!ID)
450  return;
451  // Add redeclared method here.
452  for (const auto *Ext : ID->known_extensions()) {
453  if (ObjCMethodDecl *RedeclaredMethod =
454  Ext->getMethod(ObjCMethod->getSelector(),
455  ObjCMethod->isInstanceMethod()))
456  Redeclared.push_back(RedeclaredMethod);
457  }
458  }
459 }
460 
// NOTE(review): the signature line (original line 461) was lost in
// extraction; from the uses of Decls/PP below this is presumably
//   void ASTContext::attachCommentsToJustParsedDecls(
//       ArrayRef<Decl *> Decls, const Preprocessor *PP)
// -- TODO confirm against upstream. Attempts to attach the most recently
// lexed (still unattached) comments to the just-parsed declarations.
462  const Preprocessor *PP) {
463  if (Comments.empty() || Decls.empty())
464  return;
465 
466  // See if there are any new comments that are not attached to a decl.
467  // The location doesn't have to be precise - we care only about the file.
468  const FileID File =
469  SourceMgr.getDecomposedLoc((*Decls.begin())->getLocation()).first;
470  auto CommentsInThisFile = Comments.getCommentsInFile(File);
// If the file's last comment is already attached, nothing new to do.
471  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
472  CommentsInThisFile->rbegin()->second->isAttached())
473  return;
474 
475  // There is at least one comment not attached to a decl.
476  // Maybe it should be attached to one of Decls?
477  //
478  // Note that this way we pick up not only comments that precede the
479  // declaration, but also comments that *follow* the declaration -- thanks to
480  // the lookahead in the lexer: we've consumed the semicolon and looked
481  // ahead through comments.
482 
483  for (const Decl *D : Decls) {
484  assert(D);
485  if (D->isInvalidDecl())
486  continue;
487 
488  D = &adjustDeclToTemplate(*D);
489 
490  const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);
491 
492  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
493  continue;
494 
// Skip decls that already have a cached comment.
495  if (DeclRawComments.count(D) > 0)
496  continue;
497 
498  if (RawComment *const DocComment =
499  getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) {
500  cacheRawCommentForDecl(*D, *DocComment);
// Eagerly parse the found raw comment and cache the parsed form, keyed by
// the canonical declaration.
501  comments::FullComment *FC = DocComment->parse(*this, PP, D);
502  ParsedComments[D->getCanonicalDecl()] = FC;
503  }
504  }
505 }
506 
// NOTE(review): the signature line (original line 507) was lost in
// extraction; from the call sites in getCommentForDecl this is presumably
//   comments::FullComment *ASTContext::cloneFullComment(
//       comments::FullComment *FC, const Decl *D) const
// -- TODO confirm against upstream. Re-targets an existing parsed comment FC
// at declaration D by building a fresh DeclInfo for D while reusing FC's
// comment blocks.
508  const Decl *D) const {
509  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
510  ThisDeclInfo->CommentDecl = D;
511  ThisDeclInfo->IsFilled = false;
512  ThisDeclInfo->fill();
// After fill(), point the DeclInfo back at the decl that owns the comment.
513  ThisDeclInfo->CommentDecl = FC->getDecl();
514  if (!ThisDeclInfo->TemplateParameters)
515  ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
516  comments::FullComment *CFC =
517  new (*this) comments::FullComment(FC->getBlocks(),
518  ThisDeclInfo);
519  return CFC;
520 }
521 
// NOTE(review): fragment -- the enclosing function's signature (original
// lines 522-523) and the definition of RC were lost in extraction. This is
// presumably the tail of a helper that looks up D's raw comment and parses
// it on the spot (no Preprocessor available, hence the nullptr PP argument)
// -- TODO confirm against upstream.
524  return RC ? RC->parse(*this, nullptr, D) : nullptr;
525 }
526 
// NOTE(review): the signature line (original line 527) was lost in
// extraction; from the recursive calls below this is presumably
//   comments::FullComment *ASTContext::getCommentForDecl(
//       const Decl *D, const Preprocessor *PP) const
// -- TODO confirm against upstream. Returns the parsed documentation comment
// for D, consulting caches, redeclarations, and -- when D itself has no
// comment -- related declarations (property for accessors, overridden
// methods, underlying tag for typedefs, ObjC superclasses/categories, C++
// base classes).
528  const Decl *D,
529  const Preprocessor *PP) const {
530  if (!D || D->isInvalidDecl())
531  return nullptr;
532  D = &adjustDeclToTemplate(*D);
533 
534  const Decl *Canonical = D->getCanonicalDecl();
535  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
536  ParsedComments.find(Canonical);
537 
538  if (Pos != ParsedComments.end()) {
539  if (Canonical != D) {
540  comments::FullComment *FC = Pos->second;
// NOTE(review): original line 541 was lost in extraction -- presumably
// "comments::FullComment *CFC = cloneFullComment(FC, D);" given the CFC
// returned below. TODO confirm against upstream.
542  return CFC;
543  }
544  return Pos->second;
545  }
546 
547  const Decl *OriginalDecl = nullptr;
548 
549  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
550  if (!RC) {
// No comment on D itself: try to inherit one from a related declaration.
551  if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
// NOTE(review): original line 552 was lost in extraction -- presumably the
// declaration of the Overridden SmallVector used below. TODO confirm.
553  const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
554  if (OMD && OMD->isPropertyAccessor())
555  if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
556  if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
557  return cloneFullComment(FC, D);
558  if (OMD)
559  addRedeclaredMethods(OMD, Overridden);
560  getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
561  for (unsigned i = 0, e = Overridden.size(); i < e; i++)
562  if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
563  return cloneFullComment(FC, D);
564  }
565  else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
566  // Attach any tag type's documentation to its typedef if latter
567  // does not have one of its own.
568  QualType QT = TD->getUnderlyingType();
569  if (const auto *TT = QT->getAs<TagType>())
570  if (const Decl *TD = TT->getDecl())
571  if (comments::FullComment *FC = getCommentForDecl(TD, PP))
572  return cloneFullComment(FC, D);
573  }
574  else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
// Walk up the superclass chain until a documented ancestor is found.
575  while (IC->getSuperClass()) {
576  IC = IC->getSuperClass();
577  if (comments::FullComment *FC = getCommentForDecl(IC, PP))
578  return cloneFullComment(FC, D);
579  }
580  }
581  else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
582  if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
583  if (comments::FullComment *FC = getCommentForDecl(IC, PP))
584  return cloneFullComment(FC, D);
585  }
586  else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
587  if (!(RD = RD->getDefinition()))
588  return nullptr;
589  // Check non-virtual bases.
590  for (const auto &I : RD->bases()) {
591  if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
592  continue;
593  QualType Ty = I.getType();
594  if (Ty.isNull())
595  continue;
596  if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
597  if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
598  continue;
599 
600  if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
601  return cloneFullComment(FC, D);
602  }
603  }
604  // Check virtual bases.
605  for (const auto &I : RD->vbases()) {
606  if (I.getAccessSpecifier() != AS_public)
607  continue;
608  QualType Ty = I.getType();
609  if (Ty.isNull())
610  continue;
611  if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
612  if (!(VirtualBase= VirtualBase->getDefinition()))
613  continue;
614  if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
615  return cloneFullComment(FC, D);
616  }
617  }
618  }
619  return nullptr;
620  }
621 
622  // If the RawComment was attached to other redeclaration of this Decl, we
623  // should parse the comment in context of that other Decl. This is important
624  // because comments can contain references to parameter names which can be
625  // different across redeclarations.
626  if (D != OriginalDecl && OriginalDecl)
627  return getCommentForDecl(OriginalDecl, PP);
628 
629  comments::FullComment *FC = RC->parse(*this, PP, D);
630  ParsedComments[Canonical] = FC;
631  return FC;
632 }
633 
// Folds the structural identity of a template template-parameter into \p ID:
// depth, index, pack-ness, then a tag-plus-payload per template parameter
// (0 = type param, 1 = non-type param, 2 = nested template template param).
634 void
635 ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
636  TemplateTemplateParmDecl *Parm) {
637  ID.AddInteger(Parm->getDepth());
638  ID.AddInteger(Parm->getPosition());
639  ID.AddBoolean(Parm->isParameterPack());
640 
// NOTE(review): original lines 641 and 643 were lost in extraction --
// presumably "TemplateParameterList *Params = Parm->getTemplateParameters();"
// and the start of the iterator for-loop whose tail appears below. TODO
// confirm against upstream.
642  ID.AddInteger(Params->size());
644  PEnd = Params->end();
645  P != PEnd; ++P) {
646  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
647  ID.AddInteger(0);
648  ID.AddBoolean(TTP->isParameterPack());
649  continue;
650  }
651 
652  if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
653  ID.AddInteger(1);
654  ID.AddBoolean(NTTP->isParameterPack());
// The canonical type pointer is a stable identity for profiling.
655  ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
656  if (NTTP->isExpandedParameterPack()) {
657  ID.AddBoolean(true);
658  ID.AddInteger(NTTP->getNumExpansionTypes());
659  for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
660  QualType T = NTTP->getExpansionType(I);
661  ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
662  }
663  } else
664  ID.AddBoolean(false);
665  continue;
666  }
667 
// Nested template template-parameter: recurse.
668  auto *TTP = cast<TemplateTemplateParmDecl>(*P);
669  ID.AddInteger(2);
670  Profile(ID, TTP);
671  }
672 }
673 
// Returns a canonical TemplateTemplateParmDecl structurally equivalent to
// \p TTP, creating and caching one in CanonTemplateTemplateParms on first
// use.
// NOTE(review): the return-type line (original line 674, presumably
// "TemplateTemplateParmDecl *") and several argument/initializer lines were
// lost in extraction; each gap is flagged below. TODO confirm against
// upstream.
675 ASTContext::getCanonicalTemplateTemplateParmDecl(
676  TemplateTemplateParmDecl *TTP) const {
677  // Check if we already have a canonical template template parameter.
678  llvm::FoldingSetNodeID ID;
679  CanonicalTemplateTemplateParm::Profile(ID, TTP);
680  void *InsertPos = nullptr;
681  CanonicalTemplateTemplateParm *Canonical
682  = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
683  if (Canonical)
684  return Canonical->getParam();
685 
686  // Build a canonical template parameter list.
// NOTE(review): original lines 687, 690, 695, 703-704, 714, 724, 745 and 750
// were lost in extraction (Params initialization, loop header, the
// *::Create(...) call heads, and TInfo initialization).
688  SmallVector<NamedDecl *, 4> CanonParams;
689  CanonParams.reserve(Params->size());
691  PEnd = Params->end();
692  P != PEnd; ++P) {
693  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
694  CanonParams.push_back(
696  SourceLocation(),
697  SourceLocation(),
698  TTP->getDepth(),
699  TTP->getIndex(), nullptr, false,
700  TTP->isParameterPack()));
701  else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
702  QualType T = getCanonicalType(NTTP->getType());
705  if (NTTP->isExpandedParameterPack()) {
706  SmallVector<QualType, 2> ExpandedTypes;
707  SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
708  for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
709  ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
710  ExpandedTInfos.push_back(
711  getTrivialTypeSourceInfo(ExpandedTypes.back()));
712  }
713 
715  SourceLocation(),
716  SourceLocation(),
717  NTTP->getDepth(),
718  NTTP->getPosition(), nullptr,
719  T,
720  TInfo,
721  ExpandedTypes,
722  ExpandedTInfos);
723  } else {
725  SourceLocation(),
726  SourceLocation(),
727  NTTP->getDepth(),
728  NTTP->getPosition(), nullptr,
729  T,
730  NTTP->isParameterPack(),
731  TInfo);
732  }
733  CanonParams.push_back(Param);
734 
735  } else
// Nested template template-parameter: canonicalize recursively.
736  CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
737  cast<TemplateTemplateParmDecl>(*P)));
738  }
739 
740  assert(!TTP->getRequiresClause() &&
741  "Unexpected requires-clause on template template-parameter");
742  Expr *const CanonRequiresClause = nullptr;
743 
744  TemplateTemplateParmDecl *CanonTTP
746  SourceLocation(), TTP->getDepth(),
747  TTP->getPosition(),
748  TTP->isParameterPack(),
749  nullptr,
751  SourceLocation(),
752  CanonParams,
753  SourceLocation(),
754  CanonRequiresClause));
755 
756  // Get the new insert position for the node we care about.
// Re-find: building CanonTTP may have inserted nodes and invalidated the
// earlier InsertPos.
757  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
758  assert(!Canonical && "Shouldn't be in the map!");
759  (void)Canonical;
760 
761  // Create the canonical template template parameter entry.
762  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
763  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
764  return CanonTTP;
765 }
766 
// Instantiates the C++ ABI object matching the target, or nullptr when not
// compiling C++.
// NOTE(review): original lines 774-778 and 780 were lost in extraction --
// presumably the remaining Itanium-family case labels and the
// "case TargetCXXABI::Microsoft:" label guarding the second return. TODO
// confirm against upstream.
767 CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
768  if (!LangOpts.CPlusPlus) return nullptr;
769 
770  switch (T.getCXXABI().getKind()) {
771  case TargetCXXABI::GenericARM: // Same as Itanium at this level
772  case TargetCXXABI::iOS:
773  case TargetCXXABI::iOS64:
779  return CreateItaniumCXXABI(*this);
781  return CreateMicrosoftCXXABI(*this);
782  }
783  llvm_unreachable("Invalid CXXABI type!");
784 }
785 
// Returns the language-to-target address space mapping: a synthetic map with
// distinct entries when -ffake-address-space-map is set (useful for testing),
// otherwise the target's own map.
786 static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
787  const LangOptions &LOpts) {
788  if (LOpts.FakeAddressSpaceMap) {
789  // The fake address space map must have a distinct entry for each
790  // language-specific address space.
// Index order must match the LangAS enumeration.
791  static const unsigned FakeAddrSpaceMap[] = {
792  0, // Default
793  1, // opencl_global
794  3, // opencl_local
795  2, // opencl_constant
796  0, // opencl_private
797  4, // opencl_generic
798  5, // cuda_device
799  6, // cuda_constant
800  7 // cuda_shared
801  };
802  return &FakeAddrSpaceMap;
803  } else {
804  return &T.getAddressSpaceMap();
805  }
806 }
807 
// NOTE(review): the signature line (original line 808) and the three case
// labels (original lines 811, 813, 815 -- presumably the Target/On/Off
// values of the AddressSpaceMapMangling option) were lost in extraction;
// this appears to be
//   static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
//                                             const LangOptions &LangOpts)
// -- TODO confirm against upstream.
809  const LangOptions &LangOpts) {
810  switch (LangOpts.getAddressSpaceMapMangling()) {
812  return TI.useAddressSpaceMapMangling();
814  return true;
816  return false;
817  }
818  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
819 }
820 
// ASTContext constructor.
// NOTE(review): the first signature line (original line 821, presumably
// "ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,") was lost
// in extraction -- TODO confirm against upstream. The init-list wires the
// folding sets that need a back-pointer to this context (this_()), stores the
// source manager and language options, and builds the sanitizer blacklist
// and XRay function filter from the option-supplied file lists.
822  IdentifierTable &idents, SelectorTable &sels,
823  Builtin::Context &builtins)
824  : FunctionProtoTypes(this_()), TemplateSpecializationTypes(this_()),
825  DependentTemplateSpecializationTypes(this_()),
826  SubstTemplateTemplateParmPacks(this_()), SourceMgr(SM), LangOpts(LOpts),
827  SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFiles, SM)),
828  XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
829  LangOpts.XRayNeverInstrumentFiles,
830  LangOpts.XRayAttrListFiles, SM)),
831  PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
832  BuiltinInfo(builtins), DeclarationNames(*this), Comments(SM),
833  CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
834  CompCategories(this_()), LastSDM(nullptr, 0) {
// The translation unit decl is created eagerly and forms the default
// traversal scope.
835  TUDecl = TranslationUnitDecl::Create(*this);
836  TraversalScope = {TUDecl};
837 }
838 
// NOTE(review): the destructor header (original line 839, presumably
// "ASTContext::~ASTContext() {") was lost in extraction. The body runs
// registered deallocation callbacks and explicitly destroys objects that
// were bump-allocated (so their destructors never run automatically) but own
// heap state such as DenseMaps.
840  // Release the DenseMaps associated with DeclContext objects.
841  // FIXME: Is this the ideal solution?
842  ReleaseDeclContextMaps();
843 
844  // Call all of the deallocation functions on all of their targets.
845  for (auto &Pair : Deallocations)
846  (Pair.first)(Pair.second);
847 
848  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
849  // because they can contain DenseMaps.
850  for (llvm::DenseMap<const ObjCContainerDecl*,
851  const ASTRecordLayout*>::iterator
852  I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
853  // Increment in loop to prevent using deallocated memory.
854  if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
855  R->Destroy(*this);
856 
857  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
858  I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
859  // Increment in loop to prevent using deallocated memory.
860  if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
861  R->Destroy(*this);
862  }
863 
// Attribute vectors, constant-evaluated temporaries, module initializers and
// APValues are destroyed manually for the same bump-allocation reason.
864  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
865  AEnd = DeclAttrs.end();
866  A != AEnd; ++A)
867  A->second->~AttrVec();
868 
869  for (std::pair<const MaterializeTemporaryExpr *, APValue *> &MTVPair :
870  MaterializedTemporaryValues)
871  MTVPair.second->~APValue();
872 
873  for (const auto &Value : ModuleInitializers)
874  Value.second->~PerModuleInitializers();
875 
876  for (APValue *Value : APValueCleanups)
877  Value->~APValue();
878 }
879 
// NOTE(review): the class header (original line 880, presumably
// "class ASTContext::ParentMap {") and the ParentVector alias (line 882)
// were lost in extraction, along with several interior lines flagged below.
// This class maps AST nodes to their parents, storing either a single parent
// or a vector when a node has several.
881  /// Contains parents of a node.
883 
884  /// Maps from a node to its parents. This is used for nodes that have
885  /// pointer identity only, which are more common and we can save space by
886  /// only storing a unique pointer to them.
// NOTE(review): original line 890 (the last PointerUnion4 type argument and
// closing ">;") was lost in extraction.
887  using ParentMapPointers = llvm::DenseMap<
888  const void *,
889  llvm::PointerUnion4<const Decl *, const Stmt *,
891 
892  /// Parent map for nodes without pointer identity. We store a full
893  /// DynTypedNode for all keys.
// NOTE(review): original line 895 (the DynTypedNode key type) was lost.
894  using ParentMapOtherNodes = llvm::DenseMap<
896  llvm::PointerUnion4<const Decl *, const Stmt *,
897  ast_type_traits::DynTypedNode *, ParentVector *>>;
898 
899  ParentMapPointers PointerParents;
900  ParentMapOtherNodes OtherParents;
901  class ASTVisitor;
902 
// NOTE(review): original lines 906 and 908 (the create(*D)/create(*S)
// returns, presumably) were lost in extraction.
903  static ast_type_traits::DynTypedNode
904  getSingleDynTypedNodeFromParentMap(ParentMapPointers::mapped_type U) {
905  if (const auto *D = U.dyn_cast<const Decl *>())
907  if (const auto *S = U.dyn_cast<const Stmt *>())
909  return *U.get<ast_type_traits::DynTypedNode *>();
910  }
911 
// NOTE(review): original line 917 (the empty-list return) was lost.
912  template <typename NodeTy, typename MapTy>
913  static ASTContext::DynTypedNodeList getDynNodeFromMap(const NodeTy &Node,
914  const MapTy &Map) {
915  auto I = Map.find(Node);
916  if (I == Map.end()) {
918  }
919  if (const auto *V = I->second.template dyn_cast<ParentVector *>()) {
920  return llvm::makeArrayRef(*V);
921  }
922  return getSingleDynTypedNodeFromParentMap(I->second);
923  }
924 
925 public:
926  ParentMap(ASTContext &Ctx);
// NOTE(review): original line 927 (presumably the "~ParentMap() {" header)
// was lost. The destructor deletes the heap-allocated DynTypedNode and
// ParentVector payloads stored in both maps.
928  for (const auto &Entry : PointerParents) {
929  if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
930  delete Entry.second.get<ast_type_traits::DynTypedNode *>();
931  } else if (Entry.second.is<ParentVector *>()) {
932  delete Entry.second.get<ParentVector *>();
933  }
934  }
935  for (const auto &Entry : OtherParents) {
936  if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
937  delete Entry.second.get<ast_type_traits::DynTypedNode *>();
938  } else if (Entry.second.is<ParentVector *>()) {
939  delete Entry.second.get<ParentVector *>();
940  }
941  }
942  }
943 
944  DynTypedNodeList getParents(const ast_type_traits::DynTypedNode &Node) {
945  if (Node.getNodeKind().hasPointerIdentity())
946  return getDynNodeFromMap(Node.getMemoizationData(), PointerParents);
947  return getDynNodeFromMap(Node, OtherParents);
948  }
949 };
950 
// Replaces the set of top-level decls visible to AST traversals and drops
// the cached parent map, which was computed for the old scope.
951 void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
952  TraversalScope = TopLevelDecls;
953  Parents.reset();
954 }
955 
// Registers a callback/data pair to be invoked from ~ASTContext, allowing
// bump-allocated objects to schedule cleanup.
956 void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
957  Deallocations.push_back({Callback, Data});
958 }
959 
// NOTE(review): the signature tail (original line 961, presumably
// "ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource>
// Source) {") was lost in extraction -- TODO confirm against upstream.
// Installs the external AST source (e.g. a PCH/module reader).
960 void
962  ExternalSource = std::move(Source);
963 }
964 
// NOTE(review): the function header (original line 965, presumably
// "void ASTContext::PrintStats() const {") was lost in extraction, along
// with the second operand of several "<<" chains below (lines 998, 1001,
// 1005, 1008, 1012, 1015 -- presumably the corresponding *Declared totals'
// "created" counters). Dumps per-type-class counts/sizes and implicit
// special-member statistics to stderr.
966  llvm::errs() << "\n*** AST Context Stats:\n";
967  llvm::errs() << " " << Types.size() << " types total.\n";
968 
// Build a zero-initialized counter per concrete Type class via the
// TypeNodes.def X-macro.
969  unsigned counts[] = {
970 #define TYPE(Name, Parent) 0,
971 #define ABSTRACT_TYPE(Name, Parent)
972 #include "clang/AST/TypeNodes.def"
973  0 // Extra
974  };
975 
976  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
977  Type *T = Types[i];
978  counts[(unsigned)T->getTypeClass()]++;
979  }
980 
981  unsigned Idx = 0;
982  unsigned TotalBytes = 0;
983 #define TYPE(Name, Parent) \
984  if (counts[Idx]) \
985  llvm::errs() << " " << counts[Idx] << " " << #Name \
986  << " types, " << sizeof(Name##Type) << " each " \
987  << "(" << counts[Idx] * sizeof(Name##Type) \
988  << " bytes)\n"; \
989  TotalBytes += counts[Idx] * sizeof(Name##Type); \
990  ++Idx;
991 #define ABSTRACT_TYPE(Name, Parent)
992 #include "clang/AST/TypeNodes.def"
993 
994  llvm::errs() << "Total bytes = " << TotalBytes << "\n";
995 
996  // Implicit special member functions.
997  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
999  << " implicit default constructors created\n";
1000  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
1002  << " implicit copy constructors created\n";
1003  if (getLangOpts().CPlusPlus)
1004  llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
1006  << " implicit move constructors created\n";
1007  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
1009  << " implicit copy assignment operators created\n";
1010  if (getLangOpts().CPlusPlus)
1011  llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
1013  << " implicit move assignment operators created\n";
1014  llvm::errs() << NumImplicitDestructorsDeclared << "/"
1016  << " implicit destructors created\n";
1017 
1018  if (ExternalSource) {
1019  llvm::errs() << "\n";
1020  ExternalSource->PrintStats();
1021  }
1022 
1023  BumpAlloc.PrintStats();
1024 }
1025 
1027  bool NotifyListeners) {
1028  if (NotifyListeners)
1029  if (auto *Listener = getASTMutationListener())
1031 
1032  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
1033 }
1034 
1036  auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
1037  if (It == MergedDefModules.end())
1038  return;
1039 
1040  auto &Merged = It->second;
1042  for (Module *&M : Merged)
1043  if (!Found.insert(M).second)
1044  M = nullptr;
1045  Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end());
1046 }
1047 
1048 void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
1049  if (LazyInitializers.empty())
1050  return;
1051 
1052  auto *Source = Ctx.getExternalSource();
1053  assert(Source && "lazy initializers but no external source");
1054 
1055  auto LazyInits = std::move(LazyInitializers);
1056  LazyInitializers.clear();
1057 
1058  for (auto ID : LazyInits)
1059  Initializers.push_back(Source->GetExternalDecl(ID));
1060 
1061  assert(LazyInitializers.empty() &&
1062  "GetExternalDecl for lazy module initializer added more inits");
1063 }
1064 
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
    auto It = ModuleInitializers.find(ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      // Force lazy initializers to be loaded so we can inspect the single
      // entry directly.
      Imported.resolve(*this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(OnlyDecl))
        // Record the transitively-imported decl instead of the wrapper.
        D = OnlyDecl;
    }
  }

  // Lazily create the per-module initializer list (arena-allocated) and
  // append D to it.
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(D);
1090 
1092  auto *&Inits = ModuleInitializers[M];
1093  if (!Inits)
1094  Inits = new (*this) PerModuleInitializers;
1095  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
1096  IDs.begin(), IDs.end());
1097 }
1098 
  // Look up the initializers recorded for module M; None if nothing was
  // ever registered for it.
  auto It = ModuleInitializers.find(M);
  if (It == ModuleInitializers.end())
    return None;

  auto *Inits = It->second;
  // Deserialize any lazy initializers before handing the list back.
  Inits->resolve(*this);
  return Inits->Initializers;
1108 
1110  if (!ExternCContext)
1111  ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());
1112 
1113  return ExternCContext;
1114 }
1115 
1118  const IdentifierInfo *II) const {
1119  auto *BuiltinTemplate = BuiltinTemplateDecl::Create(*this, TUDecl, II, BTK);
1120  BuiltinTemplate->setImplicit();
1121  TUDecl->addDecl(BuiltinTemplate);
1122 
1123  return BuiltinTemplate;
1124 }
1125 
1128  if (!MakeIntegerSeqDecl)
1129  MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
1131  return MakeIntegerSeqDecl;
1132 }
1133 
1136  if (!TypePackElementDecl)
1137  TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
1139  return TypePackElementDecl;
1140 }
1141 
1143  RecordDecl::TagKind TK) const {
  SourceLocation Loc;
  RecordDecl *NewDecl;
  // In C++ builds the implicit record must be a CXXRecordDecl so that it can
  // participate in C++ lookup and layout; otherwise a plain RecordDecl.
  if (getLangOpts().CPlusPlus)
    NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
                                    Loc, &Idents.get(Name));
  else
    NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
                                 &Idents.get(Name));
  // Mark the record implicit and give it default type visibility.
  NewDecl->setImplicit();
  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
      const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
  return NewDecl;
}
1157 
1159  StringRef Name) const {
1161  TypedefDecl *NewDecl = TypedefDecl::Create(
1162  const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
1163  SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
1164  NewDecl->setImplicit();
1165  return NewDecl;
1166 }
1167 
1169  if (!Int128Decl)
1170  Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
1171  return Int128Decl;
1172 }
1173 
1175  if (!UInt128Decl)
1176  UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
1177  return UInt128Decl;
1178 }
1179 
1180 void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
1181  auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
1182  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
1183  Types.push_back(Ty);
1184 }
1185 
1187  const TargetInfo *AuxTarget) {
1188  assert((!this->Target || this->Target == &Target) &&
1189  "Incorrect target reinitialization");
1190  assert(VoidTy.isNull() && "Context reinitialized?");
1191 
1192  this->Target = &Target;
1193  this->AuxTarget = AuxTarget;
1194 
1195  ABI.reset(createCXXABI(Target));
1196  AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
1197  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);
1198 
1199  // C99 6.2.5p19.
1200  InitBuiltinType(VoidTy, BuiltinType::Void);
1201 
1202  // C99 6.2.5p2.
1203  InitBuiltinType(BoolTy, BuiltinType::Bool);
1204  // C99 6.2.5p3.
1205  if (LangOpts.CharIsSigned)
1206  InitBuiltinType(CharTy, BuiltinType::Char_S);
1207  else
1208  InitBuiltinType(CharTy, BuiltinType::Char_U);
1209  // C99 6.2.5p4.
1210  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
1211  InitBuiltinType(ShortTy, BuiltinType::Short);
1212  InitBuiltinType(IntTy, BuiltinType::Int);
1213  InitBuiltinType(LongTy, BuiltinType::Long);
1214  InitBuiltinType(LongLongTy, BuiltinType::LongLong);
1215 
1216  // C99 6.2.5p6.
1217  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
1218  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
1219  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
1220  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
1221  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);
1222 
1223  // C99 6.2.5p10.
1224  InitBuiltinType(FloatTy, BuiltinType::Float);
1225  InitBuiltinType(DoubleTy, BuiltinType::Double);
1226  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);
1227 
1228  // GNU extension, __float128 for IEEE quadruple precision
1229  InitBuiltinType(Float128Ty, BuiltinType::Float128);
1230 
1231  // C11 extension ISO/IEC TS 18661-3
1232  InitBuiltinType(Float16Ty, BuiltinType::Float16);
1233 
1234  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
1235  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
1236  InitBuiltinType(AccumTy, BuiltinType::Accum);
1237  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
1238  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
1239  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
1240  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
1241  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
1242  InitBuiltinType(FractTy, BuiltinType::Fract);
1243  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
1244  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
1245  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
1246  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
1247  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
1248  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
1249  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
1250  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
1251  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
1252  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
1253  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
1254  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
1255  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
1256  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
1257  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
1258  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);
1259 
1260  // GNU extension, 128-bit integers.
1261  InitBuiltinType(Int128Ty, BuiltinType::Int128);
1262  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
1263 
1264  // C++ 3.9.1p5
1265  if (TargetInfo::isTypeSigned(Target.getWCharType()))
1266  InitBuiltinType(WCharTy, BuiltinType::WChar_S);
1267  else // -fshort-wchar makes wchar_t be unsigned.
1268  InitBuiltinType(WCharTy, BuiltinType::WChar_U);
1269  if (LangOpts.CPlusPlus && LangOpts.WChar)
1270  WideCharTy = WCharTy;
1271  else {
1272  // C99 (or C++ using -fno-wchar).
1273  WideCharTy = getFromTargetType(Target.getWCharType());
1274  }
1275 
1276  WIntTy = getFromTargetType(Target.getWIntType());
1277 
1278  // C++20 (proposed)
1279  InitBuiltinType(Char8Ty, BuiltinType::Char8);
1280 
1281  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1282  InitBuiltinType(Char16Ty, BuiltinType::Char16);
1283  else // C99
1284  Char16Ty = getFromTargetType(Target.getChar16Type());
1285 
1286  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1287  InitBuiltinType(Char32Ty, BuiltinType::Char32);
1288  else // C99
1289  Char32Ty = getFromTargetType(Target.getChar32Type());
1290 
1291  // Placeholder type for type-dependent expressions whose type is
1292  // completely unknown. No code should ever check a type against
1293  // DependentTy and users should never see it; however, it is here to
1294  // help diagnose failures to properly check for type-dependent
1295  // expressions.
1296  InitBuiltinType(DependentTy, BuiltinType::Dependent);
1297 
1298  // Placeholder type for functions.
1299  InitBuiltinType(OverloadTy, BuiltinType::Overload);
1300 
1301  // Placeholder type for bound members.
1302  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);
1303 
1304  // Placeholder type for pseudo-objects.
1305  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);
1306 
1307  // "any" type; useful for debugger-like clients.
1308  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);
1309 
1310  // Placeholder type for unbridged ARC casts.
1311  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);
1312 
1313  // Placeholder type for builtin functions.
1314  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
1315 
1316  // Placeholder type for OMP array sections.
1317  if (LangOpts.OpenMP)
1318  InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
1319 
1320  // C99 6.2.5p11.
1325 
1326  // Builtin types for 'id', 'Class', and 'SEL'.
1327  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
1328  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
1329  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);
1330 
1331  if (LangOpts.OpenCL) {
1332 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
1333  InitBuiltinType(SingletonId, BuiltinType::Id);
1334 #include "clang/Basic/OpenCLImageTypes.def"
1335 
1336  InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
1337  InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
1338  InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
1339  InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
1340  InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);
1341 
1342 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
1343  InitBuiltinType(Id##Ty, BuiltinType::Id);
1344 #include "clang/Basic/OpenCLExtensionTypes.def"
1345  }
1346 
1347  if (Target.hasAArch64SVETypes()) {
1348 #define SVE_TYPE(Name, Id, SingletonId) \
1349  InitBuiltinType(SingletonId, BuiltinType::Id);
1350 #include "clang/Basic/AArch64SVEACLETypes.def"
1351  }
1352 
1353  // Builtin type for __objc_yes and __objc_no
1354  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
1355  SignedCharTy : BoolTy);
1356 
1357  ObjCConstantStringType = QualType();
1358 
1359  ObjCSuperType = QualType();
1360 
1361  // void * type
1362  if (LangOpts.OpenCLVersion >= 200) {
1363  auto Q = VoidTy.getQualifiers();
1367  } else {
1369  }
1370 
1371  // nullptr type (C++0x 2.14.7)
1372  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
1373 
1374  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
1375  InitBuiltinType(HalfTy, BuiltinType::Half);
1376 
1377  // Builtin type used to help define __builtin_va_list.
1378  VaListTagDecl = nullptr;
1379 }
1380 
1382  return SourceMgr.getDiagnostics();
1383 }
1384 
  // Find (or default-insert) the attribute vector slot for this decl.
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    // The AttrVec is placement-constructed in the ASTContext's arena; it is
    // destroyed explicitly in eraseDeclAttrs(), never via operator delete.
    void *Mem = Allocate(sizeof(AttrVec));
    Result = new (Mem) AttrVec;
  }

  return *Result;
1394 
/// Erase the attributes corresponding to the given declaration.
  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
  if (Pos != DeclAttrs.end()) {
    // The AttrVec lives in the context's arena (see getDeclAttrs), so run
    // its destructor manually instead of deleting it.
    Pos->second->~AttrVec();
    DeclAttrs.erase(Pos);
  }
1403 
1404 // FIXME: Remove ?
1407  assert(Var->isStaticDataMember() && "Not a static data member");
1409  .dyn_cast<MemberSpecializationInfo *>();
1410 }
1411 
  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
      TemplateOrInstantiation.find(Var);
  // No instantiation/specialization info recorded: return an empty value.
  if (Pos == TemplateOrInstantiation.end())
    return {};

  return Pos->second;
1421 
1422 void
1425  SourceLocation PointOfInstantiation) {
1426  assert(Inst->isStaticDataMember() && "Not a static data member");
1427  assert(Tmpl->isStaticDataMember() && "Not a static data member");
1429  Tmpl, TSK, PointOfInstantiation));
1430 }
1431 
1432 void
1435  assert(!TemplateOrInstantiation[Inst] &&
1436  "Already noted what the variable was instantiated from");
1437  TemplateOrInstantiation[Inst] = TSI;
1438 }
1439 
1440 NamedDecl *
1442  auto Pos = InstantiatedFromUsingDecl.find(UUD);
1443  if (Pos == InstantiatedFromUsingDecl.end())
1444  return nullptr;
1445 
1446  return Pos->second;
1447 }
1448 
1449 void
1451  assert((isa<UsingDecl>(Pattern) ||
1452  isa<UnresolvedUsingValueDecl>(Pattern) ||
1453  isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
1454  "pattern decl is not a using decl");
1455  assert((isa<UsingDecl>(Inst) ||
1456  isa<UnresolvedUsingValueDecl>(Inst) ||
1457  isa<UnresolvedUsingTypenameDecl>(Inst)) &&
1458  "instantiation did not produce a using decl");
1459  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
1460  InstantiatedFromUsingDecl[Inst] = Pattern;
1461 }
1462 
1465  llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
1466  = InstantiatedFromUsingShadowDecl.find(Inst);
1467  if (Pos == InstantiatedFromUsingShadowDecl.end())
1468  return nullptr;
1469 
1470  return Pos->second;
1471 }
1472 
1473 void
1475  UsingShadowDecl *Pattern) {
1476  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
1477  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
1478 }
1479 
1481  llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
1482  = InstantiatedFromUnnamedFieldDecl.find(Field);
1483  if (Pos == InstantiatedFromUnnamedFieldDecl.end())
1484  return nullptr;
1485 
1486  return Pos->second;
1487 }
1488 
1490  FieldDecl *Tmpl) {
1491  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
1492  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
1493  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1494  "Already noted what unnamed field was instantiated from");
1495 
1496  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1497 }
1498 
1501  return overridden_methods(Method).begin();
1502 }
1503 
1506  return overridden_methods(Method).end();
1507 }
1508 
1509 unsigned
1511  auto Range = overridden_methods(Method);
1512  return Range.end() - Range.begin();
1513 }
1514 
1517  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
1518  OverriddenMethods.find(Method->getCanonicalDecl());
1519  if (Pos == OverriddenMethods.end())
1520  return overridden_method_range(nullptr, nullptr);
1521  return overridden_method_range(Pos->second.begin(), Pos->second.end());
1522 }
1523 
1525  const CXXMethodDecl *Overridden) {
1526  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
1527  OverriddenMethods[Method].push_back(Overridden);
1528 }
1529 
1531  const NamedDecl *D,
1532  SmallVectorImpl<const NamedDecl *> &Overridden) const {
1533  assert(D);
1534 
1535  if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
1536  Overridden.append(overridden_methods_begin(CXXMethod),
1537  overridden_methods_end(CXXMethod));
1538  return;
1539  }
1540 
1541  const auto *Method = dyn_cast<ObjCMethodDecl>(D);
1542  if (!Method)
1543  return;
1544 
1546  Method->getOverriddenMethods(OverDecls);
1547  Overridden.append(OverDecls.begin(), OverDecls.end());
1548 }
1549 
  assert(!Import->NextLocalImport && "Import declaration already in the chain");
  assert(!Import->isFromASTFile() && "Non-local import declaration");
  if (!FirstLocalImport) {
    // First local import in this context: start the singly-linked chain.
    FirstLocalImport = Import;
    LastLocalImport = Import;
    return;
  }

  // Otherwise append to the tail of the existing chain.
  LastLocalImport->NextLocalImport = Import;
  LastLocalImport = Import;
1562 
1563 //===----------------------------------------------------------------------===//
1564 // Type Sizing and Analysis
1565 //===----------------------------------------------------------------------===//
1566 
1567 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1568 /// scalar floating point type.
1569 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1570  const auto *BT = T->getAs<BuiltinType>();
1571  assert(BT && "Not a floating point type!");
1572  switch (BT->getKind()) {
1573  default: llvm_unreachable("Not a floating point type!");
1574  case BuiltinType::Float16:
1575  case BuiltinType::Half:
1576  return Target->getHalfFormat();
1577  case BuiltinType::Float: return Target->getFloatFormat();
1578  case BuiltinType::Double: return Target->getDoubleFormat();
1579  case BuiltinType::LongDouble:
1580  if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
1581  return AuxTarget->getLongDoubleFormat();
1582  return Target->getLongDoubleFormat();
1583  case BuiltinType::Float128:
1584  if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
1585  return AuxTarget->getFloat128Format();
1586  return Target->getFloat128Format();
1587  }
1588 }
1589 
/// Return the alignment of the specified declaration, in character units.
/// When \p ForAlignof is true this computes alignof(decl) semantics:
/// references are measured by their referent type, and the large-array and
/// global-variable alignment adjustments below are skipped.
CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  unsigned Align = Target->getCharWidth();

  bool UseAlignAttrOnly = false;
  if (unsigned AlignFromAttr = D->getMaxAlignment()) {
    Align = AlignFromAttr;

    // __attribute__((aligned)) can increase or decrease alignment
    // *except* on a struct or struct member, where it only increases
    // alignment unless 'packed' is also specified.
    //
    // It is an error for alignas to decrease alignment, so we can
    // ignore that possibility; Sema should diagnose it.
    if (isa<FieldDecl>(D)) {
      UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
        cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
    } else {
      UseAlignAttrOnly = true;
    }
  }
  else if (isa<FieldDecl>(D))
    UseAlignAttrOnly =
      D->hasAttr<PackedAttr>() ||
      cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();

  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
    QualType T = VD->getType();
    // alignof on a reference looks through to the referent; otherwise a
    // declared reference is laid out like a pointer to the referent.
    if (const auto *RT = T->getAs<ReferenceType>()) {
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          // VLAs always count as "large"; constant arrays only when they
          // reach the target's minimum width.
          if (isa<VariableArrayType>(arrayType))
            Align = std::max(Align, Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(arrayType) &&
                   MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
            Align = std::max(Align, Target->getLargeArrayAlign());
        }
      }
      Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
      // The 'unaligned' qualifier forces character alignment.
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
      if (const auto *VD = dyn_cast<VarDecl>(D)) {
        if (VD->hasGlobalStorage() && !ForAlignof) {
          uint64_t TypeSize = getTypeSize(T.getTypePtr());
          Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
        }
      }
    }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(Align, FieldAlign);
      }
    }
  }

  return toCharUnitsFromBits(Align);
}
1685 
1686 // getTypeInfoDataSizeInChars - Return the size of a type, in
1687 // chars. If the type is a record, its data size is returned. This is
1688 // the size of the memcpy that's performed when assigning this type
1689 // using a trivial copy/move assignment operator.
1690 std::pair<CharUnits, CharUnits>
1692  std::pair<CharUnits, CharUnits> sizeAndAlign = getTypeInfoInChars(T);
1693 
1694  // In C++, objects can sometimes be allocated into the tail padding
1695  // of a base-class subobject. We decide whether that's possible
1696  // during class layout, so here we can just trust the layout results.
1697  if (getLangOpts().CPlusPlus) {
1698  if (const auto *RT = T->getAs<RecordType>()) {
1699  const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
1700  sizeAndAlign.first = layout.getDataSize();
1701  }
1702  }
1703 
1704  return sizeAndAlign;
1705 }
1706 
1707 /// getConstantArrayInfoInChars - Performing the computation in CharUnits
1708 /// instead of in bits prevents overflowing the uint64_t for some large arrays.
1709 std::pair<CharUnits, CharUnits>
1711  const ConstantArrayType *CAT) {
1712  std::pair<CharUnits, CharUnits> EltInfo =
1713  Context.getTypeInfoInChars(CAT->getElementType());
1714  uint64_t Size = CAT->getSize().getZExtValue();
1715  assert((Size == 0 || static_cast<uint64_t>(EltInfo.first.getQuantity()) <=
1716  (uint64_t)(-1)/Size) &&
1717  "Overflow in array type char size evaluation");
1718  uint64_t Width = EltInfo.first.getQuantity() * Size;
1719  unsigned Align = EltInfo.second.getQuantity();
1720  if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
1721  Context.getTargetInfo().getPointerWidth(0) == 64)
1722  Width = llvm::alignTo(Width, Align);
1723  return std::make_pair(CharUnits::fromQuantity(Width),
1724  CharUnits::fromQuantity(Align));
1725 }
1726 
1727 std::pair<CharUnits, CharUnits>
1729  if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
1730  return getConstantArrayInfoInChars(*this, CAT);
1731  TypeInfo Info = getTypeInfo(T);
1732  return std::make_pair(toCharUnitsFromBits(Info.Width),
1733  toCharUnitsFromBits(Info.Align));
1734 }
1735 
1736 std::pair<CharUnits, CharUnits>
1738  return getTypeInfoInChars(T.getTypePtr());
1739 }
1740 
1742  return getTypeInfo(T).AlignIsRequired;
1743 }
1744 
1746  return isAlignmentRequired(T.getTypePtr());
1747 }
1748 
  // An alignment on a typedef overrides anything else.
  if (const auto *TT = T->getAs<TypedefType>())
    if (unsigned Align = TT->getDecl()->getMaxAlignment())
      return Align;

  // If we have an (array of) complete type, we're done.
  T = getBaseElementType(T);
  if (!T->isIncompleteType())
    return getTypeAlign(T);

  // If we had an array type, its element type might be a typedef
  // type with an alignment attribute.
  if (const auto *TT = T->getAs<TypedefType>())
    if (unsigned Align = TT->getDecl()->getMaxAlignment())
      return Align;

  // Otherwise, see if the declaration of the type had an attribute.
  if (const auto *TT = T->getAs<TagType>())
    return TT->getDecl()->getMaxAlignment();

  // No alignment information could be determined for this type.
  return 0;
1772 
  // Fast path: reuse a previously computed TypeInfo for this type.
  TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
  if (I != MemoizedTypeInfo.end())
    return I->second;

  // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
  TypeInfo TI = getTypeInfoImpl(T);
  MemoizedTypeInfo[T] = TI;
  return TI;
1783 
1784 /// getTypeInfoImpl - Return the size of the specified type, in bits. This
1785 /// method does not work on incomplete types.
1786 ///
1787 /// FIXME: Pointers into different addr spaces could have different sizes and
1788 /// alignment requirements: getPointerInfo should take an AddrSpace, this
1789 /// should take a QualType, &c.
1790 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
1791  uint64_t Width = 0;
1792  unsigned Align = 8;
1793  bool AlignIsRequired = false;
1794  unsigned AS = 0;
1795  switch (T->getTypeClass()) {
1796 #define TYPE(Class, Base)
1797 #define ABSTRACT_TYPE(Class, Base)
1798 #define NON_CANONICAL_TYPE(Class, Base)
1799 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
1800 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
1801  case Type::Class: \
1802  assert(!T->isDependentType() && "should not see dependent types here"); \
1803  return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
1804 #include "clang/AST/TypeNodes.def"
1805  llvm_unreachable("Should not see dependent types");
1806 
1807  case Type::FunctionNoProto:
1808  case Type::FunctionProto:
1809  // GCC extension: alignof(function) = 32 bits
1810  Width = 0;
1811  Align = 32;
1812  break;
1813 
1814  case Type::IncompleteArray:
1815  case Type::VariableArray:
1816  Width = 0;
1817  Align = getTypeAlign(cast<ArrayType>(T)->getElementType());
1818  break;
1819 
1820  case Type::ConstantArray: {
1821  const auto *CAT = cast<ConstantArrayType>(T);
1822 
1823  TypeInfo EltInfo = getTypeInfo(CAT->getElementType());
1824  uint64_t Size = CAT->getSize().getZExtValue();
1825  assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
1826  "Overflow in array type bit size evaluation");
1827  Width = EltInfo.Width * Size;
1828  Align = EltInfo.Align;
1829  if (!getTargetInfo().getCXXABI().isMicrosoft() ||
1830  getTargetInfo().getPointerWidth(0) == 64)
1831  Width = llvm::alignTo(Width, Align);
1832  break;
1833  }
1834  case Type::ExtVector:
1835  case Type::Vector: {
1836  const auto *VT = cast<VectorType>(T);
1837  TypeInfo EltInfo = getTypeInfo(VT->getElementType());
1838  Width = EltInfo.Width * VT->getNumElements();
1839  Align = Width;
1840  // If the alignment is not a power of 2, round up to the next power of 2.
1841  // This happens for non-power-of-2 length vectors.
1842  if (Align & (Align-1)) {
1843  Align = llvm::NextPowerOf2(Align);
1844  Width = llvm::alignTo(Width, Align);
1845  }
1846  // Adjust the alignment based on the target max.
1847  uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
1848  if (TargetVectorAlign && TargetVectorAlign < Align)
1849  Align = TargetVectorAlign;
1850  break;
1851  }
1852 
1853  case Type::Builtin:
1854  switch (cast<BuiltinType>(T)->getKind()) {
1855  default: llvm_unreachable("Unknown builtin type!");
1856  case BuiltinType::Void:
1857  // GCC extension: alignof(void) = 8 bits.
1858  Width = 0;
1859  Align = 8;
1860  break;
1861  case BuiltinType::Bool:
1862  Width = Target->getBoolWidth();
1863  Align = Target->getBoolAlign();
1864  break;
1865  case BuiltinType::Char_S:
1866  case BuiltinType::Char_U:
1867  case BuiltinType::UChar:
1868  case BuiltinType::SChar:
1869  case BuiltinType::Char8:
1870  Width = Target->getCharWidth();
1871  Align = Target->getCharAlign();
1872  break;
1873  case BuiltinType::WChar_S:
1874  case BuiltinType::WChar_U:
1875  Width = Target->getWCharWidth();
1876  Align = Target->getWCharAlign();
1877  break;
1878  case BuiltinType::Char16:
1879  Width = Target->getChar16Width();
1880  Align = Target->getChar16Align();
1881  break;
1882  case BuiltinType::Char32:
1883  Width = Target->getChar32Width();
1884  Align = Target->getChar32Align();
1885  break;
1886  case BuiltinType::UShort:
1887  case BuiltinType::Short:
1888  Width = Target->getShortWidth();
1889  Align = Target->getShortAlign();
1890  break;
1891  case BuiltinType::UInt:
1892  case BuiltinType::Int:
1893  Width = Target->getIntWidth();
1894  Align = Target->getIntAlign();
1895  break;
1896  case BuiltinType::ULong:
1897  case BuiltinType::Long:
1898  Width = Target->getLongWidth();
1899  Align = Target->getLongAlign();
1900  break;
1901  case BuiltinType::ULongLong:
1902  case BuiltinType::LongLong:
1903  Width = Target->getLongLongWidth();
1904  Align = Target->getLongLongAlign();
1905  break;
1906  case BuiltinType::Int128:
1907  case BuiltinType::UInt128:
1908  Width = 128;
1909  Align = 128; // int128_t is 128-bit aligned on all targets.
1910  break;
1911  case BuiltinType::ShortAccum:
1912  case BuiltinType::UShortAccum:
1913  case BuiltinType::SatShortAccum:
1914  case BuiltinType::SatUShortAccum:
1915  Width = Target->getShortAccumWidth();
1916  Align = Target->getShortAccumAlign();
1917  break;
1918  case BuiltinType::Accum:
1919  case BuiltinType::UAccum:
1920  case BuiltinType::SatAccum:
1921  case BuiltinType::SatUAccum:
1922  Width = Target->getAccumWidth();
1923  Align = Target->getAccumAlign();
1924  break;
1925  case BuiltinType::LongAccum:
1926  case BuiltinType::ULongAccum:
1927  case BuiltinType::SatLongAccum:
1928  case BuiltinType::SatULongAccum:
1929  Width = Target->getLongAccumWidth();
1930  Align = Target->getLongAccumAlign();
1931  break;
1932  case BuiltinType::ShortFract:
1933  case BuiltinType::UShortFract:
1934  case BuiltinType::SatShortFract:
1935  case BuiltinType::SatUShortFract:
1936  Width = Target->getShortFractWidth();
1937  Align = Target->getShortFractAlign();
1938  break;
1939  case BuiltinType::Fract:
1940  case BuiltinType::UFract:
1941  case BuiltinType::SatFract:
1942  case BuiltinType::SatUFract:
1943  Width = Target->getFractWidth();
1944  Align = Target->getFractAlign();
1945  break;
1946  case BuiltinType::LongFract:
1947  case BuiltinType::ULongFract:
1948  case BuiltinType::SatLongFract:
1949  case BuiltinType::SatULongFract:
1950  Width = Target->getLongFractWidth();
1951  Align = Target->getLongFractAlign();
1952  break;
1953  case BuiltinType::Float16:
1954  case BuiltinType::Half:
1955  if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
1956  !getLangOpts().OpenMPIsDevice) {
1957  Width = Target->getHalfWidth();
1958  Align = Target->getHalfAlign();
1959  } else {
1960  assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
1961  "Expected OpenMP device compilation.");
1962  Width = AuxTarget->getHalfWidth();
1963  Align = AuxTarget->getHalfAlign();
1964  }
1965  break;
1966  case BuiltinType::Float:
1967  Width = Target->getFloatWidth();
1968  Align = Target->getFloatAlign();
1969  break;
1970  case BuiltinType::Double:
1971  Width = Target->getDoubleWidth();
1972  Align = Target->getDoubleAlign();
1973  break;
1974  case BuiltinType::LongDouble:
1975  if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
1976  (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
1977  Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
1978  Width = AuxTarget->getLongDoubleWidth();
1979  Align = AuxTarget->getLongDoubleAlign();
1980  } else {
1981  Width = Target->getLongDoubleWidth();
1982  Align = Target->getLongDoubleAlign();
1983  }
1984  break;
1985  case BuiltinType::Float128:
1986  if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
1987  !getLangOpts().OpenMPIsDevice) {
1988  Width = Target->getFloat128Width();
1989  Align = Target->getFloat128Align();
1990  } else {
1991  assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
1992  "Expected OpenMP device compilation.");
1993  Width = AuxTarget->getFloat128Width();
1994  Align = AuxTarget->getFloat128Align();
1995  }
1996  break;
1997  case BuiltinType::NullPtr:
1998  Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
1999  Align = Target->getPointerAlign(0); // == sizeof(void*)
2000  break;
2001  case BuiltinType::ObjCId:
2002  case BuiltinType::ObjCClass:
2003  case BuiltinType::ObjCSel:
2004  Width = Target->getPointerWidth(0);
2005  Align = Target->getPointerAlign(0);
2006  break;
2007  case BuiltinType::OCLSampler:
2008  case BuiltinType::OCLEvent:
2009  case BuiltinType::OCLClkEvent:
2010  case BuiltinType::OCLQueue:
2011  case BuiltinType::OCLReserveID:
2012 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2013  case BuiltinType::Id:
2014 #include "clang/Basic/OpenCLImageTypes.def"
2015 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
2016  case BuiltinType::Id:
2017 #include "clang/Basic/OpenCLExtensionTypes.def"
2018  AS = getTargetAddressSpace(
2019  Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)));
2020  Width = Target->getPointerWidth(AS);
2021  Align = Target->getPointerAlign(AS);
2022  break;
2023  // The SVE types are effectively target-specific. The length of an
2024  // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple
2025  // of 128 bits. There is one predicate bit for each vector byte, so the
2026  // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits.
2027  //
2028  // Because the length is only known at runtime, we use a dummy value
2029  // of 0 for the static length. The alignment values are those defined
2030  // by the Procedure Call Standard for the Arm Architecture.
2031 #define SVE_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, IsSigned, IsFP)\
2032  case BuiltinType::Id: \
2033  Width = 0; \
2034  Align = 128; \
2035  break;
2036 #define SVE_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \
2037  case BuiltinType::Id: \
2038  Width = 0; \
2039  Align = 16; \
2040  break;
2041 #include "clang/Basic/AArch64SVEACLETypes.def"
2042  }
2043  break;
2044  case Type::ObjCObjectPointer:
2045  Width = Target->getPointerWidth(0);
2046  Align = Target->getPointerAlign(0);
2047  break;
2048  case Type::BlockPointer:
2049  AS = getTargetAddressSpace(cast<BlockPointerType>(T)->getPointeeType());
2050  Width = Target->getPointerWidth(AS);
2051  Align = Target->getPointerAlign(AS);
2052  break;
2053  case Type::LValueReference:
2054  case Type::RValueReference:
2055  // alignof and sizeof should never enter this code path here, so we go
2056  // the pointer route.
2057  AS = getTargetAddressSpace(cast<ReferenceType>(T)->getPointeeType());
2058  Width = Target->getPointerWidth(AS);
2059  Align = Target->getPointerAlign(AS);
2060  break;
2061  case Type::Pointer:
2062  AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType());
2063  Width = Target->getPointerWidth(AS);
2064  Align = Target->getPointerAlign(AS);
2065  break;
2066  case Type::MemberPointer: {
2067  const auto *MPT = cast<MemberPointerType>(T);
2068  CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
2069  Width = MPI.Width;
2070  Align = MPI.Align;
2071  break;
2072  }
2073  case Type::Complex: {
2074  // Complex types have the same alignment as their elements, but twice the
2075  // size.
2076  TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
2077  Width = EltInfo.Width * 2;
2078  Align = EltInfo.Align;
2079  break;
2080  }
2081  case Type::ObjCObject:
2082  return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
2083  case Type::Adjusted:
2084  case Type::Decayed:
2085  return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
2086  case Type::ObjCInterface: {
2087  const auto *ObjCI = cast<ObjCInterfaceType>(T);
2088  const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2089  Width = toBits(Layout.getSize());
2090  Align = toBits(Layout.getAlignment());
2091  break;
2092  }
2093  case Type::Record:
2094  case Type::Enum: {
2095  const auto *TT = cast<TagType>(T);
2096 
2097  if (TT->getDecl()->isInvalidDecl()) {
2098  Width = 8;
2099  Align = 8;
2100  break;
2101  }
2102 
2103  if (const auto *ET = dyn_cast<EnumType>(TT)) {
2104  const EnumDecl *ED = ET->getDecl();
2105  TypeInfo Info =
2107  if (unsigned AttrAlign = ED->getMaxAlignment()) {
2108  Info.Align = AttrAlign;
2109  Info.AlignIsRequired = true;
2110  }
2111  return Info;
2112  }
2113 
2114  const auto *RT = cast<RecordType>(TT);
2115  const RecordDecl *RD = RT->getDecl();
2116  const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2117  Width = toBits(Layout.getSize());
2118  Align = toBits(Layout.getAlignment());
2119  AlignIsRequired = RD->hasAttr<AlignedAttr>();
2120  break;
2121  }
2122 
2123  case Type::SubstTemplateTypeParm:
2124  return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
2125  getReplacementType().getTypePtr());
2126 
2127  case Type::Auto:
2128  case Type::DeducedTemplateSpecialization: {
2129  const auto *A = cast<DeducedType>(T);
2130  assert(!A->getDeducedType().isNull() &&
2131  "cannot request the size of an undeduced or dependent auto type");
2132  return getTypeInfo(A->getDeducedType().getTypePtr());
2133  }
2134 
2135  case Type::Paren:
2136  return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
2137 
2138  case Type::MacroQualified:
2139  return getTypeInfo(
2140  cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr());
2141 
2142  case Type::ObjCTypeParam:
2143  return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());
2144 
2145  case Type::Typedef: {
2146  const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl();
2147  TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
2148  // If the typedef has an aligned attribute on it, it overrides any computed
2149  // alignment we have. This violates the GCC documentation (which says that
2150  // attribute(aligned) can only round up) but matches its implementation.
2151  if (unsigned AttrAlign = Typedef->getMaxAlignment()) {
2152  Align = AttrAlign;
2153  AlignIsRequired = true;
2154  } else {
2155  Align = Info.Align;
2156  AlignIsRequired = Info.AlignIsRequired;
2157  }
2158  Width = Info.Width;
2159  break;
2160  }
2161 
2162  case Type::Elaborated:
2163  return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
2164 
2165  case Type::Attributed:
2166  return getTypeInfo(
2167  cast<AttributedType>(T)->getEquivalentType().getTypePtr());
2168 
2169  case Type::Atomic: {
2170  // Start with the base type information.
2171  TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
2172  Width = Info.Width;
2173  Align = Info.Align;
2174 
2175  if (!Width) {
2176  // An otherwise zero-sized type should still generate an
2177  // atomic operation.
2178  Width = Target->getCharWidth();
2179  assert(Align);
2180  } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2181  // If the size of the type doesn't exceed the platform's max
2182  // atomic promotion width, make the size and alignment more
2183  // favorable to atomic operations:
2184 
2185  // Round the size up to a power of 2.
2186  if (!llvm::isPowerOf2_64(Width))
2187  Width = llvm::NextPowerOf2(Width);
2188 
2189  // Set the alignment equal to the size.
2190  Align = static_cast<unsigned>(Width);
2191  }
2192  }
2193  break;
2194 
2195  case Type::Pipe:
2196  Width = Target->getPointerWidth(getTargetAddressSpace(LangAS::opencl_global));
2197  Align = Target->getPointerAlign(getTargetAddressSpace(LangAS::opencl_global));
2198  break;
2199  }
2200 
2201  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2202  return TypeInfo(Width, Align, AlignIsRequired);
2203 }
2204 
2205 unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2206  UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
2207  if (I != MemoizedUnadjustedAlign.end())
2208  return I->second;
2209 
2210  unsigned UnadjustedAlign;
2211  if (const auto *RT = T->getAs<RecordType>()) {
2212  const RecordDecl *RD = RT->getDecl();
2213  const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2214  UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2215  } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
2216  const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2217  UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2218  } else {
2219  UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
2220  }
2221 
2222  MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2223  return UnadjustedAlign;
2224 }
2225 
2227  unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
2228  // Target ppc64 with QPX: simd default alignment for pointer to double is 32.
2229  if ((getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64 ||
2230  getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64le) &&
2231  getTargetInfo().getABI() == "elfv1-qpx" &&
2232  T->isSpecificBuiltinType(BuiltinType::Double))
2233  SimdAlign = 256;
2234  return SimdAlign;
2235 }
2236 
2237 /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2239  return CharUnits::fromQuantity(BitSize / getCharWidth());
2240 }
2241 
/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}
2246 
2247 /// getTypeSizeInChars - Return the size of the specified type, in characters.
2248 /// This method does not work on incomplete types.
2250  return getTypeInfoInChars(T).first;
2251 }
2253  return getTypeInfoInChars(T).first;
2254 }
2255 
2256 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2257 /// characters. This method does not work on incomplete types.
2259  return toCharUnitsFromBits(getTypeAlign(T));
2260 }
2262  return toCharUnitsFromBits(getTypeAlign(T));
2263 }
2264 
2265 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
 2266 /// type, in characters, before alignment adjustments. This method does
2267 /// not work on incomplete types.
2270 }
2273 }
2274 
2275 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified
2276 /// type for the current target in bits. This can be different than the ABI
2277 /// alignment in cases where it is beneficial for performance to overalign
2278 /// a data type.
2279 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
2280  TypeInfo TI = getTypeInfo(T);
2281  unsigned ABIAlign = TI.Align;
2282 
2283  T = T->getBaseElementTypeUnsafe();
2284 
2285  // The preferred alignment of member pointers is that of a pointer.
2286  if (T->isMemberPointerType())
2288 
2289  if (!Target->allowsLargerPreferedTypeAlignment())
2290  return ABIAlign;
2291 
2292  // Double and long long should be naturally aligned if possible.
2293  if (const auto *CT = T->getAs<ComplexType>())
2294  T = CT->getElementType().getTypePtr();
2295  if (const auto *ET = T->getAs<EnumType>())
2296  T = ET->getDecl()->getIntegerType().getTypePtr();
2297  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
2298  T->isSpecificBuiltinType(BuiltinType::LongLong) ||
2299  T->isSpecificBuiltinType(BuiltinType::ULongLong))
2300  // Don't increase the alignment if an alignment attribute was specified on a
2301  // typedef declaration.
2302  if (!TI.AlignIsRequired)
2303  return std::max(ABIAlign, (unsigned)getTypeSize(T));
2304 
2305  return ABIAlign;
2306 }
2307 
2308 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2309 /// for __attribute__((aligned)) on this target, to be used if no alignment
2310 /// value is specified.
2313 }
2314 
2315 /// getAlignOfGlobalVar - Return the alignment in bits that should be given
2316 /// to a global variable of the specified type.
2318  uint64_t TypeSize = getTypeSize(T.getTypePtr());
2319  return std::max(getTypeAlign(T), getTargetInfo().getMinGlobalAlign(TypeSize));
2320 }
2321 
2322 /// getAlignOfGlobalVarInChars - Return the alignment in characters that
2323 /// should be given to a global variable of the specified type.
2326 }
2327 
2330  const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
2331  while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2332  Offset += Layout->getBaseClassOffset(Base);
2333  Layout = &getASTRecordLayout(Base);
2334  }
2335  return Offset;
2336 }
2337 
2338 /// DeepCollectObjCIvars -
2339 /// This routine first collects all declared, but not synthesized, ivars in
2340 /// super class and then collects all ivars, including those synthesized for
2341 /// current class. This routine is used for implementation of current class
2342 /// when all ivars, declared and synthesized are known.
2344  bool leafClass,
2345  SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
2346  if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2347  DeepCollectObjCIvars(SuperClass, false, Ivars);
2348  if (!leafClass) {
2349  for (const auto *I : OI->ivars())
2350  Ivars.push_back(I);
2351  } else {
2352  auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2353  for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2354  Iv= Iv->getNextIvar())
2355  Ivars.push_back(Iv);
2356  }
2357 }
2358 
2359 /// CollectInheritedProtocols - Collect all protocols in current class and
2360 /// those inherited by it.
2362  llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
2363  if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
2364  // We can use protocol_iterator here instead of
2365  // all_referenced_protocol_iterator since we are walking all categories.
2366  for (auto *Proto : OI->all_referenced_protocols()) {
2367  CollectInheritedProtocols(Proto, Protocols);
2368  }
2369 
2370  // Categories of this Interface.
2371  for (const auto *Cat : OI->visible_categories())
2372  CollectInheritedProtocols(Cat, Protocols);
2373 
2374  if (ObjCInterfaceDecl *SD = OI->getSuperClass())
2375  while (SD) {
2376  CollectInheritedProtocols(SD, Protocols);
2377  SD = SD->getSuperClass();
2378  }
2379  } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
2380  for (auto *Proto : OC->protocols()) {
2381  CollectInheritedProtocols(Proto, Protocols);
2382  }
2383  } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
2384  // Insert the protocol.
2385  if (!Protocols.insert(
2386  const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
2387  return;
2388 
2389  for (auto *Proto : OP->protocols())
2390  CollectInheritedProtocols(Proto, Protocols);
2391  }
2392 }
2393 
2395  const RecordDecl *RD) {
2396  assert(RD->isUnion() && "Must be union type");
2397  CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());
2398 
2399  for (const auto *Field : RD->fields()) {
2400  if (!Context.hasUniqueObjectRepresentations(Field->getType()))
2401  return false;
2402  CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType());
2403  if (FieldSize != UnionSize)
2404  return false;
2405  }
2406  return !RD->field_empty();
2407 }
2408 
2409 static bool isStructEmpty(QualType Ty) {
2410  const RecordDecl *RD = Ty->castAs<RecordType>()->getDecl();
2411 
2412  if (!RD->field_empty())
2413  return false;
2414 
2415  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD))
2416  return ClassDecl->isEmpty();
2417 
2418  return true;
2419 }
2420 
2423  const RecordDecl *RD) {
2424  assert(!RD->isUnion() && "Must be struct/class type");
2425  const auto &Layout = Context.getASTRecordLayout(RD);
2426 
2427  int64_t CurOffsetInBits = 0;
2428  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) {
2429  if (ClassDecl->isDynamicClass())
2430  return llvm::None;
2431 
2433  for (const auto Base : ClassDecl->bases()) {
2434  // Empty types can be inherited from, and non-empty types can potentially
2435  // have tail padding, so just make sure there isn't an error.
2436  if (!isStructEmpty(Base.getType())) {
2438  Context, Base.getType()->getAs<RecordType>()->getDecl());
2439  if (!Size)
2440  return llvm::None;
2441  Bases.emplace_back(Base.getType(), Size.getValue());
2442  }
2443  }
2444 
2445  llvm::sort(Bases, [&](const std::pair<QualType, int64_t> &L,
2446  const std::pair<QualType, int64_t> &R) {
2447  return Layout.getBaseClassOffset(L.first->getAsCXXRecordDecl()) <
2448  Layout.getBaseClassOffset(R.first->getAsCXXRecordDecl());
2449  });
2450 
2451  for (const auto Base : Bases) {
2452  int64_t BaseOffset = Context.toBits(
2453  Layout.getBaseClassOffset(Base.first->getAsCXXRecordDecl()));
2454  int64_t BaseSize = Base.second;
2455  if (BaseOffset != CurOffsetInBits)
2456  return llvm::None;
2457  CurOffsetInBits = BaseOffset + BaseSize;
2458  }
2459  }
2460 
2461  for (const auto *Field : RD->fields()) {
2462  if (!Field->getType()->isReferenceType() &&
2463  !Context.hasUniqueObjectRepresentations(Field->getType()))
2464  return llvm::None;
2465 
2466  int64_t FieldSizeInBits =
2467  Context.toBits(Context.getTypeSizeInChars(Field->getType()));
2468  if (Field->isBitField()) {
2469  int64_t BitfieldSize = Field->getBitWidthValue(Context);
2470 
2471  if (BitfieldSize > FieldSizeInBits)
2472  return llvm::None;
2473  FieldSizeInBits = BitfieldSize;
2474  }
2475 
2476  int64_t FieldOffsetInBits = Context.getFieldOffset(Field);
2477 
2478  if (FieldOffsetInBits != CurOffsetInBits)
2479  return llvm::None;
2480 
2481  CurOffsetInBits = FieldSizeInBits + FieldOffsetInBits;
2482  }
2483 
2484  return CurOffsetInBits;
2485 }
2486 
2488  // C++17 [meta.unary.prop]:
2489  // The predicate condition for a template specialization
2490  // has_unique_object_representations<T> shall be
2491  // satisfied if and only if:
2492  // (9.1) - T is trivially copyable, and
2493  // (9.2) - any two objects of type T with the same value have the same
2494  // object representation, where two objects
2495  // of array or non-union class type are considered to have the same value
2496  // if their respective sequences of
2497  // direct subobjects have the same values, and two objects of union type
2498  // are considered to have the same
2499  // value if they have the same active member and the corresponding members
2500  // have the same value.
2501  // The set of scalar types for which this condition holds is
2502  // implementation-defined. [ Note: If a type has padding
2503  // bits, the condition does not hold; otherwise, the condition holds true
2504  // for unsigned integral types. -- end note ]
2505  assert(!Ty.isNull() && "Null QualType sent to unique object rep check");
2506 
2507  // Arrays are unique only if their element type is unique.
2508  if (Ty->isArrayType())
2510 
2511  // (9.1) - T is trivially copyable...
2512  if (!Ty.isTriviallyCopyableType(*this))
2513  return false;
2514 
2515  // All integrals and enums are unique.
2516  if (Ty->isIntegralOrEnumerationType())
2517  return true;
2518 
2519  // All other pointers are unique.
2520  if (Ty->isPointerType())
2521  return true;
2522 
2523  if (Ty->isMemberPointerType()) {
2524  const auto *MPT = Ty->getAs<MemberPointerType>();
2525  return !ABI->getMemberPointerInfo(MPT).HasPadding;
2526  }
2527 
2528  if (Ty->isRecordType()) {
2529  const RecordDecl *Record = Ty->getAs<RecordType>()->getDecl();
2530 
2531  if (Record->isInvalidDecl())
2532  return false;
2533 
2534  if (Record->isUnion())
2535  return unionHasUniqueObjectRepresentations(*this, Record);
2536 
2537  Optional<int64_t> StructSize =
2538  structHasUniqueObjectRepresentations(*this, Record);
2539 
2540  return StructSize &&
2541  StructSize.getValue() == static_cast<int64_t>(getTypeSize(Ty));
2542  }
2543 
2544  // FIXME: More cases to handle here (list by rsmith):
2545  // vectors (careful about, eg, vector of 3 foo)
2546  // _Complex int and friends
2547  // _Atomic T
2548  // Obj-C block pointers
2549  // Obj-C object pointers
2550  // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
2551  // clk_event_t, queue_t, reserve_id_t)
2552  // There're also Obj-C class types and the Obj-C selector type, but I think it
2553  // makes sense for those to return false here.
2554 
2555  return false;
2556 }
2557 
2559  unsigned count = 0;
2560  // Count ivars declared in class extension.
2561  for (const auto *Ext : OI->known_extensions())
2562  count += Ext->ivar_size();
2563 
2564  // Count ivar defined in this class's implementation. This
2565  // includes synthesized ivars.
2566  if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
2567  count += ImplDecl->ivar_size();
2568 
2569  return count;
2570 }
2571 
2573  if (!E)
2574  return false;
2575 
2576  // nullptr_t is always treated as null.
2577  if (E->getType()->isNullPtrType()) return true;
2578 
2579  if (E->getType()->isAnyPointerType() &&
2582  return true;
2583 
2584  // Unfortunately, __null has type 'int'.
2585  if (isa<GNUNullExpr>(E)) return true;
2586 
2587  return false;
2588 }
2589 
2590 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none
2591 /// exists.
2593  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2594  I = ObjCImpls.find(D);
2595  if (I != ObjCImpls.end())
2596  return cast<ObjCImplementationDecl>(I->second);
2597  return nullptr;
2598 }
2599 
2600 /// Get the implementation of ObjCCategoryDecl, or nullptr if none
2601 /// exists.
2603  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2604  I = ObjCImpls.find(D);
2605  if (I != ObjCImpls.end())
2606  return cast<ObjCCategoryImplDecl>(I->second);
2607  return nullptr;
2608 }
2609 
2610 /// Set the implementation of ObjCInterfaceDecl.
2612  ObjCImplementationDecl *ImplD) {
2613  assert(IFaceD && ImplD && "Passed null params");
2614  ObjCImpls[IFaceD] = ImplD;
2615 }
2616 
2617 /// Set the implementation of ObjCCategoryDecl.
2619  ObjCCategoryImplDecl *ImplD) {
2620  assert(CatD && ImplD && "Passed null params");
2621  ObjCImpls[CatD] = ImplD;
2622 }
2623 
2624 const ObjCMethodDecl *
2626  return ObjCMethodRedecls.lookup(MD);
2627 }
2628 
2630  const ObjCMethodDecl *Redecl) {
2631  assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
2632  ObjCMethodRedecls[MD] = Redecl;
2633 }
2634 
2636  const NamedDecl *ND) const {
2637  if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
2638  return ID;
2639  if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
2640  return CD->getClassInterface();
2641  if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
2642  return IMD->getClassInterface();
2643 
2644  return nullptr;
2645 }
2646 
2647 /// Get the copy initialization expression of VarDecl, or nullptr if
2648 /// none exists.
2651  assert(VD && "Passed null params");
2652  assert(VD->hasAttr<BlocksAttr>() &&
2653  "getBlockVarCopyInits - not __block var");
2654  auto I = BlockVarCopyInits.find(VD);
2655  if (I != BlockVarCopyInits.end())
2656  return I->second;
2657  return {nullptr, false};
2658 }
2659 
2660 /// Set the copy initialization expression of a block var decl.
2662  bool CanThrow) {
2663  assert(VD && CopyExpr && "Passed null params");
2664  assert(VD->hasAttr<BlocksAttr>() &&
2665  "setBlockVarCopyInits - not __block var");
2666  BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
2667 }
2668 
2670  unsigned DataSize) const {
2671  if (!DataSize)
2672  DataSize = TypeLoc::getFullDataSizeForType(T);
2673  else
2674  assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
2675  "incorrect data size provided to CreateTypeSourceInfo!");
2676 
2677  auto *TInfo =
2678  (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
2679  new (TInfo) TypeSourceInfo(T);
2680  return TInfo;
2681 }
2682 
2684  SourceLocation L) const {
2686  DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
2687  return DI;
2688 }
2689 
2690 const ASTRecordLayout &
2692  return getObjCLayout(D, nullptr);
2693 }
2694 
2695 const ASTRecordLayout &
2697  const ObjCImplementationDecl *D) const {
2698  return getObjCLayout(D->getClassInterface(), D);
2699 }
2700 
2701 //===----------------------------------------------------------------------===//
2702 // Type creation/memoization methods
2703 //===----------------------------------------------------------------------===//
2704 
/// Return \p baseType qualified with \p quals, uniquing the underlying
/// ExtQuals node in ExtQualNodes. Fast qualifiers are carried in the
/// returned QualType itself rather than in the node.
QualType
ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
  // Peel off the fast qualifiers: they are encoded in the QualType pointer,
  // so only the remaining (non-fast) qualifiers participate in uniquing.
  unsigned fastQuals = quals.getFastQualifiers();
  quals.removeFastQualifiers();

  // Check if we've already instantiated this type.
  llvm::FoldingSetNodeID ID;
  ExtQuals::Profile(ID, baseType, quals);
  void *insertPos = nullptr;
  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
    assert(eq->getQualifiers() == quals);
    return QualType(eq, fastQuals);
  }

  // If the base type is not canonical, make the appropriate canonical type
  // by qualifying the canonical form of the base type.
  QualType canon;
  if (!baseType->isCanonicalUnqualified()) {
    SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
    canonSplit.Quals.addConsistentQualifiers(quals);
    canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);

    // Re-find the insert position: the recursive call above may have
    // inserted into ExtQualNodes, which can invalidate insertPos.
    (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
  }

  auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
  ExtQualNodes.InsertNode(eq, insertPos);
  return QualType(eq, fastQuals);
}
2734 
2736  LangAS AddressSpace) const {
2737  QualType CanT = getCanonicalType(T);
2738  if (CanT.getAddressSpace() == AddressSpace)
2739  return T;
2740 
2741  // If we are composing extended qualifiers together, merge together
2742  // into one ExtQuals node.
2743  QualifierCollector Quals;
2744  const Type *TypeNode = Quals.strip(T);
2745 
2746  // If this type already has an address space specified, it cannot get
2747  // another one.
2748  assert(!Quals.hasAddressSpace() &&
2749  "Type cannot be in multiple addr spaces!");
2750  Quals.addAddressSpace(AddressSpace);
2751 
2752  return getExtQualType(TypeNode, Quals);
2753 }
2754 
2756  // If we are composing extended qualifiers together, merge together
2757  // into one ExtQuals node.
2758  QualifierCollector Quals;
2759  const Type *TypeNode = Quals.strip(T);
2760 
2761  // If the qualifier doesn't have an address space just return it.
2762  if (!Quals.hasAddressSpace())
2763  return T;
2764 
2765  Quals.removeAddressSpace();
2766 
2767  // Removal of the address space can mean there are no longer any
2768  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
2769  // or required.
2770  if (Quals.hasNonFastQualifiers())
2771  return getExtQualType(TypeNode, Quals);
2772  else
2773  return QualType(TypeNode, Quals.getFastQualifiers());
2774 }
2775 
2777  Qualifiers::GC GCAttr) const {
2778  QualType CanT = getCanonicalType(T);
2779  if (CanT.getObjCGCAttr() == GCAttr)
2780  return T;
2781 
2782  if (const auto *ptr = T->getAs<PointerType>()) {
2783  QualType Pointee = ptr->getPointeeType();
2784  if (Pointee->isAnyPointerType()) {
2785  QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
2786  return getPointerType(ResultType);
2787  }
2788  }
2789 
2790  // If we are composing extended qualifiers together, merge together
2791  // into one ExtQuals node.
2792  QualifierCollector Quals;
2793  const Type *TypeNode = Quals.strip(T);
2794 
2795  // If this type already has an ObjCGC specified, it cannot get
2796  // another one.
2797  assert(!Quals.hasObjCGCAttr() &&
2798  "Type cannot have multiple ObjCGCs!");
2799  Quals.addObjCGCAttr(GCAttr);
2800 
2801  return getExtQualType(TypeNode, Quals);
2802 }
2803 
2805  FunctionType::ExtInfo Info) {
2806  if (T->getExtInfo() == Info)
2807  return T;
2808 
2809  QualType Result;
2810  if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
2811  Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
2812  } else {
2813  const auto *FPT = cast<FunctionProtoType>(T);
2814  FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
2815  EPI.ExtInfo = Info;
2816  Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
2817  }
2818 
2819  return cast<FunctionType>(Result.getTypePtr());
2820 }
2821 
2823  QualType ResultType) {
2824  FD = FD->getMostRecentDecl();
2825  while (true) {
2826  const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
2827  FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
2828  FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
2829  if (FunctionDecl *Next = FD->getPreviousDecl())
2830  FD = Next;
2831  else
2832  break;
2833  }
2835  L->DeducedReturnType(FD, ResultType);
2836 }
2837 
/// Get a function type and produce the equivalent function type with the
/// specified exception specification. Type sugar that can be present on a
/// declaration of a function with an exception specification is permitted
/// and preserved. Other type sugar (for instance, typedefs) is not.
  // Might have some parens: recurse through the ParenType sugar and re-wrap.
  if (const auto *PT = dyn_cast<ParenType>(Orig))
    return getParenType(
        getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI));

  // Might be wrapped in a macro qualified type: recurse and re-wrap.
  if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig))
    return getMacroQualifiedType(
        getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI),
        MQT->getMacroIdentifier());

  // Might have a calling-convention attribute: rebuild both the modified
  // and the equivalent type with the new exception specification.
  if (const auto *AT = dyn_cast<AttributedType>(Orig))
    return getAttributedType(
        AT->getAttrKind(),
        getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI),
        getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI));

  // Anything else must be a function type. Rebuild it with the new exception
  // specification.
  const auto *Proto = Orig->getAs<FunctionProtoType>();
  return getFunctionType(
      Proto->getReturnType(), Proto->getParamTypes(),
      Proto->getExtProtoInfo().withExceptionSpec(ESI));
}
2869 
                                                  QualType U) {
  // Identical types trivially match; from C++17 the exception specification
  // is part of the function type, so also compare with the specs stripped.
  return hasSameType(T, U) ||
         (getLangOpts().CPlusPlus17 &&
}
2877 
                                     bool AsWritten) {
  // Update the semantic type of the declaration.
  QualType Updated =
  FD->setType(Updated);

  // Caller only wanted the semantic type updated: done.
  if (!AsWritten)
    return;

  // Update the type in the type source information too.
  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
    // If the type and the type-as-written differ, we may need to update
    // the type-as-written too.
    if (TSInfo->getType() != FD->getType())
      Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);

    // FIXME: When we get proper type location information for exceptions,
    // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
    // up the TypeSourceInfo;
    assert(TypeLoc::getFullDataSizeForType(Updated) ==
               TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
           "TypeLoc size mismatch from updating exception specification");
    TSInfo->overrideType(Updated);
  }
}
2905 
/// getComplexType - Return the uniqued reference to the type for a complex
/// number with the specified element type.
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ComplexType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(CT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getComplexType(getCanonicalType(T));

    // Get the new insert position for the node we care about: the recursive
    // call above may have grown the folding set, invalidating InsertPos.
    ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
  Types.push_back(New);
  ComplexTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
2933 
/// getPointerType - Return the uniqued reference to the type for a pointer to
/// the specified type.
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  PointerType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about: the recursive
    // call above may have grown the folding set, invalidating InsertPos.
    PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment) PointerType(T, Canonical);
  Types.push_back(New);
  PointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
2961 
  // Unique AdjustedType nodes on the (original, adjusted) type pair.
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, New);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  // The canonical form of an adjusted type is the canonical form of the
  // type it was adjusted to.
  QualType Canonical = getCanonicalType(New);

  // Get the new insert position for the node we care about.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, TypeAlignment)
      AdjustedType(Type::Adjusted, Orig, New, Canonical);
  Types.push_back(AT);
  AdjustedTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}
2982 
  assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");

  // The type T decays to (array -> pointer-to-element, function -> pointer).
  QualType Decayed;

  // C99 6.7.5.3p7:
  //   A declaration of a parameter as "array of type" shall be
  //   adjusted to "qualified pointer to type", where the type
  //   qualifiers (if any) are those specified within the [ and ] of
  //   the array type derivation.
  if (T->isArrayType())
    Decayed = getArrayDecayedType(T);

  // C99 6.7.5.3p8:
  //   A declaration of a parameter as "function returning type"
  //   shall be adjusted to "pointer to function returning type", as
  //   in 6.3.2.1.
  if (T->isFunctionType())
    Decayed = getPointerType(T);

  // DecayedType is uniqued in the AdjustedTypes set; it is profiled exactly
  // like an AdjustedType from T to Decayed.
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, T, Decayed);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  // Canonically, a decayed type is just the canonical decayed-to type.
  QualType Canonical = getCanonicalType(Decayed);

  // Get the new insert position for the node we care about.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical);
  Types.push_back(AT);
  AdjustedTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}
3021 
/// getBlockPointerType - Return the uniqued reference to the type for
/// a pointer to the specified block.
  assert(T->isFunctionType() && "block of function types only");
  // Unique pointers, to guarantee there is only one block of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  // NOTE(review): the Profile(...) call seeding ID appears to have been
  // dropped from this rendering - verify against upstream.

  void *InsertPos = nullptr;
  if (BlockPointerType *PT =
        BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the block pointee type isn't canonical, this won't be a canonical
  // type either so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getBlockPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about: the recursive
    // call above may have grown the folding set, invalidating InsertPos.
    BlockPointerType *NewIP =
      BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
  Types.push_back(New);
  BlockPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
3052 
3053 /// getLValueReferenceType - Return the uniqued reference to the type for an
3054 /// lvalue reference to the specified type.
3055 QualType
3056 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
3057  assert(getCanonicalType(T) != OverloadTy &&
3058  "Unresolved overloaded function type");
3059 
3060  // Unique pointers, to guarantee there is only one pointer of a particular
3061  // structure.
3062  llvm::FoldingSetNodeID ID;
3063  ReferenceType::Profile(ID, T, SpelledAsLValue);
3064 
3065  void *InsertPos = nullptr;
3066  if (LValueReferenceType *RT =
3067  LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
3068  return QualType(RT, 0);
3069 
3070  const auto *InnerRef = T->getAs<ReferenceType>();
3071 
3072  // If the referencee type isn't canonical, this won't be a canonical type
3073  // either, so fill in the canonical type field.
3074  QualType Canonical;
3075  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
3076  QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
3077  Canonical = getLValueReferenceType(getCanonicalType(PointeeType));
3078 
3079  // Get the new insert position for the node we care about.
3080  LValueReferenceType *NewIP =
3081  LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
3082  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3083  }
3084 
3085  auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
3086  SpelledAsLValue);
3087  Types.push_back(New);
3088  LValueReferenceTypes.InsertNode(New, InsertPos);
3089 
3090  return QualType(New, 0);
3091 }
3092 
/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, false);

  void *InsertPos = nullptr;
  if (RValueReferenceType *RT =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field. A reference-to-reference
  // also canonicalizes to a reference to the innermost referencee.
  QualType Canonical;
  if (InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getRValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about: the recursive
    // call above may have grown the folding set, invalidating InsertPos.
    RValueReferenceType *NewIP =
      RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
  Types.push_back(New);
  RValueReferenceTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
3126 
/// getMemberPointerType - Return the uniqued reference to the type for a
/// member pointer to the specified type, in the specified class.
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  MemberPointerType::Profile(ID, T, Cls);

  void *InsertPos = nullptr;
  if (MemberPointerType *PT =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee or class type isn't canonical, this won't be a canonical
  // type either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
    // NOTE(review): the assignment that builds the canonical member-pointer
    // type appears to have been dropped from this rendering - verify upstream.

    // Get the new insert position for the node we care about.
    MemberPointerType *NewIP =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
  Types.push_back(New);
  MemberPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
3156 
/// getConstantArrayType - Return the unique reference to the type for an
/// array of the specified element type.
                                          const llvm::APInt &ArySizeIn,
                                          unsigned IndexTypeQuals) const {
  assert((EltTy->isDependentType() ||
          EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
         "Constant array of VLAs is illegal!");

  // Convert the array size into a canonical width matching the pointer size for
  // the target, so equal sizes of different bit widths unique to one node.
  llvm::APInt ArySize(ArySizeIn);
  ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());

  llvm::FoldingSetNodeID ID;
  ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, IndexTypeQuals);

  void *InsertPos = nullptr;
  if (ConstantArrayType *ATP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(ATP, 0);

  // If the element type isn't canonical or has qualifiers, this won't
  // be a canonical type either, so fill in the canonical type field.
  // Qualifiers are pulled off the element and reapplied to the array.
  QualType Canon;
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize,
                                 ASM, IndexTypeQuals);
    Canon = getQualifiedType(Canon, canonSplit.Quals);

    // Get the new insert position for the node we care about: the recursive
    // call above may have grown the folding set, invalidating InsertPos.
    ConstantArrayType *NewIP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this,TypeAlignment)
    ConstantArrayType(EltTy, Canon, ArySize, ASM, IndexTypeQuals);
  ConstantArrayTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}
3201 
/// getVariableArrayDecayedType - Turns the given type, which may be
/// variably-modified, into the corresponding type with all the known
/// sizes replaced with [*].
  // Vastly most common case.
  if (!type->isVariablyModifiedType()) return type;

  QualType result;

  // Desugar fully, remembering the stripped top-level qualifiers so they
  // can be reapplied to the rebuilt type at the end.
  SplitQualType split = type.getSplitDesugaredType();
  const Type *ty = split.Ty;
  switch (ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("didn't desugar past all non-canonical types?");

  // These types should never be variably-modified.
  case Type::Builtin:
  case Type::Complex:
  case Type::Vector:
  case Type::DependentVector:
  case Type::ExtVector:
  case Type::DependentSizedExtVector:
  case Type::DependentAddressSpace:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
  case Type::Record:
  case Type::Enum:
  case Type::UnresolvedUsing:
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::UnaryTransform:
  case Type::DependentName:
  case Type::InjectedClassName:
  case Type::TemplateSpecialization:
  case Type::DependentTemplateSpecialization:
  case Type::TemplateTypeParm:
  case Type::SubstTemplateTypeParmPack:
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::PackExpansion:
    llvm_unreachable("type should never be variably-modified");

  // These types can be variably-modified but should never need to
  // further decay.
  case Type::FunctionNoProto:
  case Type::FunctionProto:
  case Type::BlockPointer:
  case Type::MemberPointer:
  case Type::Pipe:
    return type;

  // These types can be variably-modified. All these modifications
  // preserve structure except as noted by comments.
  // TODO: if we ever care about optimizing VLAs, there are no-op
  // optimizations available here.
  case Type::Pointer:
                  cast<PointerType>(ty)->getPointeeType()));
    break;

  case Type::LValueReference: {
    const auto *lv = cast<LValueReferenceType>(ty);
    result = getLValueReferenceType(
                 getVariableArrayDecayedType(lv->getPointeeType()),
                 lv->isSpelledAsLValue());
    break;
  }

  case Type::RValueReference: {
    const auto *lv = cast<RValueReferenceType>(ty);
    result = getRValueReferenceType(
                 getVariableArrayDecayedType(lv->getPointeeType()));
    break;
  }

  case Type::Atomic: {
    const auto *at = cast<AtomicType>(ty);
    result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
    break;
  }

  case Type::ConstantArray: {
    const auto *cat = cast<ConstantArrayType>(ty);
    result = getConstantArrayType(
                 getVariableArrayDecayedType(cat->getElementType()),
                 cat->getSize(),
                 cat->getSizeModifier(),
                 cat->getIndexTypeCVRQualifiers());
    break;
  }

  case Type::DependentSizedArray: {
    const auto *dat = cast<DependentSizedArrayType>(ty);
    result = getDependentSizedArrayType(
                 getVariableArrayDecayedType(dat->getElementType()),
                 dat->getSizeExpr(),
                 dat->getSizeModifier(),
                 dat->getIndexTypeCVRQualifiers(),
                 dat->getBracketsRange());
    break;
  }

  // Turn incomplete types into [*] types.
  case Type::IncompleteArray: {
    const auto *iat = cast<IncompleteArrayType>(ty);
    result = getVariableArrayType(
                 getVariableArrayDecayedType(iat->getElementType()),
                 /*size*/ nullptr,
                 iat->getIndexTypeCVRQualifiers(),
                 SourceRange());
    break;
  }

  // Turn VLA types into [*] types.
  case Type::VariableArray: {
    const auto *vat = cast<VariableArrayType>(ty);
    result = getVariableArrayType(
                 getVariableArrayDecayedType(vat->getElementType()),
                 /*size*/ nullptr,
                 vat->getIndexTypeCVRQualifiers(),
                 vat->getBracketsRange());
    break;
  }
  }

  // Apply the top-level qualifiers from the original.
  return getQualifiedType(result, split.Quals);
}
3337 
/// getVariableArrayType - Returns a non-unique reference to the type for a
/// variable array of the specified element type.
                                          Expr *NumElts,
                                          unsigned IndexTypeQuals,
                                          SourceRange Brackets) const {
  // Since we don't unique expressions, it isn't possible to unique VLA's
  // that have an expression provided for their size.
  QualType Canon;

  // Be sure to pull qualifiers off the element type and reapply them to
  // the array when building the canonical form.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
                                 IndexTypeQuals, Brackets);
    Canon = getQualifiedType(Canon, canonSplit.Quals);
  }

  auto *New = new (*this, TypeAlignment)
    VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);

  // VLA nodes are never uniqued; track every one created.
  VariableArrayTypes.push_back(New);
  Types.push_back(New);
  return QualType(New, 0);
}
3364 
/// getDependentSizedArrayType - Returns a non-unique reference to
/// the type for a dependently-sized array of the specified element
/// type.
                                                Expr *numElements,
                                                unsigned elementTypeQuals,
                                                SourceRange brackets) const {
  assert((!numElements || numElements->isTypeDependent() ||
          numElements->isValueDependent()) &&
         "Size must be type- or value-dependent!");

  // Dependently-sized array types that do not have a specified number
  // of elements will have their sizes deduced from a dependent
  // initializer. We do no canonicalization here at all, which is okay
  // because they can't be used in most locations.
  if (!numElements) {
    auto *newType
      = new (*this, TypeAlignment)
          DependentSizedArrayType(*this, elementType, QualType(),
                                  numElements, ASM, elementTypeQuals,
                                  brackets);
    Types.push_back(newType);
    return QualType(newType, 0);
  }

  // Otherwise, we actually build a new type every time, but we
  // also build a canonical type.

  SplitQualType canonElementType = getCanonicalType(elementType).split();

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
                                      QualType(canonElementType.Ty, 0),
                                      ASM, elementTypeQuals, numElements);

  // Look for an existing type with these properties.
  DependentSizedArrayType *canonTy =
    DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);

  // If we don't have one, build one.
  if (!canonTy) {
    canonTy = new (*this, TypeAlignment)
      DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
                              QualType(), numElements, ASM, elementTypeQuals,
                              brackets);
    DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  // Apply qualifiers from the element type to the array.
  QualType canon = getQualifiedType(QualType(canonTy,0),
                                    canonElementType.Quals);

  // If we didn't need extra canonicalization for the element type or the size
  // expression, then just use that as our result.
  if (QualType(canonElementType.Ty, 0) == elementType &&
      canonTy->getSizeExpr() == numElements)
    return canon;

  // Otherwise, we need to build a type which follows the spelling
  // of the element type.
  auto *sugaredType
    = new (*this, TypeAlignment)
        DependentSizedArrayType(*this, elementType, canon, numElements,
                                ASM, elementTypeQuals, brackets);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}
3435 
                                            unsigned elementTypeQuals) const {
  // Incomplete (unsized) arrays are uniqued on element type, size modifier
  // and index-type qualifiers.
  llvm::FoldingSetNodeID ID;
  IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);

  void *insertPos = nullptr;
  if (IncompleteArrayType *iat =
       IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
    return QualType(iat, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field. We also have to pull
  // qualifiers off the element type.
  QualType canon;

  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(elementType).split();
    canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
                                   ASM, elementTypeQuals);
    canon = getQualifiedType(canon, canonSplit.Quals);

    // Get the new insert position for the node we care about: the recursive
    // call above may have grown the folding set, invalidating insertPos.
    IncompleteArrayType *existing =
      IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
    assert(!existing && "Shouldn't be in the map!"); (void) existing;
  }

  auto *newType = new (*this, TypeAlignment)
    IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);

  IncompleteArrayTypes.InsertNode(newType, insertPos);
  Types.push_back(newType);
  return QualType(newType, 0);
}
3471 
3472 /// getVectorType - Return the unique reference to a vector type of
3473 /// the specified element type and size. VectorType must be a built-in type.
3474 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
3475  VectorType::VectorKind VecKind) const {
3476  assert(vecType->isBuiltinType());
3477 
3478  // Check if we've already instantiated a vector of this type.
3479  llvm::FoldingSetNodeID ID;
3480  VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
3481 
3482  void *InsertPos = nullptr;
3483  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
3484  return QualType(VTP, 0);
3485 
3486  // If the element type isn't canonical, this won't be a canonical type either,
3487  // so fill in the canonical type field.
3488  QualType Canonical;
3489  if (!vecType.isCanonical()) {
3490  Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);
3491 
3492  // Get the new insert position for the node we care about.
3493  VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3494  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3495  }
3496  auto *New = new (*this, TypeAlignment)
3497  VectorType(vecType, NumElts, Canonical, VecKind);
3498  VectorTypes.InsertNode(New, InsertPos);
3499  Types.push_back(New);
3500  return QualType(New, 0);
3501 }
3502 
3503 QualType
3505  SourceLocation AttrLoc,
3506  VectorType::VectorKind VecKind) const {
3507  llvm::FoldingSetNodeID ID;
3508  DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr,
3509  VecKind);
3510  void *InsertPos = nullptr;
3511  DependentVectorType *Canon =
3512  DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3513  DependentVectorType *New;
3514 
3515  if (Canon) {
3516  New = new (*this, TypeAlignment) DependentVectorType(
3517  *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
3518  } else {
3519  QualType CanonVecTy = getCanonicalType(VecType);
3520  if (CanonVecTy == VecType) {
3521  New = new (*this, TypeAlignment) DependentVectorType(
3522  *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind);
3523 
3524  DependentVectorType *CanonCheck =
3525  DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3526  assert(!CanonCheck &&
3527  "Dependent-sized vector_size canonical type broken");
3528  (void)CanonCheck;
3529  DependentVectorTypes.InsertNode(New, InsertPos);
3530  } else {
3531  QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
3532  SourceLocation());
3533  New = new (*this, TypeAlignment) DependentVectorType(
3534  *this, VecType, Canon, SizeExpr, AttrLoc, VecKind);
3535  }
3536  }
3537 
3538  Types.push_back(New);
3539  return QualType(New, 0);
3540 }
3541 
/// getExtVectorType - Return the unique reference to an extended vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType
ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
  assert(vecType->isBuiltinType() || vecType->isDependentType());

  // Check if we've already instantiated a vector of this type.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
  void *InsertPos = nullptr;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);

  // If the element type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);

    // Get the new insert position for the node we care about: the recursive
    // call above may have grown the folding set, invalidating InsertPos.
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment)
    ExtVectorType(vecType, NumElts, Canonical);
  VectorTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}
3572 
QualType
                                                  Expr *SizeExpr,
                                                  SourceLocation AttrLoc) const {
  llvm::FoldingSetNodeID ID;
                                       SizeExpr);

  void *InsertPos = nullptr;
    = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (Canon) {
    // We already have a canonical version of this array type; use it as
    // the canonical type for a newly-built type.
    New = new (*this, TypeAlignment)
      DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0),
                                  SizeExpr, AttrLoc);
  } else {
    QualType CanonVecTy = getCanonicalType(vecType);
    if (CanonVecTy == vecType) {
      // The element type is canonical: this node itself is the canonical
      // dependent ext-vector type (empty canonical field); register it.
      New = new (*this, TypeAlignment)
        DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr,
                                    AttrLoc);

      DependentSizedExtVectorType *CanonCheck
        = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
      (void)CanonCheck;
      DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
    } else {
      // Non-canonical element type: build the canonical node recursively,
      // then a sugared node that points at it.
      QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
                                                      SourceLocation());
      New = new (*this, TypeAlignment)
        DependentSizedExtVectorType(*this, vecType, Canon, SizeExpr, AttrLoc);
    }
  }

  Types.push_back(New);
  return QualType(New, 0);
}
3614 
                                                 Expr *AddrSpaceExpr,
                                                 SourceLocation AttrLoc) const {
  assert(AddrSpaceExpr->isInstantiationDependent());

  QualType canonPointeeType = getCanonicalType(PointeeType);

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
                                     AddrSpaceExpr);

  DependentAddressSpaceType *canonTy =
    DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);

  // Build and register the canonical node (canonical pointee type, empty
  // canonical field) if one does not exist yet.
  if (!canonTy) {
    canonTy = new (*this, TypeAlignment)
      DependentAddressSpaceType(*this, canonPointeeType,
                                QualType(), AddrSpaceExpr, AttrLoc);
    DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  // No sugar involved: the canonical node is the result.
  if (canonPointeeType == PointeeType &&
      canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
    return QualType(canonTy, 0);

  // Otherwise build a sugared node whose canonical type is canonTy.
  auto *sugaredType
    = new (*this, TypeAlignment)
        DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0),
                                  AddrSpaceExpr, AttrLoc);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}
3649 
/// Determine whether \p T is canonical as the result type of a function.
/// Beyond T.isCanonical(), function result types must satisfy additional
/// constraints (see getCanonicalFunctionResultType, which strips ObjC ARC
/// lifetime qualifiers from canonical result types).
  return T.isCanonical() &&
}
3656 
/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
QualType
                                   const FunctionType::ExtInfo &Info) const {
  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionNoProtoType::Profile(ID, ResultTy, Info);

  void *InsertPos = nullptr;
  if (FunctionNoProtoType *FT =
        FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(FT, 0);

  // If the result type is not canonical as a function result, build the
  // canonical node first and record it in the canonical-type field.
  QualType Canonical;
  if (!isCanonicalResultType(ResultTy)) {
    Canonical =

    // Get the new insert position for the node we care about: the call above
    // may have grown the folding set, invalidating InsertPos.
    FunctionNoProtoType *NewIP =
      FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, TypeAlignment)
    FunctionNoProtoType(ResultTy, Canonical, Info);
  Types.push_back(New);
  FunctionNoProtoTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
3688 
  CanQualType CanResultType = getCanonicalType(ResultType);

  // Canonical result types do not have ARC lifetime qualifiers.
  if (CanResultType.getQualifiers().hasObjCLifetime()) {
    Qualifiers Qs = CanResultType.getQualifiers();
    Qs.removeObjCLifetime();
    // Requalify the unqualified canonical type with the remaining (non-ARC)
    // qualifiers.
        getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
  }

  return CanResultType;
}
3703 
    const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
  // The absence of an exception specification is always canonical.
  if (ESI.Type == EST_None)
    return true;
  // Before C++17 the exception specification is not part of the canonical
  // function type, so any other specification is non-canonical.
  if (!NoexceptInType)
    return false;

  // C++17 onwards: exception specification is part of the type, as a simple
  // boolean "can this function type throw".
  if (ESI.Type == EST_BasicNoexcept)
    return true;

  // A noexcept(expr) specification is (possibly) canonical if expr is
  // value-dependent.
  if (ESI.Type == EST_DependentNoexcept)
    return true;

  // A dynamic exception specification is canonical if it only contains pack
  // expansions (so we can't tell whether it's non-throwing) and all its
  // contained types are canonical.
  if (ESI.Type == EST_Dynamic) {
    bool AnyPackExpansions = false;
    for (QualType ET : ESI.Exceptions) {
      if (!ET.isCanonical())
        return false;
      if (ET->getAs<PackExpansionType>())
        AnyPackExpansions = true;
    }
    return AnyPackExpansions;
  }

  // All remaining specification kinds are non-canonical.
  return false;
}
3737 
// Returns the uniqued FunctionProtoType for the given result/parameter types
// and extended prototype info. When OnlyWantCanonical is set, the caller
// guarantees all inputs are already canonical and the result must be the
// canonical type node itself. Uniquing is done through the
// FunctionProtoTypes folding set; the only deliberately non-uniqued case is
// a type that differs from an existing one solely in its (computed) noexcept
// expression, which gets a fresh "sugar" node sharing the same canonical type.
3738 QualType ASTContext::getFunctionTypeInternal(
3739  QualType ResultTy, ArrayRef<QualType> ArgArray,
3740  const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
3741  size_t NumArgs = ArgArray.size();
3742 
3743  // Unique functions, to guarantee there is only one function of a particular
3744  // structure.
3745  llvm::FoldingSetNodeID ID;
3746  FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
3747  *this, true);
3748 
3749  QualType Canonical;
3750  bool Unique = false;
3751 
3752  void *InsertPos = nullptr;
3753  if (FunctionProtoType *FPT =
3754  FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
3755  QualType Existing = QualType(FPT, 0);
3756 
3757  // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
3758  // it so long as our exception specification doesn't contain a dependent
3759  // noexcept expression, or we're just looking for a canonical type.
3760  // Otherwise, we're going to need to create a type
3761  // sugar node to hold the concrete expression.
3762  if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
3763  EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
3764  return Existing;
3765 
3766  // We need a new type sugar node for this one, to hold the new noexcept
3767  // expression. We do no canonicalization here, but that's OK since we don't
3768  // expect to see the same noexcept expression much more than once.
3769  Canonical = getCanonicalType(Existing);
// Unique=true: skip folding-set insertion below, this node is pure sugar.
3770  Unique = true;
3771  }
3772 
3773  bool NoexceptInType = getLangOpts().CPlusPlus17;
3774  bool IsCanonicalExceptionSpec =
3775  isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);
3776 
3777  // Determine whether the type being created is already canonical or not.
3778  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
3779  isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
3780  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
3781  if (!ArgArray[i].isCanonicalAsParam())
3782  isCanonical = false;
3783 
3784  if (OnlyWantCanonical)
3785  assert(isCanonical &&
3786  "given non-canonical parameters constructing canonical type");
3787 
3788  // If this type isn't canonical, get the canonical version of it if we don't
3789  // already have it. The exception spec is only partially part of the
3790  // canonical type, and only in C++17 onwards.
3791  if (!isCanonical && Canonical.isNull()) {
3792  SmallVector<QualType, 16> CanonicalArgs;
3793  CanonicalArgs.reserve(NumArgs);
3794  for (unsigned i = 0; i != NumArgs; ++i)
3795  CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
3796 
3797  llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
3798  FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
3799  CanonicalEPI.HasTrailingReturn = false;
3800 
3801  if (IsCanonicalExceptionSpec) {
3802  // Exception spec is already OK.
3803  } else if (NoexceptInType) {
3804  switch (EPI.ExceptionSpec.Type) {
// NOTE(review): the extraction dropped original line 3805 here -- upstream it
// holds the case labels `case EST_Unparsed: case EST_Uninstantiated:
// case EST_Unevaluated:`; verify against upstream ASTContext.cpp.
3806  // We don't know yet. It shouldn't matter what we pick here; no-one
3807  // should ever look at this.
3808  LLVM_FALLTHROUGH;
3809  case EST_None: case EST_MSAny: case EST_NoexceptFalse:
3810  CanonicalEPI.ExceptionSpec.Type = EST_None;
3811  break;
3812 
3813  // A dynamic exception specification is almost always "not noexcept",
3814  // with the exception that a pack expansion might expand to no types.
3815  case EST_Dynamic: {
3816  bool AnyPacks = false;
3817  for (QualType ET : EPI.ExceptionSpec.Exceptions) {
3818  if (ET->getAs<PackExpansionType>())
3819  AnyPacks = true;
3820  ExceptionTypeStorage.push_back(getCanonicalType(ET));
3821  }
3822  if (!AnyPacks)
3823  CanonicalEPI.ExceptionSpec.Type = EST_None;
3824  else {
3825  CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
3826  CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
3827  }
3828  break;
3829  }
3830 
3831  case EST_DynamicNone:
3832  case EST_BasicNoexcept:
3833  case EST_NoexceptTrue:
3834  case EST_NoThrow:
3835  CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
3836  break;
3837 
3838  case EST_DependentNoexcept:
3839  llvm_unreachable("dependent noexcept is already canonical");
3840  }
3841  } else {
// NOTE(review): extraction dropped original line 3842 -- upstream it resets
// the spec (`CanonicalEPI.ExceptionSpec = FunctionProtoType::
// ExceptionSpecInfo();`); verify against upstream.
3843  }
3844 
3845  // Adjust the canonical function result type.
3846  CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
3847  Canonical =
3848  getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);
3849 
3850  // Get the new insert position for the node we care about.
3851  FunctionProtoType *NewIP =
3852  FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
3853  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3854  }
3855 
3856  // Compute the needed size to hold this FunctionProtoType and the
3857  // various trailing objects.
3858  auto ESH = FunctionProtoType::getExceptionSpecSize(
3859  EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
3860  size_t Size = FunctionProtoType::totalSizeToAlloc<
// NOTE(review): extraction dropped original lines 3861-3862 (the leading
// trailing-object type arguments of totalSizeToAlloc) -- verify upstream.
3863  FunctionProtoType::ExtParameterInfo, Qualifiers>(
3864  NumArgs, FunctionProtoType::hasExtraBitfields(EPI.ExceptionSpec.Type),
3865  ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
3866  EPI.ExtParameterInfos ? NumArgs : 0,
3867  EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0);
3868 
3869  auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment);
3870  FunctionProtoType::ExtProtoInfo newEPI = EPI;
3871  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
3872  Types.push_back(FTP);
// Sugar nodes (Unique==true) are intentionally NOT inserted into the
// folding set: they would collide with the profile of the existing node.
3873  if (!Unique)
3874  FunctionProtoTypes.InsertNode(FTP, InsertPos);
3875  return QualType(FTP, 0);
3876 }
3877 
3878 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
3879  llvm::FoldingSetNodeID ID;
3880  PipeType::Profile(ID, T, ReadOnly);
3881 
3882  void *InsertPos = nullptr;
3883  if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
3884  return QualType(PT, 0);
3885 
3886  // If the pipe element type isn't canonical, this won't be a canonical type
3887  // either, so fill in the canonical type field.
3888  QualType Canonical;
3889  if (!T.isCanonical()) {
3890  Canonical = getPipeType(getCanonicalType(T), ReadOnly);
3891 
3892  // Get the new insert position for the node we care about.
3893  PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
3894  assert(!NewIP && "Shouldn't be in the map!");
3895  (void)NewIP;
3896  }
3897  auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly);
3898  Types.push_back(New);
3899  PipeTypes.InsertNode(New, InsertPos);
3900  return QualType(New, 0);
3901 }
3902 
3904  // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
3905  return LangOpts.OpenCL ? getAddrSpaceQualType(Ty, LangAS::opencl_constant)
3906  : Ty;
3907 }
3908 
// Convenience wrapper: a read-only OpenCL pipe of element type T.
// NOTE(review): signature line (3909, presumably
// `QualType ASTContext::getReadPipeType(QualType T) const {`) was dropped
// by the extraction.
3910  return getPipeType(T, true);
3911 }
3912 
// Convenience wrapper: a write-only OpenCL pipe of element type T.
// NOTE(review): signature line (3913, presumably
// `QualType ASTContext::getWritePipeType(QualType T) const {`) was dropped
// by the extraction.
3914  return getPipeType(T, false);
3915 }
3916 
#ifndef NDEBUG
// Assert-only helper: true if D is a templated CXXRecordDecl (a partial
// specialization, or a class described by a template that is not itself a
// specialization) and therefore needs an InjectedClassNameType.
// NOTE(review): signature line (3918, `static bool
// NeedsInjectedClassNameType(const Decl *D) {`) was dropped by the extraction.
3919  if (!isa<CXXRecordDecl>(D)) return false;
3920  const auto *RD = cast<CXXRecordDecl>(D);
3921  if (isa<ClassTemplatePartialSpecializationDecl>(RD))
3922  return true;
3923  if (RD->getDescribedClassTemplate() &&
3924  !isa<ClassTemplateSpecializationDecl>(RD))
3925  return true;
3926  return false;
3927 }
#endif
3929 
3930 /// getInjectedClassNameType - Return the unique reference to the
3931 /// injected class name type for the specified templated declaration.
// NOTE(review): the extraction dropped the first signature line (3932,
// `QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,`).
// The type is cached on the decl (TypeForDecl) and shared across
// redeclarations rather than uniqued in a folding set.
3933  QualType TST) const {
3934  assert(NeedsInjectedClassNameType(Decl));
3935  if (Decl->TypeForDecl) {
3936  assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
3937  } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
// Reuse the type already built for an earlier redeclaration.
3938  assert(PrevDecl->TypeForDecl && "previous declaration has no type");
3939  Decl->TypeForDecl = PrevDecl->TypeForDecl;
3940  assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
3941  } else {
3942  Type *newType =
3943  new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
3944  Decl->TypeForDecl = newType;
3945  Types.push_back(newType);
3946  }
3947  return QualType(Decl->TypeForDecl, 0);
3948 }
3949 
3950 /// getTypeDeclType - Return the unique reference to the type for the
3951 /// specified type declaration.
3952 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
3953  assert(Decl && "Passed null for Decl param");
3954  assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
3955 
3956  if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
3957  return getTypedefType(Typedef);
3958 
3959  assert(!isa<TemplateTypeParmDecl>(Decl) &&
3960  "Template type parameter types are always available.");
3961 
3962  if (const auto *Record = dyn_cast<RecordDecl>(Decl)) {
3963  assert(Record->isFirstDecl() && "struct/union has previous declaration");
3964  assert(!NeedsInjectedClassNameType(Record));
3965  return getRecordType(Record);
3966  } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) {
3967  assert(Enum->isFirstDecl() && "enum has previous declaration");
3968  return getEnumType(Enum);
3969  } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
3970  Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
3971  Decl->TypeForDecl = newType;
3972  Types.push_back(newType);
3973  } else
3974  llvm_unreachable("TypeDecl without a type?");
3975 
3976  return QualType(Decl->TypeForDecl, 0);
3977 }
3978 
3979 /// getTypedefType - Return the unique reference to the type for the
3980 /// specified typedef name decl.
// The type is cached on the decl itself (TypeForDecl); Canonical may be
// passed in by the caller or computed from the underlying type.
// NOTE(review): the extraction dropped line 3982 (the first parameter,
// presumably `ASTContext::getTypedefType(const TypedefNameDecl *Decl,`).
3981 QualType
3983  QualType Canonical) const {
3984  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
3985 
3986  if (Canonical.isNull())
3987  Canonical = getCanonicalType(Decl->getUnderlyingType());
3988  auto *newType = new (*this, TypeAlignment)
3989  TypedefType(Type::Typedef, Decl, Canonical);
3990  Decl->TypeForDecl = newType;
3991  Types.push_back(newType);
3992  return QualType(newType, 0);
3993 }
3994 
// Return the unique RecordType for the given record declaration, cached on
// the decl and shared with any previous redeclaration.
// NOTE(review): signature line (3995, presumably
// `QualType ASTContext::getRecordType(const RecordDecl *Decl) const {`)
// was dropped by the extraction.
3996  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
3997 
3998  if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
3999  if (PrevDecl->TypeForDecl)
4000  return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
4001 
4002  auto *newType = new (*this, TypeAlignment) RecordType(Decl);
4003  Decl->TypeForDecl = newType;
4004  Types.push_back(newType);
4005  return QualType(newType, 0);
4006 }
4007 
// Return the unique EnumType for the given enum declaration, cached on the
// decl and shared with any previous redeclaration.
// NOTE(review): signature line (4008, presumably
// `QualType ASTContext::getEnumType(const EnumDecl *Decl) const {`) was
// dropped by the extraction.
4009  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
4010 
4011  if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
4012  if (PrevDecl->TypeForDecl)
4013  return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
4014 
4015  auto *newType = new (*this, TypeAlignment) EnumType(Decl);
4016  Decl->TypeForDecl = newType;
4017  Types.push_back(newType);
4018  return QualType(newType, 0);
4019 }
4020 
// Return the uniqued AttributedType wrapping modifiedType/equivalentType
// with the given attribute kind; canonicalizes through equivalentType.
// NOTE(review): the extraction dropped line 4021 (presumably
// `QualType ASTContext::getAttributedType(attr::Kind attrKind,`).
4022  QualType modifiedType,
4023  QualType equivalentType) {
4024  llvm::FoldingSetNodeID id;
4025  AttributedType::Profile(id, attrKind, modifiedType, equivalentType);
4026 
4027  void *insertPos = nullptr;
4028  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
4029  if (type) return QualType(type, 0);
4030 
4031  QualType canon = getCanonicalType(equivalentType);
4032  type = new (*this, TypeAlignment)
4033  AttributedType(canon, attrKind, modifiedType, equivalentType);
4034 
4035  Types.push_back(type);
4036  AttributedTypes.InsertNode(type, insertPos);
4037 
4038  return QualType(type, 0);
4039 }
4040 
4041 /// Retrieve a substitution-result type.
// Unique a SubstTemplateTypeParmType recording that Parm was replaced by
// Replacement (which must already be canonical).
// NOTE(review): the extraction dropped line 4043 (presumably
// `ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,`).
4042 QualType
4044  QualType Replacement) const {
4045  assert(Replacement.isCanonical()
4046  && "replacement types must always be canonical");
4047 
4048  llvm::FoldingSetNodeID ID;
4049  SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
4050  void *InsertPos = nullptr;
4051  SubstTemplateTypeParmType *SubstParm
4052  = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4053 
4054  if (!SubstParm) {
4055  SubstParm = new (*this, TypeAlignment)
4056  SubstTemplateTypeParmType(Parm, Replacement);
4057  Types.push_back(SubstParm);
4058  SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
4059  }
4060 
4061  return QualType(SubstParm, 0);
4062 }
4063 
4064 /// Retrieve a
// (truncated doc comment) -- upstream: retrieve a SubstTemplateTypeParmPackType
// recording that Parm was substituted with the given argument pack. All pack
// elements must be canonical types (asserted below in !NDEBUG builds).
// NOTE(review): the extraction dropped line 4065 (presumably
// `QualType ASTContext::getSubstTemplateTypeParmPackType(`).
4066  const TemplateTypeParmType *Parm,
4067  const TemplateArgument &ArgPack) {
4068 #ifndef NDEBUG
4069  for (const auto &P : ArgPack.pack_elements()) {
4070  assert(P.getKind() == TemplateArgument::Type &&"Pack contains a non-type");
4071  assert(P.getAsType().isCanonical() && "Pack contains non-canonical type");
4072  }
4073 #endif
4074 
4075  llvm::FoldingSetNodeID ID;
4076  SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
4077  void *InsertPos = nullptr;
4078  if (SubstTemplateTypeParmPackType *SubstParm
4079  = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
4080  return QualType(SubstParm, 0);
4081 
4082  QualType Canon;
4083  if (!Parm->isCanonicalUnqualified()) {
// Build the canonical node first, then refresh the insert position since
// the recursive call may have grown the folding set.
4084  Canon = getCanonicalType(QualType(Parm, 0));
4085  Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
4086  ArgPack);
4087  SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
4088  }
4089 
4090  auto *SubstParm
4091  = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
4092  ArgPack);
4093  Types.push_back(SubstParm);
4094  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
4095  return QualType(SubstParm, 0);
4096 }
4097 
4098 /// Retrieve the template type parameter type for a template
4099 /// parameter or parameter pack with the given depth, index, and (optionally)
4100 /// name.
// NOTE(review): the extraction dropped line 4101 (presumably
// `QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,`).
4102  bool ParameterPack,
4103  TemplateTypeParmDecl *TTPDecl) const {
4104  llvm::FoldingSetNodeID ID;
4105  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
4106  void *InsertPos = nullptr;
4107  TemplateTypeParmType *TypeParm
4108  = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4109 
4110  if (TypeParm)
4111  return QualType(TypeParm, 0);
4112 
4113  if (TTPDecl) {
// A named parameter canonicalizes to the unnamed depth/index form.
4114  QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
4115  TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);
4116 
4117  TemplateTypeParmType *TypeCheck
4118  = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4119  assert(!TypeCheck && "Template type parameter canonical type broken");
4120  (void)TypeCheck;
4121  } else
4122  TypeParm = new (*this, TypeAlignment)
4123  TemplateTypeParmType(Depth, Index, ParameterPack);
4124 
4125  Types.push_back(TypeParm);
4126  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
4127 
4128  return QualType(TypeParm, 0);
4129 }
4130 
// Builds a TypeSourceInfo for a template specialization type, filling in the
// template-name and angle-bracket source locations from Args.
// NOTE(review): the extraction dropped lines 4131-4132 start (the signature,
// presumably `TypeSourceInfo *ASTContext::getTemplateSpecializationTypeInfo(
// TemplateName Name, ...)`) and lines 4140-4143 (creating `DI` via
// CreateTypeSourceInfo and casting its TypeLoc to
// TemplateSpecializationTypeLoc `TL`) -- verify against upstream.
4133  SourceLocation NameLoc,
4134  const TemplateArgumentListInfo &Args,
4135  QualType Underlying) const {
4136  assert(!Name.getAsDependentTemplateName() &&
4137  "No dependent template names here!");
4138  QualType TST = getTemplateSpecializationType(Name, Args, Underlying);
4139 
4144  TL.setTemplateNameLoc(NameLoc);
4145  TL.setLAngleLoc(Args.getLAngleLoc());
4146  TL.setRAngleLoc(Args.getRAngleLoc());
4147  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
4148  TL.setArgLocInfo(i, Args[i].getLocInfo());
4149  return DI;
4150 }
4151 
// Overload taking a TemplateArgumentListInfo: strips the location info and
// forwards to the ArrayRef<TemplateArgument> overload.
// NOTE(review): the extraction dropped line 4153 (presumably
// `ASTContext::getTemplateSpecializationType(TemplateName Template,`) and
// line 4159 (the declaration of ArgVec, presumably
// `SmallVector<TemplateArgument, 4> ArgVec;`) -- verify upstream.
4152 QualType
4154  const TemplateArgumentListInfo &Args,
4155  QualType Underlying) const {
4156  assert(!Template.getAsDependentTemplateName() &&
4157  "No dependent template names here!");
4158 
4160  ArgVec.reserve(Args.size());
4161  for (const TemplateArgumentLoc &Arg : Args.arguments())
4162  ArgVec.push_back(Arg.getArgument());
4163 
4164  return getTemplateSpecializationType(Template, ArgVec, Underlying);
4165 }
4166 
#ifndef NDEBUG
// Assert-only helper used by getTemplateSpecializationType.
// NOTE(review): signature line (4168, `static bool hasAnyPackExpansions(
// ArrayRef<TemplateArgument> Args) {`) was dropped by the extraction.
// NOTE(review): the final `return true;` makes this function unconditionally
// true, so the assertion using it can never fire on that operand; this odd
// form matches upstream clang (conservative, assert-only) -- confirm before
// "fixing" it to `return false;`.
4169  for (const TemplateArgument &Arg : Args)
4170  if (Arg.isPackExpansion())
4171  return true;
4172 
4173  return true;
4174 }
#endif
4176 
// Builds a (non-canonical, deliberately non-uniqued) template specialization
// type node. For alias templates, Underlying carries the aliased type; the
// canonical type is either Underlying's canonical form or the canonical
// specialization built from canonicalized arguments.
// NOTE(review): the extraction dropped lines 4178-4179 (the signature,
// presumably `ASTContext::getTemplateSpecializationType(TemplateName Template,
// ArrayRef<TemplateArgument> Args,`) -- verify against upstream.
4177 QualType
4180  QualType Underlying) const {
4181  assert(!Template.getAsDependentTemplateName() &&
4182  "No dependent template names here!");
4183  // Look through qualified template names.
4184  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4185  Template = TemplateName(QTN->getTemplateDecl());
4186 
4187  bool IsTypeAlias =
4188  Template.getAsTemplateDecl() &&
4189  isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
4190  QualType CanonType;
4191  if (!Underlying.isNull())
4192  CanonType = getCanonicalType(Underlying);
4193  else {
4194  // We can get here with an alias template when the specialization contains
4195  // a pack expansion that does not match up with a parameter pack.
4196  assert((!IsTypeAlias || hasAnyPackExpansions(Args)) &&
4197  "Caller must compute aliased type");
4198  IsTypeAlias = false;
4199  CanonType = getCanonicalTemplateSpecializationType(Template, Args);
4200  }
4201 
4202  // Allocate the (non-canonical) template specialization type, but don't
4203  // try to unique it: these types typically have location information that
4204  // we don't unique and don't want to lose.
4205  void *Mem = Allocate(sizeof(TemplateSpecializationType) +
4206  sizeof(TemplateArgument) * Args.size() +
4207  (IsTypeAlias? sizeof(QualType) : 0),
4208  TypeAlignment);
4209  auto *Spec
4210  = new (Mem) TemplateSpecializationType(Template, Args, CanonType,
4211  IsTypeAlias ? Underlying : QualType());
4212 
4213  Types.push_back(Spec);
4214  return QualType(Spec, 0);
4215 }
4216 
// Builds/uniques the canonical TemplateSpecializationType: canonical template
// name plus canonicalized arguments, uniqued via a folding set.
// NOTE(review): the extraction dropped line 4217's lead-in (presumably
// `QualType ASTContext::getCanonicalTemplateSpecializationType(`), line 4228
// (the declaration of CanonArgs, presumably
// `SmallVector<TemplateArgument, 4> CanonArgs;`) and line 4241 (the
// declaration `TemplateSpecializationType *Spec`) -- verify upstream.
4218  TemplateName Template, ArrayRef<TemplateArgument> Args) const {
4219  assert(!Template.getAsDependentTemplateName() &&
4220  "No dependent template names here!");
4221 
4222  // Look through qualified template names.
4223  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4224  Template = TemplateName(QTN->getTemplateDecl());
4225 
4226  // Build the canonical template specialization type.
4227  TemplateName CanonTemplate = getCanonicalTemplateName(Template);
4229  unsigned NumArgs = Args.size();
4230  CanonArgs.reserve(NumArgs);
4231  for (const TemplateArgument &Arg : Args)
4232  CanonArgs.push_back(getCanonicalTemplateArgument(Arg));
4233 
4234  // Determine whether this canonical template specialization type already
4235  // exists.
4236  llvm::FoldingSetNodeID ID;
4237  TemplateSpecializationType::Profile(ID, CanonTemplate,
4238  CanonArgs, *this);
4239 
4240  void *InsertPos = nullptr;
4242  = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4243 
4244  if (!Spec) {
4245  // Allocate a new canonical template specialization type.
4246  void *Mem = Allocate((sizeof(TemplateSpecializationType) +
4247  sizeof(TemplateArgument) * NumArgs),
4248  TypeAlignment);
4249  Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
4250  CanonArgs,
4251  QualType(), QualType());
4252  Types.push_back(Spec);
4253  TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
4254  }
4255 
4256  assert(Spec->isDependentType() &&
4257  "Non-dependent template-id type must have a canonical type");
4258  return QualType(Spec, 0);
4259 }
4260 
// Builds/uniques an ElaboratedType (`struct Foo`, `NS::Foo`, etc.) wrapping
// NamedType; canonicalizes through NamedType's canonical form.
// NOTE(review): the extraction dropped line 4261 (presumably
// `QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,`).
4262  NestedNameSpecifier *NNS,
4263  QualType NamedType,
4264  TagDecl *OwnedTagDecl) const {
4265  llvm::FoldingSetNodeID ID;
4266  ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);
4267 
4268  void *InsertPos = nullptr;
4269  ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
4270  if (T)
4271  return QualType(T, 0);
4272 
4273  QualType Canon = NamedType;
4274  if (!Canon.isCanonical()) {
4275  Canon = getCanonicalType(NamedType);
4276  ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
4277  assert(!CheckT && "Elaborated canonical type broken");
4278  (void)CheckT;
4279  }
4280 
4281  void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl),
4282  TypeAlignment);
4283  T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);
4284 
4285  Types.push_back(T);
4286  ElaboratedTypes.InsertNode(T, InsertPos);
4287  return QualType(T, 0);
4288 }
4289 
// Builds/uniques a ParenType sugar node around InnerType; canonicalizes
// through InnerType's canonical form.
// NOTE(review): the extraction dropped line 4291 (presumably
// `ASTContext::getParenType(QualType InnerType) const {`).
4290 QualType
4292  llvm::FoldingSetNodeID ID;
4293  ParenType::Profile(ID, InnerType);
4294 
4295  void *InsertPos = nullptr;
4296  ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
4297  if (T)
4298  return QualType(T, 0);
4299 
4300  QualType Canon = InnerType;
4301  if (!Canon.isCanonical()) {
4302  Canon = getCanonicalType(InnerType);
4303  ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
4304  assert(!CheckT && "Paren canonical type broken");
4305  (void)CheckT;
4306  }
4307 
4308  T = new (*this, TypeAlignment) ParenType(InnerType, Canon);
4309  Types.push_back(T);
4310  ParenTypes.InsertNode(T, InsertPos);
4311  return QualType(T, 0);
4312 }
4313 
// Builds a MacroQualifiedType sugar node recording that UnderlyingTy was
// spelled via the macro MacroII. Note: not uniqued via a folding set.
// NOTE(review): the extraction dropped line 4315 (presumably
// `ASTContext::getMacroQualifiedType(QualType UnderlyingTy,`).
4314 QualType
4316  const IdentifierInfo *MacroII) const {
4317  QualType Canon = UnderlyingTy;
4318  if (!Canon.isCanonical())
4319  Canon = getCanonicalType(UnderlyingTy);
4320 
4321  auto *newType = new (*this, TypeAlignment)
4322  MacroQualifiedType(UnderlyingTy, Canon, MacroII);
4323  Types.push_back(newType);
4324  return QualType(newType, 0);
4325 }
4326 
// Builds/uniques a DependentNameType (`typename T::foo`). If no canonical
// type was supplied, one is built from the canonical nested-name-specifier.
// NOTE(review): the extraction dropped line 4327 (presumably
// `QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,`),
// line 4332 (presumably `NestedNameSpecifier *CanonNNS =
// getCanonicalNestedNameSpecifier(NNS);`) and line 4341 (the declaration
// `DependentNameType *T`) -- verify upstream.
4328  NestedNameSpecifier *NNS,
4329  const IdentifierInfo *Name,
4330  QualType Canon) const {
4331  if (Canon.isNull()) {
4333  if (CanonNNS != NNS)
4334  Canon = getDependentNameType(Keyword, CanonNNS, Name);
4335  }
4336 
4337  llvm::FoldingSetNodeID ID;
4338  DependentNameType::Profile(ID, Keyword, NNS, Name);
4339 
4340  void *InsertPos = nullptr;
4342  = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
4343  if (T)
4344  return QualType(T, 0);
4345 
4346  T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon);
4347  Types.push_back(T);
4348  DependentNameTypes.InsertNode(T, InsertPos);
4349  return QualType(T, 0);
4350 }
4351 
// Overload taking a TemplateArgumentListInfo: copies out the bare arguments
// and forwards to the ArrayRef overload.
// NOTE(review): the extraction dropped line 4353 (presumably
// `ASTContext::getDependentTemplateSpecializationType(`) and line 4359 (the
// declaration of ArgCopy, presumably
// `SmallVector<TemplateArgument, 16> ArgCopy;`) -- verify upstream.
4352 QualType
4354  ElaboratedTypeKeyword Keyword,
4355  NestedNameSpecifier *NNS,
4356  const IdentifierInfo *Name,
4357  const TemplateArgumentListInfo &Args) const {
4358  // TODO: avoid this copy
4360  for (unsigned I = 0, E = Args.size(); I != E; ++I)
4361  ArgCopy.push_back(Args[I].getArgument());
4362  return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy);
4363 }
4364 
// Builds/uniques a DependentTemplateSpecializationType
// (`typename T::template foo<Args>`). The canonical form uses ETK_Typename,
// the canonical nested-name-specifier, and canonicalized arguments.
// NOTE(review): the extraction dropped line 4366 (presumably
// `ASTContext::getDependentTemplateSpecializationType(`), line 4379 (the
// declaration `DependentTemplateSpecializationType *T`), and line 4384
// (presumably `NestedNameSpecifier *CanonNNS =
// getCanonicalNestedNameSpecifier(NNS);`) -- verify upstream.
4365 QualType
4367  ElaboratedTypeKeyword Keyword,
4368  NestedNameSpecifier *NNS,
4369  const IdentifierInfo *Name,
4370  ArrayRef<TemplateArgument> Args) const {
4371  assert((!NNS || NNS->isDependent()) &&
4372  "nested-name-specifier must be dependent");
4373 
4374  llvm::FoldingSetNodeID ID;
4375  DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
4376  Name, Args);
4377 
4378  void *InsertPos = nullptr;
4380  = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4381  if (T)
4382  return QualType(T, 0);
4383 
4385 
4386  ElaboratedTypeKeyword CanonKeyword = Keyword;
4387  if (Keyword == ETK_None) CanonKeyword = ETK_Typename;
4388 
4389  bool AnyNonCanonArgs = false;
4390  unsigned NumArgs = Args.size();
4391  SmallVector<TemplateArgument, 16> CanonArgs(NumArgs);
4392  for (unsigned I = 0; I != NumArgs; ++I) {
4393  CanonArgs[I] = getCanonicalTemplateArgument(Args[I]);
4394  if (!CanonArgs[I].structurallyEquals(Args[I]))
4395  AnyNonCanonArgs = true;
4396  }
4397 
4398  QualType Canon;
4399  if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
4400  Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
4401  Name,
4402  CanonArgs);
4403 
4404  // Find the insert position again.
4405  DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4406  }
4407 
4408  void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
4409  sizeof(TemplateArgument) * NumArgs),
4410  TypeAlignment);
4411  T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
4412  Name, Args, Canon);
4413  Types.push_back(T);
4414  DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
4415  return QualType(T, 0);
4416 }
4417 
// Builds the template argument that a template parameter "injects" when the
// template refers to itself: a type for type parameters, a DeclRefExpr for
// non-type parameters, a TemplateName for template template parameters --
// each wrapped in a pack expansion / pack copy if the parameter is a pack.
// NOTE(review): the extraction dropped the signature line (4418, presumably
// `TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {`)
// and line 4439 (the pack-expansion TemplateArgument for a template template
// parameter pack) -- verify upstream.
4419  TemplateArgument Arg;
4420  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
4421  QualType ArgType = getTypeDeclType(TTP);
4422  if (TTP->isParameterPack())
4423  ArgType = getPackExpansionType(ArgType, None);
4424 
4425  Arg = TemplateArgument(ArgType);
4426  } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
4427  Expr *E = new (*this) DeclRefExpr(
4428  *this, NTTP, /*enclosing*/ false,
4429  NTTP->getType().getNonLValueExprType(*this),
4430  Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());
4431 
4432  if (NTTP->isParameterPack())
4433  E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(),
4434  None);
4435  Arg = TemplateArgument(E);
4436  } else {
4437  auto *TTP = cast<TemplateTemplateParmDecl>(Param);
4438  if (TTP->isParameterPack())
4440  else
4441  Arg = TemplateArgument(TemplateName(TTP));
4442  }
4443 
4444  if (Param->isTemplateParameterPack())
4445  Arg = TemplateArgument::CreatePackCopy(*this, Arg);
4446 
4447  return Arg;
4448 }
4449 
4450 void
// Appends one injected template argument per parameter in Params to Args.
// NOTE(review): the extraction dropped lines 4451-4452 (the rest of the
// signature, presumably `ASTContext::getInjectedTemplateArgs(
// const TemplateParameterList *Params,
// SmallVectorImpl<TemplateArgument> &Args) {`) -- verify upstream.
4453  Args.reserve(Args.size() + Params->size());
4454 
4455  for (NamedDecl *Param : *Params)
4456  Args.push_back(getInjectedTemplateArg(Param));
4457 }
4458 
// Builds/uniques a PackExpansionType (`Pattern...`) with an optional fixed
// number of expansions; canonicalizes through the pattern's canonical form.
// NOTE(review): the extraction dropped the first signature line (4458,
// presumably `QualType ASTContext::getPackExpansionType(QualType Pattern,`)
// and line 4473 (the declaration `PackExpansionType *T`) -- verify upstream.
4460  Optional<unsigned> NumExpansions) {
4461  llvm::FoldingSetNodeID ID;
4462  PackExpansionType::Profile(ID, Pattern, NumExpansions);
4463 
4464  // A deduced type can deduce to a pack, eg
4465  // auto ...x = some_pack;
4466  // That declaration isn't (yet) valid, but is created as part of building an
4467  // init-capture pack:
4468  // [...x = some_pack] {}
4469  assert((Pattern->containsUnexpandedParameterPack() ||
4470  Pattern->getContainedDeducedType()) &&
4471  "Pack expansions must expand one or more parameter packs");
4472  void *InsertPos = nullptr;
4474  = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
4475  if (T)
4476  return QualType(T, 0);
4477 
4478  QualType Canon;
4479  if (!Pattern.isCanonical()) {
4480  Canon = getCanonicalType(Pattern);
4481  // The canonical type might not contain an unexpanded parameter pack, if it
4482  // contains an alias template specialization which ignores one of its
4483  // parameters.
4484  if (Canon->containsUnexpandedParameterPack()) {
4485  Canon = getPackExpansionType(Canon, NumExpansions);
4486 
4487  // Find the insert position again, in case we inserted an element into
4488  // PackExpansionTypes and invalidated our insert position.
4489  PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
4490  }
4491  }
4492 
4493  T = new (*this, TypeAlignment)
4494  PackExpansionType(Pattern, Canon, NumExpansions);
4495  Types.push_back(T);
4496  PackExpansionTypes.InsertNode(T, InsertPos);
4497  return QualType(T, 0);
4498 }
4499 
4500 /// CmpProtocolNames - Comparison predicate for sorting protocols
4501 /// alphabetically.
4502 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
4503  ObjCProtocolDecl *const *RHS) {
4504  return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName());
4505 }
4506 
// Returns true if the protocol list is already name-sorted, duplicate-free,
// and every entry is a canonical declaration (the form required for a
// canonical ObjCObjectType).
// NOTE(review): signature line (4507, presumably `static bool
// areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {`) was
// dropped by the extraction.
4508  if (Protocols.empty()) return true;
4509 
4510  if (Protocols[0]->getCanonicalDecl() != Protocols[0])
4511  return false;
4512 
4513  for (unsigned i = 1; i != Protocols.size(); ++i)
4514  if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
4515  Protocols[i]->getCanonicalDecl() != Protocols[i])
4516  return false;
4517  return true;
4518 }
4519 
4520 static void
// In-place normalization of a protocol list: sort by name, replace each
// entry with its canonical decl, then drop adjacent duplicates.
// NOTE(review): the extraction dropped line 4521 (the rest of the signature,
// presumably `SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *>
// &Protocols) {`) -- verify upstream.
4522  // Sort protocols, keyed by name.
4523  llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames);
4524 
4525  // Canonicalize.
4526  for (ObjCProtocolDecl *&P : Protocols)
4527  P = P->getCanonicalDecl();
4528 
4529  // Remove duplicates.
4530  auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end());
4531  Protocols.erase(ProtocolsEnd, Protocols.end());
4532 }
4533 
// Legacy entry point: builds an ObjCObjectType with protocols but no type
// arguments and no __kindof.
// NOTE(review): the extraction dropped line 4534's lead-in (presumably
// `QualType ASTContext::getObjCObjectType(QualType BaseType,`).
4535  ObjCProtocolDecl * const *Protocols,
4536  unsigned NumProtocols) const {
4537  return getObjCObjectType(BaseType, {},
4538  llvm::makeArrayRef(Protocols, NumProtocols),
4539  /*isKindOf=*/false);
4540 }
4541 
// Builds/uniques an ObjCObjectType: base type plus type arguments, protocol
// qualifiers, and __kindof. The canonical form uses the canonical base,
// canonical type arguments, and a sorted/uniqued/canonicalized protocol list.
// NOTE(review): the extraction dropped line 4542 (the signature, presumably
// `QualType ASTContext::getObjCObjectType(`).
4543  QualType baseType,
4544  ArrayRef<QualType> typeArgs,
4545  ArrayRef<ObjCProtocolDecl *> protocols,
4546  bool isKindOf) const {
4547  // If the base type is an interface and there aren't any protocols or
4548  // type arguments to add, then the interface type will do just fine.
4549  if (typeArgs.empty() && protocols.empty() && !isKindOf &&
4550  isa<ObjCInterfaceType>(baseType))
4551  return baseType;
4552 
4553  // Look in the folding set for an existing type.
4554  llvm::FoldingSetNodeID ID;
4555  ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf);
4556  void *InsertPos = nullptr;
4557  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
4558  return QualType(QT, 0);
4559 
4560  // Determine the type arguments to be used for canonicalization,
4561  // which may be explicitly specified here or written on the base
4562  // type.
4563  ArrayRef<QualType> effectiveTypeArgs = typeArgs;
4564  if (effectiveTypeArgs.empty()) {
4565  if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
4566  effectiveTypeArgs = baseObject->getTypeArgs();
4567  }
4568 
4569  // Build the canonical type, which has the canonical base type and a
4570  // sorted-and-uniqued list of protocols and the type arguments
4571  // canonicalized.
4572  QualType canonical;
4573  bool typeArgsAreCanonical = std::all_of(effectiveTypeArgs.begin(),
4574  effectiveTypeArgs.end(),
4575  [&](QualType type) {
4576  return type.isCanonical();
4577  });
4578  bool protocolsSorted = areSortedAndUniqued(protocols);
4579  if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
4580  // Determine the canonical type arguments.
4581  ArrayRef<QualType> canonTypeArgs;
4582  SmallVector<QualType, 4> canonTypeArgsVec;
4583  if (!typeArgsAreCanonical) {
4584  canonTypeArgsVec.reserve(effectiveTypeArgs.size());
4585  for (auto typeArg : effectiveTypeArgs)
4586  canonTypeArgsVec.push_back(getCanonicalType(typeArg));
4587  canonTypeArgs = canonTypeArgsVec;
4588  } else {
4589  canonTypeArgs = effectiveTypeArgs;
4590  }
4591 
4592  ArrayRef<ObjCProtocolDecl *> canonProtocols;
4593  SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
4594  if (!protocolsSorted) {
4595  canonProtocolsVec.append(protocols.begin(), protocols.end());
4596  SortAndUniqueProtocols(canonProtocolsVec);
4597  canonProtocols = canonProtocolsVec;
4598  } else {
4599  canonProtocols = protocols;
4600  }
4601 
4602  canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs,
4603  canonProtocols, isKindOf);
4604 
4605  // Regenerate InsertPos.
4606  ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
4607  }
4608 
// ObjCObjectTypeImpl stores its type arguments and protocols as trailing
// storage, so the allocation is sized manually here.
4609  unsigned size = sizeof(ObjCObjectTypeImpl);
4610  size += typeArgs.size() * sizeof(QualType);
4611  size += protocols.size() * sizeof(ObjCProtocolDecl *);
4612  void *mem = Allocate(size, TypeAlignment);
4613  auto *T =
4614  new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
4615  isKindOf);
4616 
4617  Types.push_back(T);
4618  ObjCObjectTypes.InsertNode(T, InsertPos);
4619  return QualType(T, 0);
4620 }
4621 
4622 /// Apply Objective-C protocol qualifiers to the given type.
4623 /// If this is for the canonical type of a type parameter, we can apply
4624 /// protocol qualifiers on the ObjCObjectPointerType.
// Sets hasError and returns the type unchanged when protocols cannot be
// applied to this kind of type.
// NOTE(review): the extraction dropped line 4626 (presumably
// `ASTContext::applyObjCProtocolQualifiers(QualType type,`).
4625 QualType
4627  ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
4628  bool allowOnPointerType) const {
4629  hasError = false;
4630 
4631  if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) {
4632  return getObjCTypeParamType(objT->getDecl(), protocols);
4633  }
4634 
4635  // Apply protocol qualifiers to ObjCObjectPointerType.
4636  if (allowOnPointerType) {
4637  if (const auto *objPtr =
4638  dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) {
4639  const ObjCObjectType *objT = objPtr->getObjectType();
4640  // Merge protocol lists and construct ObjCObjectType.
4641  SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
4642  protocolsVec.append(objT->qual_begin(),
4643  objT->qual_end());
4644  protocolsVec.append(protocols.begin(), protocols.end());
// Intentionally shadows the parameter with the merged list below.
4645  ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
4646  type = getObjCObjectType(
4647  objT->getBaseType(),
4648  objT->getTypeArgsAsWritten(),
4649  protocols,
4650  objT->isKindOfTypeAsWritten());
4651  return getObjCObjectPointerType(type);
4652  }
4653  }
4654 
4655  // Apply protocol qualifiers to ObjCObjectType.
4656  if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){
4657  // FIXME: Check for protocols to which the class type is already
4658  // known to conform.
4659 
4660  return getObjCObjectType(objT->getBaseType(),
4661  objT->getTypeArgsAsWritten(),
4662  protocols,
4663  objT->isKindOfTypeAsWritten());
4664  }
4665 
4666  // If the canonical type is ObjCObjectType, ...
4667  if (type->isObjCObjectType()) {
4668  // Silently overwrite any existing protocol qualifiers.
4669  // TODO: determine whether that's the right thing to do.
4670 
4671  // FIXME: Check for protocols to which the class type is already
4672  // known to conform.
4673  return getObjCObjectType(type, {}, protocols, false);
4674  }
4675 
4676  // id<protocol-list>
4677  if (type->isObjCIdType()) {
4678  const auto *objPtr = type->castAs<ObjCObjectPointerType>();
4679  type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols,
4680  objPtr->isKindOfType());
4681  return getObjCObjectPointerType(type);
4682  }
4683 
4684  // Class<protocol-list>
4685  if (type->isObjCClassType()) {
4686  const auto *objPtr = type->castAs<ObjCObjectPointerType>();
4687  type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols,
4688  objPtr->isKindOfType());
4689  return getObjCObjectPointerType(type);
4690  }
4691 
// Anything else cannot carry protocol qualifiers.
4692  hasError = true;
4693  return type;
4694 }
4695 
4696 QualType
 // NOTE(review): original line 4697 (the qualified name/first parameters,
 // presumably "ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,")
 // was lost in extraction -- confirm against the upstream file.
4698  ArrayRef<ObjCProtocolDecl *> protocols,
4699  QualType Canonical) const {
4700  // Look in the folding set for an existing type.
4701  llvm::FoldingSetNodeID ID;
4702  ObjCTypeParamType::Profile(ID, Decl, protocols);
4703  void *InsertPos = nullptr;
4704  if (ObjCTypeParamType *TypeParam =
4705  ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
4706  return QualType(TypeParam, 0);
4707 
4708  if (Canonical.isNull()) {
4709  // We canonicalize to the underlying type.
4710  Canonical = getCanonicalType(Decl->getUnderlyingType());
4711  if (!protocols.empty()) {
4712  // Apply the protocol qualifiers.
4713  bool hasError;
 // NOTE(review): original line 4714 was lost in extraction; it presumably
 // read "Canonical = getCanonicalType(applyObjCProtocolQualifiers(" --
 // confirm against the upstream file.
4715  Canonical, protocols, hasError, true /*allowOnPointerType*/));
4716  assert(!hasError && "Error when apply protocol qualifier to bound type");
4717  }
4718  }
4719 
 // Trailing protocol pointers are allocated inline after the node.
4720  unsigned size = sizeof(ObjCTypeParamType);
4721  size += protocols.size() * sizeof(ObjCProtocolDecl *);
4722  void *mem = Allocate(size, TypeAlignment);
4723  auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);
4724 
4725  Types.push_back(newType);
4726  ObjCTypeParamTypes.InsertNode(newType, InsertPos);
4727  return QualType(newType, 0);
4728 }
4729 
4730 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
4731 /// protocol list adopt all protocols in QT's qualified-id protocol
4732 /// list.
 // NOTE(review): original line 4733 (the signature, presumably
 // "bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,") was lost
 // in extraction -- confirm against the upstream file.
4734  ObjCInterfaceDecl *IC) {
 // Only a qualified-id type (id<P1, P2, ...>) can impose requirements here.
4735  if (!QT->isObjCQualifiedIdType())
4736  return false;
4737 
4738  if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) {
4739  // If both the right and left sides have qualifiers.
 // Every protocol qualifier on QT must be implemented by IC.
4740  for (auto *Proto : OPT->quals()) {
4741  if (!IC->ClassImplementsProtocol(Proto, false))
4742  return false;
4743  }
4744  return true;
4745  }
4746  return false;
4747 }
4748 
4749 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
4750 /// QT's qualified-id protocol list adopt all protocols in IDecl's list
4751 /// of protocols.
 // NOTE(review): original line 4752 (the signature, presumably
 // "bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,") was
 // lost in extraction -- confirm against the upstream file.
4753  ObjCInterfaceDecl *IDecl) {
4754  if (!QT->isObjCQualifiedIdType())
4755  return false;
4756  const auto *OPT = QT->getAs<ObjCObjectPointerType>();
4757  if (!OPT)
4758  return false;
4759  if (!IDecl->hasDefinition())
4760  return false;
 // Gather every protocol IDecl conforms to (directly or via inheritance).
4761  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
4762  CollectInheritedProtocols(IDecl, InheritedProtocols);
4763  if (InheritedProtocols.empty())
4764  return false;
4765  // Check that if every protocol in list of id<plist> conforms to a protocol
4766  // of IDecl's, then bridge casting is ok.
4767  bool Conforms = false;
4768  for (auto *Proto : OPT->quals()) {
4769  Conforms = false;
4770  for (auto *PI : InheritedProtocols) {
4771  if (ProtocolCompatibleWithProtocol(Proto, PI)) {
4772  Conforms = true;
4773  break;
4774  }
4775  }
4776  if (!Conforms)
4777  break;
4778  }
 // First pass succeeded: every protocol in QT's list matched one of
 // IDecl's inherited protocols.
4779  if (Conforms)
4780  return true;
4781 
 // Second pass (the reverse direction): every inherited protocol of IDecl
 // must be compatible with some protocol in QT's qualifier list.
4782  for (auto *PI : InheritedProtocols) {
4783  // If both the right and left sides have qualifiers.
4784  bool Adopts = false;
4785  for (auto *Proto : OPT->quals()) {
4786  // return 'true' if 'PI' is in the inheritance hierarchy of Proto
4787  if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto)))
4788  break;
4789  }
4790  if (!Adopts)
4791  return false;
4792  }
4793  return true;
4794 }
4795 
4796 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
4797 /// the given object type.
 // NOTE(review): original line 4798 (the signature, presumably
 // "QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {")
 // was lost in extraction -- confirm against the upstream file.
4799  llvm::FoldingSetNodeID ID;
4800  ObjCObjectPointerType::Profile(ID, ObjectT);
4801 
4802  void *InsertPos = nullptr;
4803  if (ObjCObjectPointerType *QT =
4804  ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
4805  return QualType(QT, 0);
4806 
4807  // Find the canonical object type.
 // If ObjectT is already canonical, Canonical stays null; presumably a null
 // canonical marks the new node itself as canonical (usual uniquing pattern
 // in this file) -- confirm in the Type constructor.
4808  QualType Canonical;
4809  if (!ObjectT.isCanonical()) {
4810  Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));
4811 
4812  // Regenerate InsertPos.
 // The recursive call above may have inserted nodes, invalidating the
 // insertion hint obtained earlier.
4813  ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
4814  }
4815 
4816  // No match.
4817  void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
4818  auto *QType =
4819  new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
4820 
4821  Types.push_back(QType);
4822  ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
4823  return QualType(QType, 0);
4824 }
4825 
4826 /// getObjCInterfaceType - Return the unique reference to the type for the
4827 /// specified ObjC interface decl. The list of protocols is optional.
 // NOTE(review): original line 4828 (the signature, presumably
 // "QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,")
 // was lost in extraction -- confirm against the upstream file.
4829  ObjCInterfaceDecl *PrevDecl) const {
 // The type object is cached on the declaration itself.
4830  if (Decl->TypeForDecl)
4831  return QualType(Decl->TypeForDecl, 0);
4832 
 // A redeclaration shares the type object already built for the previous
 // declaration rather than creating a new one.
4833  if (PrevDecl) {
4834  assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
4835  Decl->TypeForDecl = PrevDecl->TypeForDecl;
4836  return QualType(PrevDecl->TypeForDecl, 0);
4837  }
4838 
4839  // Prefer the definition, if there is one.
4840  if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
4841  Decl = Def;
4842 
4843  void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
4844  auto *T = new (Mem) ObjCInterfaceType(Decl);
4845  Decl->TypeForDecl = T;
4846  Types.push_back(T);
4847  return QualType(T, 0);
4848 }
4849 
4850 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
4851 /// TypeOfExprType AST's (since expression's are never shared). For example,
4852 /// multiple declarations that refer to "typeof(x)" all contain different
4853 /// DeclRefExpr's. This doesn't affect the type checker, since it operates
4854 /// on canonical type's (which are always unique).
 // NOTE(review): original line 4855 (the signature, presumably
 // "QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {") was lost
 // in extraction -- confirm against the upstream file.
4856  TypeOfExprType *toe;
4857  if (tofExpr->isTypeDependent()) {
 // Dependent typeof(expr) types ARE uniqued, so that equivalent
 // expressions canonicalize to the same type.
4858  llvm::FoldingSetNodeID ID;
4859  DependentTypeOfExprType::Profile(ID, *this, tofExpr);
4860 
4861  void *InsertPos = nullptr;
 // NOTE(review): original line 4862 was lost in extraction; it presumably
 // declared "DependentTypeOfExprType *Canon" -- confirm upstream.
4863  = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
4864  if (Canon) {
4865  // We already have a "canonical" version of an identical, dependent
4866  // typeof(expr) type. Use that as our canonical type.
4867  toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr,
4868  QualType((TypeOfExprType*)Canon, 0));
4869  } else {
4870  // Build a new, canonical typeof(expr) type.
4871  Canon
4872  = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr);
4873  DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
4874  toe = Canon;
4875  }
4876  } else {
 // Non-dependent case: canonicalize to the expression's actual type.
4877  QualType Canonical = getCanonicalType(tofExpr->getType());
4878  toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical);
4879  }
4880  Types.push_back(toe);
4881  return QualType(toe, 0);
4882 }
4883 
4884 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
4885 /// TypeOfType nodes. The only motivation to unique these nodes would be
4886 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
4887 /// an issue. This doesn't affect the type checker, since it operates
4888 /// on canonical types (which are always unique).
 // NOTE(review): original line 4889 (the signature, presumably
 // "QualType ASTContext::getTypeOfType(QualType tofType) const {") was lost
 // in extraction -- confirm against the upstream file.
4890  QualType Canonical = getCanonicalType(tofType);
4891  auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical);
4892  Types.push_back(tot);
4893  return QualType(tot, 0);
4894 }
4895 
4896 /// Unlike many "get<Type>" functions, we don't unique DecltypeType
4897 /// nodes. This would never be helpful, since each such type has its own
4898 /// expression, and would not give a significant memory saving, since there
4899 /// is an Expr tree under each such type.
 // NOTE(review): original line 4900 (the signature, presumably
 // "QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType)
 // const {") was lost in extraction -- confirm against the upstream file.
4901  DecltypeType *dt;
4902 
4903  // C++11 [temp.type]p2:
4904  // If an expression e involves a template parameter, decltype(e) denotes a
4905  // unique dependent type. Two such decltype-specifiers refer to the same
4906  // type only if their expressions are equivalent (14.5.6.1).
4907  if (e->isInstantiationDependent()) {
4908  llvm::FoldingSetNodeID ID;
4909  DependentDecltypeType::Profile(ID, *this, e);
4910 
4911  void *InsertPos = nullptr;
4912  DependentDecltypeType *Canon
4913  = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
4914  if (!Canon) {
4915  // Build a new, canonical decltype(expr) type.
4916  Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e);
4917  DependentDecltypeTypes.InsertNode(Canon, InsertPos);
4918  }
 // The new (non-uniqued) node points at the uniqued dependent node as
 // its canonical type.
4919  dt = new (*this, TypeAlignment)
4920  DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0));
4921  } else {
4922  dt = new (*this, TypeAlignment)
4923  DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType));
4924  }
4925  Types.push_back(dt);
4926  return QualType(dt, 0);
4927 }
4928 
4929 /// getUnaryTransformationType - We don't unique these, since the memory
4930 /// savings are minimal and these are rare.
4932  QualType UnderlyingType,
4934  const {
4935  UnaryTransformType *ut = nullptr;
4936 
4937  if (BaseType->isDependentType()) {
4938  // Look in the folding set for an existing type.
4939  llvm::FoldingSetNodeID ID;
4941 
4942  void *InsertPos = nullptr;
4944  = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);
4945 
4946  if (!Canon) {
4947  // Build a new, canonical __underlying_type(type) type.
4948  Canon = new (*this, TypeAlignment)
4950  Kind);
4951  DependentUnaryTransformTypes.InsertNode(Canon, InsertPos);
4952  }
4953  ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
4954  QualType(), Kind,
4955  QualType(Canon, 0));
4956  } else {
4957  QualType CanonType = getCanonicalType(UnderlyingType);
4958  ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
4959  UnderlyingType, Kind,
4960  CanonType);
4961  }
4962  Types.push_back(ut);
4963  return QualType(ut, 0);
4964 }
4965 
4966 /// getAutoType - Return the uniqued reference to the 'auto' type which has been
4967 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the
4968 /// canonical deduced-but-dependent 'auto' type.
 // NOTE(review): original line 4969 (the signature, presumably
 // "QualType ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword
 // Keyword,") was lost in extraction -- confirm against the upstream file.
4970  bool IsDependent, bool IsPack) const {
4971  assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack");
 // Plain undeduced 'auto' has a single shared representation.
4972  if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && !IsDependent)
4973  return getAutoDeductType();
4974 
4975  // Look in the folding set for an existing type.
4976  void *InsertPos = nullptr;
4977  llvm::FoldingSetNodeID ID;
4978  AutoType::Profile(ID, DeducedType, Keyword, IsDependent, IsPack);
4979  if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
4980  return QualType(AT, 0);
4981 
4982  auto *AT = new (*this, TypeAlignment)
4983  AutoType(DeducedType, Keyword, IsDependent, IsPack);
4984  Types.push_back(AT);
 // Guarded insertion: same pattern as getDeducedTemplateSpecializationType
 // below -- only register the node when an insertion point was produced.
4985  if (InsertPos)
4986  AutoTypes.InsertNode(AT, InsertPos);
4987  return QualType(AT, 0);
4988 }
4989 
4990 /// Return the uniqued reference to the deduced template specialization type
4991 /// which has been deduced to the given type, or to the canonical undeduced
4992 /// such type, or the canonical deduced-but-dependent such type.
 // NOTE(review): original line 4993 (the signature, presumably
 // "QualType ASTContext::getDeducedTemplateSpecializationType(") was lost in
 // extraction -- confirm against the upstream file.
4994  TemplateName Template, QualType DeducedType, bool IsDependent) const {
4995  // Look in the folding set for an existing type.
4996  void *InsertPos = nullptr;
4997  llvm::FoldingSetNodeID ID;
4998  DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType,
4999  IsDependent);
 // NOTE(review): original line 5000 was lost in extraction; it presumably
 // read "if (DeducedTemplateSpecializationType *DTST =" -- confirm upstream.
5001  DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
5002  return QualType(DTST, 0);
5003 
5004  auto *DTST = new (*this, TypeAlignment)
5005  DeducedTemplateSpecializationType(Template, DeducedType, IsDependent);
5006  Types.push_back(DTST);
5007  if (InsertPos)
5008  DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos);
5009  return QualType(DTST, 0);
5010 }
5011 
5012 /// getAtomicType - Return the uniqued reference to the atomic type for
5013 /// the given value type.
 // NOTE(review): original line 5014 (the signature, presumably
 // "QualType ASTContext::getAtomicType(QualType T) const {") was lost in
 // extraction -- confirm against the upstream file.
5015  // Unique pointers, to guarantee there is only one pointer of a particular
5016  // structure.
5017  llvm::FoldingSetNodeID ID;
5018  AtomicType::Profile(ID, T);
5019 
5020  void *InsertPos = nullptr;
5021  if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
5022  return QualType(AT, 0);
5023 
5024  // If the atomic value type isn't canonical, this won't be a canonical type
5025  // either, so fill in the canonical type field.
5026  QualType Canonical;
5027  if (!T.isCanonical()) {
 // Recursive call may insert nodes and invalidate InsertPos; refresh it.
5028  Canonical = getAtomicType(getCanonicalType(T));
5029 
5030  // Get the new insert position for the node we care about.
5031  AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
5032  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
5033  }
5034  auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical);
5035  Types.push_back(New);
5036  AtomicTypes.InsertNode(New, InsertPos);
5037  return QualType(New, 0);
5038 }
5039 
5040 /// getAutoDeductType - Get type pattern for deducing against 'auto'.
5042  if (AutoDeductTy.isNull())
5045  /*dependent*/false, /*pack*/false),
5046  0);
5047  return AutoDeductTy;
5048 }
5049 
5050 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
5052  if (AutoRRefDeductTy.isNull())
5054  assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
5055  return AutoRRefDeductTy;
5056 }
5057 
5058 /// getTagDeclType - Return the unique reference to the type for the
5059 /// specified TagDecl (struct/union/class/enum) decl.
 // NOTE(review): original line 5060 (the signature, presumably
 // "QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {") was
 // lost in extraction -- confirm against the upstream file.
5061  assert(Decl);
5062  // FIXME: What is the design on getTagDeclType when it requires casting
5063  // away const? mutable?
5064  return getTypeDeclType(const_cast<TagDecl*>(Decl));
5065 }
5066 
5067 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
5068 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
5069 /// needs to agree with the definition in <stddef.h>.
5071  return getFromTargetType(Target->getSizeType());
5072 }
5073 
5074 /// Return the unique signed counterpart of the integer type
5075 /// corresponding to size_t.
5077  return getFromTargetType(Target->getSignedSizeType());
5078 }
5079 
5080 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
5082  return getFromTargetType(Target->getIntMaxType());
5083 }
5084 
5085 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
5087  return getFromTargetType(Target->getUIntMaxType());
5088 }
5089 
5090 /// getSignedWCharType - Return the type of "signed wchar_t".
5091 /// Used when in C++, as a GCC extension.
5093  // FIXME: derive from "Target" ?
5094  return WCharTy;
5095 }
5096 
5097 /// getUnsignedWCharType - Return the type of "unsigned wchar_t".
5098 /// Used when in C++, as a GCC extension.
5100  // FIXME: derive from "Target" ?
5101  return UnsignedIntTy;
5102 }
5103 
5105  return getFromTargetType(Target->getIntPtrType());
5106 }
5107 
5110 }
5111 
5112 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
5113 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
5115  return getFromTargetType(Target->getPtrDiffType(0));
5116 }
5117 
5118 /// Return the unique unsigned counterpart of "ptrdiff_t"
5119 /// integer type. The standard (C11 7.21.6.1p7) refers to this type
5120 /// in the definition of %tu format specifier.
5122  return getFromTargetType(Target->getUnsignedPtrDiffType(0));
5123 }
5124 
5125 /// Return the unique type for "pid_t" defined in
5126 /// <sys/types.h>. We need this to compute the correct type for vfork().
5128  return getFromTargetType(Target->getProcessIDType());
5129 }
5130 
5131 //===----------------------------------------------------------------------===//
5132 // Type Operators
5133 //===----------------------------------------------------------------------===//
5134 
5136  // Push qualifiers into arrays, and then discard any remaining
5137  // qualifiers.
5138  T = getCanonicalType(T);
5140  const Type *Ty = T.getTypePtr();
5141  QualType Result;
5142  if (isa<ArrayType>(Ty)) {
5143  Result = getArrayDecayedType(QualType(Ty,0));
5144  } else if (isa<FunctionType>(Ty)) {
5145  Result = getPointerType(QualType(Ty, 0));
5146  } else {
5147  Result = QualType(Ty, 0);
5148  }
5149 
5150  return CanQualType::CreateUnsafe(Result);
5151 }
5152 
 // NOTE(review): original line 5153 (the signature, presumably
 // "QualType ASTContext::getUnqualifiedArrayType(QualType type,") was lost in
 // extraction -- confirm against the upstream file. The function strips all
 // qualifiers from 'type' -- including qualifiers buried on array element
 // types -- returning the unqualified type and accumulating what was removed
 // into 'quals'.
5154  Qualifiers &quals) {
5155  SplitQualType splitType = type.getSplitUnqualifiedType();
5156 
5157  // FIXME: getSplitUnqualifiedType() actually walks all the way to
5158  // the unqualified desugared type and then drops it on the floor.
5159  // We then have to strip that sugar back off with
5160  // getUnqualifiedDesugaredType(), which is silly.
5161  const auto *AT =
5162  dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());
5163 
5164  // If we don't have an array, just use the results in splitType.
5165  if (!AT) {
5166  quals = splitType.Quals;
5167  return QualType(splitType.Ty, 0);
5168  }
5169 
5170  // Otherwise, recurse on the array's element type.
5171  QualType elementType = AT->getElementType();
5172  QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);
5173 
5174  // If that didn't change the element type, AT has no qualifiers, so we
5175  // can just use the results in splitType.
5176  if (elementType == unqualElementType) {
5177  assert(quals.empty()); // from the recursive call
5178  quals = splitType.Quals;
5179  return QualType(splitType.Ty, 0);
5180  }
5181 
5182  // Otherwise, add in the qualifiers from the outermost type, then
5183  // build the type back up.
5184  quals.addConsistentQualifiers(splitType.Quals);
5185 
 // Rebuild the same flavor of array type around the now-unqualified
 // element type; index-type qualifiers are dropped (passed as 0) for the
 // constant/incomplete/dependent cases.
5186  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
5187  return getConstantArrayType(unqualElementType, CAT->getSize(),
5188  CAT->getSizeModifier(), 0);
5189  }
5190 
5191  if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) {
5192  return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
5193  }
5194 
5195  if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) {
5196  return getVariableArrayType(unqualElementType,
5197  VAT->getSizeExpr(),
5198  VAT->getSizeModifier(),
5199  VAT->getIndexTypeCVRQualifiers(),
5200  VAT->getBracketsRange());
5201  }
5202 
5203  const auto *DSAT = cast<DependentSizedArrayType>(AT);
5204  return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
5205  DSAT->getSizeModifier(), 0,
5206  SourceRange());
5207 }
5208 
5209 /// Attempt to unwrap two types that may both be array types with the same bound
5210 /// (or both be array types of unknown bound) for the purpose of comparing the
5211 /// cv-decomposition of two types per C++ [conv.qual].
 // NOTE(review): original line 5212 (the signature, presumably
 // "bool ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) {")
 // was lost in extraction -- confirm against the upstream file.
5213  bool UnwrappedAny = false;
5214  while (true) {
5215  auto *AT1 = getAsArrayType(T1);
5216  if (!AT1) return UnwrappedAny;
5217 
5218  auto *AT2 = getAsArrayType(T2);
5219  if (!AT2) return UnwrappedAny;
5220 
5221  // If we don't have two array types with the same constant bound nor two
5222  // incomplete array types, we've unwrapped everything we can.
5223  if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) {
5224  auto *CAT2 = dyn_cast<ConstantArrayType>(AT2);
5225  if (!CAT2 || CAT1->getSize() != CAT2->getSize())
5226  return UnwrappedAny;
5227  } else if (!isa<IncompleteArrayType>(AT1) ||
5228  !isa<IncompleteArrayType>(AT2)) {
5229  return UnwrappedAny;
5230  }
5231 
 // Both sides are compatible arrays: step into the element types and
 // keep going (arrays of arrays unwrap level by level).
5232  T1 = AT1->getElementType();
5233  T2 = AT2->getElementType();
5234  UnwrappedAny = true;
5235  }
5236 }
5237 
5238 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
5239 ///
5240 /// If T1 and T2 are both pointer types of the same kind, or both array types
5241 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is
5242 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
5243 ///
5244 /// This function will typically be called in a loop that successively
5245 /// "unwraps" pointer and pointer-to-member types to compare them at each
5246 /// level.
5247 ///
5248 /// \return \c true if a pointer type was unwrapped, \c false if we reached a
5249 /// pair of types that can't be unwrapped further.
 // NOTE(review): original line 5250 (the signature, presumably
 // "bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2) {") was
 // lost in extraction -- confirm against the upstream file.
5251  UnwrapSimilarArrayTypes(T1, T2);
5252 
 // Ordinary pointers unwrap to their pointees.
5253  const auto *T1PtrType = T1->getAs<PointerType>();
5254  const auto *T2PtrType = T2->getAs<PointerType>();
5255  if (T1PtrType && T2PtrType) {
5256  T1 = T1PtrType->getPointeeType();
5257  T2 = T2PtrType->getPointeeType();
5258  return true;
5259  }
5260 
 // Pointers-to-member unwrap only when both point into the same class.
5261  const auto *T1MPType = T1->getAs<MemberPointerType>();
5262  const auto *T2MPType = T2->getAs<MemberPointerType>();
5263  if (T1MPType && T2MPType &&
5264  hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
5265  QualType(T2MPType->getClass(), 0))) {
5266  T1 = T1MPType->getPointeeType();
5267  T2 = T2MPType->getPointeeType();
5268  return true;
5269  }
5270 
 // Objective-C object pointers unwrap as well, but only in ObjC modes.
5271  if (getLangOpts().ObjC) {
5272  const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
5273  const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
5274  if (T1OPType && T2OPType) {
5275  T1 = T1OPType->getPointeeType();
5276  T2 = T2OPType->getPointeeType();
5277  return true;
5278  }
5279  }
5280 
5281  // FIXME: Block pointers, too?
5282 
5283  return false;
5284 }
5285 
 // NOTE(review): the signature line (original 5286, presumably
 // "bool ASTContext::hasSimilarType(QualType T1, QualType T2) {") was lost in
 // extraction -- confirm against the upstream file. Implements the C++
 // [conv.qual] "similar types" test: qualifiers at every level are stripped
 // and discarded, and the pointer/array structure must match.
5287  while (true) {
5288  Qualifiers Quals;
5289  T1 = getUnqualifiedArrayType(T1, Quals);
5290  T2 = getUnqualifiedArrayType(T2, Quals);
 // Quals is intentionally ignored: similarity disregards qualification.
5291  if (hasSameType(T1, T2))
5292  return true;
5293  if (!UnwrapSimilarTypes(T1, T2))
5294  return false;
5295  }
5296 }
5297 
 // NOTE(review): the signature line (original 5298, presumably
 // "bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {") was lost
 // in extraction -- confirm against the upstream file. Like hasSimilarType,
 // but only const/volatile/restrict may differ: all non-CVR qualifiers
 // (e.g. address space, ObjC ownership) must match at every level.
5299  while (true) {
5300  Qualifiers Quals1, Quals2;
5301  T1 = getUnqualifiedArrayType(T1, Quals1);
5302  T2 = getUnqualifiedArrayType(T2, Quals2);
5303 
 // Drop CVR bits, then require everything that remains to be identical.
5304  Quals1.removeCVRQualifiers();
5305  Quals2.removeCVRQualifiers();
5306  if (Quals1 != Quals2)
5307  return false;
5308 
5309  if (hasSameType(T1, T2))
5310  return true;
5311 
5312  if (!UnwrapSimilarTypes(T1, T2))
5313  return false;
5314  }
5315 }
5316 
5319  SourceLocation NameLoc) const {
5320  switch (Name.getKind()) {
5323  // DNInfo work in progress: CHECKME: what about DNLoc?
5325  NameLoc);
5326 
5329  // DNInfo work in progress: CHECKME: what about DNLoc?
5330  return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
5331  }
5332 
5335  return DeclarationNameInfo(Storage->getDeclName(), NameLoc);
5336  }
5337 
5340  DeclarationName DName;
5341  if (DTN->isIdentifier()) {
5343  return DeclarationNameInfo(DName, NameLoc);
5344  } else {
5346  // DNInfo work in progress: FIXME: source locations?
5347  DeclarationNameLoc DNLoc;
5350  return DeclarationNameInfo(DName, NameLoc, DNLoc);
5351  }
5352  }
5353 
5357  return DeclarationNameInfo(subst->getParameter()->getDeclName(),
5358  NameLoc);
5359  }
5360 
5365  NameLoc);
5366  }
5367  }
5368 
5369  llvm_unreachable("bad template name kind!");
5370 }
5371 
5373  switch (Name.getKind()) {
5375  case TemplateName::Template: {
5376  TemplateDecl *Template = Name.getAsTemplateDecl();
5377  if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template))
5378  Template = getCanonicalTemplateTemplateParmDecl(TTP);
5379 
5380  // The canonical template name is the canonical template declaration.
5381  return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
5382  }
5383 
5386  llvm_unreachable("cannot canonicalize unresolved template");
5387 
5390  assert(DTN && "Non-dependent template names must refer to template decls.");
5391  return DTN->CanonicalTemplateName;
5392  }
5393 
5397  return getCanonicalTemplateName(subst->getReplacement());
5398  }
5399 
5403  TemplateTemplateParmDecl *canonParameter
5404  = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack());
5405  TemplateArgument canonArgPack
5407  return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack);
5408  }
5409  }
5410 
5411  llvm_unreachable("bad template name!");
5412 }
5413 
 // NOTE(review): the signature line (original 5414, presumably
 // "bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) {")
 // was lost in extraction -- confirm against the upstream file. Two template
 // names are "the same" iff their canonical forms are pointer-identical.
5415  X = getCanonicalTemplateName(X);
5416  Y = getCanonicalTemplateName(Y);
5417  return X.getAsVoidPointer() == Y.getAsVoidPointer();
5418 }
5419 
5422  switch (Arg.getKind()) {
5424  return Arg;
5425 
5427  return Arg;
5428 
5430  auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
5431  return TemplateArgument(D, Arg.getParamTypeForDecl());
5432  }
5433 
5436  /*isNullPtr*/true);
5437 
5440 
5444  Arg.getNumTemplateExpansions());
5445 
5448 
5451 
5452  case TemplateArgument::Pack: {
5453  if (Arg.pack_size() == 0)
5454  return Arg;
5455 
5456  auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()];
5457  unsigned Idx = 0;
5459  AEnd = Arg.pack_end();
5460  A != AEnd; (void)++A, ++Idx)
5461  CanonArgs[Idx] = getCanonicalTemplateArgument(*A);
5462 
5463  return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size()));
5464  }
5465  }
5466 
5467  // Silence GCC warning
5468  llvm_unreachable("Unhandled template argument kind");
5469 }
5470 
5473  if (!NNS)
5474  return nullptr;
5475 
5476  switch (NNS->getKind()) {
5478  // Canonicalize the prefix but keep the identifier the same.
5479  return NestedNameSpecifier::Create(*this,
5481  NNS->getAsIdentifier());
5482 
5484  // A namespace is canonical; build a nested-name-specifier with
5485  // this namespace and no prefix.
5486  return NestedNameSpecifier::Create(*this, nullptr,
5488 
5490  // A namespace is canonical; build a nested-name-specifier with
5491  // this namespace and no prefix.
5492  return NestedNameSpecifier::Create(*this, nullptr,
5494  ->getOriginalNamespace());
5495 
5498  QualType T = getCanonicalType(QualType(NNS->getAsType(), 0));
5499 
5500  // If we have some kind of dependent-named type (e.g., "typename T::type"),
5501  // break it apart into its prefix and identifier, then reconsititute those
5502  // as the canonical nested-name-specifier. This is required to canonicalize
5503  // a dependent nested-name-specifier involving typedefs of dependent-name
5504  // types, e.g.,
5505  // typedef typename T::type T1;
5506  // typedef typename T1::type T2;
5507  if (const auto *DNT = T->getAs<DependentNameType>())
5508  return NestedNameSpecifier::Create(*this, DNT->getQualifier(),
5509  const_cast<IdentifierInfo *>(DNT->getIdentifier()));
5510 
5511  // Otherwise, just canonicalize the type, and force it to be a TypeSpec.
5512  // FIXME: Why are TypeSpec and TypeSpecWithTemplate distinct in the
5513  // first place?
5514  return NestedNameSpecifier::Create(*this, nullptr, false,
5515  const_cast<Type *>(T.getTypePtr()));
5516  }
5517 
5520  // The global specifier and __super specifer are canonical and unique.
5521  return NNS;
5522  }
5523 
5524  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
5525 }
5526 
5528  // Handle the non-qualified case efficiently.
5529  if (!T.hasLocalQualifiers()) {
5530  // Handle the common positive case fast.
5531  if (const auto *AT = dyn_cast<ArrayType>(T))
5532  return AT;
5533  }
5534 
5535  // Handle the common negative case fast.
5536  if (!isa<ArrayType>(T.getCan