ASTContext.cpp
1 //===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the ASTContext interface.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "clang/AST/ASTContext.h"
14 #include "CXXABI.h"
15 #include "Interp/Context.h"
16 #include "clang/AST/APValue.h"
19 #include "clang/AST/Attr.h"
20 #include "clang/AST/AttrIterator.h"
21 #include "clang/AST/CharUnits.h"
22 #include "clang/AST/Comment.h"
23 #include "clang/AST/Decl.h"
24 #include "clang/AST/DeclBase.h"
25 #include "clang/AST/DeclCXX.h"
27 #include "clang/AST/DeclObjC.h"
28 #include "clang/AST/DeclOpenMP.h"
29 #include "clang/AST/DeclTemplate.h"
31 #include "clang/AST/Expr.h"
32 #include "clang/AST/ExprCXX.h"
34 #include "clang/AST/Mangle.h"
38 #include "clang/AST/RecordLayout.h"
40 #include "clang/AST/Stmt.h"
41 #include "clang/AST/TemplateBase.h"
42 #include "clang/AST/TemplateName.h"
43 #include "clang/AST/Type.h"
44 #include "clang/AST/TypeLoc.h"
48 #include "clang/Basic/Builtins.h"
51 #include "clang/Basic/FixedPoint.h"
53 #include "clang/Basic/LLVM.h"
55 #include "clang/Basic/Linkage.h"
60 #include "clang/Basic/Specifiers.h"
62 #include "clang/Basic/TargetInfo.h"
63 #include "clang/Basic/XRayLists.h"
64 #include "llvm/ADT/APInt.h"
65 #include "llvm/ADT/APSInt.h"
66 #include "llvm/ADT/ArrayRef.h"
67 #include "llvm/ADT/DenseMap.h"
68 #include "llvm/ADT/DenseSet.h"
69 #include "llvm/ADT/FoldingSet.h"
70 #include "llvm/ADT/None.h"
71 #include "llvm/ADT/Optional.h"
72 #include "llvm/ADT/PointerUnion.h"
73 #include "llvm/ADT/STLExtras.h"
74 #include "llvm/ADT/SmallPtrSet.h"
75 #include "llvm/ADT/SmallVector.h"
76 #include "llvm/ADT/StringExtras.h"
77 #include "llvm/ADT/StringRef.h"
78 #include "llvm/ADT/Triple.h"
79 #include "llvm/Support/Capacity.h"
80 #include "llvm/Support/Casting.h"
81 #include "llvm/Support/Compiler.h"
82 #include "llvm/Support/ErrorHandling.h"
83 #include "llvm/Support/MathExtras.h"
84 #include "llvm/Support/raw_ostream.h"
85 #include <algorithm>
86 #include <cassert>
87 #include <cstddef>
88 #include <cstdint>
89 #include <cstdlib>
90 #include <map>
91 #include <memory>
92 #include <string>
93 #include <tuple>
94 #include <utility>
95 
96 using namespace clang;
97 
98 enum FloatingRank {
99   Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank
100 };
101 
102 /// \returns location that is relevant when searching for Doc comments related
103 /// to \p D.
104 static SourceLocation getDeclLocForCommentSearch(const Decl *D,
105                                                  SourceManager &SourceMgr) {
106  assert(D);
107 
108  // User can not attach documentation to implicit declarations.
109  if (D->isImplicit())
110  return {};
111 
112  // User can not attach documentation to implicit instantiations.
113  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
114  if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
115  return {};
116  }
117 
118  if (const auto *VD = dyn_cast<VarDecl>(D)) {
119  if (VD->isStaticDataMember() &&
120  VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
121  return {};
122  }
123 
124  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
125  if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
126  return {};
127  }
128 
129  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
130  TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
131  if (TSK == TSK_ImplicitInstantiation ||
132  TSK == TSK_Undeclared)
133  return {};
134  }
135 
136  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
137  if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
138  return {};
139  }
140  if (const auto *TD = dyn_cast<TagDecl>(D)) {
141  // When tag declaration (but not definition!) is part of the
142  // decl-specifier-seq of some other declaration, it doesn't get comment
143  if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
144  return {};
145  }
146  // TODO: handle comments for function parameters properly.
147  if (isa<ParmVarDecl>(D))
148  return {};
149 
150  // TODO: we could look up template parameter documentation in the template
151  // documentation.
152  if (isa<TemplateTypeParmDecl>(D) ||
153  isa<NonTypeTemplateParmDecl>(D) ||
154  isa<TemplateTemplateParmDecl>(D))
155  return {};
156 
157  // Find declaration location.
158  // For Objective-C declarations we generally don't expect to have multiple
159  // declarators, thus use declaration starting location as the "declaration
160  // location".
161  // For all other declarations multiple declarators are used quite frequently,
162  // so we use the location of the identifier as the "declaration location".
163  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
164  isa<ObjCPropertyDecl>(D) ||
165  isa<RedeclarableTemplateDecl>(D) ||
166  isa<ClassTemplateSpecializationDecl>(D))
167  return D->getBeginLoc();
168  else {
169  const SourceLocation DeclLoc = D->getLocation();
170  if (DeclLoc.isMacroID()) {
171  if (isa<TypedefDecl>(D)) {
172  // If location of the typedef name is in a macro, it is because being
173  // declared via a macro. Try using declaration's starting location as
174  // the "declaration location".
175  return D->getBeginLoc();
176  } else if (const auto *TD = dyn_cast<TagDecl>(D)) {
177  // If location of the tag decl is inside a macro, but the spelling of
178  // the tag name comes from a macro argument, it looks like a special
179  // macro like NS_ENUM is being used to define the tag decl. In that
180  // case, adjust the source location to the expansion loc so that we can
181  // attach the comment to the tag decl.
182  if (SourceMgr.isMacroArgExpansion(DeclLoc) &&
183  TD->isCompleteDefinition())
184  return SourceMgr.getExpansionLoc(DeclLoc);
185  }
186  }
187  return DeclLoc;
188  }
189 
190  return {};
191 }
192 
193 RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
194     const Decl *D, const SourceLocation RepresentativeLocForDecl,
195  const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
196  // If the declaration doesn't map directly to a location in a file, we
197  // can't find the comment.
198  if (RepresentativeLocForDecl.isInvalid() ||
199  !RepresentativeLocForDecl.isFileID())
200  return nullptr;
201 
202  // If there are no comments anywhere, we won't find anything.
203  if (CommentsInTheFile.empty())
204  return nullptr;
205 
206  // Decompose the location for the declaration and find the beginning of the
207  // file buffer.
208  const std::pair<FileID, unsigned> DeclLocDecomp =
209  SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);
210 
211  // Slow path.
212  auto OffsetCommentBehindDecl =
213  CommentsInTheFile.lower_bound(DeclLocDecomp.second);
214 
215  // First check whether we have a trailing comment.
216  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
217  RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
218  if ((CommentBehindDecl->isDocumentation() ||
219  LangOpts.CommentOpts.ParseAllComments) &&
220  CommentBehindDecl->isTrailingComment() &&
221  (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
222  isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {
223 
224  // Check that Doxygen trailing comment comes after the declaration, starts
225  // on the same line and in the same file as the declaration.
226  if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
227  Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
228  OffsetCommentBehindDecl->first)) {
229  return CommentBehindDecl;
230  }
231  }
232  }
233 
234  // The comment just after the declaration was not a trailing comment.
235  // Let's look at the previous comment.
236  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
237  return nullptr;
238 
239  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
240  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;
241 
242  // Check that we actually have a non-member Doxygen comment.
243  if (!(CommentBeforeDecl->isDocumentation() ||
244  LangOpts.CommentOpts.ParseAllComments) ||
245  CommentBeforeDecl->isTrailingComment())
246  return nullptr;
247 
248  // Decompose the end of the comment.
249  const unsigned CommentEndOffset =
250  Comments.getCommentEndOffset(CommentBeforeDecl);
251 
252  // Get the corresponding buffer.
253  bool Invalid = false;
254  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
255  &Invalid).data();
256  if (Invalid)
257  return nullptr;
258 
259  // Extract text between the comment and declaration.
260  StringRef Text(Buffer + CommentEndOffset,
261  DeclLocDecomp.second - CommentEndOffset);
262 
263  // There should be no other declarations or preprocessor directives between
264  // comment and declaration.
265  if (Text.find_first_of(";{}#@") != StringRef::npos)
266  return nullptr;
267 
268  return CommentBeforeDecl;
269 }
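// Editor's note (illustrative sketch, not part of the original source): the lookup
// above accepts either a trailing doc comment that starts on the same line as the
// declaration, or the closest preceding doc comment with nothing but whitespace
// between it and the declaration. For example:
//
//   /// Attached to 'x' as the *preceding* comment.
//   int x;
//
//   int y;  ///< Attached to 'y' as a *trailing* comment (same line).
//
//   /// Attached to 'z' but NOT to 'w': the ';' of another declaration intervenes.
//   int z; int w;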
270 
271 RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
272   const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);
273 
274  // If the declaration doesn't map directly to a location in a file, we
275  // can't find the comment.
276  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
277  return nullptr;
278 
279  if (ExternalSource && !CommentsLoaded) {
280  ExternalSource->ReadComments();
281  CommentsLoaded = true;
282  }
283 
284  if (Comments.empty())
285  return nullptr;
286 
287  const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
288  const auto CommentsInThisFile = Comments.getCommentsInFile(File);
289  if (!CommentsInThisFile || CommentsInThisFile->empty())
290  return nullptr;
291 
292  return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile);
293 }
294 
295 /// If we have a 'templated' declaration for a template, adjust 'D' to
296 /// refer to the actual template.
297 /// If we have an implicit instantiation, adjust 'D' to refer to template.
298 static const Decl &adjustDeclToTemplate(const Decl &D) {
299  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
300  // Is this function declaration part of a function template?
301  if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
302  return *FTD;
303 
304  // Nothing to do if function is not an implicit instantiation.
305  if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
306  return D;
307 
308  // Function is an implicit instantiation of a function template?
309  if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
310  return *FTD;
311 
312  // Function is instantiated from a member definition of a class template?
313   if (const FunctionDecl *MemberDecl =
314           FD->getInstantiatedFromMemberFunction())
315     return *MemberDecl;
316 
317  return D;
318  }
319  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
320  // Static data member is instantiated from a member definition of a class
321  // template?
322  if (VD->isStaticDataMember())
323  if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
324  return *MemberDecl;
325 
326  return D;
327  }
328  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
329  // Is this class declaration part of a class template?
330  if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
331  return *CTD;
332 
333  // Class is an implicit instantiation of a class template or partial
334  // specialization?
335  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
336  if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
337  return D;
338     llvm::PointerUnion<ClassTemplateDecl *,
339                        ClassTemplatePartialSpecializationDecl *>
340         PU = CTSD->getSpecializedTemplateOrPartial();
341     return PU.is<ClassTemplateDecl *>()
342                ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
343                : *static_cast<const Decl *>(
344                      PU.get<ClassTemplatePartialSpecializationDecl *>());
345   }
346 
347  // Class is instantiated from a member definition of a class template?
348  if (const MemberSpecializationInfo *Info =
349  CRD->getMemberSpecializationInfo())
350  return *Info->getInstantiatedFrom();
351 
352  return D;
353  }
354  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
355  // Enum is instantiated from a member definition of a class template?
356  if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
357  return *MemberDecl;
358 
359  return D;
360  }
361  // FIXME: Adjust alias templates?
362  return D;
363 }
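// Editor's note (illustrative sketch, not part of the original source): e.g. given
//
//   /// Doc comment written on the template.
//   template <class T> void f(T);
//   void g() { f(42); }   // creates the implicit instantiation f<int>
//
// adjustDeclToTemplate() maps the FunctionDecl of f<int> back to the
// FunctionTemplateDecl of f, so a comment written on the template is also found
// when the instantiation is queried.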
364 
365 const RawComment *ASTContext::getRawCommentForAnyRedecl(
366     const Decl *D,
367  const Decl **OriginalDecl) const {
368  if (!D) {
369  if (OriginalDecl)
370  OriginalDecl = nullptr;
371  return nullptr;
372  }
373 
374  D = &adjustDeclToTemplate(*D);
375 
376  // Any comment directly attached to D?
377  {
378  auto DeclComment = DeclRawComments.find(D);
379  if (DeclComment != DeclRawComments.end()) {
380  if (OriginalDecl)
381  *OriginalDecl = D;
382  return DeclComment->second;
383  }
384  }
385 
386  // Any comment attached to any redeclaration of D?
387  const Decl *CanonicalD = D->getCanonicalDecl();
388  if (!CanonicalD)
389  return nullptr;
390 
391  {
392  auto RedeclComment = RedeclChainComments.find(CanonicalD);
393  if (RedeclComment != RedeclChainComments.end()) {
394  if (OriginalDecl)
395  *OriginalDecl = RedeclComment->second;
396  auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
397  assert(CommentAtRedecl != DeclRawComments.end() &&
398  "This decl is supposed to have comment attached.");
399  return CommentAtRedecl->second;
400  }
401  }
402 
403  // Any redeclarations of D that we haven't checked for comments yet?
404  // We can't use DenseMap::iterator directly since it'd get invalid.
405  auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
406  auto LookupRes = CommentlessRedeclChains.find(CanonicalD);
407  if (LookupRes != CommentlessRedeclChains.end())
408  return LookupRes->second;
409  return nullptr;
410  }();
411 
412  for (const auto Redecl : D->redecls()) {
413  assert(Redecl);
414  // Skip all redeclarations that have been checked previously.
415  if (LastCheckedRedecl) {
416  if (LastCheckedRedecl == Redecl) {
417  LastCheckedRedecl = nullptr;
418  }
419  continue;
420  }
421  const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
422  if (RedeclComment) {
423  cacheRawCommentForDecl(*Redecl, *RedeclComment);
424  if (OriginalDecl)
425  *OriginalDecl = Redecl;
426  return RedeclComment;
427  }
428  CommentlessRedeclChains[CanonicalD] = Redecl;
429  }
430 
431  if (OriginalDecl)
432  *OriginalDecl = nullptr;
433  return nullptr;
434 }
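// Editor's note (illustrative sketch, not part of the original source): typical use
// of the redeclaration-aware lookup above, assuming an ASTContext &Ctx and a
// const Decl *D obtained elsewhere:
//
//   const Decl *CommentedRedecl = nullptr;
//   if (const RawComment *RC = Ctx.getRawCommentForAnyRedecl(D, &CommentedRedecl))
//     llvm::errs() << RC->getRawText(Ctx.getSourceManager()) << "\n";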
435 
436 void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
437                                         const RawComment &Comment) const {
438  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
439  DeclRawComments.try_emplace(&OriginalD, &Comment);
440  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
441  RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
442  CommentlessRedeclChains.erase(CanonicalDecl);
443 }
444 
445 static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
446                   SmallVectorImpl<const NamedDecl *> &Redeclared) {
447   const DeclContext *DC = ObjCMethod->getDeclContext();
448  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
449  const ObjCInterfaceDecl *ID = IMD->getClassInterface();
450  if (!ID)
451  return;
452  // Add redeclared method here.
453  for (const auto *Ext : ID->known_extensions()) {
454  if (ObjCMethodDecl *RedeclaredMethod =
455  Ext->getMethod(ObjCMethod->getSelector(),
456  ObjCMethod->isInstanceMethod()))
457  Redeclared.push_back(RedeclaredMethod);
458  }
459  }
460 }
461 
462 void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<const Decl *> Decls,
463                                                  const Preprocessor *PP) {
464  if (Comments.empty() || Decls.empty())
465  return;
466 
467  // See if there are any new comments that are not attached to a decl.
468  // The location doesn't have to be precise - we care only about the file.
469  const FileID File =
470  SourceMgr.getDecomposedLoc((*Decls.begin())->getLocation()).first;
471  auto CommentsInThisFile = Comments.getCommentsInFile(File);
472  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
473  CommentsInThisFile->rbegin()->second->isAttached())
474  return;
475 
476  // There is at least one comment not attached to a decl.
477  // Maybe it should be attached to one of Decls?
478  //
479  // Note that this way we pick up not only comments that precede the
480  // declaration, but also comments that *follow* the declaration -- thanks to
481  // the lookahead in the lexer: we've consumed the semicolon and looked
482  // ahead through comments.
483 
484  for (const Decl *D : Decls) {
485  assert(D);
486  if (D->isInvalidDecl())
487  continue;
488 
489  D = &adjustDeclToTemplate(*D);
490 
491  const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);
492 
493  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
494  continue;
495 
496  if (DeclRawComments.count(D) > 0)
497  continue;
498 
499  if (RawComment *const DocComment =
500  getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) {
501  cacheRawCommentForDecl(*D, *DocComment);
502  comments::FullComment *FC = DocComment->parse(*this, PP, D);
503  ParsedComments[D->getCanonicalDecl()] = FC;
504  }
505  }
506 }
507 
508 comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
509                                                     const Decl *D) const {
510  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
511  ThisDeclInfo->CommentDecl = D;
512  ThisDeclInfo->IsFilled = false;
513  ThisDeclInfo->fill();
514  ThisDeclInfo->CommentDecl = FC->getDecl();
515  if (!ThisDeclInfo->TemplateParameters)
516  ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
517  comments::FullComment *CFC =
518  new (*this) comments::FullComment(FC->getBlocks(),
519  ThisDeclInfo);
520  return CFC;
521 }
522 
523 comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
524   const RawComment *RC = getRawCommentForDeclNoCache(D);
525   return RC ? RC->parse(*this, nullptr, D) : nullptr;
526 }
527 
528 comments::FullComment *ASTContext::getCommentForDecl(
529                                               const Decl *D,
530  const Preprocessor *PP) const {
531  if (!D || D->isInvalidDecl())
532  return nullptr;
533  D = &adjustDeclToTemplate(*D);
534 
535  const Decl *Canonical = D->getCanonicalDecl();
536  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
537  ParsedComments.find(Canonical);
538 
539  if (Pos != ParsedComments.end()) {
540  if (Canonical != D) {
541  comments::FullComment *FC = Pos->second;
542       comments::FullComment *CFC = cloneFullComment(FC, D);
543       return CFC;
544  }
545  return Pos->second;
546  }
547 
548  const Decl *OriginalDecl = nullptr;
549 
550  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
551  if (!RC) {
552  if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
553       SmallVector<const NamedDecl*, 8> Overridden;
554       const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
555  if (OMD && OMD->isPropertyAccessor())
556  if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
557  if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
558  return cloneFullComment(FC, D);
559  if (OMD)
560  addRedeclaredMethods(OMD, Overridden);
561  getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
562  for (unsigned i = 0, e = Overridden.size(); i < e; i++)
563  if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
564  return cloneFullComment(FC, D);
565  }
566  else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
567  // Attach any tag type's documentation to its typedef if latter
568  // does not have one of its own.
569  QualType QT = TD->getUnderlyingType();
570  if (const auto *TT = QT->getAs<TagType>())
571  if (const Decl *TD = TT->getDecl())
572  if (comments::FullComment *FC = getCommentForDecl(TD, PP))
573  return cloneFullComment(FC, D);
574  }
575  else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
576  while (IC->getSuperClass()) {
577  IC = IC->getSuperClass();
578  if (comments::FullComment *FC = getCommentForDecl(IC, PP))
579  return cloneFullComment(FC, D);
580  }
581  }
582  else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
583  if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
584  if (comments::FullComment *FC = getCommentForDecl(IC, PP))
585  return cloneFullComment(FC, D);
586  }
587  else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
588  if (!(RD = RD->getDefinition()))
589  return nullptr;
590  // Check non-virtual bases.
591  for (const auto &I : RD->bases()) {
592  if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
593  continue;
594  QualType Ty = I.getType();
595  if (Ty.isNull())
596  continue;
597  if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
598  if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
599  continue;
600 
601  if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
602  return cloneFullComment(FC, D);
603  }
604  }
605  // Check virtual bases.
606  for (const auto &I : RD->vbases()) {
607  if (I.getAccessSpecifier() != AS_public)
608  continue;
609  QualType Ty = I.getType();
610  if (Ty.isNull())
611  continue;
612  if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
613  if (!(VirtualBase= VirtualBase->getDefinition()))
614  continue;
615  if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
616  return cloneFullComment(FC, D);
617  }
618  }
619  }
620  return nullptr;
621  }
622 
623  // If the RawComment was attached to other redeclaration of this Decl, we
624  // should parse the comment in context of that other Decl. This is important
625  // because comments can contain references to parameter names which can be
626  // different across redeclarations.
627  if (D != OriginalDecl && OriginalDecl)
628  return getCommentForDecl(OriginalDecl, PP);
629 
630  comments::FullComment *FC = RC->parse(*this, PP, D);
631  ParsedComments[Canonical] = FC;
632  return FC;
633 }
634 
635 void
636 ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
637  TemplateTemplateParmDecl *Parm) {
638  ID.AddInteger(Parm->getDepth());
639  ID.AddInteger(Parm->getPosition());
640  ID.AddBoolean(Parm->isParameterPack());
641 
642   TemplateParameterList *Params = Parm->getTemplateParameters();
643   ID.AddInteger(Params->size());
644   for (TemplateParameterList::const_iterator P = Params->begin(),
645                                               PEnd = Params->end();
646        P != PEnd; ++P) {
647  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
648  ID.AddInteger(0);
649  ID.AddBoolean(TTP->isParameterPack());
650  continue;
651  }
652 
653  if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
654  ID.AddInteger(1);
655  ID.AddBoolean(NTTP->isParameterPack());
656  ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
657  if (NTTP->isExpandedParameterPack()) {
658  ID.AddBoolean(true);
659  ID.AddInteger(NTTP->getNumExpansionTypes());
660  for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
661  QualType T = NTTP->getExpansionType(I);
662  ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
663  }
664  } else
665  ID.AddBoolean(false);
666  continue;
667  }
668 
669  auto *TTP = cast<TemplateTemplateParmDecl>(*P);
670  ID.AddInteger(2);
671  Profile(ID, TTP);
672  }
673 }
674 
675 TemplateTemplateParmDecl *
676 ASTContext::getCanonicalTemplateTemplateParmDecl(
677  TemplateTemplateParmDecl *TTP) const {
678  // Check if we already have a canonical template template parameter.
679  llvm::FoldingSetNodeID ID;
680  CanonicalTemplateTemplateParm::Profile(ID, TTP);
681  void *InsertPos = nullptr;
682  CanonicalTemplateTemplateParm *Canonical
683  = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
684  if (Canonical)
685  return Canonical->getParam();
686 
687  // Build a canonical template parameter list.
688   TemplateParameterList *Params = TTP->getTemplateParameters();
689   SmallVector<NamedDecl *, 4> CanonParams;
690   CanonParams.reserve(Params->size());
691   for (TemplateParameterList::const_iterator P = Params->begin(),
692                                               PEnd = Params->end();
693        P != PEnd; ++P) {
694  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
695  CanonParams.push_back(
696                   TemplateTypeParmDecl::Create(*this, getTranslationUnitDecl(),
697                                                SourceLocation(),
698  SourceLocation(),
699  TTP->getDepth(),
700  TTP->getIndex(), nullptr, false,
701  TTP->isParameterPack()));
702  else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
703  QualType T = getCanonicalType(NTTP->getType());
704       TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
705       NonTypeTemplateParmDecl *Param;
706       if (NTTP->isExpandedParameterPack()) {
707  SmallVector<QualType, 2> ExpandedTypes;
708  SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
709  for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
710  ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
711  ExpandedTInfos.push_back(
712  getTrivialTypeSourceInfo(ExpandedTypes.back()));
713  }
714 
715         Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
716                                                 SourceLocation(),
717  SourceLocation(),
718  NTTP->getDepth(),
719  NTTP->getPosition(), nullptr,
720  T,
721  TInfo,
722  ExpandedTypes,
723  ExpandedTInfos);
724  } else {
725         Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
726                                                 SourceLocation(),
727  SourceLocation(),
728  NTTP->getDepth(),
729  NTTP->getPosition(), nullptr,
730  T,
731  NTTP->isParameterPack(),
732  TInfo);
733  }
734  CanonParams.push_back(Param);
735 
736  } else
737  CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
738  cast<TemplateTemplateParmDecl>(*P)));
739  }
740 
741  assert(!TTP->getTemplateParameters()->getRequiresClause() &&
742  "Unexpected requires-clause on template template-parameter");
743  Expr *const CanonRequiresClause = nullptr;
744 
745  TemplateTemplateParmDecl *CanonTTP
746     = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
747                                        SourceLocation(), TTP->getDepth(),
748  TTP->getPosition(),
749  TTP->isParameterPack(),
750  nullptr,
751                          TemplateParameterList::Create(*this, SourceLocation(),
752                                                        SourceLocation(),
753  CanonParams,
754  SourceLocation(),
755  CanonRequiresClause));
756 
757  // Get the new insert position for the node we care about.
758  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
759  assert(!Canonical && "Shouldn't be in the map!");
760  (void)Canonical;
761 
762  // Create the canonical template template parameter entry.
763  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
764  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
765  return CanonTTP;
766 }
767 
768 CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
769  if (!LangOpts.CPlusPlus) return nullptr;
770 
771  switch (T.getCXXABI().getKind()) {
772  case TargetCXXABI::GenericARM: // Same as Itanium at this level
773  case TargetCXXABI::iOS:
774   case TargetCXXABI::iOS64:
775   case TargetCXXABI::WatchOS:
776   case TargetCXXABI::GenericAArch64:
777   case TargetCXXABI::GenericMIPS:
778   case TargetCXXABI::GenericItanium:
779   case TargetCXXABI::WebAssembly:
780     return CreateItaniumCXXABI(*this);
781   case TargetCXXABI::Microsoft:
782     return CreateMicrosoftCXXABI(*this);
783  }
784  llvm_unreachable("Invalid CXXABI type!");
785 }
786 
787 interp::Context &ASTContext::getInterpContext() {
788   if (!InterpContext) {
789  InterpContext.reset(new interp::Context(*this));
790  }
791  return *InterpContext.get();
792 }
793 
794 static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
795  const LangOptions &LOpts) {
796  if (LOpts.FakeAddressSpaceMap) {
797  // The fake address space map must have a distinct entry for each
798  // language-specific address space.
799  static const unsigned FakeAddrSpaceMap[] = {
800  0, // Default
801  1, // opencl_global
802  3, // opencl_local
803  2, // opencl_constant
804  0, // opencl_private
805  4, // opencl_generic
806  5, // cuda_device
807  6, // cuda_constant
808  7 // cuda_shared
809  };
810  return &FakeAddrSpaceMap;
811  } else {
812  return &T.getAddressSpaceMap();
813  }
814 }
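// Editor's note (illustrative sketch, not part of the original source): the fake map
// above is indexed by the language address space in the order listed, so under
// -ffake-address-space-map a pointer qualified with opencl_local ends up in target
// address space 3, opencl_constant in 2, and so on; without the flag the target's
// own address space mapping is returned unchanged.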
815 
816 static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
817                                           const LangOptions &LangOpts) {
818   switch (LangOpts.getAddressSpaceMapMangling()) {
819   case LangOptions::ASMM_Target:
820     return TI.useAddressSpaceMapMangling();
821   case LangOptions::ASMM_On:
822     return true;
823   case LangOptions::ASMM_Off:
824     return false;
825  }
826  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
827 }
828 
829 ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
830                        IdentifierTable &idents, SelectorTable &sels,
831  Builtin::Context &builtins)
832  : ConstantArrayTypes(this_()), FunctionProtoTypes(this_()),
833  TemplateSpecializationTypes(this_()),
834  DependentTemplateSpecializationTypes(this_()),
835  SubstTemplateTemplateParmPacks(this_()), SourceMgr(SM), LangOpts(LOpts),
836  SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFiles, SM)),
837  XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
838  LangOpts.XRayNeverInstrumentFiles,
839  LangOpts.XRayAttrListFiles, SM)),
840  PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
841  BuiltinInfo(builtins), DeclarationNames(*this), Comments(SM),
842  CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
843  CompCategories(this_()), LastSDM(nullptr, 0) {
844  TUDecl = TranslationUnitDecl::Create(*this);
845  TraversalScope = {TUDecl};
846 }
847 
848 ASTContext::~ASTContext() {
849   // Release the DenseMaps associated with DeclContext objects.
850  // FIXME: Is this the ideal solution?
851  ReleaseDeclContextMaps();
852 
853  // Call all of the deallocation functions on all of their targets.
854  for (auto &Pair : Deallocations)
855  (Pair.first)(Pair.second);
856 
857  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
858  // because they can contain DenseMaps.
859  for (llvm::DenseMap<const ObjCContainerDecl*,
860  const ASTRecordLayout*>::iterator
861  I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
862  // Increment in loop to prevent using deallocated memory.
863  if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
864  R->Destroy(*this);
865 
866  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
867  I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
868  // Increment in loop to prevent using deallocated memory.
869  if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
870  R->Destroy(*this);
871  }
872 
873  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
874  AEnd = DeclAttrs.end();
875  A != AEnd; ++A)
876  A->second->~AttrVec();
877 
878  for (std::pair<const MaterializeTemporaryExpr *, APValue *> &MTVPair :
879  MaterializedTemporaryValues)
880  MTVPair.second->~APValue();
881 
882  for (const auto &Value : ModuleInitializers)
883  Value.second->~PerModuleInitializers();
884 
885  for (APValue *Value : APValueCleanups)
886  Value->~APValue();
887 }
888 
889 class ASTContext::ParentMap {
890   /// Contains parents of a node.
891   using ParentVector = llvm::SmallVector<ast_type_traits::DynTypedNode, 2>;
892 
893  /// Maps from a node to its parents. This is used for nodes that have
894  /// pointer identity only, which are more common and we can save space by
895  /// only storing a unique pointer to them.
896  using ParentMapPointers = llvm::DenseMap<
897  const void *,
898  llvm::PointerUnion4<const Decl *, const Stmt *,
899                           ast_type_traits::DynTypedNode *, ParentVector *>>;
900 
901  /// Parent map for nodes without pointer identity. We store a full
902  /// DynTypedNode for all keys.
903  using ParentMapOtherNodes = llvm::DenseMap<
904       ast_type_traits::DynTypedNode,
905       llvm::PointerUnion4<const Decl *, const Stmt *,
906  ast_type_traits::DynTypedNode *, ParentVector *>>;
907 
908  ParentMapPointers PointerParents;
909  ParentMapOtherNodes OtherParents;
910  class ASTVisitor;
911 
912  static ast_type_traits::DynTypedNode
913  getSingleDynTypedNodeFromParentMap(ParentMapPointers::mapped_type U) {
914  if (const auto *D = U.dyn_cast<const Decl *>())
915       return ast_type_traits::DynTypedNode::create(*D);
916     if (const auto *S = U.dyn_cast<const Stmt *>())
917       return ast_type_traits::DynTypedNode::create(*S);
918     return *U.get<ast_type_traits::DynTypedNode *>();
919  }
920 
921  template <typename NodeTy, typename MapTy>
922  static ASTContext::DynTypedNodeList getDynNodeFromMap(const NodeTy &Node,
923  const MapTy &Map) {
924  auto I = Map.find(Node);
925  if (I == Map.end()) {
926       return llvm::ArrayRef<ast_type_traits::DynTypedNode>();
927     }
928  if (const auto *V = I->second.template dyn_cast<ParentVector *>()) {
929  return llvm::makeArrayRef(*V);
930  }
931  return getSingleDynTypedNodeFromParentMap(I->second);
932  }
933 
934 public:
935  ParentMap(ASTContext &Ctx);
936   ~ParentMap() {
937     for (const auto &Entry : PointerParents) {
938  if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
939  delete Entry.second.get<ast_type_traits::DynTypedNode *>();
940  } else if (Entry.second.is<ParentVector *>()) {
941  delete Entry.second.get<ParentVector *>();
942  }
943  }
944  for (const auto &Entry : OtherParents) {
945  if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
946  delete Entry.second.get<ast_type_traits::DynTypedNode *>();
947  } else if (Entry.second.is<ParentVector *>()) {
948  delete Entry.second.get<ParentVector *>();
949  }
950  }
951  }
952 
953  DynTypedNodeList getParents(const ast_type_traits::DynTypedNode &Node) {
954  if (Node.getNodeKind().hasPointerIdentity())
955  return getDynNodeFromMap(Node.getMemoizationData(), PointerParents);
956  return getDynNodeFromMap(Node, OtherParents);
957  }
958 };
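// Editor's note (illustrative sketch, not part of the original source): this map
// backs ASTContext::getParents(); a typical upward walk from a statement looks like
// the following, assuming an ASTContext &Ctx and a const Stmt *S:
//
//   for (auto Parents = Ctx.getParents(*S); !Parents.empty();
//        Parents = Ctx.getParents(Parents[0])) {
//     if (const auto *FD = Parents[0].get<FunctionDecl>())
//       llvm::errs() << "enclosing function: " << FD->getName() << "\n";
//   }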
959 
960 void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
961  TraversalScope = TopLevelDecls;
962  Parents.reset();
963 }
964 
965 void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
966  Deallocations.push_back({Callback, Data});
967 }
968 
969 void
970 ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
971   ExternalSource = std::move(Source);
972 }
973 
974 void ASTContext::PrintStats() const {
975   llvm::errs() << "\n*** AST Context Stats:\n";
976  llvm::errs() << " " << Types.size() << " types total.\n";
977 
978  unsigned counts[] = {
979 #define TYPE(Name, Parent) 0,
980 #define ABSTRACT_TYPE(Name, Parent)
981 #include "clang/AST/TypeNodes.inc"
982  0 // Extra
983  };
984 
985  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
986  Type *T = Types[i];
987  counts[(unsigned)T->getTypeClass()]++;
988  }
989 
990  unsigned Idx = 0;
991  unsigned TotalBytes = 0;
992 #define TYPE(Name, Parent) \
993  if (counts[Idx]) \
994  llvm::errs() << " " << counts[Idx] << " " << #Name \
995  << " types, " << sizeof(Name##Type) << " each " \
996  << "(" << counts[Idx] * sizeof(Name##Type) \
997  << " bytes)\n"; \
998  TotalBytes += counts[Idx] * sizeof(Name##Type); \
999  ++Idx;
1000 #define ABSTRACT_TYPE(Name, Parent)
1001 #include "clang/AST/TypeNodes.inc"
1002 
1003  llvm::errs() << "Total bytes = " << TotalBytes << "\n";
1004 
1005  // Implicit special member functions.
1006   llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
1007                << NumImplicitDefaultConstructors
1008                << " implicit default constructors created\n";
1009   llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
1010                << NumImplicitCopyConstructors
1011                << " implicit copy constructors created\n";
1012   if (getLangOpts().CPlusPlus)
1013     llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
1014                  << NumImplicitMoveConstructors
1015                  << " implicit move constructors created\n";
1016   llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
1017                << NumImplicitCopyAssignmentOperators
1018                << " implicit copy assignment operators created\n";
1019   if (getLangOpts().CPlusPlus)
1020     llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
1021                  << NumImplicitMoveAssignmentOperators
1022                  << " implicit move assignment operators created\n";
1023   llvm::errs() << NumImplicitDestructorsDeclared << "/"
1024                << NumImplicitDestructors
1025                << " implicit destructors created\n";
1026 
1027  if (ExternalSource) {
1028  llvm::errs() << "\n";
1029  ExternalSource->PrintStats();
1030  }
1031 
1032  BumpAlloc.PrintStats();
1033 }
1034 
1035 void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
1036                                            bool NotifyListeners) {
1037  if (NotifyListeners)
1038  if (auto *Listener = getASTMutationListener())
1039       Listener->RedefinedHiddenDefinition(ND, M);
1040 
1041  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
1042 }
1043 
1044 void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
1045   auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
1046  if (It == MergedDefModules.end())
1047  return;
1048 
1049  auto &Merged = It->second;
1050   llvm::SmallPtrSet<Module*, 4> Found;
1051   for (Module *&M : Merged)
1052  if (!Found.insert(M).second)
1053  M = nullptr;
1054  Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end());
1055 }
1056 
1057 void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
1058  if (LazyInitializers.empty())
1059  return;
1060 
1061  auto *Source = Ctx.getExternalSource();
1062  assert(Source && "lazy initializers but no external source");
1063 
1064  auto LazyInits = std::move(LazyInitializers);
1065  LazyInitializers.clear();
1066 
1067  for (auto ID : LazyInits)
1068  Initializers.push_back(Source->GetExternalDecl(ID));
1069 
1070  assert(LazyInitializers.empty() &&
1071  "GetExternalDecl for lazy module initializer added more inits");
1072 }
1073 
1074 void ASTContext::addModuleInitializer(Module *M, Decl *D) {
1075   // One special case: if we add a module initializer that imports another
1076  // module, and that module's only initializer is an ImportDecl, simplify.
1077  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
1078  auto It = ModuleInitializers.find(ID->getImportedModule());
1079 
1080  // Maybe the ImportDecl does nothing at all. (Common case.)
1081  if (It == ModuleInitializers.end())
1082  return;
1083 
1084  // Maybe the ImportDecl only imports another ImportDecl.
1085  auto &Imported = *It->second;
1086  if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
1087  Imported.resolve(*this);
1088  auto *OnlyDecl = Imported.Initializers.front();
1089  if (isa<ImportDecl>(OnlyDecl))
1090  D = OnlyDecl;
1091  }
1092  }
1093 
1094  auto *&Inits = ModuleInitializers[M];
1095  if (!Inits)
1096  Inits = new (*this) PerModuleInitializers;
1097  Inits->Initializers.push_back(D);
1098 }
1099 
1100 void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
1101   auto *&Inits = ModuleInitializers[M];
1102  if (!Inits)
1103  Inits = new (*this) PerModuleInitializers;
1104  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
1105  IDs.begin(), IDs.end());
1106 }
1107 
1108 ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
1109   auto It = ModuleInitializers.find(M);
1110  if (It == ModuleInitializers.end())
1111  return None;
1112 
1113  auto *Inits = It->second;
1114  Inits->resolve(*this);
1115  return Inits->Initializers;
1116 }
1117 
1118 ExternCContextDecl *ASTContext::getExternCContextDecl() const {
1119   if (!ExternCContext)
1120  ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());
1121 
1122  return ExternCContext;
1123 }
1124 
1125 BuiltinTemplateDecl *
1126 ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
1127                                      const IdentifierInfo *II) const {
1128  auto *BuiltinTemplate = BuiltinTemplateDecl::Create(*this, TUDecl, II, BTK);
1129  BuiltinTemplate->setImplicit();
1130  TUDecl->addDecl(BuiltinTemplate);
1131 
1132  return BuiltinTemplate;
1133 }
1134 
1135 BuiltinTemplateDecl *
1136 ASTContext::getMakeIntegerSeqDecl() const {
1137   if (!MakeIntegerSeqDecl)
1138     MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
1139                                                   getMakeIntegerSeqName());
1140  return MakeIntegerSeqDecl;
1141 }
1142 
1143 BuiltinTemplateDecl *
1144 ASTContext::getTypePackElementDecl() const {
1145   if (!TypePackElementDecl)
1146     TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
1147                                                    getTypePackElementName());
1148  return TypePackElementDecl;
1149 }
1150 
1151 RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
1152                                             RecordDecl::TagKind TK) const {
1153  SourceLocation Loc;
1154  RecordDecl *NewDecl;
1155  if (getLangOpts().CPlusPlus)
1156  NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
1157  Loc, &Idents.get(Name));
1158  else
1159  NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
1160  &Idents.get(Name));
1161  NewDecl->setImplicit();
1162  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
1163  const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
1164  return NewDecl;
1165 }
1166 
1167 TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
1168                                               StringRef Name) const {
1169   TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
1170   TypedefDecl *NewDecl = TypedefDecl::Create(
1171  const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
1172  SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
1173  NewDecl->setImplicit();
1174  return NewDecl;
1175 }
1176 
1177 TypedefDecl *ASTContext::getInt128Decl() const {
1178   if (!Int128Decl)
1179  Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
1180  return Int128Decl;
1181 }
1182 
1183 TypedefDecl *ASTContext::getUInt128Decl() const {
1184   if (!UInt128Decl)
1185  UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
1186  return UInt128Decl;
1187 }
1188 
1189 void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
1190  auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
1191  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
1192  Types.push_back(Ty);
1193 }
1194 
1195 void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
1196                                   const TargetInfo *AuxTarget) {
1197  assert((!this->Target || this->Target == &Target) &&
1198  "Incorrect target reinitialization");
1199  assert(VoidTy.isNull() && "Context reinitialized?");
1200 
1201  this->Target = &Target;
1202  this->AuxTarget = AuxTarget;
1203 
1204  ABI.reset(createCXXABI(Target));
1205  AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
1206  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);
1207 
1208  // C99 6.2.5p19.
1209  InitBuiltinType(VoidTy, BuiltinType::Void);
1210 
1211  // C99 6.2.5p2.
1212  InitBuiltinType(BoolTy, BuiltinType::Bool);
1213  // C99 6.2.5p3.
1214  if (LangOpts.CharIsSigned)
1215  InitBuiltinType(CharTy, BuiltinType::Char_S);
1216  else
1217  InitBuiltinType(CharTy, BuiltinType::Char_U);
1218  // C99 6.2.5p4.
1219  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
1220  InitBuiltinType(ShortTy, BuiltinType::Short);
1221  InitBuiltinType(IntTy, BuiltinType::Int);
1222  InitBuiltinType(LongTy, BuiltinType::Long);
1223  InitBuiltinType(LongLongTy, BuiltinType::LongLong);
1224 
1225  // C99 6.2.5p6.
1226  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
1227  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
1228  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
1229  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
1230  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);
1231 
1232  // C99 6.2.5p10.
1233  InitBuiltinType(FloatTy, BuiltinType::Float);
1234  InitBuiltinType(DoubleTy, BuiltinType::Double);
1235  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);
1236 
1237  // GNU extension, __float128 for IEEE quadruple precision
1238  InitBuiltinType(Float128Ty, BuiltinType::Float128);
1239 
1240  // C11 extension ISO/IEC TS 18661-3
1241  InitBuiltinType(Float16Ty, BuiltinType::Float16);
1242 
1243  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
1244  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
1245  InitBuiltinType(AccumTy, BuiltinType::Accum);
1246  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
1247  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
1248  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
1249  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
1250  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
1251  InitBuiltinType(FractTy, BuiltinType::Fract);
1252  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
1253  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
1254  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
1255  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
1256  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
1257  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
1258  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
1259  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
1260  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
1261  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
1262  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
1263  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
1264  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
1265  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
1266  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
1267  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);
1268 
1269  // GNU extension, 128-bit integers.
1270  InitBuiltinType(Int128Ty, BuiltinType::Int128);
1271  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
1272 
1273  // C++ 3.9.1p5
1274  if (TargetInfo::isTypeSigned(Target.getWCharType()))
1275  InitBuiltinType(WCharTy, BuiltinType::WChar_S);
1276  else // -fshort-wchar makes wchar_t be unsigned.
1277  InitBuiltinType(WCharTy, BuiltinType::WChar_U);
1278  if (LangOpts.CPlusPlus && LangOpts.WChar)
1279  WideCharTy = WCharTy;
1280  else {
1281  // C99 (or C++ using -fno-wchar).
1282  WideCharTy = getFromTargetType(Target.getWCharType());
1283  }
1284 
1285  WIntTy = getFromTargetType(Target.getWIntType());
1286 
1287  // C++20 (proposed)
1288  InitBuiltinType(Char8Ty, BuiltinType::Char8);
1289 
1290  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1291  InitBuiltinType(Char16Ty, BuiltinType::Char16);
1292  else // C99
1293  Char16Ty = getFromTargetType(Target.getChar16Type());
1294 
1295  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1296  InitBuiltinType(Char32Ty, BuiltinType::Char32);
1297  else // C99
1298  Char32Ty = getFromTargetType(Target.getChar32Type());
1299 
1300  // Placeholder type for type-dependent expressions whose type is
1301  // completely unknown. No code should ever check a type against
1302  // DependentTy and users should never see it; however, it is here to
1303  // help diagnose failures to properly check for type-dependent
1304  // expressions.
1305  InitBuiltinType(DependentTy, BuiltinType::Dependent);
1306 
1307  // Placeholder type for functions.
1308  InitBuiltinType(OverloadTy, BuiltinType::Overload);
1309 
1310  // Placeholder type for bound members.
1311  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);
1312 
1313  // Placeholder type for pseudo-objects.
1314  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);
1315 
1316  // "any" type; useful for debugger-like clients.
1317  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);
1318 
1319  // Placeholder type for unbridged ARC casts.
1320  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);
1321 
1322  // Placeholder type for builtin functions.
1323  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
1324 
1325  // Placeholder type for OMP array sections.
1326  if (LangOpts.OpenMP)
1327  InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
1328 
1329  // C99 6.2.5p11.
1330   FloatComplexTy      = getComplexType(FloatTy);
1331   DoubleComplexTy     = getComplexType(DoubleTy);
1332   LongDoubleComplexTy = getComplexType(LongDoubleTy);
1333   Float128ComplexTy   = getComplexType(Float128Ty);
1334 
1335  // Builtin types for 'id', 'Class', and 'SEL'.
1336  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
1337  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
1338  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);
1339 
1340  if (LangOpts.OpenCL) {
1341 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
1342  InitBuiltinType(SingletonId, BuiltinType::Id);
1343 #include "clang/Basic/OpenCLImageTypes.def"
1344 
1345  InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
1346  InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
1347  InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
1348  InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
1349  InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);
1350 
1351 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
1352  InitBuiltinType(Id##Ty, BuiltinType::Id);
1353 #include "clang/Basic/OpenCLExtensionTypes.def"
1354  }
1355 
1356  if (Target.hasAArch64SVETypes()) {
1357 #define SVE_TYPE(Name, Id, SingletonId) \
1358  InitBuiltinType(SingletonId, BuiltinType::Id);
1359 #include "clang/Basic/AArch64SVEACLETypes.def"
1360  }
1361 
1362  // Builtin type for __objc_yes and __objc_no
1363  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
1364  SignedCharTy : BoolTy);
1365 
1366  ObjCConstantStringType = QualType();
1367 
1368  ObjCSuperType = QualType();
1369 
1370  // void * type
1371  if (LangOpts.OpenCLVersion >= 200) {
1372  auto Q = VoidTy.getQualifiers();
1373     Q.setAddressSpace(LangAS::opencl_generic);
1374     VoidPtrTy = getPointerType(getCanonicalType(
1375         getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
1376   } else {
1377     VoidPtrTy = getPointerType(VoidTy);
1378   }
1379 
1380  // nullptr type (C++0x 2.14.7)
1381  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
1382 
1383  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
1384  InitBuiltinType(HalfTy, BuiltinType::Half);
1385 
1386  // Builtin type used to help define __builtin_va_list.
1387  VaListTagDecl = nullptr;
1388 }
1389 
1390 DiagnosticsEngine &ASTContext::getDiagnostics() const {
1391   return SourceMgr.getDiagnostics();
1392 }
1393 
1394 AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
1395   AttrVec *&Result = DeclAttrs[D];
1396  if (!Result) {
1397  void *Mem = Allocate(sizeof(AttrVec));
1398  Result = new (Mem) AttrVec;
1399  }
1400 
1401  return *Result;
1402 }
1403 
1404 /// Erase the attributes corresponding to the given declaration.
1405 void ASTContext::eraseDeclAttrs(const Decl *D) {
1406   llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
1407  if (Pos != DeclAttrs.end()) {
1408  Pos->second->~AttrVec();
1409  DeclAttrs.erase(Pos);
1410  }
1411 }
1412 
1413 // FIXME: Remove ?
1414 MemberSpecializationInfo *
1415 ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
1416   assert(Var->isStaticDataMember() && "Not a static data member");
1417   return getTemplateOrSpecializationInfo(Var)
1418       .dyn_cast<MemberSpecializationInfo *>();
1419 }
1420 
1421 ASTContext::TemplateOrSpecializationInfo
1422 ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
1423   llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
1424  TemplateOrInstantiation.find(Var);
1425  if (Pos == TemplateOrInstantiation.end())
1426  return {};
1427 
1428  return Pos->second;
1429 }
1430 
1431 void
1432 ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
1433                                                 TemplateSpecializationKind TSK,
1434                                           SourceLocation PointOfInstantiation) {
1435  assert(Inst->isStaticDataMember() && "Not a static data member");
1436  assert(Tmpl->isStaticDataMember() && "Not a static data member");
1437   setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
1438                                             Tmpl, TSK, PointOfInstantiation));
1439 }
1440 
1441 void
1442 ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
1443                                             TemplateOrSpecializationInfo TSI) {
1444   assert(!TemplateOrInstantiation[Inst] &&
1445  "Already noted what the variable was instantiated from");
1446  TemplateOrInstantiation[Inst] = TSI;
1447 }
1448 
1449 NamedDecl *
1450 ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
1451   auto Pos = InstantiatedFromUsingDecl.find(UUD);
1452  if (Pos == InstantiatedFromUsingDecl.end())
1453  return nullptr;
1454 
1455  return Pos->second;
1456 }
1457 
1458 void
1459 ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
1460   assert((isa<UsingDecl>(Pattern) ||
1461  isa<UnresolvedUsingValueDecl>(Pattern) ||
1462  isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
1463  "pattern decl is not a using decl");
1464  assert((isa<UsingDecl>(Inst) ||
1465  isa<UnresolvedUsingValueDecl>(Inst) ||
1466  isa<UnresolvedUsingTypenameDecl>(Inst)) &&
1467  "instantiation did not produce a using decl");
1468  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
1469  InstantiatedFromUsingDecl[Inst] = Pattern;
1470 }
1471 
1472 UsingShadowDecl *
1473 ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
1474   llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
1475  = InstantiatedFromUsingShadowDecl.find(Inst);
1476  if (Pos == InstantiatedFromUsingShadowDecl.end())
1477  return nullptr;
1478 
1479  return Pos->second;
1480 }
1481 
1482 void
1483 ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
1484                                                UsingShadowDecl *Pattern) {
1485  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
1486  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
1487 }
1488 
1489 FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
1490   llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
1491  = InstantiatedFromUnnamedFieldDecl.find(Field);
1492  if (Pos == InstantiatedFromUnnamedFieldDecl.end())
1493  return nullptr;
1494 
1495  return Pos->second;
1496 }
1497 
1498 void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
1499                                                      FieldDecl *Tmpl) {
1500  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
1501  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
1502  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1503  "Already noted what unnamed field was instantiated from");
1504 
1505  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1506 }
1507 
1508 ASTContext::overridden_cxx_method_iterator
1509 ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
1510   return overridden_methods(Method).begin();
1511 }
1512 
1513 ASTContext::overridden_cxx_method_iterator
1514 ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
1515   return overridden_methods(Method).end();
1516 }
1517 
1518 unsigned
1519 ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
1520   auto Range = overridden_methods(Method);
1521  return Range.end() - Range.begin();
1522 }
1523 
1524 ASTContext::overridden_method_range
1525 ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
1526   llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
1527  OverriddenMethods.find(Method->getCanonicalDecl());
1528  if (Pos == OverriddenMethods.end())
1529  return overridden_method_range(nullptr, nullptr);
1530  return overridden_method_range(Pos->second.begin(), Pos->second.end());
1531 }
1532 
1533 void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
1534                                      const CXXMethodDecl *Overridden) {
1535  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
1536  OverriddenMethods[Method].push_back(Overridden);
1537 }
1538 
1539 void ASTContext::getOverriddenMethods(
1540                       const NamedDecl *D,
1541  SmallVectorImpl<const NamedDecl *> &Overridden) const {
1542  assert(D);
1543 
1544  if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
1545  Overridden.append(overridden_methods_begin(CXXMethod),
1546  overridden_methods_end(CXXMethod));
1547  return;
1548  }
1549 
1550  const auto *Method = dyn_cast<ObjCMethodDecl>(D);
1551  if (!Method)
1552  return;
1553 
1554   SmallVector<const ObjCMethodDecl *, 8> OverDecls;
1555   Method->getOverriddenMethods(OverDecls);
1556  Overridden.append(OverDecls.begin(), OverDecls.end());
1557 }
1558 
1559 void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
1560   assert(!Import->NextLocalImport && "Import declaration already in the chain");
1561  assert(!Import->isFromASTFile() && "Non-local import declaration");
1562  if (!FirstLocalImport) {
1563  FirstLocalImport = Import;
1564  LastLocalImport = Import;
1565  return;
1566  }
1567 
1568  LastLocalImport->NextLocalImport = Import;
1569  LastLocalImport = Import;
1570 }
1571 
1572 //===----------------------------------------------------------------------===//
1573 // Type Sizing and Analysis
1574 //===----------------------------------------------------------------------===//
1575 
1576 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1577 /// scalar floating point type.
1578 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1579  switch (T->castAs<BuiltinType>()->getKind()) {
1580  default:
1581  llvm_unreachable("Not a floating point type!");
1582  case BuiltinType::Float16:
1583  case BuiltinType::Half:
1584  return Target->getHalfFormat();
1585  case BuiltinType::Float: return Target->getFloatFormat();
1586  case BuiltinType::Double: return Target->getDoubleFormat();
1587  case BuiltinType::LongDouble:
1588  if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
1589  return AuxTarget->getLongDoubleFormat();
1590  return Target->getLongDoubleFormat();
1591  case BuiltinType::Float128:
1592  if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
1593  return AuxTarget->getFloat128Format();
1594  return Target->getFloat128Format();
1595  }
1596 }
1597 
1598 CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
1599  unsigned Align = Target->getCharWidth();
1600 
1601  bool UseAlignAttrOnly = false;
1602  if (unsigned AlignFromAttr = D->getMaxAlignment()) {
1603  Align = AlignFromAttr;
1604 
1605  // __attribute__((aligned)) can increase or decrease alignment
1606  // *except* on a struct or struct member, where it only increases
1607  // alignment unless 'packed' is also specified.
1608  //
1609  // It is an error for alignas to decrease alignment, so we can
1610  // ignore that possibility; Sema should diagnose it.
1611  if (isa<FieldDecl>(D)) {
1612  UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
1613  cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
1614  } else {
1615  UseAlignAttrOnly = true;
1616  }
1617  }
1618  else if (isa<FieldDecl>(D))
1619  UseAlignAttrOnly =
1620  D->hasAttr<PackedAttr>() ||
1621  cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
1622 
1623  // If we're using the align attribute only, just ignore everything
1624  // else about the declaration and its type.
1625  if (UseAlignAttrOnly) {
1626  // do nothing
1627  } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
1628  QualType T = VD->getType();
1629  if (const auto *RT = T->getAs<ReferenceType>()) {
1630  if (ForAlignof)
1631  T = RT->getPointeeType();
1632  else
1633  T = getPointerType(RT->getPointeeType());
1634  }
1635  QualType BaseT = getBaseElementType(T);
1636  if (T->isFunctionType())
1637  Align = getTypeInfoImpl(T.getTypePtr()).Align;
1638  else if (!BaseT->isIncompleteType()) {
1639  // Adjust alignments of declarations with array type by the
1640  // large-array alignment on the target.
1641  if (const ArrayType *arrayType = getAsArrayType(T)) {
1642  unsigned MinWidth = Target->getLargeArrayMinWidth();
1643  if (!ForAlignof && MinWidth) {
1644  if (isa<VariableArrayType>(arrayType))
1645  Align = std::max(Align, Target->getLargeArrayAlign());
1646  else if (isa<ConstantArrayType>(arrayType) &&
1647  MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
1648  Align = std::max(Align, Target->getLargeArrayAlign());
1649  }
1650  }
1651  Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
1652  if (BaseT.getQualifiers().hasUnaligned())
1653  Align = Target->getCharWidth();
1654  if (const auto *VD = dyn_cast<VarDecl>(D)) {
1655  if (VD->hasGlobalStorage() && !ForAlignof) {
1656  uint64_t TypeSize = getTypeSize(T.getTypePtr());
1657  Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
1658  }
1659  }
1660  }
1661 
1662  // Fields can be subject to extra alignment constraints, like if
1663  // the field is packed, the struct is packed, or the struct has a
1664  // a max-field-alignment constraint (#pragma pack). So calculate
1665  // the actual alignment of the field within the struct, and then
1666  // (as we're expected to) constrain that by the alignment of the type.
1667  if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
1668  const RecordDecl *Parent = Field->getParent();
1669  // We can only produce a sensible answer if the record is valid.
1670  if (!Parent->isInvalidDecl()) {
1671  const ASTRecordLayout &Layout = getASTRecordLayout(Parent);
1672 
1673  // Start with the record's overall alignment.
1674  unsigned FieldAlign = toBits(Layout.getAlignment());
1675 
1676  // Use the GCD of that and the offset within the record.
1677  uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
1678  if (Offset > 0) {
1679  // Alignment is always a power of 2, so the GCD will be a power of 2,
1680  // which means we get to do this crazy thing instead of Euclid's.
1681  uint64_t LowBitOfOffset = Offset & (~Offset + 1);
1682  if (LowBitOfOffset < FieldAlign)
1683  FieldAlign = static_cast<unsigned>(LowBitOfOffset);
1684  }
1685 
1686  Align = std::min(Align, FieldAlign);
1687  }
1688  }
1689  }
1690 
1691  return toCharUnitsFromBits(Align);
1692 }
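// Editor's note (illustrative worked example, not part of the original source): the
// low-bit trick above isolates the lowest set bit of the field offset, which for
// power-of-two alignments plays the role of the GCD. E.g. a field at bit offset 24
// (binary 11000) inside a record aligned to 128 bits: 24 & (~24 + 1) == 8, which is
// less than 128, so the field can only be assumed to be 8-bit (1-byte) aligned and
// the reported declaration alignment is capped accordingly.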
1693 
1694 // getTypeInfoDataSizeInChars - Return the size of a type, in
1695 // chars. If the type is a record, its data size is returned. This is
1696 // the size of the memcpy that's performed when assigning this type
1697 // using a trivial copy/move assignment operator.
1698 std::pair<CharUnits, CharUnits>
1699 ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
1700   std::pair<CharUnits, CharUnits> sizeAndAlign = getTypeInfoInChars(T);
1701 
1702  // In C++, objects can sometimes be allocated into the tail padding
1703  // of a base-class subobject. We decide whether that's possible
1704  // during class layout, so here we can just trust the layout results.
1705  if (getLangOpts().CPlusPlus) {
1706  if (const auto *RT = T->getAs<RecordType>()) {
1707  const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
1708  sizeAndAlign.first = layout.getDataSize();
1709  }
1710  }
1711 
1712  return sizeAndAlign;
1713 }
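// [Editorial example -- not part of ASTContext.cpp.] "Data size" and sizeof()
// differ exactly when a class leaves tail padding that a derived class may
// reuse. A hedged sketch (the concrete numbers are what common 64-bit
// Itanium-ABI targets produce; other ABIs may lay this out differently):

struct EditorialBase { long l; char c; };            // sizeof == 16, data size == 9
struct EditorialDerived : EditorialBase { char d; }; // d may occupy the base's tail padding

// A trivial copy/move assignment of the EditorialBase subobject inside an
// EditorialDerived must therefore memcpy only the data size (9 bytes here),
// not sizeof(EditorialBase), or it would clobber EditorialDerived::d.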
1714 
1715 /// getConstantArrayInfoInChars - Performing the computation in CharUnits
1716 /// instead of in bits prevents overflowing the uint64_t for some large arrays.
1717 std::pair<CharUnits, CharUnits>
1718 static getConstantArrayInfoInChars(const ASTContext &Context,
1719                                    const ConstantArrayType *CAT) {
1720  std::pair<CharUnits, CharUnits> EltInfo =
1721  Context.getTypeInfoInChars(CAT->getElementType());
1722  uint64_t Size = CAT->getSize().getZExtValue();
1723  assert((Size == 0 || static_cast<uint64_t>(EltInfo.first.getQuantity()) <=
1724  (uint64_t)(-1)/Size) &&
1725  "Overflow in array type char size evaluation");
1726  uint64_t Width = EltInfo.first.getQuantity() * Size;
1727  unsigned Align = EltInfo.second.getQuantity();
1728  if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
1729  Context.getTargetInfo().getPointerWidth(0) == 64)
1730  Width = llvm::alignTo(Width, Align);
1731  return std::make_pair(CharUnits::fromQuantity(Width),
1732  CharUnits::fromQuantity(Align));
1733 }
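// [Editorial example -- not part of ASTContext.cpp.] Why the helper above
// works in CharUnits: a size kept in *bits* overflows uint64_t eight times
// sooner than one kept in bytes. A standalone sketch with hypothetical
// numbers (8-byte elements, 2^60 of them):

#include <cstdint>

static bool editorialBitSizeWouldOverflow() {
  const uint64_t EltBytes = 8, NumElts = uint64_t(1) << 60;
  uint64_t Bytes = EltBytes * NumElts;          // 2^63 bytes: still fits in uint64_t
  return Bytes > UINT64_MAX / 8;                // true: the same size in bits (2^66) would not
}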
1734 
1735 std::pair<CharUnits, CharUnits>
1736 ASTContext::getTypeInfoInChars(const Type *T) const {
1737  if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
1738  return getConstantArrayInfoInChars(*this, CAT);
1739  TypeInfo Info = getTypeInfo(T);
1740  return std::make_pair(toCharUnitsFromBits(Info.Width),
1741  toCharUnitsFromBits(Info.Align));
1742 }
1743 
1744 std::pair<CharUnits, CharUnits>
1745 ASTContext::getTypeInfoInChars(QualType T) const {
1746  return getTypeInfoInChars(T.getTypePtr());
1747 }
1748 
1749 bool ASTContext::isAlignmentRequired(const Type *T) const {
1750  return getTypeInfo(T).AlignIsRequired;
1751 }
1752 
1753 bool ASTContext::isAlignmentRequired(QualType T) const {
1754  return isAlignmentRequired(T.getTypePtr());
1755 }
1756 
1757 unsigned ASTContext::getTypeAlignIfKnown(QualType T) const {
1758  // An alignment on a typedef overrides anything else.
1759  if (const auto *TT = T->getAs<TypedefType>())
1760  if (unsigned Align = TT->getDecl()->getMaxAlignment())
1761  return Align;
1762 
1763  // If we have an (array of) complete type, we're done.
1764  T = getBaseElementType(T);
1765  if (!T->isIncompleteType())
1766  return getTypeAlign(T);
1767 
1768  // If we had an array type, its element type might be a typedef
1769  // type with an alignment attribute.
1770  if (const auto *TT = T->getAs<TypedefType>())
1771  if (unsigned Align = TT->getDecl()->getMaxAlignment())
1772  return Align;
1773 
1774  // Otherwise, see if the declaration of the type had an attribute.
1775  if (const auto *TT = T->getAs<TagType>())
1776  return TT->getDecl()->getMaxAlignment();
1777 
1778  return 0;
1779 }
1780 
1781 TypeInfo ASTContext::getTypeInfo(const Type *T) const {
1782  TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
1783  if (I != MemoizedTypeInfo.end())
1784  return I->second;
1785 
1786  // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
1787  TypeInfo TI = getTypeInfoImpl(T);
1788  MemoizedTypeInfo[T] = TI;
1789  return TI;
1790 }
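// [Editorial example -- not part of ASTContext.cpp.] The cache update above is
// deliberately a fresh indexed store *after* the recursive call: computing the
// entry can itself populate the map (getTypeInfoImpl recurses into
// getTypeInfo), which may rehash it and invalidate any iterator or reference
// taken earlier. The same pattern in plain C++, with hypothetical names:

#include <unordered_map>

struct EditorialInfo { unsigned Width; unsigned Align; };
static std::unordered_map<int, EditorialInfo> EditorialCache;
static EditorialInfo editorialCompute(int Key);   // may call editorialGetInfo()

static EditorialInfo editorialGetInfo(int Key) {
  auto It = EditorialCache.find(Key);
  if (It != EditorialCache.end())
    return It->second;
  EditorialInfo TI = editorialCompute(Key);       // may grow/rehash EditorialCache
  EditorialCache[Key] = TI;                       // re-lookup; never reuse It here
  return TI;
}

static EditorialInfo editorialCompute(int Key) {
  // Stand-in for getTypeInfoImpl(); a real version may recurse via
  // editorialGetInfo() on component keys.
  return {unsigned(Key) * 8u, 8u};
}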
1791 
1792 /// getTypeInfoImpl - Return the size of the specified type, in bits. This
1793 /// method does not work on incomplete types.
1794 ///
1795 /// FIXME: Pointers into different addr spaces could have different sizes and
1796 /// alignment requirements: getPointerInfo should take an AddrSpace, this
1797 /// should take a QualType, &c.
1798 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
1799  uint64_t Width = 0;
1800  unsigned Align = 8;
1801  bool AlignIsRequired = false;
1802  unsigned AS = 0;
1803  switch (T->getTypeClass()) {
1804 #define TYPE(Class, Base)
1805 #define ABSTRACT_TYPE(Class, Base)
1806 #define NON_CANONICAL_TYPE(Class, Base)
1807 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
1808 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
1809  case Type::Class: \
1810  assert(!T->isDependentType() && "should not see dependent types here"); \
1811  return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
1812 #include "clang/AST/TypeNodes.inc"
1813  llvm_unreachable("Should not see dependent types");
1814 
1815  case Type::FunctionNoProto:
1816  case Type::FunctionProto:
1817  // GCC extension: alignof(function) = 32 bits
1818  Width = 0;
1819  Align = 32;
1820  break;
1821 
1822  case Type::IncompleteArray:
1823  case Type::VariableArray:
1824  Width = 0;
1825  Align = getTypeAlign(cast<ArrayType>(T)->getElementType());
1826  break;
1827 
1828  case Type::ConstantArray: {
1829  const auto *CAT = cast<ConstantArrayType>(T);
1830 
1831  TypeInfo EltInfo = getTypeInfo(CAT->getElementType());
1832  uint64_t Size = CAT->getSize().getZExtValue();
1833  assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
1834  "Overflow in array type bit size evaluation");
1835  Width = EltInfo.Width * Size;
1836  Align = EltInfo.Align;
1837  if (!getTargetInfo().getCXXABI().isMicrosoft() ||
1838  getTargetInfo().getPointerWidth(0) == 64)
1839  Width = llvm::alignTo(Width, Align);
1840  break;
1841  }
1842  case Type::ExtVector:
1843  case Type::Vector: {
1844  const auto *VT = cast<VectorType>(T);
1845  TypeInfo EltInfo = getTypeInfo(VT->getElementType());
1846  Width = EltInfo.Width * VT->getNumElements();
1847  Align = Width;
1848  // If the alignment is not a power of 2, round up to the next power of 2.
1849  // This happens for non-power-of-2 length vectors.
1850  if (Align & (Align-1)) {
1851  Align = llvm::NextPowerOf2(Align);
1852  Width = llvm::alignTo(Width, Align);
1853  }
1854  // Adjust the alignment based on the target max.
1855  uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
1856  if (TargetVectorAlign && TargetVectorAlign < Align)
1857  Align = TargetVectorAlign;
1858  break;
1859  }
1860 
1861  case Type::Builtin:
1862  switch (cast<BuiltinType>(T)->getKind()) {
1863  default: llvm_unreachable("Unknown builtin type!");
1864  case BuiltinType::Void:
1865  // GCC extension: alignof(void) = 8 bits.
1866  Width = 0;
1867  Align = 8;
1868  break;
1869  case BuiltinType::Bool:
1870  Width = Target->getBoolWidth();
1871  Align = Target->getBoolAlign();
1872  break;
1873  case BuiltinType::Char_S:
1874  case BuiltinType::Char_U:
1875  case BuiltinType::UChar:
1876  case BuiltinType::SChar:
1877  case BuiltinType::Char8:
1878  Width = Target->getCharWidth();
1879  Align = Target->getCharAlign();
1880  break;
1881  case BuiltinType::WChar_S:
1882  case BuiltinType::WChar_U:
1883  Width = Target->getWCharWidth();
1884  Align = Target->getWCharAlign();
1885  break;
1886  case BuiltinType::Char16:
1887  Width = Target->getChar16Width();
1888  Align = Target->getChar16Align();
1889  break;
1890  case BuiltinType::Char32:
1891  Width = Target->getChar32Width();
1892  Align = Target->getChar32Align();
1893  break;
1894  case BuiltinType::UShort:
1895  case BuiltinType::Short:
1896  Width = Target->getShortWidth();
1897  Align = Target->getShortAlign();
1898  break;
1899  case BuiltinType::UInt:
1900  case BuiltinType::Int:
1901  Width = Target->getIntWidth();
1902  Align = Target->getIntAlign();
1903  break;
1904  case BuiltinType::ULong:
1905  case BuiltinType::Long:
1906  Width = Target->getLongWidth();
1907  Align = Target->getLongAlign();
1908  break;
1909  case BuiltinType::ULongLong:
1910  case BuiltinType::LongLong:
1911  Width = Target->getLongLongWidth();
1912  Align = Target->getLongLongAlign();
1913  break;
1914  case BuiltinType::Int128:
1915  case BuiltinType::UInt128:
1916  Width = 128;
1917  Align = 128; // int128_t is 128-bit aligned on all targets.
1918  break;
1919  case BuiltinType::ShortAccum:
1920  case BuiltinType::UShortAccum:
1921  case BuiltinType::SatShortAccum:
1922  case BuiltinType::SatUShortAccum:
1923  Width = Target->getShortAccumWidth();
1924  Align = Target->getShortAccumAlign();
1925  break;
1926  case BuiltinType::Accum:
1927  case BuiltinType::UAccum:
1928  case BuiltinType::SatAccum:
1929  case BuiltinType::SatUAccum:
1930  Width = Target->getAccumWidth();
1931  Align = Target->getAccumAlign();
1932  break;
1933  case BuiltinType::LongAccum:
1934  case BuiltinType::ULongAccum:
1935  case BuiltinType::SatLongAccum:
1936  case BuiltinType::SatULongAccum:
1937  Width = Target->getLongAccumWidth();
1938  Align = Target->getLongAccumAlign();
1939  break;
1940  case BuiltinType::ShortFract:
1941  case BuiltinType::UShortFract:
1942  case BuiltinType::SatShortFract:
1943  case BuiltinType::SatUShortFract:
1944  Width = Target->getShortFractWidth();
1945  Align = Target->getShortFractAlign();
1946  break;
1947  case BuiltinType::Fract:
1948  case BuiltinType::UFract:
1949  case BuiltinType::SatFract:
1950  case BuiltinType::SatUFract:
1951  Width = Target->getFractWidth();
1952  Align = Target->getFractAlign();
1953  break;
1954  case BuiltinType::LongFract:
1955  case BuiltinType::ULongFract:
1956  case BuiltinType::SatLongFract:
1957  case BuiltinType::SatULongFract:
1958  Width = Target->getLongFractWidth();
1959  Align = Target->getLongFractAlign();
1960  break;
1961  case BuiltinType::Float16:
1962  case BuiltinType::Half:
1963  if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
1964  !getLangOpts().OpenMPIsDevice) {
1965  Width = Target->getHalfWidth();
1966  Align = Target->getHalfAlign();
1967  } else {
1968  assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
1969  "Expected OpenMP device compilation.");
1970  Width = AuxTarget->getHalfWidth();
1971  Align = AuxTarget->getHalfAlign();
1972  }
1973  break;
1974  case BuiltinType::Float:
1975  Width = Target->getFloatWidth();
1976  Align = Target->getFloatAlign();
1977  break;
1978  case BuiltinType::Double:
1979  Width = Target->getDoubleWidth();
1980  Align = Target->getDoubleAlign();
1981  break;
1982  case BuiltinType::LongDouble:
1983  if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
1984  (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
1985  Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
1986  Width = AuxTarget->getLongDoubleWidth();
1987  Align = AuxTarget->getLongDoubleAlign();
1988  } else {
1989  Width = Target->getLongDoubleWidth();
1990  Align = Target->getLongDoubleAlign();
1991  }
1992  break;
1993  case BuiltinType::Float128:
1994  if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
1995  !getLangOpts().OpenMPIsDevice) {
1996  Width = Target->getFloat128Width();
1997  Align = Target->getFloat128Align();
1998  } else {
1999  assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
2000  "Expected OpenMP device compilation.");
2001  Width = AuxTarget->getFloat128Width();
2002  Align = AuxTarget->getFloat128Align();
2003  }
2004  break;
2005  case BuiltinType::NullPtr:
2006  Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
2007  Align = Target->getPointerAlign(0); // == sizeof(void*)
2008  break;
2009  case BuiltinType::ObjCId:
2010  case BuiltinType::ObjCClass:
2011  case BuiltinType::ObjCSel:
2012  Width = Target->getPointerWidth(0);
2013  Align = Target->getPointerAlign(0);
2014  break;
2015  case BuiltinType::OCLSampler:
2016  case BuiltinType::OCLEvent:
2017  case BuiltinType::OCLClkEvent:
2018  case BuiltinType::OCLQueue:
2019  case BuiltinType::OCLReserveID:
2020 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2021  case BuiltinType::Id:
2022 #include "clang/Basic/OpenCLImageTypes.def"
2023 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
2024  case BuiltinType::Id:
2025 #include "clang/Basic/OpenCLExtensionTypes.def"
2026  AS = getTargetAddressSpace(
2027  Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)));
2028  Width = Target->getPointerWidth(AS);
2029  Align = Target->getPointerAlign(AS);
2030  break;
2031  // The SVE types are effectively target-specific. The length of an
2032  // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple
2033  // of 128 bits. There is one predicate bit for each vector byte, so the
2034  // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits.
2035  //
2036  // Because the length is only known at runtime, we use a dummy value
2037  // of 0 for the static length. The alignment values are those defined
2038  // by the Procedure Call Standard for the Arm Architecture.
2039 #define SVE_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, IsSigned, IsFP)\
2040  case BuiltinType::Id: \
2041  Width = 0; \
2042  Align = 128; \
2043  break;
2044 #define SVE_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \
2045  case BuiltinType::Id: \
2046  Width = 0; \
2047  Align = 16; \
2048  break;
2049 #include "clang/Basic/AArch64SVEACLETypes.def"
2050  }
2051  break;
2052  case Type::ObjCObjectPointer:
2053  Width = Target->getPointerWidth(0);
2054  Align = Target->getPointerAlign(0);
2055  break;
2056  case Type::BlockPointer:
2057  AS = getTargetAddressSpace(cast<BlockPointerType>(T)->getPointeeType());
2058  Width = Target->getPointerWidth(AS);
2059  Align = Target->getPointerAlign(AS);
2060  break;
2061  case Type::LValueReference:
2062  case Type::RValueReference:
2063  // alignof and sizeof should never enter this code path here, so we go
2064  // the pointer route.
2065  AS = getTargetAddressSpace(cast<ReferenceType>(T)->getPointeeType());
2066  Width = Target->getPointerWidth(AS);
2067  Align = Target->getPointerAlign(AS);
2068  break;
2069  case Type::Pointer:
2070  AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType());
2071  Width = Target->getPointerWidth(AS);
2072  Align = Target->getPointerAlign(AS);
2073  break;
2074  case Type::MemberPointer: {
2075  const auto *MPT = cast<MemberPointerType>(T);
2076  CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
2077  Width = MPI.Width;
2078  Align = MPI.Align;
2079  break;
2080  }
2081  case Type::Complex: {
2082  // Complex types have the same alignment as their elements, but twice the
2083  // size.
2084  TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
2085  Width = EltInfo.Width * 2;
2086  Align = EltInfo.Align;
2087  break;
2088  }
2089  case Type::ObjCObject:
2090  return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
2091  case Type::Adjusted:
2092  case Type::Decayed:
2093  return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
2094  case Type::ObjCInterface: {
2095  const auto *ObjCI = cast<ObjCInterfaceType>(T);
2096  const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2097  Width = toBits(Layout.getSize());
2098  Align = toBits(Layout.getAlignment());
2099  break;
2100  }
2101  case Type::Record:
2102  case Type::Enum: {
2103  const auto *TT = cast<TagType>(T);
2104 
2105  if (TT->getDecl()->isInvalidDecl()) {
2106  Width = 8;
2107  Align = 8;
2108  break;
2109  }
2110 
2111  if (const auto *ET = dyn_cast<EnumType>(TT)) {
2112  const EnumDecl *ED = ET->getDecl();
2113  TypeInfo Info =
2114      getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType());
2115  if (unsigned AttrAlign = ED->getMaxAlignment()) {
2116  Info.Align = AttrAlign;
2117  Info.AlignIsRequired = true;
2118  }
2119  return Info;
2120  }
2121 
2122  const auto *RT = cast<RecordType>(TT);
2123  const RecordDecl *RD = RT->getDecl();
2124  const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2125  Width = toBits(Layout.getSize());
2126  Align = toBits(Layout.getAlignment());
2127  AlignIsRequired = RD->hasAttr<AlignedAttr>();
2128  break;
2129  }
2130 
2131  case Type::SubstTemplateTypeParm:
2132  return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
2133  getReplacementType().getTypePtr());
2134 
2135  case Type::Auto:
2136  case Type::DeducedTemplateSpecialization: {
2137  const auto *A = cast<DeducedType>(T);
2138  assert(!A->getDeducedType().isNull() &&
2139  "cannot request the size of an undeduced or dependent auto type");
2140  return getTypeInfo(A->getDeducedType().getTypePtr());
2141  }
2142 
2143  case Type::Paren:
2144  return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
2145 
2146  case Type::MacroQualified:
2147  return getTypeInfo(
2148  cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr());
2149 
2150  case Type::ObjCTypeParam:
2151  return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());
2152 
2153  case Type::Typedef: {
2154  const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl();
2155  TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
2156  // If the typedef has an aligned attribute on it, it overrides any computed
2157  // alignment we have. This violates the GCC documentation (which says that
2158  // attribute(aligned) can only round up) but matches its implementation.
2159  if (unsigned AttrAlign = Typedef->getMaxAlignment()) {
2160  Align = AttrAlign;
2161  AlignIsRequired = true;
2162  } else {
2163  Align = Info.Align;
2164  AlignIsRequired = Info.AlignIsRequired;
2165  }
2166  Width = Info.Width;
2167  break;
2168  }
2169 
2170  case Type::Elaborated:
2171  return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
2172 
2173  case Type::Attributed:
2174  return getTypeInfo(
2175  cast<AttributedType>(T)->getEquivalentType().getTypePtr());
2176 
2177  case Type::Atomic: {
2178  // Start with the base type information.
2179  TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
2180  Width = Info.Width;
2181  Align = Info.Align;
2182 
2183  if (!Width) {
2184  // An otherwise zero-sized type should still generate an
2185  // atomic operation.
2186  Width = Target->getCharWidth();
2187  assert(Align);
2188  } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2189  // If the size of the type doesn't exceed the platform's max
2190  // atomic promotion width, make the size and alignment more
2191  // favorable to atomic operations:
2192 
2193  // Round the size up to a power of 2.
2194  if (!llvm::isPowerOf2_64(Width))
2195  Width = llvm::NextPowerOf2(Width);
2196 
2197  // Set the alignment equal to the size.
2198  Align = static_cast<unsigned>(Width);
2199  }
2200  }
2201  break;
2202 
2203  case Type::Pipe:
2204  Width = Target->getPointerWidth(getTargetAddressSpace(LangAS::opencl_global));
2205  Align = Target->getPointerAlign(getTargetAddressSpace(LangAS::opencl_global));
2206  break;
2207  }
2208 
2209  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2210  return TypeInfo(Width, Align, AlignIsRequired);
2211 }
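// [Editorial example -- not part of ASTContext.cpp.] The Vector case above
// rounds a non-power-of-two vector up so its alignment is a power of two and
// its width is a multiple of that alignment (before the target clamp, which
// is omitted here). The same arithmetic in standard C++20, with a
// hypothetical helper:

#include <bit>
#include <cstdint>

struct EditorialVecInfo { uint64_t WidthBits, AlignBits; };

static EditorialVecInfo editorialVectorInfo(uint64_t EltBits, uint64_t NumElts) {
  uint64_t Width = EltBits * NumElts;            // e.g. 32 * 3 == 96 for a float3
  uint64_t Align = std::bit_ceil(Width);         // 96 -> 128 (a power of two stays put)
  Width = (Width + Align - 1) / Align * Align;   // llvm::alignTo equivalent: 96 -> 128
  return {Width, Align};
}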
2212 
2213 unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2214  UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
2215  if (I != MemoizedUnadjustedAlign.end())
2216  return I->second;
2217 
2218  unsigned UnadjustedAlign;
2219  if (const auto *RT = T->getAs<RecordType>()) {
2220  const RecordDecl *RD = RT->getDecl();
2221  const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2222  UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2223  } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
2224  const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2225  UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2226  } else {
2227  UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
2228  }
2229 
2230  MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2231  return UnadjustedAlign;
2232 }
2233 
2234 unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
2235  unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
2236  // Target ppc64 with QPX: simd default alignment for pointer to double is 32.
2237  if ((getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64 ||
2238  getTargetInfo().getTriple().getArch() == llvm::Triple::ppc64le) &&
2239  getTargetInfo().getABI() == "elfv1-qpx" &&
2240  T->isSpecificBuiltinType(BuiltinType::Double))
2241  SimdAlign = 256;
2242  return SimdAlign;
2243 }
2244 
2245 /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2246 CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
2247  return CharUnits::fromQuantity(BitSize / getCharWidth());
2248 }
2249 
2250 /// toBits - Convert a size in characters to a size in bits.
2251 int64_t ASTContext::toBits(CharUnits CharSize) const {
2252  return CharSize.getQuantity() * getCharWidth();
2253 }
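// [Editorial example -- not part of ASTContext.cpp.] These two helpers are
// plain unit conversions through the target's char width (8 bits on typical
// targets). Note that the bits-to-chars direction truncates:

#include <cstdint>

static int64_t editorialToChars(int64_t Bits, int64_t CharWidth = 8) {
  return Bits / CharWidth;                       // 96 -> 12; a 4-bit quantity maps to 0
}
static int64_t editorialToBits(int64_t Chars, int64_t CharWidth = 8) {
  return Chars * CharWidth;                      // 12 -> 96
}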
2254 
2255 /// getTypeSizeInChars - Return the size of the specified type, in characters.
2256 /// This method does not work on incomplete types.
2257 CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
2258  return getTypeInfoInChars(T).first;
2259 }
2260 CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
2261  return getTypeInfoInChars(T).first;
2262 }
2263 
2264 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2265 /// characters. This method does not work on incomplete types.
2266 CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
2267  return toCharUnitsFromBits(getTypeAlign(T));
2268 }
2269 CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
2270  return toCharUnitsFromBits(getTypeAlign(T));
2271 }
2272 
2273 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
2274 /// type, in characters, before alignment adjustments. This method does
2275 /// not work on incomplete types.
2276 CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
2277  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
2278 }
2279 CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
2280  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
2281 }
2282 
2283 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified
2284 /// type for the current target in bits. This can be different than the ABI
2285 /// alignment in cases where it is beneficial for performance to overalign
2286 /// a data type.
2287 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
2288  TypeInfo TI = getTypeInfo(T);
2289  unsigned ABIAlign = TI.Align;
2290 
2291  T = T->getBaseElementTypeUnsafe();
2292 
2293  // The preferred alignment of member pointers is that of a pointer.
2294  if (T->isMemberPointerType())
2295  return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
2296 
2297  if (!Target->allowsLargerPreferedTypeAlignment())
2298  return ABIAlign;
2299 
2300  // Double and long long should be naturally aligned if possible.
2301  if (const auto *CT = T->getAs<ComplexType>())
2302  T = CT->getElementType().getTypePtr();
2303  if (const auto *ET = T->getAs<EnumType>())
2304  T = ET->getDecl()->getIntegerType().getTypePtr();
2305  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
2306  T->isSpecificBuiltinType(BuiltinType::LongLong) ||
2307  T->isSpecificBuiltinType(BuiltinType::ULongLong))
2308  // Don't increase the alignment if an alignment attribute was specified on a
2309  // typedef declaration.
2310  if (!TI.AlignIsRequired)
2311  return std::max(ABIAlign, (unsigned)getTypeSize(T));
2312 
2313  return ABIAlign;
2314 }
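// [Editorial example -- not part of ASTContext.cpp.] A concrete instance of
// "preferred" vs. ABI alignment: on i386 System V, double has an ABI
// alignment of only 4 bytes but performs better naturally aligned, so the
// preferred alignment reported here is 8 -- unless an explicit alignment
// attribute pinned it. A hypothetical sketch of that decision:

static unsigned editorialPreferredAlign(unsigned ABIAlignBits,
                                        unsigned NaturalWidthBits,
                                        bool IsDoubleOrLongLong,
                                        bool AlignWasExplicit) {
  if (IsDoubleOrLongLong && !AlignWasExplicit && NaturalWidthBits > ABIAlignBits)
    return NaturalWidthBits;                     // e.g. i386 double: 32 -> 64 bits
  return ABIAlignBits;
}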
2315 
2316 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2317 /// for __attribute__((aligned)) on this target, to be used if no alignment
2318 /// value is specified.
2319 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
2320  return getTargetInfo().getDefaultAlignForAttributeAligned();
2321 }
2322 
2323 /// getAlignOfGlobalVar - Return the alignment in bits that should be given
2324 /// to a global variable of the specified type.
2325 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
2326  uint64_t TypeSize = getTypeSize(T.getTypePtr());
2327  return std::max(getTypeAlign(T), getTargetInfo().getMinGlobalAlign(TypeSize));
2328 }
2329 
2330 /// getAlignOfGlobalVarInChars - Return the alignment in characters that
2331 /// should be given to a global variable of the specified type.
2332 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const {
2333  return toCharUnitsFromBits(getAlignOfGlobalVar(T));
2334 }
2335 
2336 CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
2337  CharUnits Offset = CharUnits::Zero();
2338  const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
2339  while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2340  Offset += Layout->getBaseClassOffset(Base);
2341  Layout = &getASTRecordLayout(Base);
2342  }
2343  return Offset;
2344 }
2345 
2346 /// DeepCollectObjCIvars -
2347 /// This routine first collects all declared, but not synthesized, ivars in
2348 /// super class and then collects all ivars, including those synthesized for
2349 /// current class. This routine is used for implementation of current class
2350 /// when all ivars, declared and synthesized are known.
2351 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
2352  bool leafClass,
2353  SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
2354  if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2355  DeepCollectObjCIvars(SuperClass, false, Ivars);
2356  if (!leafClass) {
2357  for (const auto *I : OI->ivars())
2358  Ivars.push_back(I);
2359  } else {
2360  auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2361  for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2362  Iv= Iv->getNextIvar())
2363  Ivars.push_back(Iv);
2364  }
2365 }
2366 
2367 /// CollectInheritedProtocols - Collect all protocols in current class and
2368 /// those inherited by it.
2369 void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
2370  llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
2371  if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
2372  // We can use protocol_iterator here instead of
2373  // all_referenced_protocol_iterator since we are walking all categories.
2374  for (auto *Proto : OI->all_referenced_protocols()) {
2375  CollectInheritedProtocols(Proto, Protocols);
2376  }
2377 
2378  // Categories of this Interface.
2379  for (const auto *Cat : OI->visible_categories())
2380  CollectInheritedProtocols(Cat, Protocols);
2381 
2382  if (ObjCInterfaceDecl *SD = OI->getSuperClass())
2383  while (SD) {
2384  CollectInheritedProtocols(SD, Protocols);
2385  SD = SD->getSuperClass();
2386  }
2387  } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
2388  for (auto *Proto : OC->protocols()) {
2389  CollectInheritedProtocols(Proto, Protocols);
2390  }
2391  } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
2392  // Insert the protocol.
2393  if (!Protocols.insert(
2394  const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
2395  return;
2396 
2397  for (auto *Proto : OP->protocols())
2398  CollectInheritedProtocols(Proto, Protocols);
2399  }
2400 }
2401 
2402 static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
2403  const RecordDecl *RD) {
2404  assert(RD->isUnion() && "Must be union type");
2405  CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());
2406 
2407  for (const auto *Field : RD->fields()) {
2408  if (!Context.hasUniqueObjectRepresentations(Field->getType()))
2409  return false;
2410  CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType());
2411  if (FieldSize != UnionSize)
2412  return false;
2413  }
2414  return !RD->field_empty();
2415 }
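// [Editorial example -- not part of ASTContext.cpp.] The union rule above
// rejects any union whose members do not all cover the full union size,
// because the uncovered bytes are padding with unspecified values. With a
// typical 4-byte int:

union EditorialPartial { char c; int i; };       // char covers 1 of 4 bytes -> not unique
union EditorialFull { int a; unsigned b; };      // every member covers all 4 bytes -> unique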
2416 
2417 static bool isStructEmpty(QualType Ty) {
2418  const RecordDecl *RD = Ty->castAs<RecordType>()->getDecl();
2419 
2420  if (!RD->field_empty())
2421  return false;
2422 
2423  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD))
2424  return ClassDecl->isEmpty();
2425 
2426  return true;
2427 }
2428 
2429 static llvm::Optional<int64_t>
2430 structHasUniqueObjectRepresentations(const ASTContext &Context,
2431  const RecordDecl *RD) {
2432  assert(!RD->isUnion() && "Must be struct/class type");
2433  const auto &Layout = Context.getASTRecordLayout(RD);
2434 
2435  int64_t CurOffsetInBits = 0;
2436  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) {
2437  if (ClassDecl->isDynamicClass())
2438  return llvm::None;
2439 
2441  for (const auto Base : ClassDecl->bases()) {
2442  // Empty types can be inherited from, and non-empty types can potentially
2443  // have tail padding, so just make sure there isn't an error.
2444  if (!isStructEmpty(Base.getType())) {
2445  Optional<int64_t> Size = structHasUniqueObjectRepresentations(
2446  Context, Base.getType()->castAs<RecordType>()->getDecl());
2447  if (!Size)
2448  return llvm::None;
2449  Bases.emplace_back(Base.getType(), Size.getValue());
2450  }
2451  }
2452 
2453  llvm::sort(Bases, [&](const std::pair<QualType, int64_t> &L,
2454  const std::pair<QualType, int64_t> &R) {
2455  return Layout.getBaseClassOffset(L.first->getAsCXXRecordDecl()) <
2456  Layout.getBaseClassOffset(R.first->getAsCXXRecordDecl());
2457  });
2458 
2459  for (const auto Base : Bases) {
2460  int64_t BaseOffset = Context.toBits(
2461  Layout.getBaseClassOffset(Base.first->getAsCXXRecordDecl()));
2462  int64_t BaseSize = Base.second;
2463  if (BaseOffset != CurOffsetInBits)
2464  return llvm::None;
2465  CurOffsetInBits = BaseOffset + BaseSize;
2466  }
2467  }
2468 
2469  for (const auto *Field : RD->fields()) {
2470  if (!Field->getType()->isReferenceType() &&
2471  !Context.hasUniqueObjectRepresentations(Field->getType()))
2472  return llvm::None;
2473 
2474  int64_t FieldSizeInBits =
2475  Context.toBits(Context.getTypeSizeInChars(Field->getType()));
2476  if (Field->isBitField()) {
2477  int64_t BitfieldSize = Field->getBitWidthValue(Context);
2478 
2479  if (BitfieldSize > FieldSizeInBits)
2480  return llvm::None;
2481  FieldSizeInBits = BitfieldSize;
2482  }
2483 
2484  int64_t FieldOffsetInBits = Context.getFieldOffset(Field);
2485 
2486  if (FieldOffsetInBits != CurOffsetInBits)
2487  return llvm::None;
2488 
2489  CurOffsetInBits = FieldSizeInBits + FieldOffsetInBits;
2490  }
2491 
2492  return CurOffsetInBits;
2493 }
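// [Editorial example -- not part of ASTContext.cpp.] The walk above detects
// padding by requiring each subobject to start exactly where the previous one
// ended and the last one to end at the total size. The same check phrased
// with offsetof/sizeof for an ordinary struct (results assume a typical
// 4-byte-aligned int):

#include <cstddef>

struct EditorialProbe { char c; int i; };        // 3 padding bytes after c

static bool editorialProbeIsContiguous() {
  std::size_t End = offsetof(EditorialProbe, c) + sizeof(char);   // == 1
  if (offsetof(EditorialProbe, i) != End)        // i starts at 4, not 1
    return false;
  End = offsetof(EditorialProbe, i) + sizeof(int);
  return End == sizeof(EditorialProbe);          // returns false here: interior padding
}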
2494 
2495 bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const {
2496  // C++17 [meta.unary.prop]:
2497  // The predicate condition for a template specialization
2498  // has_unique_object_representations<T> shall be
2499  // satisfied if and only if:
2500  // (9.1) - T is trivially copyable, and
2501  // (9.2) - any two objects of type T with the same value have the same
2502  // object representation, where two objects
2503  // of array or non-union class type are considered to have the same value
2504  // if their respective sequences of
2505  // direct subobjects have the same values, and two objects of union type
2506  // are considered to have the same
2507  // value if they have the same active member and the corresponding members
2508  // have the same value.
2509  // The set of scalar types for which this condition holds is
2510  // implementation-defined. [ Note: If a type has padding
2511  // bits, the condition does not hold; otherwise, the condition holds true
2512  // for unsigned integral types. -- end note ]
2513  assert(!Ty.isNull() && "Null QualType sent to unique object rep check");
2514 
2515  // Arrays are unique only if their element type is unique.
2516  if (Ty->isArrayType())
2517  return hasUniqueObjectRepresentations(getBaseElementType(Ty));
2518 
2519  // (9.1) - T is trivially copyable...
2520  if (!Ty.isTriviallyCopyableType(*this))
2521  return false;
2522 
2523  // All integrals and enums are unique.
2524  if (Ty->isIntegralOrEnumerationType())
2525  return true;
2526 
2527  // All other pointers are unique.
2528  if (Ty->isPointerType())
2529  return true;
2530 
2531  if (Ty->isMemberPointerType()) {
2532  const auto *MPT = Ty->getAs<MemberPointerType>();
2533  return !ABI->getMemberPointerInfo(MPT).HasPadding;
2534  }
2535 
2536  if (Ty->isRecordType()) {
2537  const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl();
2538 
2539  if (Record->isInvalidDecl())
2540  return false;
2541 
2542  if (Record->isUnion())
2543  return unionHasUniqueObjectRepresentations(*this, Record);
2544 
2545  Optional<int64_t> StructSize =
2546  structHasUniqueObjectRepresentations(*this, Record);
2547 
2548  return StructSize &&
2549  StructSize.getValue() == static_cast<int64_t>(getTypeSize(Ty));
2550  }
2551 
2552  // FIXME: More cases to handle here (list by rsmith):
2553  // vectors (careful about, eg, vector of 3 foo)
2554  // _Complex int and friends
2555  // _Atomic T
2556  // Obj-C block pointers
2557  // Obj-C object pointers
2558  // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
2559  // clk_event_t, queue_t, reserve_id_t)
2560  // There are also Obj-C class types and the Obj-C selector type, but I think it
2561  // makes sense for those to return false here.
2562 
2563  return false;
2564 }
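// [Editorial example -- not part of ASTContext.cpp.] This method backs Clang's
// __has_unique_object_representations builtin, which in turn implements the
// C++17 trait std::has_unique_object_representations. A user-level view of
// the cases above (the static_asserts assume a typical LP64 target; float is
// rejected because equal values such as +0.0 and -0.0 have different object
// representations):

#include <type_traits>

struct EditorialTight { int a; int b; };         // no padding bits
struct EditorialHoley { char c; int i; };        // 3 padding bytes after c

static_assert(std::has_unique_object_representations_v<int>);
static_assert(std::has_unique_object_representations_v<EditorialTight>);
static_assert(!std::has_unique_object_representations_v<EditorialHoley>);
static_assert(!std::has_unique_object_representations_v<float>);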
2565 
2566 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
2567  unsigned count = 0;
2568  // Count ivars declared in class extension.
2569  for (const auto *Ext : OI->known_extensions())
2570  count += Ext->ivar_size();
2571 
2572  // Count ivar defined in this class's implementation. This
2573  // includes synthesized ivars.
2574  if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
2575  count += ImplDecl->ivar_size();
2576 
2577  return count;
2578 }
2579 
2580 bool ASTContext::isSentinelNullExpr(const Expr *E) {
2581  if (!E)
2582  return false;
2583 
2584  // nullptr_t is always treated as null.
2585  if (E->getType()->isNullPtrType()) return true;
2586 
2587  if (E->getType()->isAnyPointerType() &&
2588  E->IgnoreParenCasts()->isNullPointerConstant(*this,
2589  Expr::NPC_ValueDependentIsNull))
2590  return true;
2591 
2592  // Unfortunately, __null has type 'int'.
2593  if (isa<GNUNullExpr>(E)) return true;
2594 
2595  return false;
2596 }
2597 
2598 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none
2599 /// exists.
2600 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
2601  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2602  I = ObjCImpls.find(D);
2603  if (I != ObjCImpls.end())
2604  return cast<ObjCImplementationDecl>(I->second);
2605  return nullptr;
2606 }
2607 
2608 /// Get the implementation of ObjCCategoryDecl, or nullptr if none
2609 /// exists.
2610 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
2611  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2612  I = ObjCImpls.find(D);
2613  if (I != ObjCImpls.end())
2614  return cast<ObjCCategoryImplDecl>(I->second);
2615  return nullptr;
2616 }
2617 
2618 /// Set the implementation of ObjCInterfaceDecl.
2619 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
2620  ObjCImplementationDecl *ImplD) {
2621  assert(IFaceD && ImplD && "Passed null params");
2622  ObjCImpls[IFaceD] = ImplD;
2623 }
2624 
2625 /// Set the implementation of ObjCCategoryDecl.
2626 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
2627  ObjCCategoryImplDecl *ImplD) {
2628  assert(CatD && ImplD && "Passed null params");
2629  ObjCImpls[CatD] = ImplD;
2630 }
2631 
2632 const ObjCMethodDecl *
2633 ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
2634  return ObjCMethodRedecls.lookup(MD);
2635 }
2636 
2637 void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
2638  const ObjCMethodDecl *Redecl) {
2639  assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
2640  ObjCMethodRedecls[MD] = Redecl;
2641 }
2642 
2643 const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
2644  const NamedDecl *ND) const {
2645  if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
2646  return ID;
2647  if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
2648  return CD->getClassInterface();
2649  if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
2650  return IMD->getClassInterface();
2651 
2652  return nullptr;
2653 }
2654 
2655 /// Get the copy initialization expression of VarDecl, or nullptr if
2656 /// none exists.
2657 ASTContext::BlockVarCopyInit
2658 ASTContext::getBlockVarCopyInit(const VarDecl *VD) const {
2659  assert(VD && "Passed null params");
2660  assert(VD->hasAttr<BlocksAttr>() &&
2661  "getBlockVarCopyInits - not __block var");
2662  auto I = BlockVarCopyInits.find(VD);
2663  if (I != BlockVarCopyInits.end())
2664  return I->second;
2665  return {nullptr, false};
2666 }
2667 
2668 /// Set the copy initialization expression of a block var decl.
2669 void ASTContext::setBlockVarCopyInit(const VarDecl *VD, Expr *CopyExpr,
2670  bool CanThrow) {
2671  assert(VD && CopyExpr && "Passed null params");
2672  assert(VD->hasAttr<BlocksAttr>() &&
2673  "setBlockVarCopyInits - not __block var");
2674  BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
2675 }
2676 
2677 TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
2678  unsigned DataSize) const {
2679  if (!DataSize)
2680  DataSize = TypeLoc::getFullDataSizeForType(T);
2681  else
2682  assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
2683  "incorrect data size provided to CreateTypeSourceInfo!");
2684 
2685  auto *TInfo =
2686  (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
2687  new (TInfo) TypeSourceInfo(T);
2688  return TInfo;
2689 }
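// [Editorial example -- not part of ASTContext.cpp.] CreateTypeSourceInfo uses
// the common "header plus trailing storage" idiom: over-allocate, then
// placement-new the object at the start of the buffer so its variable-length
// payload lives immediately after it. A standalone sketch with plain
// operator new (hypothetical names):

#include <cstddef>
#include <new>

struct EditorialHeader { unsigned Kind; /* payload bytes follow the object */ };

static EditorialHeader *editorialCreateWithTrailing(std::size_t PayloadSize) {
  void *Mem = ::operator new(sizeof(EditorialHeader) + PayloadSize);
  return new (Mem) EditorialHeader{0};           // PayloadSize bytes remain after *this
}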
2690 
2691 TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
2692  SourceLocation L) const {
2693  TypeSourceInfo *DI = CreateTypeSourceInfo(T);
2694  DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
2695  return DI;
2696 }
2697 
2698 const ASTRecordLayout &
2699 ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
2700  return getObjCLayout(D, nullptr);
2701 }
2702 
2703 const ASTRecordLayout &
2704 ASTContext::getASTObjCImplementationLayout(
2705  const ObjCImplementationDecl *D) const {
2706  return getObjCLayout(D->getClassInterface(), D);
2707 }
2708 
2709 //===----------------------------------------------------------------------===//
2710 // Type creation/memoization methods
2711 //===----------------------------------------------------------------------===//
2712 
2713 QualType
2714 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
2715  unsigned fastQuals = quals.getFastQualifiers();
2716  quals.removeFastQualifiers();
2717 
2718  // Check if we've already instantiated this type.
2719  llvm::FoldingSetNodeID ID;
2720  ExtQuals::Profile(ID, baseType, quals);
2721  void *insertPos = nullptr;
2722  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
2723  assert(eq->getQualifiers() == quals);
2724  return QualType(eq, fastQuals);
2725  }
2726 
2727  // If the base type is not canonical, make the appropriate canonical type.
2728  QualType canon;
2729  if (!baseType->isCanonicalUnqualified()) {
2730  SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
2731  canonSplit.Quals.addConsistentQualifiers(quals);
2732  canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);
2733 
2734  // Re-find the insert position.
2735  (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
2736  }
2737 
2738  auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
2739  ExtQualNodes.InsertNode(eq, insertPos);
2740  return QualType(eq, fastQuals);
2741 }
2742 
2743 QualType ASTContext::getAddrSpaceQualType(QualType T,
2744  LangAS AddressSpace) const {
2745  QualType CanT = getCanonicalType(T);
2746  if (CanT.getAddressSpace() == AddressSpace)
2747  return T;
2748 
2749  // If we are composing extended qualifiers together, merge together
2750  // into one ExtQuals node.
2751  QualifierCollector Quals;
2752  const Type *TypeNode = Quals.strip(T);
2753 
2754  // If this type already has an address space specified, it cannot get
2755  // another one.
2756  assert(!Quals.hasAddressSpace() &&
2757  "Type cannot be in multiple addr spaces!");
2758  Quals.addAddressSpace(AddressSpace);
2759 
2760  return getExtQualType(TypeNode, Quals);
2761 }
2762 
2763 QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
2764  // If we are composing extended qualifiers together, merge together
2765  // into one ExtQuals node.
2766  QualifierCollector Quals;
2767  const Type *TypeNode = Quals.strip(T);
2768 
2769  // If the qualifier doesn't have an address space just return it.
2770  if (!Quals.hasAddressSpace())
2771  return T;
2772 
2773  Quals.removeAddressSpace();
2774 
2775  // Removal of the address space can mean there are no longer any
2776  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
2777  // or required.
2778  if (Quals.hasNonFastQualifiers())
2779  return getExtQualType(TypeNode, Quals);
2780  else
2781  return QualType(TypeNode, Quals.getFastQualifiers());
2782 }
2783 
2784 QualType ASTContext::getObjCGCQualType(QualType T,
2785  Qualifiers::GC GCAttr) const {
2786  QualType CanT = getCanonicalType(T);
2787  if (CanT.getObjCGCAttr() == GCAttr)
2788  return T;
2789 
2790  if (const auto *ptr = T->getAs<PointerType>()) {
2791  QualType Pointee = ptr->getPointeeType();
2792  if (Pointee->isAnyPointerType()) {
2793  QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
2794  return getPointerType(ResultType);
2795  }
2796  }
2797 
2798  // If we are composing extended qualifiers together, merge together
2799  // into one ExtQuals node.
2800  QualifierCollector Quals;
2801  const Type *TypeNode = Quals.strip(T);
2802 
2803  // If this type already has an ObjCGC specified, it cannot get
2804  // another one.
2805  assert(!Quals.hasObjCGCAttr() &&
2806  "Type cannot have multiple ObjCGCs!");
2807  Quals.addObjCGCAttr(GCAttr);
2808 
2809  return getExtQualType(TypeNode, Quals);
2810 }
2811 
2812 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
2813  FunctionType::ExtInfo Info) {
2814  if (T->getExtInfo() == Info)
2815  return T;
2816 
2817  QualType Result;
2818  if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
2819  Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
2820  } else {
2821  const auto *FPT = cast<FunctionProtoType>(T);
2822  FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
2823  EPI.ExtInfo = Info;
2824  Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
2825  }
2826 
2827  return cast<FunctionType>(Result.getTypePtr());
2828 }
2829 
2830 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
2831  QualType ResultType) {
2832  FD = FD->getMostRecentDecl();
2833  while (true) {
2834  const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
2835  FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
2836  FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
2837  if (FunctionDecl *Next = FD->getPreviousDecl())
2838  FD = Next;
2839  else
2840  break;
2841  }
2842  if (ASTMutationListener *L = getASTMutationListener())
2843  L->DeducedReturnType(FD, ResultType);
2844 }
2845 
2846 /// Get a function type and produce the equivalent function type with the
2847 /// specified exception specification. Type sugar that can be present on a
2848 /// declaration of a function with an exception specification is permitted
2849 /// and preserved. Other type sugar (for instance, typedefs) is not.
2850 QualType ASTContext::getFunctionTypeWithExceptionSpec(
2851     QualType Orig, FunctionProtoType::ExceptionSpecInfo ESI) {
2852  // Might have some parens.
2853  if (const auto *PT = dyn_cast<ParenType>(Orig))
2854  return getParenType(
2855  getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI));
2856 
2857  // Might be wrapped in a macro qualified type.
2858  if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig))
2859  return getMacroQualifiedType(
2860  getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI),
2861  MQT->getMacroIdentifier());
2862 
2863  // Might have a calling-convention attribute.
2864  if (const auto *AT = dyn_cast<AttributedType>(Orig))
2865  return getAttributedType(
2866  AT->getAttrKind(),
2867  getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI),
2868  getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI));
2869 
2870  // Anything else must be a function type. Rebuild it with the new exception
2871  // specification.
2872  const auto *Proto = Orig->castAs<FunctionProtoType>();
2873  return getFunctionType(
2874  Proto->getReturnType(), Proto->getParamTypes(),
2875  Proto->getExtProtoInfo().withExceptionSpec(ESI));
2876 }
2877 
2878 bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
2879  QualType U) {
2880  return hasSameType(T, U) ||
2881  (getLangOpts().CPlusPlus17 &&
2882  hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None),
2883  getFunctionTypeWithExceptionSpec(U, EST_None)));
2884 }
2885 
2886 void ASTContext::adjustExceptionSpec(
2887     FunctionDecl *FD, FunctionProtoType::ExceptionSpecInfo ESI,
2888  bool AsWritten) {
2889  // Update the type.
2890  QualType Updated =
2891  getFunctionTypeWithExceptionSpec(FD->getType(), ESI);
2892  FD->setType(Updated);
2893 
2894  if (!AsWritten)
2895  return;
2896 
2897  // Update the type in the type source information too.
2898  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
2899  // If the type and the type-as-written differ, we may need to update
2900  // the type-as-written too.
2901  if (TSInfo->getType() != FD->getType())
2902  Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);
2903 
2904  // FIXME: When we get proper type location information for exceptions,
2905  // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
2906  // up the TypeSourceInfo;
2907  assert(TypeLoc::getFullDataSizeForType(Updated) ==
2908  TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
2909  "TypeLoc size mismatch from updating exception specification");
2910  TSInfo->overrideType(Updated);
2911  }
2912 }
2913 
2914 /// getComplexType - Return the uniqued reference to the type for a complex
2915 /// number with the specified element type.
2916 QualType ASTContext::getComplexType(QualType T) const {
2917  // Unique pointers, to guarantee there is only one pointer of a particular
2918  // structure.
2919  llvm::FoldingSetNodeID ID;
2920  ComplexType::Profile(ID, T);
2921 
2922  void *InsertPos = nullptr;
2923  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
2924  return QualType(CT, 0);
2925 
2926  // If the pointee type isn't canonical, this won't be a canonical type either,
2927  // so fill in the canonical type field.
2928  QualType Canonical;
2929  if (!T.isCanonical()) {
2930  Canonical = getComplexType(getCanonicalType(T));
2931 
2932  // Get the new insert position for the node we care about.
2933  ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
2934  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2935  }
2936  auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
2937  Types.push_back(New);
2938  ComplexTypes.InsertNode(New, InsertPos);
2939  return QualType(New, 0);
2940 }
2941 
2942 /// getPointerType - Return the uniqued reference to the type for a pointer to
2943 /// the specified type.
2944 QualType ASTContext::getPointerType(QualType T) const {
2945  // Unique pointers, to guarantee there is only one pointer of a particular
2946  // structure.
2947  llvm::FoldingSetNodeID ID;
2948  PointerType::Profile(ID, T);
2949 
2950  void *InsertPos = nullptr;
2951  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
2952  return QualType(PT, 0);
2953 
2954  // If the pointee type isn't canonical, this won't be a canonical type either,
2955  // so fill in the canonical type field.
2956  QualType Canonical;
2957  if (!T.isCanonical()) {
2958  Canonical = getPointerType(getCanonicalType(T));
2959 
2960  // Get the new insert position for the node we care about.
2961  PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
2962  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
2963  }
2964  auto *New = new (*this, TypeAlignment) PointerType(T, Canonical);
2965  Types.push_back(New);
2966  PointerTypes.InsertNode(New, InsertPos);
2967  return QualType(New, 0);
2968 }
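// [Editorial example -- not part of ASTContext.cpp.] Every get*Type method in
// this section follows the same hash-consing recipe: profile the components
// into a key, return the existing node if one is found, otherwise build the
// canonical form first and only then insert the new node -- which is what
// makes pointer comparison of canonical types equivalent to type equality.
// A miniature version with a std::map standing in for the FoldingSet:

#include <map>
#include <memory>

struct EditorialPtrType { const void *Pointee; };

static std::map<const void *, std::unique_ptr<EditorialPtrType>> EditorialPtrTypes;

static EditorialPtrType *editorialGetPointerType(const void *Pointee) {
  auto &Slot = EditorialPtrTypes[Pointee];       // one lookup plays the role of Profile + find
  if (!Slot)
    Slot.reset(new EditorialPtrType{Pointee});   // built at most once per pointee
  return Slot.get();                             // same pointee -> same node -> pointer equality
}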
2969 
2970 QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
2971  llvm::FoldingSetNodeID ID;
2972  AdjustedType::Profile(ID, Orig, New);
2973  void *InsertPos = nullptr;
2974  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2975  if (AT)
2976  return QualType(AT, 0);
2977 
2978  QualType Canonical = getCanonicalType(New);
2979 
2980  // Get the new insert position for the node we care about.
2981  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
2982  assert(!AT && "Shouldn't be in the map!");
2983 
2984  AT = new (*this, TypeAlignment)
2985  AdjustedType(Type::Adjusted, Orig, New, Canonical);
2986  Types.push_back(AT);
2987  AdjustedTypes.InsertNode(AT, InsertPos);
2988  return QualType(AT, 0);
2989 }
2990 
2991 QualType ASTContext::getDecayedType(QualType T) const {
2992  assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
2993 
2994  QualType Decayed;
2995 
2996  // C99 6.7.5.3p7:
2997  // A declaration of a parameter as "array of type" shall be
2998  // adjusted to "qualified pointer to type", where the type
2999  // qualifiers (if any) are those specified within the [ and ] of
3000  // the array type derivation.
3001  if (T->isArrayType())
3002  Decayed = getArrayDecayedType(T);
3003 
3004  // C99 6.7.5.3p8:
3005  // A declaration of a parameter as "function returning type"
3006  // shall be adjusted to "pointer to function returning type", as
3007  // in 6.3.2.1.
3008  if (T->isFunctionType())
3009  Decayed = getPointerType(T);
3010 
3011  llvm::FoldingSetNodeID ID;
3012  AdjustedType::Profile(ID, T, Decayed);
3013  void *InsertPos = nullptr;
3014  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3015  if (AT)
3016  return QualType(AT, 0);
3017 
3018  QualType Canonical = getCanonicalType(Decayed);
3019 
3020  // Get the new insert position for the node we care about.
3021  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3022  assert(!AT && "Shouldn't be in the map!");
3023 
3024  AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical);
3025  Types.push_back(AT);
3026  AdjustedTypes.InsertNode(AT, InsertPos);
3027  return QualType(AT, 0);
3028 }
3029 
3030 /// getBlockPointerType - Return the uniqued reference to the type for
3031 /// a pointer to the specified block.
3032 QualType ASTContext::getBlockPointerType(QualType T) const {
3033  assert(T->isFunctionType() && "block of function types only");
3034  // Unique pointers, to guarantee there is only one block of a particular
3035  // structure.
3036  llvm::FoldingSetNodeID ID;
3037  BlockPointerType::Profile(ID, T);
3038 
3039  void *InsertPos = nullptr;
3040  if (BlockPointerType *PT =
3041  BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3042  return QualType(PT, 0);
3043 
3044  // If the block pointee type isn't canonical, this won't be a canonical
3045  // type either so fill in the canonical type field.
3046  QualType Canonical;
3047  if (!T.isCanonical()) {
3048  Canonical = getBlockPointerType(getCanonicalType(T));
3049 
3050  // Get the new insert position for the node we care about.
3051  BlockPointerType *NewIP =
3052  BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3053  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3054  }
3055  auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
3056  Types.push_back(New);
3057  BlockPointerTypes.InsertNode(New, InsertPos);
3058  return QualType(New, 0);
3059 }
3060 
3061 /// getLValueReferenceType - Return the uniqued reference to the type for an
3062 /// lvalue reference to the specified type.
3063 QualType
3064 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
3065  assert(getCanonicalType(T) != OverloadTy &&
3066  "Unresolved overloaded function type");
3067 
3068  // Unique pointers, to guarantee there is only one pointer of a particular
3069  // structure.
3070  llvm::FoldingSetNodeID ID;
3071  ReferenceType::Profile(ID, T, SpelledAsLValue);
3072 
3073  void *InsertPos = nullptr;
3074  if (LValueReferenceType *RT =
3075  LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
3076  return QualType(RT, 0);
3077 
3078  const auto *InnerRef = T->getAs<ReferenceType>();
3079 
3080  // If the referencee type isn't canonical, this won't be a canonical type
3081  // either, so fill in the canonical type field.
3082  QualType Canonical;
3083  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
3084  QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
3085  Canonical = getLValueReferenceType(getCanonicalType(PointeeType));
3086 
3087  // Get the new insert position for the node we care about.
3088  LValueReferenceType *NewIP =
3089  LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
3090  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3091  }
3092 
3093  auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
3094  SpelledAsLValue);
3095  Types.push_back(New);
3096  LValueReferenceTypes.InsertNode(New, InsertPos);
3097 
3098  return QualType(New, 0);
3099 }
3100 
3101 /// getRValueReferenceType - Return the uniqued reference to the type for an
3102 /// rvalue reference to the specified type.
3103 QualType ASTContext::getRValueReferenceType(QualType T) const {
3104  // Unique pointers, to guarantee there is only one pointer of a particular
3105  // structure.
3106  llvm::FoldingSetNodeID ID;
3107  ReferenceType::Profile(ID, T, false);
3108 
3109  void *InsertPos = nullptr;
3110  if (RValueReferenceType *RT =
3111  RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
3112  return QualType(RT, 0);
3113 
3114  const auto *InnerRef = T->getAs<ReferenceType>();
3115 
3116  // If the referencee type isn't canonical, this won't be a canonical type
3117  // either, so fill in the canonical type field.
3118  QualType Canonical;
3119  if (InnerRef || !T.isCanonical()) {
3120  QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
3121  Canonical = getRValueReferenceType(getCanonicalType(PointeeType));
3122 
3123  // Get the new insert position for the node we care about.
3124  RValueReferenceType *NewIP =
3125  RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
3126  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3127  }
3128 
3129  auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
3130  Types.push_back(New);
3131  RValueReferenceTypes.InsertNode(New, InsertPos);
3132  return QualType(New, 0);
3133 }
3134 
3135 /// getMemberPointerType - Return the uniqued reference to the type for a
3136 /// member pointer to the specified type, in the specified class.
3137 QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
3138  // Unique pointers, to guarantee there is only one pointer of a particular
3139  // structure.
3140  llvm::FoldingSetNodeID ID;
3141  MemberPointerType::Profile(ID, T, Cls);
3142 
3143  void *InsertPos = nullptr;
3144  if (MemberPointerType *PT =
3145  MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3146  return QualType(PT, 0);
3147 
3148  // If the pointee or class type isn't canonical, this won't be a canonical
3149  // type either, so fill in the canonical type field.
3150  QualType Canonical;
3151  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
3152  Canonical = getMemberPointerType(getCanonicalType(T), getCanonicalType(Cls));
3153 
3154  // Get the new insert position for the node we care about.
3155  MemberPointerType *NewIP =
3156  MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3157  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3158  }
3159  auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
3160  Types.push_back(New);
3161  MemberPointerTypes.InsertNode(New, InsertPos);
3162  return QualType(New, 0);
3163 }
3164 
3165 /// getConstantArrayType - Return the unique reference to the type for an
3166 /// array of the specified element type.
3167 QualType ASTContext::getConstantArrayType(QualType EltTy,
3168  const llvm::APInt &ArySizeIn,
3169  const Expr *SizeExpr,
3170  ArrayType::ArraySizeModifier ASM,
3171  unsigned IndexTypeQuals) const {
3172  assert((EltTy->isDependentType() ||
3173  EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
3174  "Constant array of VLAs is illegal!");
3175 
3176  // We only need the size as part of the type if it's instantiation-dependent.
3177  if (SizeExpr && !SizeExpr->isInstantiationDependent())
3178  SizeExpr = nullptr;
3179 
3180  // Convert the array size into a canonical width matching the pointer size for
3181  // the target.
3182  llvm::APInt ArySize(ArySizeIn);
3183  ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());
3184 
3185  llvm::FoldingSetNodeID ID;
3186  ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM,
3187  IndexTypeQuals);
3188 
3189  void *InsertPos = nullptr;
3190  if (ConstantArrayType *ATP =
3191  ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
3192  return QualType(ATP, 0);
3193 
3194  // If the element type isn't canonical or has qualifiers, or the array bound
3195  // is instantiation-dependent, this won't be a canonical type either, so fill
3196  // in the canonical type field.
3197  QualType Canon;
3198  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
3199  SplitQualType canonSplit = getCanonicalType(EltTy).split();
3200  Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr,
3201  ASM, IndexTypeQuals);
3202  Canon = getQualifiedType(Canon, canonSplit.Quals);
3203 
3204  // Get the new insert position for the node we care about.
3205  ConstantArrayType *NewIP =
3206  ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
3207  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3208  }
3209 
3210  void *Mem = Allocate(
3211  ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0),
3212  TypeAlignment);
3213  auto *New = new (Mem)
3214  ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals);
3215  ConstantArrayTypes.InsertNode(New, InsertPos);
3216  Types.push_back(New);
3217  return QualType(New, 0);
3218 }
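// [Editorial example -- not part of ASTContext.cpp.] One detail of the method
// above worth calling out: the canonical ConstantArrayType is always built
// over the unqualified, canonical element type, and the element's qualifiers
// are re-applied on top of the array node. So two spellings such as
// 'const int[10]' and 'CI[10]' (with 'typedef const int CI;') end up sharing
// a single canonical array of 'int[10]' wrapped in a 'const' qualifier, which
// is what lets hasSameType() compare them by pointer. The size expression is
// kept in the key only when it is instantiation-dependent, since otherwise
// the APInt value alone identifies the type.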
3219 
3220 /// getVariableArrayDecayedType - Turns the given type, which may be
3221 /// variably-modified, into the corresponding type with all the known
3222 /// sizes replaced with [*].
3223 QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
3224  // Vastly most common case.
3225  if (!type->isVariablyModifiedType()) return type;
3226 
3227  QualType result;
3228 
3229  SplitQualType split = type.getSplitDesugaredType();
3230  const Type *ty = split.Ty;
3231  switch (ty->getTypeClass()) {
3232 #define TYPE(Class, Base)
3233 #define ABSTRACT_TYPE(Class, Base)
3234 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3235 #include "clang/AST/TypeNodes.inc"
3236  llvm_unreachable("didn't desugar past all non-canonical types?");
3237 
3238  // These types should never be variably-modified.
3239  case Type::Builtin:
3240  case Type::Complex:
3241  case Type::Vector:
3242  case Type::DependentVector:
3243  case Type::ExtVector:
3244  case Type::DependentSizedExtVector:
3245  case Type::DependentAddressSpace:
3246  case Type::ObjCObject:
3247  case Type::ObjCInterface:
3248  case Type::ObjCObjectPointer:
3249  case Type::Record:
3250  case Type::Enum:
3251  case Type::UnresolvedUsing:
3252  case Type::TypeOfExpr:
3253  case Type::TypeOf:
3254  case Type::Decltype:
3255  case Type::UnaryTransform:
3256  case Type::DependentName:
3257  case Type::InjectedClassName:
3258  case Type::TemplateSpecialization:
3259  case Type::DependentTemplateSpecialization:
3260  case Type::TemplateTypeParm:
3261  case Type::SubstTemplateTypeParmPack:
3262  case Type::Auto:
3263  case Type::DeducedTemplateSpecialization:
3264  case Type::PackExpansion:
3265  llvm_unreachable("type should never be variably-modified");
3266 
3267  // These types can be variably-modified but should never need to
3268  // further decay.
3269  case Type::FunctionNoProto:
3270  case Type::FunctionProto:
3271  case Type::BlockPointer:
3272  case Type::MemberPointer:
3273  case Type::Pipe:
3274  return type;
3275 
3276  // These types can be variably-modified. All these modifications
3277  // preserve structure except as noted by comments.
3278  // TODO: if we ever care about optimizing VLAs, there are no-op
3279  // optimizations available here.
3280  case Type::Pointer:
3281  result = getPointerType(getVariableArrayDecayedType(
3282  cast<PointerType>(ty)->getPointeeType()));
3283  break;
3284 
3285  case Type::LValueReference: {
3286  const auto *lv = cast<LValueReferenceType>(ty);
3287  result = getLValueReferenceType(
3288  getVariableArrayDecayedType(lv->getPointeeType()),
3289  lv->isSpelledAsLValue());
3290  break;
3291  }
3292 
3293  case Type::RValueReference: {
3294  const auto *lv = cast<RValueReferenceType>(ty);
3295  result = getRValueReferenceType(
3296  getVariableArrayDecayedType(lv->getPointeeType()));
3297  break;
3298  }
3299 
3300  case Type::Atomic: {
3301  const auto *at = cast<AtomicType>(ty);
3302  result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
3303  break;
3304  }
3305 
3306  case Type::ConstantArray: {
3307  const auto *cat = cast<ConstantArrayType>(ty);
3308  result = getConstantArrayType(
3309  getVariableArrayDecayedType(cat->getElementType()),
3310  cat->getSize(),
3311  cat->getSizeExpr(),
3312  cat->getSizeModifier(),
3313  cat->getIndexTypeCVRQualifiers());
3314  break;
3315  }
3316 
3317  case Type::DependentSizedArray: {
3318  const auto *dat = cast<DependentSizedArrayType>(ty);
3319  result = getDependentSizedArrayType(
3320  getVariableArrayDecayedType(dat->getElementType()),
3321  dat->getSizeExpr(),
3322  dat->getSizeModifier(),
3323  dat->getIndexTypeCVRQualifiers(),
3324  dat->getBracketsRange());
3325  break;
3326  }
3327 
3328  // Turn incomplete types into [*] types.
3329  case Type::IncompleteArray: {
3330  const auto *iat = cast<IncompleteArrayType>(ty);
3331  result = getVariableArrayType(
3332  getVariableArrayDecayedType(iat->getElementType()),
 3333  /*size*/ nullptr,
 3334  ArrayType::Normal,
 3335  iat->getIndexTypeCVRQualifiers(),
3336  SourceRange());
3337  break;
3338  }
3339 
3340  // Turn VLA types into [*] types.
3341  case Type::VariableArray: {
3342  const auto *vat = cast<VariableArrayType>(ty);
3343  result = getVariableArrayType(
3344  getVariableArrayDecayedType(vat->getElementType()),
 3345  /*size*/ nullptr,
 3346  ArrayType::Star,
 3347  vat->getIndexTypeCVRQualifiers(),
3348  vat->getBracketsRange());
3349  break;
3350  }
3351  }
3352 
3353  // Apply the top-level qualifiers from the original.
3354  return getQualifiedType(result, split.Quals);
3355 }
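// --- Editorial usage sketch (not part of ASTContext.cpp) ---------------------
// A minimal illustration of what the decay above produces, assuming `Ctx` is a
// fully-initialized ASTContext and `VLATy` is a variably-modified type such as
// the type of 'int a[n][m]'. Each variable bound becomes the unspecified '[*]'
// bound, and the top-level qualifiers of the original type are re-applied.
// The helper name below is hypothetical.
static QualType exampleDecayVLA(ASTContext &Ctx, QualType VLATy) {
  // e.g. 'volatile int [n][m]'  ->  'volatile int [*][*]'
  return Ctx.getVariableArrayDecayedType(VLATy);
}
// -----------------------------------------------------------------------------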
3356 
3357 /// getVariableArrayType - Returns a non-unique reference to the type for a
 3358 /// variable array of the specified element type.
 3359 QualType ASTContext::getVariableArrayType(QualType EltTy,
 3360  Expr *NumElts,
 3361  ArrayType::ArraySizeModifier ASM,
 3362  unsigned IndexTypeQuals,
3363  SourceRange Brackets) const {
 3364  // Since we don't unique expressions, it isn't possible to unique VLAs
3365  // that have an expression provided for their size.
3366  QualType Canon;
3367 
3368  // Be sure to pull qualifiers off the element type.
3369  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
3370  SplitQualType canonSplit = getCanonicalType(EltTy).split();
3371  Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
3372  IndexTypeQuals, Brackets);
3373  Canon = getQualifiedType(Canon, canonSplit.Quals);
3374  }
3375 
3376  auto *New = new (*this, TypeAlignment)
3377  VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
3378 
3379  VariableArrayTypes.push_back(New);
3380  Types.push_back(New);
3381  return QualType(New, 0);
3382 }
3383 
3384 /// getDependentSizedArrayType - Returns a non-unique reference to
3385 /// the type for a dependently-sized array of the specified element
 3386 /// type.
 3387 QualType ASTContext::getDependentSizedArrayType(QualType elementType,
 3388  Expr *numElements,
 3389  ArrayType::ArraySizeModifier ASM,
 3390  unsigned elementTypeQuals,
3391  SourceRange brackets) const {
3392  assert((!numElements || numElements->isTypeDependent() ||
3393  numElements->isValueDependent()) &&
3394  "Size must be type- or value-dependent!");
3395 
3396  // Dependently-sized array types that do not have a specified number
3397  // of elements will have their sizes deduced from a dependent
3398  // initializer. We do no canonicalization here at all, which is okay
3399  // because they can't be used in most locations.
3400  if (!numElements) {
3401  auto *newType
3402  = new (*this, TypeAlignment)
3403  DependentSizedArrayType(*this, elementType, QualType(),
3404  numElements, ASM, elementTypeQuals,
3405  brackets);
3406  Types.push_back(newType);
3407  return QualType(newType, 0);
3408  }
3409 
3410  // Otherwise, we actually build a new type every time, but we
3411  // also build a canonical type.
3412 
3413  SplitQualType canonElementType = getCanonicalType(elementType).split();
3414 
3415  void *insertPos = nullptr;
 3416  llvm::FoldingSetNodeID ID;
 3417  DependentSizedArrayType::Profile(ID, *this,
 3418  QualType(canonElementType.Ty, 0),
3419  ASM, elementTypeQuals, numElements);
3420 
3421  // Look for an existing type with these properties.
3422  DependentSizedArrayType *canonTy =
3423  DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);
3424 
3425  // If we don't have one, build one.
3426  if (!canonTy) {
3427  canonTy = new (*this, TypeAlignment)
3428  DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
3429  QualType(), numElements, ASM, elementTypeQuals,
3430  brackets);
3431  DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
3432  Types.push_back(canonTy);
3433  }
3434 
3435  // Apply qualifiers from the element type to the array.
3436  QualType canon = getQualifiedType(QualType(canonTy,0),
3437  canonElementType.Quals);
3438 
3439  // If we didn't need extra canonicalization for the element type or the size
3440  // expression, then just use that as our result.
3441  if (QualType(canonElementType.Ty, 0) == elementType &&
3442  canonTy->getSizeExpr() == numElements)
3443  return canon;
3444 
3445  // Otherwise, we need to build a type which follows the spelling
3446  // of the element type.
3447  auto *sugaredType
3448  = new (*this, TypeAlignment)
3449  DependentSizedArrayType(*this, elementType, canon, numElements,
3450  ASM, elementTypeQuals, brackets);
3451  Types.push_back(sugaredType);
3452  return QualType(sugaredType, 0);
3453 }
 3454 
 3455 QualType ASTContext::getIncompleteArrayType(QualType elementType,
 3456  ArrayType::ArraySizeModifier ASM,
3457  unsigned elementTypeQuals) const {
3458  llvm::FoldingSetNodeID ID;
3459  IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);
3460 
3461  void *insertPos = nullptr;
3462  if (IncompleteArrayType *iat =
3463  IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
3464  return QualType(iat, 0);
3465 
3466  // If the element type isn't canonical, this won't be a canonical type
3467  // either, so fill in the canonical type field. We also have to pull
3468  // qualifiers off the element type.
3469  QualType canon;
3470 
3471  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
3472  SplitQualType canonSplit = getCanonicalType(elementType).split();
3473  canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
3474  ASM, elementTypeQuals);
3475  canon = getQualifiedType(canon, canonSplit.Quals);
3476 
3477  // Get the new insert position for the node we care about.
3478  IncompleteArrayType *existing =
3479  IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
3480  assert(!existing && "Shouldn't be in the map!"); (void) existing;
3481  }
3482 
3483  auto *newType = new (*this, TypeAlignment)
3484  IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
3485 
3486  IncompleteArrayTypes.InsertNode(newType, insertPos);
3487  Types.push_back(newType);
3488  return QualType(newType, 0);
3489 }
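// --- Editorial usage sketch (not part of ASTContext.cpp) ---------------------
// Building the type 'const int[]' through the uniqued getter above; repeated
// calls with the same arguments find and return the same IncompleteArrayType
// node via the folding set. The helper name below is hypothetical.
static QualType exampleIncompleteArray(ASTContext &Ctx) {
  QualType ConstInt = Ctx.IntTy.withConst();            // 'const int'
  return Ctx.getIncompleteArrayType(ConstInt, ArrayType::Normal,
                                    /*elementTypeQuals=*/0);
}
// -----------------------------------------------------------------------------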
3490 
3491 /// getVectorType - Return the unique reference to a vector type of
3492 /// the specified element type and size. VectorType must be a built-in type.
3493 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
3494  VectorType::VectorKind VecKind) const {
3495  assert(vecType->isBuiltinType());
3496 
3497  // Check if we've already instantiated a vector of this type.
3498  llvm::FoldingSetNodeID ID;
3499  VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
3500 
3501  void *InsertPos = nullptr;
3502  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
3503  return QualType(VTP, 0);
3504 
3505  // If the element type isn't canonical, this won't be a canonical type either,
3506  // so fill in the canonical type field.
3507  QualType Canonical;
3508  if (!vecType.isCanonical()) {
3509  Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);
3510 
3511  // Get the new insert position for the node we care about.
3512  VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3513  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3514  }
3515  auto *New = new (*this, TypeAlignment)
3516  VectorType(vecType, NumElts, Canonical, VecKind);
3517  VectorTypes.InsertNode(New, InsertPos);
3518  Types.push_back(New);
3519  return QualType(New, 0);
3520 }
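// --- Editorial usage sketch (not part of ASTContext.cpp) ---------------------
// Roughly what attribute handling might request from the getter above: a
// GCC-style generic vector of four floats, comparable to the type written as
// 'float __attribute__((vector_size(16)))'. The helper name is hypothetical.
static QualType exampleFloat4Vector(ASTContext &Ctx) {
  return Ctx.getVectorType(Ctx.FloatTy, /*NumElts=*/4,
                           VectorType::GenericVector);
}
// -----------------------------------------------------------------------------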
3521 
 3522 QualType
 3523 ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
3524  SourceLocation AttrLoc,
3525  VectorType::VectorKind VecKind) const {
3526  llvm::FoldingSetNodeID ID;
3527  DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr,
3528  VecKind);
3529  void *InsertPos = nullptr;
3530  DependentVectorType *Canon =
3531  DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3532  DependentVectorType *New;
3533 
3534  if (Canon) {
3535  New = new (*this, TypeAlignment) DependentVectorType(
3536  *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
3537  } else {
3538  QualType CanonVecTy = getCanonicalType(VecType);
3539  if (CanonVecTy == VecType) {
3540  New = new (*this, TypeAlignment) DependentVectorType(
3541  *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind);
3542 
3543  DependentVectorType *CanonCheck =
3544  DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3545  assert(!CanonCheck &&
3546  "Dependent-sized vector_size canonical type broken");
3547  (void)CanonCheck;
3548  DependentVectorTypes.InsertNode(New, InsertPos);
3549  } else {
3550  QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
3551  SourceLocation());
3552  New = new (*this, TypeAlignment) DependentVectorType(
3553  *this, VecType, Canon, SizeExpr, AttrLoc, VecKind);
3554  }
3555  }
3556 
3557  Types.push_back(New);
3558  return QualType(New, 0);
3559 }
3560 
3561 /// getExtVectorType - Return the unique reference to an extended vector type of
3562 /// the specified element type and size. VectorType must be a built-in type.
3563 QualType
3564 ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
3565  assert(vecType->isBuiltinType() || vecType->isDependentType());
3566 
3567  // Check if we've already instantiated a vector of this type.
3568  llvm::FoldingSetNodeID ID;
 3569  VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
 3570  VectorType::GenericVector);
3571  void *InsertPos = nullptr;
3572  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
3573  return QualType(VTP, 0);
3574 
3575  // If the element type isn't canonical, this won't be a canonical type either,
3576  // so fill in the canonical type field.
3577  QualType Canonical;
3578  if (!vecType.isCanonical()) {
3579  Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);
3580 
3581  // Get the new insert position for the node we care about.
3582  VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3583  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3584  }
3585  auto *New = new (*this, TypeAlignment)
3586  ExtVectorType(vecType, NumElts, Canonical);
3587  VectorTypes.InsertNode(New, InsertPos);
3588  Types.push_back(New);
3589  return QualType(New, 0);
3590 }
3591 
 3592 QualType
 3593 ASTContext::getDependentSizedExtVectorType(QualType vecType,
3594  Expr *SizeExpr,
3595  SourceLocation AttrLoc) const {
 3596  llvm::FoldingSetNodeID ID;
 3597  DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType),
3598  SizeExpr);
3599 
 3600  void *InsertPos = nullptr;
 3601  DependentSizedExtVectorType *Canon
 3602  = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
 3603  DependentSizedExtVectorType *New;
 3604  if (Canon) {
3605  // We already have a canonical version of this array type; use it as
3606  // the canonical type for a newly-built type.
3607  New = new (*this, TypeAlignment)
3608  DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0),
3609  SizeExpr, AttrLoc);
3610  } else {
3611  QualType CanonVecTy = getCanonicalType(vecType);
3612  if (CanonVecTy == vecType) {
3613  New = new (*this, TypeAlignment)
3614  DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr,
3615  AttrLoc);
3616 
3617  DependentSizedExtVectorType *CanonCheck
3618  = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
3619  assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
3620  (void)CanonCheck;
3621  DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
3622  } else {
3623  QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
3624  SourceLocation());
3625  New = new (*this, TypeAlignment)
3626  DependentSizedExtVectorType(*this, vecType, Canon, SizeExpr, AttrLoc);
3627  }
3628  }
3629 
3630  Types.push_back(New);
3631  return QualType(New, 0);
3632 }
 3633 
 3634 QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
3635  Expr *AddrSpaceExpr,
3636  SourceLocation AttrLoc) const {
3637  assert(AddrSpaceExpr->isInstantiationDependent());
3638 
3639  QualType canonPointeeType = getCanonicalType(PointeeType);
3640 
3641  void *insertPos = nullptr;
3642  llvm::FoldingSetNodeID ID;
3643  DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
3644  AddrSpaceExpr);
3645 
3646  DependentAddressSpaceType *canonTy =
3647  DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);
3648 
3649  if (!canonTy) {
3650  canonTy = new (*this, TypeAlignment)
3651  DependentAddressSpaceType(*this, canonPointeeType,
3652  QualType(), AddrSpaceExpr, AttrLoc);
3653  DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
3654  Types.push_back(canonTy);
3655  }
3656 
3657  if (canonPointeeType == PointeeType &&
3658  canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
3659  return QualType(canonTy, 0);
3660 
3661  auto *sugaredType
3662  = new (*this, TypeAlignment)
3663  DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0),
3664  AddrSpaceExpr, AttrLoc);
3665  Types.push_back(sugaredType);
3666  return QualType(sugaredType, 0);
3667 }
3668 
 3669 /// Determine whether \p T is canonical as the result type of a function.
 3670 static bool isCanonicalResultType(QualType T) {
 3671  return T.isCanonical() &&
 3672  (T.getObjCLifetime() == Qualifiers::OCL_None ||
 3673  T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
 3674 }
3675 
3676 /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
 3677 QualType
 3678 ASTContext::getFunctionNoProtoType(QualType ResultTy,
3679  const FunctionType::ExtInfo &Info) const {
3680  // Unique functions, to guarantee there is only one function of a particular
3681  // structure.
3682  llvm::FoldingSetNodeID ID;
3683  FunctionNoProtoType::Profile(ID, ResultTy, Info);
3684 
3685  void *InsertPos = nullptr;
3686  if (FunctionNoProtoType *FT =
3687  FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
3688  return QualType(FT, 0);
3689 
3690  QualType Canonical;
3691  if (!isCanonicalResultType(ResultTy)) {
 3692  Canonical =
 3693  getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info);
3694 
3695  // Get the new insert position for the node we care about.
3696  FunctionNoProtoType *NewIP =
3697  FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
3698  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3699  }
3700 
3701  auto *New = new (*this, TypeAlignment)
3702  FunctionNoProtoType(ResultTy, Canonical, Info);
3703  Types.push_back(New);
3704  FunctionNoProtoTypes.InsertNode(New, InsertPos);
3705  return QualType(New, 0);
3706 }
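// --- Editorial usage sketch (not part of ASTContext.cpp) ---------------------
// Requesting the K&R-style type 'int()' twice yields the same uniqued node,
// which is the uniquing the comment in the getter above describes. The helper
// name is hypothetical.
static void exampleNoProtoUniquing(ASTContext &Ctx) {
  QualType F1 = Ctx.getFunctionNoProtoType(Ctx.IntTy, FunctionType::ExtInfo());
  QualType F2 = Ctx.getFunctionNoProtoType(Ctx.IntTy, FunctionType::ExtInfo());
  assert(F1 == F2 && "FunctionNoProtoTypes are uniqued");
  (void)F1; (void)F2;
}
// -----------------------------------------------------------------------------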
 3707 
 3708 CanQualType
 3709 ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
3710  CanQualType CanResultType = getCanonicalType(ResultType);
3711 
3712  // Canonical result types do not have ARC lifetime qualifiers.
3713  if (CanResultType.getQualifiers().hasObjCLifetime()) {
3714  Qualifiers Qs = CanResultType.getQualifiers();
 3715  Qs.removeObjCLifetime();
 3716  return CanQualType::CreateUnsafe(
3717  getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
3718  }
3719 
3720  return CanResultType;
3721 }
 3722 
 3723 static bool isCanonicalExceptionSpecification(
3724  const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
3725  if (ESI.Type == EST_None)
3726  return true;
3727  if (!NoexceptInType)
3728  return false;
3729 
3730  // C++17 onwards: exception specification is part of the type, as a simple
3731  // boolean "can this function type throw".
3732  if (ESI.Type == EST_BasicNoexcept)
3733  return true;
3734 
3735  // A noexcept(expr) specification is (possibly) canonical if expr is
3736  // value-dependent.
3737  if (ESI.Type == EST_DependentNoexcept)
3738  return true;
3739 
3740  // A dynamic exception specification is canonical if it only contains pack
3741  // expansions (so we can't tell whether it's non-throwing) and all its
3742  // contained types are canonical.
3743  if (ESI.Type == EST_Dynamic) {
3744  bool AnyPackExpansions = false;
3745  for (QualType ET : ESI.Exceptions) {
3746  if (!ET.isCanonical())
3747  return false;
3748  if (ET->getAs<PackExpansionType>())
3749  AnyPackExpansions = true;
3750  }
3751  return AnyPackExpansions;
3752  }
3753 
3754  return false;
3755 }
3756 
3757 QualType ASTContext::getFunctionTypeInternal(
3758  QualType ResultTy, ArrayRef<QualType> ArgArray,
3759  const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
3760  size_t NumArgs = ArgArray.size();
3761 
3762  // Unique functions, to guarantee there is only one function of a particular
3763  // structure.
3764  llvm::FoldingSetNodeID ID;
3765  FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
3766  *this, true);
3767 
3768  QualType Canonical;
3769  bool Unique = false;
3770 
3771  void *InsertPos = nullptr;
3772  if (FunctionProtoType *FPT =
3773  FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
3774  QualType Existing = QualType(FPT, 0);
3775 
3776  // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
3777  // it so long as our exception specification doesn't contain a dependent
3778  // noexcept expression, or we're just looking for a canonical type.
3779  // Otherwise, we're going to need to create a type
3780  // sugar node to hold the concrete expression.
3781  if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
3782  EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
3783  return Existing;
3784 
3785  // We need a new type sugar node for this one, to hold the new noexcept
3786  // expression. We do no canonicalization here, but that's OK since we don't
3787  // expect to see the same noexcept expression much more than once.
3788  Canonical = getCanonicalType(Existing);
3789  Unique = true;
3790  }
3791 
3792  bool NoexceptInType = getLangOpts().CPlusPlus17;
3793  bool IsCanonicalExceptionSpec =
3794  isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);
3795 
3796  // Determine whether the type being created is already canonical or not.
3797  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
3798  isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
3799  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
3800  if (!ArgArray[i].isCanonicalAsParam())
3801  isCanonical = false;
3802 
3803  if (OnlyWantCanonical)
3804  assert(isCanonical &&
3805  "given non-canonical parameters constructing canonical type");
3806 
3807  // If this type isn't canonical, get the canonical version of it if we don't
3808  // already have it. The exception spec is only partially part of the
3809  // canonical type, and only in C++17 onwards.
3810  if (!isCanonical && Canonical.isNull()) {
3811  SmallVector<QualType, 16> CanonicalArgs;
3812  CanonicalArgs.reserve(NumArgs);
3813  for (unsigned i = 0; i != NumArgs; ++i)
3814  CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
3815 
3816  llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
3817  FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
3818  CanonicalEPI.HasTrailingReturn = false;
3819 
3820  if (IsCanonicalExceptionSpec) {
3821  // Exception spec is already OK.
3822  } else if (NoexceptInType) {
 3823  switch (EPI.ExceptionSpec.Type) {
 3824  case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
3825  // We don't know yet. It shouldn't matter what we pick here; no-one
3826  // should ever look at this.
3827  LLVM_FALLTHROUGH;
3828  case EST_None: case EST_MSAny: case EST_NoexceptFalse:
3829  CanonicalEPI.ExceptionSpec.Type = EST_None;
3830  break;
3831 
3832  // A dynamic exception specification is almost always "not noexcept",
3833  // with the exception that a pack expansion might expand to no types.
3834  case EST_Dynamic: {
3835  bool AnyPacks = false;
3836  for (QualType ET : EPI.ExceptionSpec.Exceptions) {
3837  if (ET->getAs<PackExpansionType>())
3838  AnyPacks = true;
3839  ExceptionTypeStorage.push_back(getCanonicalType(ET));
3840  }
3841  if (!AnyPacks)
3842  CanonicalEPI.ExceptionSpec.Type = EST_None;
3843  else {
3844  CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
3845  CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
3846  }
3847  break;
3848  }
3849 
3850  case EST_DynamicNone:
3851  case EST_BasicNoexcept:
3852  case EST_NoexceptTrue:
3853  case EST_NoThrow:
3854  CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
3855  break;
3856 
3857  case EST_DependentNoexcept:
3858  llvm_unreachable("dependent noexcept is already canonical");
3859  }
 3860  } else {
 3861  CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
3862  }
3863 
3864  // Adjust the canonical function result type.
3865  CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
3866  Canonical =
3867  getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);
3868 
3869  // Get the new insert position for the node we care about.
3870  FunctionProtoType *NewIP =
3871  FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
3872  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3873  }
3874 
3875  // Compute the needed size to hold this FunctionProtoType and the
3876  // various trailing objects.
3877  auto ESH = FunctionProtoType::getExceptionSpecSize(
3878  EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
 3879  size_t Size = FunctionProtoType::totalSizeToAlloc<
 3880  QualType, FunctionType::FunctionTypeExtraBitfields,
 3881  FunctionType::ExceptionType, Expr *, FunctionDecl *,
3882  FunctionProtoType::ExtParameterInfo, Qualifiers>(
3883  NumArgs, FunctionProtoType::hasExtraBitfields(EPI.ExceptionSpec.Type),
3884  ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
3885  EPI.ExtParameterInfos ? NumArgs : 0,
3886  EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0);
3887 
3888  auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment);
3889  FunctionProtoType::ExtProtoInfo newEPI = EPI;
3890  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
3891  Types.push_back(FTP);
3892  if (!Unique)
3893  FunctionProtoTypes.InsertNode(FTP, InsertPos);
3894  return QualType(FTP, 0);
3895 }
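// --- Editorial usage sketch (not part of ASTContext.cpp) ---------------------
// Building 'void (int, float) noexcept' through the public getFunctionType
// wrapper, which funnels into getFunctionTypeInternal above; as the code notes,
// the exception specification participates in the canonical type only in C++17
// mode. The helper name is hypothetical.
static QualType exampleNoexceptFunction(ASTContext &Ctx) {
  QualType Params[] = {Ctx.IntTy, Ctx.FloatTy};
  FunctionProtoType::ExtProtoInfo EPI;
  EPI.ExceptionSpec.Type = EST_BasicNoexcept;   // plain 'noexcept'
  return Ctx.getFunctionType(Ctx.VoidTy, Params, EPI);
}
// -----------------------------------------------------------------------------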
3896 
3897 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
3898  llvm::FoldingSetNodeID ID;
3899  PipeType::Profile(ID, T, ReadOnly);
3900 
3901  void *InsertPos = nullptr;
3902  if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
3903  return QualType(PT, 0);
3904 
3905  // If the pipe element type isn't canonical, this won't be a canonical type
3906  // either, so fill in the canonical type field.
3907  QualType Canonical;
3908  if (!T.isCanonical()) {
3909  Canonical = getPipeType(getCanonicalType(T), ReadOnly);
3910 
3911  // Get the new insert position for the node we care about.
3912  PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
3913  assert(!NewIP && "Shouldn't be in the map!");
3914  (void)NewIP;
3915  }
3916  auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly);
3917  Types.push_back(New);
3918  PipeTypes.InsertNode(New, InsertPos);
3919  return QualType(New, 0);
3920 }
 3921 
 3922 QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const {
3923  // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
3924  return LangOpts.OpenCL ? getAddrSpaceQualType(Ty, LangAS::opencl_constant)
3925  : Ty;
3926 }
 3927 
 3928 QualType ASTContext::getReadPipeType(QualType T) const {
3929  return getPipeType(T, true);
3930 }
 3931 
 3932 QualType ASTContext::getWritePipeType(QualType T) const {
3933  return getPipeType(T, false);
3934 }
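// --- Editorial usage sketch (not part of ASTContext.cpp) ---------------------
// The OpenCL read_only/write_only pipe qualifiers map onto the same PipeType
// node with a direction flag; both getters above go through getPipeType, and
// the direction is part of the uniquing key. The helper name is hypothetical.
static void examplePipeTypes(ASTContext &Ctx) {
  QualType ReadPipe = Ctx.getReadPipeType(Ctx.IntTy);    // 'read_only pipe int'
  QualType WritePipe = Ctx.getWritePipeType(Ctx.IntTy);  // 'write_only pipe int'
  assert(ReadPipe != WritePipe && "direction is part of the uniquing key");
  (void)ReadPipe; (void)WritePipe;
}
// -----------------------------------------------------------------------------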
3935 
 3936 #ifndef NDEBUG
 3937 static bool NeedsInjectedClassNameType(const RecordDecl *D) {
3938  if (!isa<CXXRecordDecl>(D)) return false;
3939  const auto *RD = cast<CXXRecordDecl>(D);
3940  if (isa<ClassTemplatePartialSpecializationDecl>(RD))
3941  return true;
3942  if (RD->getDescribedClassTemplate() &&
3943  !isa<ClassTemplateSpecializationDecl>(RD))
3944  return true;
3945  return false;
3946 }
3947 #endif
3948 
3949 /// getInjectedClassNameType - Return the unique reference to the
 3950 /// injected class name type for the specified templated declaration.
 3951 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
3952  QualType TST) const {
3953  assert(NeedsInjectedClassNameType(Decl));
3954  if (Decl->TypeForDecl) {
3955  assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
3956  } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
3957  assert(PrevDecl->TypeForDecl && "previous declaration has no type");
3958  Decl->TypeForDecl = PrevDecl->TypeForDecl;
3959  assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
3960  } else {
3961  Type *newType =
3962  new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
3963  Decl->TypeForDecl = newType;
3964  Types.push_back(newType);
3965  }
3966  return QualType(Decl->TypeForDecl, 0);
3967 }
3968 
3969 /// getTypeDeclType - Return the unique reference to the type for the
3970 /// specified type declaration.
3971 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
3972  assert(Decl && "Passed null for Decl param");
3973  assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
3974 
3975  if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
3976  return getTypedefType(Typedef);
3977 
3978  assert(!isa<TemplateTypeParmDecl>(Decl) &&
3979  "Template type parameter types are always available.");
3980 
3981  if (const auto *Record = dyn_cast<RecordDecl>(Decl)) {
3982  assert(Record->isFirstDecl() && "struct/union has previous declaration");
3983  assert(!NeedsInjectedClassNameType(Record));
3984  return getRecordType(Record);
3985  } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) {
3986  assert(Enum->isFirstDecl() && "enum has previous declaration");
3987  return getEnumType(Enum);
3988  } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
3989  Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
3990  Decl->TypeForDecl = newType;
3991  Types.push_back(newType);
3992  } else
3993  llvm_unreachable("TypeDecl without a type?");
3994 
3995  return QualType(Decl->TypeForDecl, 0);
3996 }
3997 
3998 /// getTypedefType - Return the unique reference to the type for the
3999 /// specified typedef name decl.
 4000 QualType
 4001 ASTContext::getTypedefType(const TypedefNameDecl *Decl,
4002  QualType Canonical) const {
4003  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
4004 
4005  if (Canonical.isNull())
4006  Canonical = getCanonicalType(Decl->getUnderlyingType());
4007  auto *newType = new (*this, TypeAlignment)
4008  TypedefType(Type::Typedef, Decl, Canonical);
4009  Decl->TypeForDecl = newType;
4010  Types.push_back(newType);
4011  return QualType(newType, 0);
4012 }
 4013 
 4014 QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
4015  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
4016 
4017  if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
4018  if (PrevDecl->TypeForDecl)
4019  return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
4020 
4021  auto *newType = new (*this, TypeAlignment) RecordType(Decl);
4022  Decl->TypeForDecl = newType;
4023  Types.push_back(newType);
4024  return QualType(newType, 0);
4025 }
 4026 
 4027 QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
4028  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
4029 
4030  if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
4031  if (PrevDecl->TypeForDecl)
4032  return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
4033 
4034  auto *newType = new (*this, TypeAlignment) EnumType(Decl);
4035  Decl->TypeForDecl = newType;
4036  Types.push_back(newType);
4037  return QualType(newType, 0);
4038 }
 4039 
 4040 QualType ASTContext::getAttributedType(attr::Kind attrKind,
4041  QualType modifiedType,
4042  QualType equivalentType) {
4043  llvm::FoldingSetNodeID id;
4044  AttributedType::Profile(id, attrKind, modifiedType, equivalentType);
4045 
4046  void *insertPos = nullptr;
4047  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
4048  if (type) return QualType(type, 0);
4049 
4050  QualType canon = getCanonicalType(equivalentType);
4051  type = new (*this, TypeAlignment)
4052  AttributedType(canon, attrKind, modifiedType, equivalentType);
4053 
4054  Types.push_back(type);
4055  AttributedTypes.InsertNode(type, insertPos);
4056 
4057  return QualType(type, 0);
4058 }
4059 
4060 /// Retrieve a substitution-result type.
 4061 QualType
 4062 ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
4063  QualType Replacement) const {
4064  assert(Replacement.isCanonical()
4065  && "replacement types must always be canonical");
4066 
4067  llvm::FoldingSetNodeID ID;
4068  SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
4069  void *InsertPos = nullptr;
4070  SubstTemplateTypeParmType *SubstParm
4071  = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4072 
4073  if (!SubstParm) {
4074  SubstParm = new (*this, TypeAlignment)
4075  SubstTemplateTypeParmType(Parm, Replacement);
4076  Types.push_back(SubstParm);
4077  SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
4078  }
4079 
4080  return QualType(SubstParm, 0);
4081 }
4082 
 4083 /// Retrieve a substitution-result type for a template type parameter pack.
 4084 QualType ASTContext::getSubstTemplateTypeParmPackType(
4085  const TemplateTypeParmType *Parm,
4086  const TemplateArgument &ArgPack) {
4087 #ifndef NDEBUG
4088  for (const auto &P : ArgPack.pack_elements()) {
4089  assert(P.getKind() == TemplateArgument::Type &&"Pack contains a non-type");
4090  assert(P.getAsType().isCanonical() && "Pack contains non-canonical type");
4091  }
4092 #endif
4093 
4094  llvm::FoldingSetNodeID ID;
4095  SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
4096  void *InsertPos = nullptr;
4097  if (SubstTemplateTypeParmPackType *SubstParm
4098  = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
4099  return QualType(SubstParm, 0);
4100 
4101  QualType Canon;
4102  if (!Parm->isCanonicalUnqualified()) {
4103  Canon = getCanonicalType(QualType(Parm, 0));
4104  Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
4105  ArgPack);
4106  SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
4107  }
4108 
4109  auto *SubstParm
4110  = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
4111  ArgPack);
4112  Types.push_back(SubstParm);
4113  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
4114  return QualType(SubstParm, 0);
4115 }
4116 
4117 /// Retrieve the template type parameter type for a template
4118 /// parameter or parameter pack with the given depth, index, and (optionally)
 4119 /// name.
 4120 QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
4121  bool ParameterPack,
4122  TemplateTypeParmDecl *TTPDecl) const {
4123  llvm::FoldingSetNodeID ID;
4124  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
4125  void *InsertPos = nullptr;
4126  TemplateTypeParmType *TypeParm
4127  = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4128 
4129  if (TypeParm)
4130  return QualType(TypeParm, 0);
4131 
4132  if (TTPDecl) {
4133  QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
4134  TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);
4135 
4136  TemplateTypeParmType *TypeCheck
4137  = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4138  assert(!TypeCheck && "Template type parameter canonical type broken");
4139  (void)TypeCheck;
4140  } else
4141  TypeParm = new (*this, TypeAlignment)
4142  TemplateTypeParmType(Depth, Index, ParameterPack);
4143 
4144  Types.push_back(TypeParm);
4145  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
4146 
4147  return QualType(TypeParm, 0);
4148 }
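// --- Editorial usage sketch (not part of ASTContext.cpp) ---------------------
// Asking for the canonical template type parameter at depth 0, index 0 (the
// 'T' of a hypothetical 'template <typename T>') without naming a declaration;
// passing a TemplateTypeParmDecl instead yields the sugared form whose
// canonical type is exactly this node. The helper name is hypothetical.
static QualType exampleCanonicalTemplateParm(ASTContext &Ctx) {
  return Ctx.getTemplateTypeParmType(/*Depth=*/0, /*Index=*/0,
                                     /*ParameterPack=*/false,
                                     /*TTPDecl=*/nullptr);
}
// -----------------------------------------------------------------------------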
 4149 
 4150 TypeSourceInfo *
 4151 ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
4152  SourceLocation NameLoc,
4153  const TemplateArgumentListInfo &Args,
4154  QualType Underlying) const {
4155  assert(!Name.getAsDependentTemplateName() &&
4156  "No dependent template names here!");
4157  QualType TST = getTemplateSpecializationType(Name, Args, Underlying);
 4158 
 4159  TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
 4160  TemplateSpecializationTypeLoc TL =
 4161  DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
 4162  TL.setTemplateKeywordLoc(SourceLocation());
4163  TL.setTemplateNameLoc(NameLoc);
4164  TL.setLAngleLoc(Args.getLAngleLoc());
4165  TL.setRAngleLoc(Args.getRAngleLoc());
4166  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
4167  TL.setArgLocInfo(i, Args[i].getLocInfo());
4168  return DI;
4169 }
4170 
 4171 QualType
 4172 ASTContext::getTemplateSpecializationType(TemplateName Template,
4173  const TemplateArgumentListInfo &Args,
4174  QualType Underlying) const {
4175  assert(!Template.getAsDependentTemplateName() &&
4176  "No dependent template names here!");
 4177 
 4178  SmallVector<TemplateArgument, 4> ArgVec;
4179  ArgVec.reserve(Args.size());
4180  for (const TemplateArgumentLoc &Arg : Args.arguments())
4181  ArgVec.push_back(Arg.getArgument());
4182 
4183  return getTemplateSpecializationType(Template, ArgVec, Underlying);
4184 }
4185 
 4186 #ifndef NDEBUG
 4187 static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
4188  for (const TemplateArgument &Arg : Args)
4189  if (Arg.isPackExpansion())
4190  return true;
4191 
 4192  return false;
4193 }
4194 #endif
4195 
 4196 QualType
 4197 ASTContext::getTemplateSpecializationType(TemplateName Template,
 4198  ArrayRef<TemplateArgument> Args,
4199  QualType Underlying) const {
4200  assert(!Template.getAsDependentTemplateName() &&
4201  "No dependent template names here!");
4202  // Look through qualified template names.
4203  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4204  Template = TemplateName(QTN->getTemplateDecl());
4205 
4206  bool IsTypeAlias =
4207  Template.getAsTemplateDecl() &&
4208  isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
4209  QualType CanonType;
4210  if (!Underlying.isNull())
4211  CanonType = getCanonicalType(Underlying);
4212  else {
4213  // We can get here with an alias template when the specialization contains
4214  // a pack expansion that does not match up with a parameter pack.
4215  assert((!IsTypeAlias || hasAnyPackExpansions(Args)) &&
4216  "Caller must compute aliased type");
4217  IsTypeAlias = false;
4218  CanonType = getCanonicalTemplateSpecializationType(Template, Args);
4219  }
4220 
4221  // Allocate the (non-canonical) template specialization type, but don't
4222  // try to unique it: these types typically have location information that
4223  // we don't unique and don't want to lose.
4224  void *Mem = Allocate(sizeof(TemplateSpecializationType) +
4225  sizeof(TemplateArgument) * Args.size() +
4226  (IsTypeAlias? sizeof(QualType) : 0),
4227  TypeAlignment);
4228  auto *Spec
4229  = new (Mem) TemplateSpecializationType(Template, Args, CanonType,
4230  IsTypeAlias ? Underlying : QualType());
4231 
4232  Types.push_back(Spec);
4233  return QualType(Spec, 0);
4234 }
 4235 
 4236 QualType ASTContext::getCanonicalTemplateSpecializationType(
4237  TemplateName Template, ArrayRef<TemplateArgument> Args) const {
4238  assert(!Template.getAsDependentTemplateName() &&
4239  "No dependent template names here!");
4240 
4241  // Look through qualified template names.
4242  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4243  Template = TemplateName(QTN->getTemplateDecl());
4244 
4245  // Build the canonical template specialization type.
 4246  TemplateName CanonTemplate = getCanonicalTemplateName(Template);
 4247  SmallVector<TemplateArgument, 4> CanonArgs;
4248  unsigned NumArgs = Args.size();
4249  CanonArgs.reserve(NumArgs);
4250  for (const TemplateArgument &Arg : Args)
4251  CanonArgs.push_back(getCanonicalTemplateArgument(Arg));
4252 
4253  // Determine whether this canonical template specialization type already
4254  // exists.
4255  llvm::FoldingSetNodeID ID;
4256  TemplateSpecializationType::Profile(ID, CanonTemplate,
4257  CanonArgs, *this);
4258 
 4259  void *InsertPos = nullptr;
 4260  TemplateSpecializationType *Spec
 4261  = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4262 
4263  if (!Spec) {
4264  // Allocate a new canonical template specialization type.
4265  void *Mem = Allocate((sizeof(TemplateSpecializationType) +
4266  sizeof(TemplateArgument) * NumArgs),
4267  TypeAlignment);
4268  Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
4269  CanonArgs,
4270  QualType(), QualType());
4271  Types.push_back(Spec);
4272  TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
4273  }
4274 
4275  assert(Spec->isDependentType() &&
4276  "Non-dependent template-id type must have a canonical type");
4277  return QualType(Spec, 0);
4278 }
 4279 
 4280 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
4281  NestedNameSpecifier *NNS,
4282  QualType NamedType,
4283  TagDecl *OwnedTagDecl) const {
4284  llvm::FoldingSetNodeID ID;
4285  ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);
4286 
4287  void *InsertPos = nullptr;
4288  ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
4289  if (T)
4290  return QualType(T, 0);
4291 
4292  QualType Canon = NamedType;
4293  if (!Canon.isCanonical()) {
4294  Canon = getCanonicalType(NamedType);
4295  ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
4296  assert(!CheckT && "Elaborated canonical type broken");
4297  (void)CheckT;
4298  }
4299 
4300  void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl),
4301  TypeAlignment);
4302  T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);
4303 
4304  Types.push_back(T);
4305  ElaboratedTypes.InsertNode(T, InsertPos);
4306  return QualType(T, 0);
4307 }
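// --- Editorial usage sketch (not part of ASTContext.cpp) ---------------------
// Wrapping a record type in the written-out 'struct' keyword, assuming `RD` is
// a RecordDecl the caller already has; the canonical type stays the plain
// RecordType, so the elaboration is pure sugar. The helper name is hypothetical.
static QualType exampleElaboratedStruct(ASTContext &Ctx, RecordDecl *RD) {
  QualType Named = Ctx.getRecordType(RD);
  return Ctx.getElaboratedType(ETK_Struct, /*NNS=*/nullptr, Named,
                               /*OwnedTagDecl=*/nullptr);
}
// -----------------------------------------------------------------------------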
4308 
 4309 QualType
 4310 ASTContext::getParenType(QualType InnerType) const {
4311  llvm::FoldingSetNodeID ID;
4312  ParenType::Profile(ID, InnerType);
4313 
4314  void *InsertPos = nullptr;
4315  ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
4316  if (T)
4317  return QualType(T, 0);
4318 
4319  QualType Canon = InnerType;
4320  if (!Canon.isCanonical()) {
4321  Canon = getCanonicalType(InnerType);
4322  ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
4323  assert(!CheckT && "Paren canonical type broken");
4324  (void)CheckT;
4325  }
4326 
4327  T = new (*this, TypeAlignment) ParenType(InnerType, Canon);
4328  Types.push_back(T);
4329  ParenTypes.InsertNode(T, InsertPos);
4330  return QualType(T, 0);
4331 }
4332 
 4333 QualType
 4334 ASTContext::getMacroQualifiedType(QualType UnderlyingTy,
4335  const IdentifierInfo *MacroII) const {
4336  QualType Canon = UnderlyingTy;
4337  if (!Canon.isCanonical())
4338  Canon = getCanonicalType(UnderlyingTy);
4339 
4340  auto *newType = new (*this, TypeAlignment)
4341  MacroQualifiedType(UnderlyingTy, Canon, MacroII);
4342  Types.push_back(newType);
4343  return QualType(newType, 0);
4344 }
 4345 
 4346 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
4347  NestedNameSpecifier *NNS,
4348  const IdentifierInfo *Name,
4349  QualType Canon) const {
 4350  if (Canon.isNull()) {
 4351  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
4352  if (CanonNNS != NNS)
4353  Canon = getDependentNameType(Keyword, CanonNNS, Name);
4354  }
4355 
4356  llvm::FoldingSetNodeID ID;
4357  DependentNameType::Profile(ID, Keyword, NNS, Name);
4358 
 4359  void *InsertPos = nullptr;
 4360  DependentNameType *T
 4361  = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
4362  if (T)
4363  return QualType(T, 0);
4364 
4365  T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon);
4366  Types.push_back(T);
4367  DependentNameTypes.InsertNode(T, InsertPos);
4368  return QualType(T, 0);
4369 }
4370 
 4371 QualType
 4372 ASTContext::getDependentTemplateSpecializationType(
4373  ElaboratedTypeKeyword Keyword,
4374  NestedNameSpecifier *NNS,
4375  const IdentifierInfo *Name,
4376  const TemplateArgumentListInfo &Args) const {
 4377  // TODO: avoid this copy
 4378  SmallVector<TemplateArgument, 16> ArgCopy;
4379  for (unsigned I = 0, E = Args.size(); I != E; ++I)
4380  ArgCopy.push_back(Args[I].getArgument());
4381  return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy);
4382 }
4383 
 4384 QualType
 4385 ASTContext::getDependentTemplateSpecializationType(
4386  ElaboratedTypeKeyword Keyword,
4387  NestedNameSpecifier *NNS,
4388  const IdentifierInfo *Name,
4389  ArrayRef<TemplateArgument> Args) const {
4390  assert((!NNS || NNS->isDependent()) &&
4391  "nested-name-specifier must be dependent");
4392 
4393  llvm::FoldingSetNodeID ID;
4394  DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
4395  Name, Args);
4396 
 4397  void *InsertPos = nullptr;
 4398  DependentTemplateSpecializationType *T
 4399  = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4400  if (T)
4401  return QualType(T, 0);
 4402 
 4403  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
4404 
4405  ElaboratedTypeKeyword CanonKeyword = Keyword;
4406  if (Keyword == ETK_None) CanonKeyword = ETK_Typename;
4407 
4408  bool AnyNonCanonArgs = false;
4409  unsigned NumArgs = Args.size();
4410  SmallVector<TemplateArgument, 16> CanonArgs(NumArgs);
4411  for (unsigned I = 0; I != NumArgs; ++I) {
4412  CanonArgs[I] = getCanonicalTemplateArgument(Args[I]);
4413  if (!CanonArgs[I].structurallyEquals(Args[I]))
4414  AnyNonCanonArgs = true;
4415  }
4416 
4417  QualType Canon;
4418  if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
4419  Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
4420  Name,
4421  CanonArgs);
4422 
4423  // Find the insert position again.
4424  DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4425  }
4426 
4427  void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
4428  sizeof(TemplateArgument) * NumArgs),
4429  TypeAlignment);
4430  T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
4431  Name, Args, Canon);
4432  Types.push_back(T);
4433  DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
4434  return QualType(T, 0);
4435 }
 4436 
 4437 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
4438  TemplateArgument Arg;
4439  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
4440  QualType ArgType = getTypeDeclType(TTP);
4441  if (TTP->isParameterPack())
4442  ArgType = getPackExpansionType(ArgType, None);
4443 
4444  Arg = TemplateArgument(ArgType);
4445  } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
4446  Expr *E = new (*this) DeclRefExpr(
4447  *this, NTTP, /*enclosing*/ false,
4448  NTTP->getType().getNonLValueExprType(*this),
4449  Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());
4450 
4451  if (NTTP->isParameterPack())
4452  E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(),
4453  None);
4454  Arg = TemplateArgument(E);
4455  } else {
4456  auto *TTP = cast<TemplateTemplateParmDecl>(Param);
 4457  if (TTP->isParameterPack())
 4458  Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>());
4459  else
4460  Arg = TemplateArgument(TemplateName(TTP));
4461  }
4462 
4463  if (Param->isTemplateParameterPack())
4464  Arg = TemplateArgument::CreatePackCopy(*this, Arg);
4465 
4466  return Arg;
4467 }
4468 
 4469 void
 4470 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params,
 4471  SmallVectorImpl<TemplateArgument> &Args) {
4472  Args.reserve(Args.size() + Params->size());
4473 
4474  for (NamedDecl *Param : *Params)
4475  Args.push_back(getInjectedTemplateArg(Param));
4476 }
 4477 
 4478 QualType ASTContext::getPackExpansionType(QualType Pattern,
4479  Optional<unsigned> NumExpansions) {
4480  llvm::FoldingSetNodeID ID;
4481  PackExpansionType::Profile(ID, Pattern, NumExpansions);
4482 
4483  // A deduced type can deduce to a pack, eg
4484  // auto ...x = some_pack;
4485  // That declaration isn't (yet) valid, but is created as part of building an
4486  // init-capture pack:
4487  // [...x = some_pack] {}
4488  assert((Pattern->containsUnexpandedParameterPack() ||
4489  Pattern->getContainedDeducedType()) &&
4490  "Pack expansions must expand one or more parameter packs");
 4491  void *InsertPos = nullptr;
 4492  PackExpansionType *T
 4493  = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
4494  if (T)
4495  return QualType(T, 0);
4496 
4497  QualType Canon;
4498  if (!Pattern.isCanonical()) {
4499  Canon = getCanonicalType(Pattern);
4500  // The canonical type might not contain an unexpanded parameter pack, if it
4501  // contains an alias template specialization which ignores one of its
4502  // parameters.
4503  if (Canon->containsUnexpandedParameterPack()) {
4504  Canon = getPackExpansionType(Canon, NumExpansions);
4505 
4506  // Find the insert position again, in case we inserted an element into
4507  // PackExpansionTypes and invalidated our insert position.
4508  PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
4509  }
4510  }
4511 
4512  T = new (*this, TypeAlignment)
4513  PackExpansionType(Pattern, Canon, NumExpansions);
4514  Types.push_back(T);
4515  PackExpansionTypes.InsertNode(T, InsertPos);
4516  return QualType(T, 0);
4517 }
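// --- Editorial usage sketch (not part of ASTContext.cpp) ---------------------
// Expanding a template type parameter pack 'Ts' into 'Ts...'. The pattern must
// contain an unexpanded pack (or a deduced type), which the assert above
// enforces; a canonical parameter-pack type satisfies that. The helper name is
// hypothetical.
static QualType examplePackExpansion(ASTContext &Ctx) {
  QualType Pack = Ctx.getTemplateTypeParmType(/*Depth=*/0, /*Index=*/0,
                                              /*ParameterPack=*/true,
                                              /*TTPDecl=*/nullptr);
  return Ctx.getPackExpansionType(Pack, /*NumExpansions=*/None);
}
// -----------------------------------------------------------------------------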
4518 
4519 /// CmpProtocolNames - Comparison predicate for sorting protocols
4520 /// alphabetically.
4521 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
4522  ObjCProtocolDecl *const *RHS) {
4523  return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName());
4524 }
 4525 
 4526 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
4527  if (Protocols.empty()) return true;
4528 
4529  if (Protocols[0]->getCanonicalDecl() != Protocols[0])
4530  return false;
4531 
4532  for (unsigned i = 1; i != Protocols.size(); ++i)
4533  if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
4534  Protocols[i]->getCanonicalDecl() != Protocols[i])
4535  return false;
4536  return true;
4537 }
4538 
 4539 static void
 4540 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
4541  // Sort protocols, keyed by name.
4542  llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames);
4543 
4544  // Canonicalize.
4545  for (ObjCProtocolDecl *&P : Protocols)
4546  P = P->getCanonicalDecl();
4547 
4548  // Remove duplicates.
4549  auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end());
4550  Protocols.erase(ProtocolsEnd, Protocols.end());
4551 }
 4552 
 4553 QualType ASTContext::getObjCObjectType(QualType BaseType,
4554  ObjCProtocolDecl * const *Protocols,
4555  unsigned NumProtocols) const {
4556  return getObjCObjectType(BaseType, {},
4557  llvm::makeArrayRef(Protocols, NumProtocols),
4558  /*isKindOf=*/false);
4559 }
 4560 
 4561 QualType ASTContext::getObjCObjectType(
4562  QualType baseType,
4563  ArrayRef<QualType> typeArgs,
4564  ArrayRef<ObjCProtocolDecl *> protocols,
4565  bool isKindOf) const {
4566  // If the base type is an interface and there aren't any protocols or
4567  // type arguments to add, then the interface type will do just fine.
4568  if (typeArgs.empty() && protocols.empty() && !isKindOf &&
4569  isa<ObjCInterfaceType>(baseType))
4570  return baseType;
4571 
4572  // Look in the folding set for an existing type.
4573  llvm::FoldingSetNodeID ID;
4574  ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf);
4575  void *InsertPos = nullptr;
4576  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
4577  return QualType(QT, 0);
4578 
4579  // Determine the type arguments to be used for canonicalization,
4580  // which may be explicitly specified here or written on the base
4581  // type.
4582  ArrayRef<QualType> effectiveTypeArgs = typeArgs;
4583  if (effectiveTypeArgs.empty()) {
4584  if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
4585  effectiveTypeArgs = baseObject->getTypeArgs();
4586  }
4587 
4588  // Build the canonical type, which has the canonical base type and a
4589  // sorted-and-uniqued list of protocols and the type arguments
4590  // canonicalized.
4591  QualType canonical;
4592  bool typeArgsAreCanonical = std::all_of(effectiveTypeArgs.begin(),
4593  effectiveTypeArgs.end(),
4594  [&](QualType type) {
4595  return type.isCanonical();
4596  });
4597  bool protocolsSorted = areSortedAndUniqued(protocols);
4598  if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
4599  // Determine the canonical type arguments.
4600  ArrayRef<QualType> canonTypeArgs;
4601  SmallVector<QualType, 4> canonTypeArgsVec;
4602  if (!typeArgsAreCanonical) {
4603  canonTypeArgsVec.reserve(effectiveTypeArgs.size());
4604  for (auto typeArg : effectiveTypeArgs)
4605  canonTypeArgsVec.push_back(getCanonicalType(typeArg));
4606  canonTypeArgs = canonTypeArgsVec;
4607  } else {
4608  canonTypeArgs = effectiveTypeArgs;
4609  }
4610 
4611  ArrayRef<ObjCProtocolDecl *> canonProtocols;
4612  SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
4613  if (!protocolsSorted) {
4614  canonProtocolsVec.append(protocols.begin(), protocols.end());
4615  SortAndUniqueProtocols(canonProtocolsVec);
4616  canonProtocols = canonProtocolsVec;
4617  } else {
4618  canonProtocols = protocols;
4619  }
4620 
4621  canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs,
4622  canonProtocols, isKindOf);
4623 
4624  // Regenerate InsertPos.
4625  ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
4626  }
4627 
4628  unsigned size = sizeof(ObjCObjectTypeImpl);
4629  size += typeArgs.size() * sizeof(QualType);
4630  size += protocols.size() * sizeof(ObjCProtocolDecl *);
4631  void *mem = Allocate(size, TypeAlignment);
4632  auto *T =
4633  new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
4634  isKindOf);
4635 
4636  Types.push_back(T);
4637  ObjCObjectTypes.InsertNode(T, InsertPos);
4638  return QualType(T, 0);
4639 }
4640 
4641 /// Apply Objective-C protocol qualifiers to the given type.
4642 /// If this is for the canonical type of a type parameter, we can apply
4643 /// protocol qualifiers on the ObjCObjectPointerType.
 4644 QualType
 4645 ASTContext::applyObjCProtocolQualifiers(QualType type,
4646  ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
4647  bool allowOnPointerType) const {
4648  hasError = false;
4649 
4650  if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) {
4651  return getObjCTypeParamType(objT->getDecl(), protocols);
4652  }
4653 
4654  // Apply protocol qualifiers to ObjCObjectPointerType.
4655  if (allowOnPointerType) {
4656  if (const auto *objPtr =
4657  dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) {
4658  const ObjCObjectType *objT = objPtr->getObjectType();
4659  // Merge protocol lists and construct ObjCObjectType.
4660  SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
4661  protocolsVec.append(objT->qual_begin(),
4662  objT->qual_end());
4663  protocolsVec.append(protocols.begin(), protocols.end());
4664  ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
4665  type = getObjCObjectType(
4666  objT->getBaseType(),
4667  objT->getTypeArgsAsWritten(),
4668  protocols,
4669  objT->isKindOfTypeAsWritten());
4670  return getObjCObjectPointerType(type);
4671  }
4672  }
4673 
4674  // Apply protocol qualifiers to ObjCObjectType.
4675  if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){
4676  // FIXME: Check for protocols to which the class type is already
4677  // known to conform.
4678 
4679  return getObjCObjectType(objT->getBaseType(),
4680  objT->getTypeArgsAsWritten(),
4681  protocols,
4682  objT->isKindOfTypeAsWritten());
4683  }
4684 
4685  // If the canonical type is ObjCObjectType, ...
4686  if (type->isObjCObjectType()) {
4687  // Silently overwrite any existing protocol qualifiers.
4688  // TODO: determine whether that's the right thing to do.
4689 
4690  // FIXME: Check for protocols to which the class type is already
4691  // known to conform.
4692  return getObjCObjectType(type, {}, protocols, false);
4693  }
4694 
4695  // id<protocol-list>
4696  if (type->isObjCIdType()) {
4697  const auto *objPtr = type->castAs<ObjCObjectPointerType>();
4698  type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols,
4699  objPtr->isKindOfType());
4700  return getObjCObjectPointerType(type);
4701  }
4702 
4703  // Class<protocol-list>
4704  if (type->isObjCClassType()) {
4705  const auto *objPtr = type->castAs<ObjCObjectPointerType>();
4706  type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols,
4707  objPtr->isKindOfType());
4708  return getObjCObjectPointerType(type);
4709  }
4710 
4711  hasError = true;
4712  return type;
4713 }
4714 
 4715 QualType
 4716 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
4717  ArrayRef<ObjCProtocolDecl *> protocols) const {
4718  // Look in the folding set for an existing type.
4719  llvm::FoldingSetNodeID ID;
4720  ObjCTypeParamType::Profile(ID, Decl, protocols);
4721  void *InsertPos = nullptr;
4722  if (ObjCTypeParamType *TypeParam =
4723  ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
4724  return QualType(TypeParam, 0);
4725 
4726  // We canonicalize to the underlying type.
4727  QualType Canonical = getCanonicalType(Decl->getUnderlyingType());
4728  if (!protocols.empty()) {
 4729  // Apply the protocol qualifiers.
 4730  bool hasError;
 4731  Canonical = getCanonicalType(applyObjCProtocolQualifiers(
4732  Canonical, protocols, hasError, true /*allowOnPointerType*/));
4733  assert(!hasError && "Error when apply protocol qualifier to bound type");
4734  }
4735 
4736  unsigned size = sizeof(ObjCTypeParamType);
4737  size += protocols.size() * sizeof(ObjCProtocolDecl *);
4738  void *mem = Allocate(size, TypeAlignment);
4739  auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);
4740 
4741  Types.push_back(newType);
4742  ObjCTypeParamTypes.InsertNode(newType, InsertPos);
4743  return QualType(newType, 0);
4744 }
4745 
4746 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
4747 /// protocol list adopt all protocols in QT's qualified-id protocol
 4748 /// list.
 4749 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
4750  ObjCInterfaceDecl *IC) {
4751  if (!QT->isObjCQualifiedIdType())
4752  return false;
4753 
4754  if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) {
4755  // If both the right and left sides have qualifiers.
4756  for (auto *Proto : OPT->quals()) {
4757  if (!IC->ClassImplementsProtocol(Proto, false))
4758  return false;
4759  }
4760  return true;
4761  }
4762  return false;
4763 }
4764 
4765 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
4766 /// QT's qualified-id protocol list adopt all protocols in IDecl's list
 4767 /// of protocols.
 4768 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
4769  ObjCInterfaceDecl *IDecl) {
4770  if (!QT->isObjCQualifiedIdType())
4771  return false;
4772  const auto *OPT = QT->getAs<ObjCObjectPointerType>();
4773  if (!OPT)
4774  return false;
4775  if (!IDecl->hasDefinition())
4776  return false;
4777  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
4778  CollectInheritedProtocols(IDecl, InheritedProtocols);
4779  if (InheritedProtocols.empty())
4780  return false;
4781  // Check that if every protocol in list of id<plist> conforms to a protocol
4782  // of IDecl's, then bridge casting is ok.
4783  bool Conforms = false;
4784  for (auto *Proto : OPT->quals()) {
4785  Conforms = false;
4786  for (auto *PI : InheritedProtocols) {
4787  if (ProtocolCompatibleWithProtocol(Proto, PI)) {
4788  Conforms = true;
4789  break;
4790  }
4791  }
4792  if (!Conforms)
4793  break;
4794  }
4795  if (Conforms)
4796  return true;
4797 
4798  for (auto *PI : InheritedProtocols) {
4799  // If both the right and left sides have qualifiers.
4800  bool Adopts = false;
4801  for (auto *Proto : OPT->quals()) {
4802  // return 'true' if 'PI' is in the inheritance hierarchy of Proto
4803  if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto)))
4804  break;
4805  }
4806  if (!Adopts)
4807  return false;
4808  }
4809  return true;
4810 }
4811 
4812 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
 4813 /// the given object type.
 4814 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
4815  llvm::FoldingSetNodeID ID;
4816  ObjCObjectPointerType::Profile(ID, ObjectT);
4817 
4818  void *InsertPos = nullptr;
4819  if (ObjCObjectPointerType *QT =
4820  ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
4821  return QualType(QT, 0);
4822 
4823  // Find the canonical object type.
4824  QualType Canonical;
4825  if (!ObjectT.isCanonical()) {
4826  Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));
4827 
4828  // Regenerate InsertPos.
4829  ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
4830  }
4831 
4832  // No match.
4833  void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
4834  auto *QType =
4835  new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
4836 
4837  Types.push_back(QType);
4838  ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
4839  return QualType(QType, 0);
4840 }
4841 
4842 /// getObjCInterfaceType - Return the unique reference to the type for the
 4843 /// specified ObjC interface decl. The list of protocols is optional.
 4844 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
4845  ObjCInterfaceDecl *PrevDecl) const {
4846  if (Decl->TypeForDecl)
4847  return QualType(Decl->TypeForDecl, 0);
4848 
4849  if (PrevDecl) {
4850  assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
4851  Decl->TypeForDecl = PrevDecl->TypeForDecl;
4852  return QualType(PrevDecl->TypeForDecl, 0);
4853  }
4854 
4855  // Prefer the definition, if there is one.
4856  if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
4857  Decl = Def;
4858 
4859  void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
4860  auto *T = new (Mem) ObjCInterfaceType(Decl);
4861  Decl->TypeForDecl = T;
4862  Types.push_back(T);
4863  return QualType(T, 0);
4864 }
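// --- Editorial usage sketch (not part of ASTContext.cpp) ---------------------
// Going from an interface declaration to the type written as 'MyClass *',
// assuming `IFace` is an ObjCInterfaceDecl obtained elsewhere: first the
// ObjCInterfaceType, then the object-pointer type that wraps it. The helper
// name is hypothetical.
static QualType exampleObjCPointer(ASTContext &Ctx, ObjCInterfaceDecl *IFace) {
  QualType Iface = Ctx.getObjCInterfaceType(IFace, /*PrevDecl=*/nullptr);
  return Ctx.getObjCObjectPointerType(Iface);
}
// -----------------------------------------------------------------------------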
4865 
4866 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
 4867 /// TypeOfExprType ASTs (since expressions are never shared). For example,
 4868 /// multiple declarations that refer to "typeof(x)" all contain different
 4869 /// DeclRefExprs. This doesn't affect the type checker, since it operates
 4870 /// on canonical types (which are always unique).
 4871 QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
4872  TypeOfExprType *toe;
4873  if (tofExpr->isTypeDependent()) {
4874  llvm::FoldingSetNodeID ID;
4875  DependentTypeOfExprType::Profile(ID, *this, tofExpr);
4876 
 4877  void *InsertPos = nullptr;
 4878  DependentTypeOfExprType *Canon
 4879  = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
4880  if (Canon) {
4881  // We already have a "canonical" version of an identical, dependent
4882  // typeof(expr) type. Use that as our canonical type.
4883  toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr,
4884  QualType((TypeOfExprType*)Canon, 0));
4885  } else {
4886  // Build a new, canonical typeof(expr) type.
4887  Canon
4888  = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr);
4889  DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
4890  toe = Canon;
4891  }
4892  } else {
4893  QualType Canonical = getCanonicalType(tofExpr->getType());
4894  toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical);
4895  }
4896  Types.push_back(toe);
4897  return QualType(toe, 0);
4898 }
4899 
4900 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
4901 /// TypeOfType nodes. The only motivation to unique these nodes would be
4902 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
4903 /// an issue. This doesn't affect the type checker, since it operates
 4904 /// on canonical types (which are always unique).
 4905 QualType ASTContext::getTypeOfType(QualType tofType) const {
4906  QualType Canonical = getCanonicalType(tofType);
4907  auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical);
4908  Types.push_back(tot);
4909  return QualType(tot, 0);
4910 }
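// --- Editorial usage sketch (not part of ASTContext.cpp) ---------------------
// GNU 'typeof(int)' as a type node: a fresh TypeOfType is allocated on every
// call (these are deliberately not uniqued, per the comment above), but its
// canonical type is still the shared canonical 'int'. The helper name is
// hypothetical.
static QualType exampleTypeofInt(ASTContext &Ctx) {
  return Ctx.getTypeOfType(Ctx.IntTy);
}
// -----------------------------------------------------------------------------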
4911 
4912 /// Unlike many "get<Type>" functions, we don't unique DecltypeType
4913 /// nodes. This would never be helpful, since each such type has its own
4914 /// expression, and would not give a significant memory saving, since there
 4915 /// is an Expr tree under each such type.
 4916 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
4917  DecltypeType *dt;
4918 
4919  // C++11 [temp.type]p2:
4920  // If an expression e involves a template parameter, decltype(e) denotes a
4921  // unique dependent type. Two such decltype-specifiers refer to the same
4922  // type only if their expressions are equivalent (14.5.6.1).
4923  if (e->isInstantiationDependent()) {
4924  llvm::FoldingSetNodeID ID;
4925  DependentDecltypeType::Profile(ID, *this, e);
4926 
4927  void *InsertPos = nullptr;
4928  DependentDecltypeType *Canon
4929  = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
4930  if (!Canon) {
4931  // Build a new, canonical decltype(expr) type.
4932  Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e);
4933  DependentDecltypeTypes.InsertNode(Canon, InsertPos);
4934  }
4935  dt = new (*this, TypeAlignment)
4936  DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0));
4937  } else {
4938  dt = new (*this, TypeAlignment)
4939  DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType));
4940  }
4941  Types.push_back(dt);
4942  return QualType(dt, 0);
4943 }
4944 
4945 /// getUnaryTransformationType - We don't unique these, since the memory
 4946 /// savings are minimal and these are rare.
 4947 QualType ASTContext::getUnaryTransformType(QualType BaseType,
 4948  QualType UnderlyingType,
 4949  UnaryTransformType::UTTKind Kind)
 4950  const {
4951  UnaryTransformType *ut = nullptr;
4952 
4953  if (BaseType->isDependentType()) {
4954  // Look in the folding set for an existing type.
4955  llvm::FoldingSetNodeID ID;
4956  DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind);
4957 
4958  void *InsertPos = nullptr;
4959  DependentUnaryTransformType *Canon
4960  = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);
4961 
4962  if (!Canon) {
4963  // Build a new, canonical __underlying_type(type) type.
4964  Canon = new (*this, TypeAlignment)
4965  DependentUnaryTransformType(*this, getCanonicalType(BaseType),
4966  Kind);
4967  DependentUnaryTransformTypes.InsertNode(Canon, InsertPos);
4968  }
4969  ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
4970  QualType(), Kind,
4971  QualType(Canon, 0));
4972  } else {
4973  QualType CanonType = getCanonicalType(UnderlyingType);
4974  ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
4975  UnderlyingType, Kind,
4976  CanonType);
4977  }
4978  Types.push_back(ut);
4979  return QualType(ut, 0);
4980 }
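
// Illustrative usage sketch (editorial addition): __underlying_type(E) for an
// enumeration type. 'Ctx', 'EnumTy', and 'Underlying' are hypothetical; for a
// non-dependent enum, Underlying would be the integer type Sema computed.
static QualType buildUnderlyingTypeExample(ASTContext &Ctx, QualType EnumTy,
                                           QualType Underlying) {
  return Ctx.getUnaryTransformType(EnumTy, Underlying,
                                   UnaryTransformType::EnumUnderlyingType);
}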
4981 
4982 /// getAutoType - Return the uniqued reference to the 'auto' type which has been
4983 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the
4984 /// canonical deduced-but-dependent 'auto' type.
4985 QualType ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
4986  bool IsDependent, bool IsPack) const {
4987  assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack");
4988  if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && !IsDependent)
4989  return getAutoDeductType();
4990 
4991  // Look in the folding set for an existing type.
4992  void *InsertPos = nullptr;
4993  llvm::FoldingSetNodeID ID;
4994  AutoType::Profile(ID, DeducedType, Keyword, IsDependent, IsPack);
4995  if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
4996  return QualType(AT, 0);
4997 
4998  auto *AT = new (*this, TypeAlignment)
4999  AutoType(DeducedType, Keyword, IsDependent, IsPack);
5000  Types.push_back(AT);
5001  if (InsertPos)
5002  AutoTypes.InsertNode(AT, InsertPos);
5003  return QualType(AT, 0);
5004 }
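
// Illustrative usage sketch (editorial addition): wrapping a deduced type in
// an AutoType node. 'Ctx' and 'Deduced' are hypothetical; passing a null
// Deduced type with AutoTypeKeyword::Auto would instead hit the early return
// to getAutoDeductType() above.
static QualType buildDeducedAutoExample(ASTContext &Ctx, QualType Deduced) {
  return Ctx.getAutoType(Deduced, AutoTypeKeyword::Auto,
                         /*IsDependent=*/false, /*IsPack=*/false);
}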
5005 
5006 /// Return the uniqued reference to the deduced template specialization type
5007 /// which has been deduced to the given type, or to the canonical undeduced
5008 /// such type, or the canonical deduced-but-dependent such type.
5009 QualType ASTContext::getDeducedTemplateSpecializationType(
5010  TemplateName Template, QualType DeducedType, bool IsDependent) const {
5011  // Look in the folding set for an existing type.
5012  void *InsertPos = nullptr;
5013  llvm::FoldingSetNodeID ID;
5014  DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType,
5015  IsDependent);
5016  if (DeducedTemplateSpecializationType *DTST =
5017  DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
5018  return QualType(DTST, 0);
5019 
5020  auto *DTST = new (*this, TypeAlignment)
5021  DeducedTemplateSpecializationType(Template, DeducedType, IsDependent);
5022  Types.push_back(DTST);
5023  if (InsertPos)
5024  DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos);
5025  return QualType(DTST, 0);
5026 }
5027 
5028 /// getAtomicType - Return the uniqued reference to the atomic type for
5029 /// the given value type.
5030 QualType ASTContext::getAtomicType(QualType T) const {
5031  // Unique pointers, to guarantee there is only one pointer of a particular
5032  // structure.
5033  llvm::FoldingSetNodeID ID;
5034  AtomicType::Profile(ID, T);
5035 
5036  void *InsertPos = nullptr;
5037  if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
5038  return QualType(AT, 0);
5039 
5040  // If the atomic value type isn't canonical, this won't be a canonical type
5041  // either, so fill in the canonical type field.
5042  QualType Canonical;
5043  if (!T.isCanonical()) {
5044  Canonical = getAtomicType(getCanonicalType(T));
5045 
5046  // Get the new insert position for the node we care about.
5047  AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
5048  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
5049  }
5050  auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical);
5051  Types.push_back(New);
5052  AtomicTypes.InsertNode(New, InsertPos);
5053  return QualType(New, 0);
5054 }
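
// Illustrative usage sketch (editorial addition): _Atomic(int). Because IntTy
// is already canonical, the result is itself canonical and is uniqued through
// the AtomicTypes folding set above. 'Ctx' is a hypothetical parameter.
static QualType buildAtomicIntExample(ASTContext &Ctx) {
  return Ctx.getAtomicType(Ctx.IntTy);
}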
5055 
5056 /// getAutoDeductType - Get type pattern for deducing against 'auto'.
5057 QualType ASTContext::getAutoDeductType() const {
5058  if (AutoDeductTy.isNull())
5059  AutoDeductTy = QualType(
5060  new (*this, TypeAlignment) AutoType(QualType(), AutoTypeKeyword::Auto,
5061  /*dependent*/false, /*pack*/false),
5062  0);
5063  return AutoDeductTy;
5064 }
5065 
5066 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
5067 QualType ASTContext::getAutoRRefDeductType() const {
5068  if (AutoRRefDeductTy.isNull())
5069  AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType());
5070  assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
5071  return AutoRRefDeductTy;
5072 }
5073 
5074 /// getTagDeclType - Return the unique reference to the type for the
5075 /// specified TagDecl (struct/union/class/enum) decl.
5076 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
5077  assert(Decl);
5078  // FIXME: What is the design on getTagDeclType when it requires casting
5079  // away const? mutable?
5080  return getTypeDeclType(const_cast<TagDecl*>(Decl));
5081 }
5082 
5083 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
5084 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
5085 /// needs to agree with the definition in <stddef.h>.
5086 CanQualType ASTContext::getSizeType() const {
5087  return getFromTargetType(Target->getSizeType());
5088 }
5089 
5090 /// Return the unique signed counterpart of the integer type
5091 /// corresponding to size_t.
5092 CanQualType ASTContext::getSignedSizeType() const {
5093  return getFromTargetType(Target->getSignedSizeType());
5094 }
5095 
5096 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
5097 CanQualType ASTContext::getIntMaxType() const {
5098  return getFromTargetType(Target->getIntMaxType());
5099 }
5100 
5101 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
5102 CanQualType ASTContext::getUIntMaxType() const {
5103  return getFromTargetType(Target->getUIntMaxType());
5104 }
5105 
5106 /// getSignedWCharType - Return the type of "signed wchar_t".
5107 /// Used when in C++, as a GCC extension.
5108 QualType ASTContext::getSignedWCharType() const {
5109  // FIXME: derive from "Target" ?
5110  return WCharTy;
5111 }
5112 
5113 /// getUnsignedWCharType - Return the type of "unsigned wchar_t".
5114 /// Used when in C++, as a GCC extension.
5115 QualType ASTContext::getUnsignedWCharType() const {
5116  // FIXME: derive from "Target" ?
5117  return UnsignedIntTy;
5118 }
5119 
5120 QualType ASTContext::getIntPtrType() const {
5121  return getFromTargetType(Target->getIntPtrType());
5122 }
5123 
5124 QualType ASTContext::getUIntPtrType() const {
5125  return getCorrespondingUnsignedType(getIntPtrType());
5126 }
5127 
5128 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
5129 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
5130 QualType ASTContext::getPointerDiffType() const {
5131  return getFromTargetType(Target->getPtrDiffType(0));
5132 }
5133 
5134 /// Return the unique unsigned counterpart of "ptrdiff_t"
5135 /// integer type. The standard (C11 7.21.6.1p7) refers to this type
5136 /// in the definition of %tu format specifier.
5137 QualType ASTContext::getUnsignedPointerDiffType() const {
5138  return getFromTargetType(Target->getUnsignedPtrDiffType(0));
5139 }
5140 
5141 /// Return the unique type for "pid_t" defined in
5142 /// <sys/types.h>. We need this to compute the correct type for vfork().
5143 QualType ASTContext::getProcessIDType() const {
5144  return getFromTargetType(Target->getProcessIDType());
5145 }
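
// Illustrative usage sketch (editorial addition): the accessors above map
// target-defined builtin typedefs (size_t, ptrdiff_t, intmax_t, pid_t, ...)
// onto the corresponding builtin types. 'Ctx' and 'Out' are hypothetical.
static void collectTargetTypedefsExample(const ASTContext &Ctx,
                                         llvm::SmallVectorImpl<QualType> &Out) {
  Out.push_back(Ctx.getSizeType());
  Out.push_back(Ctx.getPointerDiffType());
  Out.push_back(Ctx.getIntMaxType());
  Out.push_back(Ctx.getProcessIDType());
}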
5146 
5147 //===----------------------------------------------------------------------===//
5148 // Type Operators
5149 //===----------------------------------------------------------------------===//
5150 
5151 CanQualType ASTContext::getCanonicalParamType(QualType T) const {
5152  // Push qualifiers into arrays, and then discard any remaining
5153  // qualifiers.
5154  T = getCanonicalType(T);
5155  T = getVariableArrayDecayedType(T);
5156  const Type *Ty = T.getTypePtr();
5157  QualType Result;
5158  if (isa<ArrayType>(Ty)) {
5159  Result = getArrayDecayedType(QualType(Ty,0));
5160  } else if (isa<FunctionType>(Ty)) {
5161  Result = getPointerType(QualType(Ty, 0));
5162  } else {
5163  Result = QualType(Ty, 0);
5164  }
5165 
5166  return CanQualType::CreateUnsafe(Result);
5167 }
5168 
5169 QualType ASTContext::getUnqualifiedArrayType(QualType type,
5170  Qualifiers &quals) {
5171  SplitQualType splitType = type.getSplitUnqualifiedType();
5172 
5173  // FIXME: getSplitUnqualifiedType() actually walks all the way to
5174  // the unqualified desugared type and then drops it on the floor.
5175  // We then have to strip that sugar back off with
5176  // getUnqualifiedDesugaredType(), which is silly.
5177  const auto *AT =
5178  dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());
5179 
5180  // If we don't have an array, just use the results in splitType.
5181  if (!AT) {
5182  quals = splitType.Quals;
5183  return QualType(splitType.Ty, 0);
5184  }
5185 
5186  // Otherwise, recurse on the array's element type.
5187  QualType elementType = AT->getElementType();
5188  QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);
5189 
5190  // If that didn't change the element type, AT has no qualifiers, so we
5191  // can just use the results in splitType.
5192  if (elementType == unqualElementType) {
5193  assert(quals.empty()); // from the recursive call
5194  quals = splitType.Quals;
5195  return QualType(splitType.Ty, 0);
5196  }
5197 
5198  // Otherwise, add in the qualifiers from the outermost type, then
5199  // build the type back up.
5200  quals.addConsistentQualifiers(splitType.Quals);
5201 
5202  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
5203  return getConstantArrayType(unqualElementType, CAT->getSize(),
5204  CAT->getSizeExpr(), CAT->getSizeModifier(), 0);
5205  }
5206 
5207  if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) {
5208  return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
5209  }
5210 
5211  if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) {
5212  return getVariableArrayType(unqualElementType,
5213  VAT->getSizeExpr(),
5214  VAT->getSizeModifier(),
5215  VAT->getIndexTypeCVRQualifiers(),
5216  VAT->getBracketsRange());
5217  }
5218 
5219  const auto *DSAT = cast<DependentSizedArrayType>(AT);
5220  return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
5221  DSAT->getSizeModifier(), 0,
5222  SourceRange());
5223 }
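
// Illustrative usage sketch (editorial addition): splitting qualifiers off an
// array type. For 'const int[4]', 'Quals' receives 'const' and the result is
// 'int[4]'. 'Ctx', 'T', and 'Quals' are hypothetical parameters.
static QualType stripArrayQualifiersExample(ASTContext &Ctx, QualType T,
                                            Qualifiers &Quals) {
  return Ctx.getUnqualifiedArrayType(T, Quals);
}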
5224 
5225 /// Attempt to unwrap two types that may both be array types with the same bound
5226 /// (or both be array types of unknown bound) for the purpose of comparing the
5227 /// cv-decomposition of two types per C++ [conv.qual].
5228 bool ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) {
5229  bool UnwrappedAny = false;
5230  while (true) {
5231  auto *AT1 = getAsArrayType(T1);
5232  if (!AT1) return UnwrappedAny;
5233 
5234  auto *AT2 = getAsArrayType(T2);
5235  if (!AT2) return UnwrappedAny;
5236 
5237  // If we don't have two array types with the same constant bound nor two
5238  // incomplete array types, we've unwrapped everything we can.
5239  if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) {
5240  auto *CAT2 = dyn_cast<ConstantArrayType>(AT2);
5241  if (!CAT2 || CAT1->getSize() != CAT2->getSize())
5242  return UnwrappedAny;
5243  } else if (!isa<IncompleteArrayType>(AT1) ||
5244  !isa<IncompleteArrayType>(AT2)) {
5245  return UnwrappedAny;
5246  }
5247 
5248  T1 = AT1->getElementType();
5249  T2 = AT2->getElementType();
5250  UnwrappedAny = true;
5251  }
5252 }
5253 
5254 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
5255 ///
5256 /// If T1 and T2 are both pointer types of the same kind, or both array types
5257 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is
5258 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
5259 ///
5260 /// This function will typically be called in a loop that successively
5261 /// "unwraps" pointer and pointer-to-member types to compare them at each
5262 /// level.
5263 ///
5264 /// \return \c true if a pointer type was unwrapped, \c false if we reached a
5265 /// pair of types that can't be unwrapped further.
5266 bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2) {
5267  UnwrapSimilarArrayTypes(T1, T2);
5268 
5269  const auto *T1PtrType = T1->getAs<PointerType>();
5270  const auto *T2PtrType = T2->getAs<PointerType>();
5271  if (T1PtrType && T2PtrType) {
5272  T1 = T1PtrType->getPointeeType();
5273  T2 = T2PtrType->getPointeeType();
5274  return true;
5275  }
5276 
5277  const auto *T1MPType = T1->getAs<MemberPointerType>();
5278  const auto *T2MPType = T2->getAs<MemberPointerType>();
5279  if (T1MPType && T2MPType &&
5280  hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
5281  QualType(T2MPType->getClass(), 0))) {
5282  T1 = T1MPType->getPointeeType();
5283  T2 = T2MPType->getPointeeType();
5284  return true;
5285  }
5286 
5287  if (getLangOpts().ObjC) {
5288  const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
5289  const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
5290  if (T1OPType && T2OPType) {
5291  T1 = T1OPType->getPointeeType();
5292  T2 = T2OPType->getPointeeType();
5293  return true;
5294  }
5295  }
5296 
5297  // FIXME: Block pointers, too?
5298 
5299  return false;
5300 }
5301 
5302 bool ASTContext::hasSimilarType(QualType T1, QualType T2) {
5303  while (true) {
5304  Qualifiers Quals;
5305  T1 = getUnqualifiedArrayType(T1, Quals);
5306  T2 = getUnqualifiedArrayType(T2, Quals);
5307  if (hasSameType(T1, T2))
5308  return true;
5309  if (!UnwrapSimilarTypes(T1, T2))
5310  return false;
5311  }
5312 }
5313 
5314 bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {
5315  while (true) {
5316  Qualifiers Quals1, Quals2;
5317  T1 = getUnqualifiedArrayType(T1, Quals1);
5318  T2 = getUnqualifiedArrayType(T2, Quals2);
5319 
5320  Quals1.removeCVRQualifiers();
5321  Quals2.removeCVRQualifiers();
5322  if (Quals1 != Quals2)
5323  return false;
5324 
5325  if (hasSameType(T1, T2))
5326  return true;
5327 
5328  if (!UnwrapSimilarTypes(T1, T2))
5329  return false;
5330  }
5331 }
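
// Illustrative usage sketch (editorial addition): the similarity checks above
// implement C++ [conv.qual]; e.g. 'int **' and 'const int *const *' are
// similar because only the qualifiers differ at each level. 'Ctx', 'A', and
// 'B' are hypothetical parameters.
static bool areSimilarExample(ASTContext &Ctx, QualType A, QualType B) {
  return Ctx.hasSimilarType(A, B);
}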
5332 
5333 DeclarationNameInfo
5334 ASTContext::getNameForTemplate(TemplateName Name,
5335  SourceLocation NameLoc) const {
5336  switch (Name.getKind()) {
5337  case TemplateName::QualifiedTemplate:
5338  case TemplateName::Template:
5339  // DNInfo work in progress: CHECKME: what about DNLoc?
5340  return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
5341  NameLoc);
5342 
5343  case TemplateName::OverloadedTemplate: {
5344  OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
5345  // DNInfo work in progress: CHECKME: what about DNLoc?
5346  return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
5347  }
5348 
5349  case TemplateName::AssumedTemplate: {
5350  AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName();
5351  return DeclarationNameInfo(Storage->getDeclName(), NameLoc);
5352  }
5353 
5354  case TemplateName::DependentTemplate: {
5355  DependentTemplateName *DTN = Name.getAsDependentTemplateName();
5356  DeclarationName DName;
5357  if (DTN->isIdentifier()) {
5358  DName = DeclarationNames.getIdentifier(DTN->getIdentifier());
5359  return DeclarationNameInfo(DName, NameLoc);
5360  } else {
5361  DName = DeclarationNames.getCXXOperatorName(DTN->getOperator());
5362  // DNInfo work in progress: FIXME: source locations?
5363  DeclarationNameLoc DNLoc;
5364  DNLoc.CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding();
5365  DNLoc.CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding();
5366  return DeclarationNameInfo(DName, NameLoc, DNLoc);
5367  }
5368  }
5369 
5370  case TemplateName::SubstTemplateTemplateParm: {
5371  SubstTemplateTemplateParmStorage *subst
5372  = Name.getAsSubstTemplateTemplateParm();
5373  return DeclarationNameInfo(subst->getParameter()->getDeclName(),
5374  NameLoc);
5375  }
5376 
5377  case TemplateName::SubstTemplateTemplateParmPack: {
5378  SubstTemplateTemplateParmPackStorage *subst
5379  = Name.getAsSubstTemplateTemplateParmPack();
5380  return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
5381  NameLoc);
5382  }
5383  }
5384 
5385  llvm_unreachable("bad template name kind!");
5386 }
5387 
5388 TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
5389  switch (Name.getKind()) {
5390  case TemplateName::QualifiedTemplate:
5391  case TemplateName::Template: {
5392  TemplateDecl *Template = Name.getAsTemplateDecl();
5393  if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template))
5394  Template = getCanonicalTemplateTemplateParmDecl(TTP);
5395 
5396  // The canonical template name is the canonical template declaration.
5397  return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
5398  }
5399 
5400  case TemplateName::OverloadedTemplate:
5401  case TemplateName::AssumedTemplate:
5402  llvm_unreachable("cannot canonicalize unresolved template");
5403 
5404  case TemplateName::DependentTemplate: {
5405  DependentTemplateName *DTN = Name.getAsDependentTemplateName();
5406  assert(DTN && "Non-dependent template names must refer to template decls.");
5407  return DTN->CanonicalTemplateName;
5408  }
5409 
5410  case TemplateName::SubstTemplateTemplateParm: {
5411  SubstTemplateTemplateParmStorage *subst
5412  = Name.getAsSubstTemplateTemplateParm();
5413  return getCanonicalTemplateName(subst->getReplacement());
5414  }
5415 
5416  case TemplateName::SubstTemplateTemplateParmPack: {
5417  SubstTemplateTemplateParmPackStorage *subst
5418  = Name.getAsSubstTemplateTemplateParmPack();
5419  TemplateTemplateParmDecl *canonParameter
5420  = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack());
5421  TemplateArgument canonArgPack
5422  = getCanonicalTemplateArgument(subst->getArgumentPack());
5423  return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack);
5424  }
5425  }
5426 
5427  llvm_unreachable("bad template name!");
5428 }
5429 
5430 bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) {
5431  X = getCanonicalTemplateName(X);
5432  Y = getCanonicalTemplateName(Y);
5433  return X.getAsVoidPointer() == Y.getAsVoidPointer();
5434 }
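
// Illustrative usage sketch (editorial addition): because both operands are
// canonicalized first, a qualified template name and the plain declaration it
// refers to compare equal. 'Ctx', 'X', and 'Y' are hypothetical parameters.
static bool sameTemplateExample(ASTContext &Ctx, TemplateName X,
                                TemplateName Y) {
  return Ctx.hasSameTemplateName(X, Y);
}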
5435 
5436 TemplateArgument
5437 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
5438  switch (Arg.getKind()) {
5439  case TemplateArgument::Null:
5440  return Arg;
5441 
5442  case TemplateArgument::Expression:
5443  return Arg;
5444 
5445  case TemplateArgument::Declaration: {
5446  auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
5447  return TemplateArgument(D, Arg.getParamTypeForDecl());
5448  }
5449 
5450  case TemplateArgument::NullPtr:
5451  return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
5452  /*isNullPtr*/true);
5453 
5454  case TemplateArgument::Template:
5455  return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));
5456 
5457  case TemplateArgument::TemplateExpansion:
5458  return TemplateArgument(getCanonicalTemplateName(
5459  Arg.getAsTemplateOrTemplatePattern()),
5460  Arg.getNumTemplateExpansions());
5461 
5462  case TemplateArgument::Integral:
5463  return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));
5464 
5465  case TemplateArgument::Type:
5466  return TemplateArgument(getCanonicalType(Arg.getAsType()));
5467 
5468  case TemplateArgument::Pack: {
5469  if (Arg.pack_size() == 0)
5470  return Arg;
5471 
5472  auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()];
5473  unsigned Idx = 0;
5474  for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
5475  AEnd = Arg.pack_end();
5476  A != AEnd; (void)++A, ++Idx)
5477  CanonArgs[Idx] = getCanonicalTemplateArgument(*A);
5478 
5479  return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size()));
5480  }
5481  }
5482 
5483  // Silence GCC warning
5484  llvm_unreachable("Unhandled template argument kind");
5485 }
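
// Illustrative usage sketch (editorial addition): canonicalizing a type
// template argument reduces to canonicalizing the type itself, per the Type
// case in the switch above. 'Ctx' and 'T' are hypothetical parameters.
static TemplateArgument canonicalizeTypeArgExample(const ASTContext &Ctx,
                                                   QualType T) {
  return Ctx.getCanonicalTemplateArgument(TemplateArgument(T));
}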
5486 
5487 NestedNameSpecifier *
5488 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
5489  if (!NNS)
5490  return nullptr;
5491 
5492  switch (NNS->getKind()) {
5493  case NestedNameSpecifier::Identifier:
5494  // Canonicalize the prefix but keep the identifier the same.
5495  return NestedNameSpecifier::Create(*this,
5496  getCanonicalNestedNameSpecifier(NNS->getPrefix()),
5497  NNS->getAsIdentifier());
5498 
5499  case NestedNameSpecifier::Namespace:
5500  // A namespace is canonical; build a nested-name-specifier with
5501  // this namespace and no prefix.
5502  return NestedNameSpecifier::Create(*this, nullptr,
5503  NNS->getAsNamespace()->getOriginalNamespace());
5504 
5505  case NestedNameSpecifier::NamespaceAlias:
5506  // A namespace is canonical; build a nested-name-specifier with
5507  // this namespace and no prefix.
5508  return NestedNameSpecifier::Create(*this, nullptr,
5509  NNS->getAsNamespaceAlias()->getNamespace()
5510  ->getOriginalNamespace());
5511 
5512  case NestedNameSpecifier::TypeSpec:
5513  case NestedNameSpecifier::TypeSpecWithTemplate: {
5514  QualType T = getCanonicalType(QualType(NNS->getAsType(), 0));
5515 
5516  // If we have some kind of dependent-named type (e.g., "typename T::type"),
5517  // break it apart into its prefix and identifier, then reconstitute those
5518  // as the canonical nested-name-specifier. This is required to canonicalize
5519  // a dependent nested-name-specifier involving typedefs of dependent-name
5520  // types, e.g.,
5521  // typedef typename T::type T1;
5522  // typedef typename T1::type T2;
5523  if (const auto *DNT = T->getAs<DependentNameType>())
5524  return NestedNameSpecifier::Create(*this, DNT->getQualifier(),
5525  const_cast<IdentifierInfo *>(DNT->getIdentifier()));
5526 
5527  // Otherwise, just canonicalize the type, and force it to be a TypeSpec.
5528  // FIXME: Why are TypeSpec and TypeSpecWithTemplate distinct in the
5529  // first place?
5530  return NestedNameSpecifier::Create(*this, nullptr, false,
5531  const_cast<Type *>(T.getTypePtr()));
5532  }
5533 
5534  case