clang  16.0.0git
ASTContext.cpp
Go to the documentation of this file.
1 //===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the ASTContext interface.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "clang/AST/ASTContext.h"
14 #include "CXXABI.h"
15 #include "Interp/Context.h"
16 #include "clang/AST/APValue.h"
17 #include "clang/AST/ASTConcept.h"
20 #include "clang/AST/Attr.h"
21 #include "clang/AST/AttrIterator.h"
22 #include "clang/AST/CharUnits.h"
23 #include "clang/AST/Comment.h"
24 #include "clang/AST/Decl.h"
25 #include "clang/AST/DeclBase.h"
26 #include "clang/AST/DeclCXX.h"
28 #include "clang/AST/DeclObjC.h"
29 #include "clang/AST/DeclOpenMP.h"
30 #include "clang/AST/DeclTemplate.h"
33 #include "clang/AST/Expr.h"
34 #include "clang/AST/ExprCXX.h"
35 #include "clang/AST/ExprConcepts.h"
37 #include "clang/AST/Mangle.h"
42 #include "clang/AST/RecordLayout.h"
43 #include "clang/AST/Stmt.h"
44 #include "clang/AST/TemplateBase.h"
45 #include "clang/AST/TemplateName.h"
46 #include "clang/AST/Type.h"
47 #include "clang/AST/TypeLoc.h"
51 #include "clang/Basic/Builtins.h"
55 #include "clang/Basic/LLVM.h"
57 #include "clang/Basic/Linkage.h"
58 #include "clang/Basic/Module.h"
63 #include "clang/Basic/Specifiers.h"
65 #include "clang/Basic/TargetInfo.h"
66 #include "clang/Basic/XRayLists.h"
67 #include "llvm/ADT/APFixedPoint.h"
68 #include "llvm/ADT/APInt.h"
69 #include "llvm/ADT/APSInt.h"
70 #include "llvm/ADT/ArrayRef.h"
71 #include "llvm/ADT/DenseMap.h"
72 #include "llvm/ADT/DenseSet.h"
73 #include "llvm/ADT/FoldingSet.h"
74 #include "llvm/ADT/None.h"
75 #include "llvm/ADT/Optional.h"
76 #include "llvm/ADT/PointerUnion.h"
77 #include "llvm/ADT/STLExtras.h"
78 #include "llvm/ADT/SmallPtrSet.h"
79 #include "llvm/ADT/SmallVector.h"
80 #include "llvm/ADT/StringExtras.h"
81 #include "llvm/ADT/StringRef.h"
82 #include "llvm/ADT/Triple.h"
83 #include "llvm/Support/Capacity.h"
84 #include "llvm/Support/Casting.h"
85 #include "llvm/Support/Compiler.h"
86 #include "llvm/Support/ErrorHandling.h"
87 #include "llvm/Support/MD5.h"
88 #include "llvm/Support/MathExtras.h"
89 #include "llvm/Support/raw_ostream.h"
90 #include <algorithm>
91 #include <cassert>
92 #include <cstddef>
93 #include <cstdint>
94 #include <cstdlib>
95 #include <map>
96 #include <memory>
97 #include <string>
98 #include <tuple>
99 #include <utility>
100 
101 using namespace clang;
102 
112 };
113 
114 /// \returns location that is relevant when searching for Doc comments related
115 /// to \p D.
// NOTE(review): this extraction dropped original line 116, which carried the
// function header (presumably
// "static SourceLocation getDeclLocForCommentSearch(const Decl *D," — confirm
// against the upstream file); only the trailing parameter line survives below.
// The function filters out declarations that cannot carry doc comments
// (implicit decls, implicit instantiations, embedded tag decls, parameters,
// template parameters) by returning an invalid (default) SourceLocation, and
// otherwise picks the location a comment search should anchor on.
 117  SourceManager &SourceMgr) {
 118  assert(D);
 119 
 120  // User can not attach documentation to implicit declarations.
 121  if (D->isImplicit())
 122  return {};
 123 
 124  // User can not attach documentation to implicit instantiations.
 125  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
 126  if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
 127  return {};
 128  }
 129 
 130  if (const auto *VD = dyn_cast<VarDecl>(D)) {
 131  if (VD->isStaticDataMember() &&
 132  VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
 133  return {};
 134  }
 135 
 136  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
 137  if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
 138  return {};
 139  }
 140 
 141  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
 142  TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
 143  if (TSK == TSK_ImplicitInstantiation ||
 144  TSK == TSK_Undeclared)
 145  return {};
 146  }
 147 
 148  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
 149  if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
 150  return {};
 151  }
 152  if (const auto *TD = dyn_cast<TagDecl>(D)) {
 153  // When tag declaration (but not definition!) is part of the
 154  // decl-specifier-seq of some other declaration, it doesn't get comment
 155  if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
 156  return {};
 157  }
 158  // TODO: handle comments for function parameters properly.
 159  if (isa<ParmVarDecl>(D))
 160  return {};
 161 
 162  // TODO: we could look up template parameter documentation in the template
 163  // documentation.
 164  if (isa<TemplateTypeParmDecl>(D) ||
 165  isa<NonTypeTemplateParmDecl>(D) ||
 166  isa<TemplateTemplateParmDecl>(D))
 167  return {};
 168 
 169  // Find declaration location.
 170  // For Objective-C declarations we generally don't expect to have multiple
 171  // declarators, thus use declaration starting location as the "declaration
 172  // location".
 173  // For all other declarations multiple declarators are used quite frequently,
 174  // so we use the location of the identifier as the "declaration location".
 175  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
 176  isa<ObjCPropertyDecl>(D) ||
 177  isa<RedeclarableTemplateDecl>(D) ||
 178  isa<ClassTemplateSpecializationDecl>(D) ||
 179  // Allow association with Y across {} in `typedef struct X {} Y`.
 180  isa<TypedefDecl>(D))
 181  return D->getBeginLoc();
 182 
 183  const SourceLocation DeclLoc = D->getLocation();
 184  if (DeclLoc.isMacroID()) {
 185  if (isa<TypedefDecl>(D)) {
 186  // If location of the typedef name is in a macro, it is because being
 187  // declared via a macro. Try using declaration's starting location as
 188  // the "declaration location".
 189  return D->getBeginLoc();
 190  }
 191 
 192  if (const auto *TD = dyn_cast<TagDecl>(D)) {
 193  // If location of the tag decl is inside a macro, but the spelling of
 194  // the tag name comes from a macro argument, it looks like a special
 195  // macro like NS_ENUM is being used to define the tag decl. In that
 196  // case, adjust the source location to the expansion loc so that we can
 197  // attach the comment to the tag decl.
 198  if (SourceMgr.isMacroArgExpansion(DeclLoc) && TD->isCompleteDefinition())
 199  return SourceMgr.getExpansionLoc(DeclLoc);
 200  }
 201  }
 202 
 203  return DeclLoc;
 204 }
205 
// NOTE(review): the function header (original line 206, presumably
// "RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(" — confirm
// upstream) was lost in this extraction; only the parameter lines survive.
// Given a representative file location for \p D and the per-file comment map
// (keyed by file offset), this returns either a trailing comment on the same
// line as the declaration, or the nearest preceding doc comment separated
// from the declaration only by text free of ';', '{', '}', '#' and '@'.
 207  const Decl *D, const SourceLocation RepresentativeLocForDecl,
 208  const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
 209  // If the declaration doesn't map directly to a location in a file, we
 210  // can't find the comment.
 211  if (RepresentativeLocForDecl.isInvalid() ||
 212  !RepresentativeLocForDecl.isFileID())
 213  return nullptr;
 214 
 215  // If there are no comments anywhere, we won't find anything.
 216  if (CommentsInTheFile.empty())
 217  return nullptr;
 218 
 219  // Decompose the location for the declaration and find the beginning of the
 220  // file buffer.
 221  const std::pair<FileID, unsigned> DeclLocDecomp =
 222  SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);
 223 
 224  // Slow path.
// lower_bound finds the first comment at or after the declaration's offset.
 225  auto OffsetCommentBehindDecl =
 226  CommentsInTheFile.lower_bound(DeclLocDecomp.second);
 227 
 228  // First check whether we have a trailing comment.
// Trailing comments are only honored for the declaration kinds listed below.
 229  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
 230  RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
 231  if ((CommentBehindDecl->isDocumentation() ||
 232  LangOpts.CommentOpts.ParseAllComments) &&
 233  CommentBehindDecl->isTrailingComment() &&
 234  (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
 235  isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {
 236 
 237  // Check that Doxygen trailing comment comes after the declaration, starts
 238  // on the same line and in the same file as the declaration.
 239  if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
 240  Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
 241  OffsetCommentBehindDecl->first)) {
 242  return CommentBehindDecl;
 243  }
 244  }
 245  }
 246 
 247  // The comment just after the declaration was not a trailing comment.
 248  // Let's look at the previous comment.
 249  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
 250  return nullptr;
 251 
 252  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
 253  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;
 254 
 255  // Check that we actually have a non-member Doxygen comment.
 256  if (!(CommentBeforeDecl->isDocumentation() ||
 257  LangOpts.CommentOpts.ParseAllComments) ||
 258  CommentBeforeDecl->isTrailingComment())
 259  return nullptr;
 260 
 261  // Decompose the end of the comment.
 262  const unsigned CommentEndOffset =
 263  Comments.getCommentEndOffset(CommentBeforeDecl);
 264 
 265  // Get the corresponding buffer.
 266  bool Invalid = false;
 267  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
 268  &Invalid).data();
 269  if (Invalid)
 270  return nullptr;
 271 
 272  // Extract text between the comment and declaration.
 273  StringRef Text(Buffer + CommentEndOffset,
 274  DeclLocDecomp.second - CommentEndOffset);
 275 
 276  // There should be no other declarations or preprocessor directives between
 277  // comment and declaration.
 278  if (Text.find_first_of(";{}#@") != StringRef::npos)
 279  return nullptr;
 280 
 281  return CommentBeforeDecl;
 282 }
283 
// NOTE(review): the function header (original line 284, presumably
// "RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {"
// — confirm upstream) was lost in this extraction.
// Uncached lookup: resolves the comment-search location for \p D, lazily
// pulls comments from the external AST source on first use, then delegates to
// getRawCommentForDeclNoCacheImpl with this file's comment map.
 285  const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);
 286 
 287  // If the declaration doesn't map directly to a location in a file, we
 288  // can't find the comment.
 289  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
 290  return nullptr;
 291 
// Load comments from the external source (e.g. a PCH/module) exactly once.
 292  if (ExternalSource && !CommentsLoaded) {
 293  ExternalSource->ReadComments();
 294  CommentsLoaded = true;
 295  }
 296 
 297  if (Comments.empty())
 298  return nullptr;
 299 
 300  const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
 301  if (!File.isValid()) {
 302  return nullptr;
 303  }
 304  const auto CommentsInThisFile = Comments.getCommentsInFile(File);
 305  if (!CommentsInThisFile || CommentsInThisFile->empty())
 306  return nullptr;
 307 
 308  return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile);
 309 }
310 
// NOTE(review): the function header (original line 311, presumably
// "void ASTContext::addComment(const RawComment &RC) {" — confirm upstream)
// was lost in this extraction. Registers a raw comment with the per-file
// comment store; system-header comments are only expected here when
// -fretain-comments-from-system-headers is on (hence the assert).
 312  assert(LangOpts.RetainCommentsFromSystemHeaders ||
 313  !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
 314  Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
 315 }
316 
317 /// If we have a 'templated' declaration for a template, adjust 'D' to
318 /// refer to the actual template.
319 /// If we have an implicit instantiation, adjust 'D' to refer to template.
// Comments are attached to templates, not to their instantiations, so all
// comment lookups funnel declarations through this normalization first.
// NOTE(review): original lines 336, 361 and 366 are missing from this
// extraction (the getInstantiatedFromMemberFunction() call, the second
// PointerUnion type argument, and the partial-specialization get<> line,
// respectively — confirm against the upstream file).
320 static const Decl &adjustDeclToTemplate(const Decl &D) {
321  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
322  // Is this function declaration part of a function template?
323  if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
324  return *FTD;
325 
326  // Nothing to do if function is not an implicit instantiation.
327  if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
328  return D;
329 
330  // Function is an implicit instantiation of a function template?
331  if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
332  return *FTD;
333 
334  // Function is instantiated from a member definition of a class template?
335  if (const FunctionDecl *MemberDecl =
337  return *MemberDecl;
338 
339  return D;
340  }
341  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
342  // Static data member is instantiated from a member definition of a class
343  // template?
344  if (VD->isStaticDataMember())
345  if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
346  return *MemberDecl;
347 
348  return D;
349  }
350  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
351  // Is this class declaration part of a class template?
352  if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
353  return *CTD;
354 
355  // Class is an implicit instantiation of a class template or partial
356  // specialization?
357  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
358  if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
359  return D;
360  llvm::PointerUnion<ClassTemplateDecl *,
362  PU = CTSD->getSpecializedTemplateOrPartial();
363  return PU.is<ClassTemplateDecl *>()
364  ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
365  : *static_cast<const Decl *>(
367  }
368 
369  // Class is instantiated from a member definition of a class template?
370  if (const MemberSpecializationInfo *Info =
371  CRD->getMemberSpecializationInfo())
372  return *Info->getInstantiatedFrom();
373 
374  return D;
375  }
376  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
377  // Enum is instantiated from a member definition of a class template?
378  if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
379  return *MemberDecl;
380 
381  return D;
382  }
383  // FIXME: Adjust alias templates?
384  return D;
385 }
386 
388  const Decl *D,
389  const Decl **OriginalDecl) const {
390  if (!D) {
391  if (OriginalDecl)
392  OriginalDecl = nullptr;
393  return nullptr;
394  }
395 
396  D = &adjustDeclToTemplate(*D);
397 
398  // Any comment directly attached to D?
399  {
400  auto DeclComment = DeclRawComments.find(D);
401  if (DeclComment != DeclRawComments.end()) {
402  if (OriginalDecl)
403  *OriginalDecl = D;
404  return DeclComment->second;
405  }
406  }
407 
408  // Any comment attached to any redeclaration of D?
409  const Decl *CanonicalD = D->getCanonicalDecl();
410  if (!CanonicalD)
411  return nullptr;
412 
413  {
414  auto RedeclComment = RedeclChainComments.find(CanonicalD);
415  if (RedeclComment != RedeclChainComments.end()) {
416  if (OriginalDecl)
417  *OriginalDecl = RedeclComment->second;
418  auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
419  assert(CommentAtRedecl != DeclRawComments.end() &&
420  "This decl is supposed to have comment attached.");
421  return CommentAtRedecl->second;
422  }
423  }
424 
425  // Any redeclarations of D that we haven't checked for comments yet?
426  // We can't use DenseMap::iterator directly since it'd get invalid.
427  auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
428  auto LookupRes = CommentlessRedeclChains.find(CanonicalD);
429  if (LookupRes != CommentlessRedeclChains.end())
430  return LookupRes->second;
431  return nullptr;
432  }();
433 
434  for (const auto Redecl : D->redecls()) {
435  assert(Redecl);
436  // Skip all redeclarations that have been checked previously.
437  if (LastCheckedRedecl) {
438  if (LastCheckedRedecl == Redecl) {
439  LastCheckedRedecl = nullptr;
440  }
441  continue;
442  }
443  const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
444  if (RedeclComment) {
445  cacheRawCommentForDecl(*Redecl, *RedeclComment);
446  if (OriginalDecl)
447  *OriginalDecl = Redecl;
448  return RedeclComment;
449  }
450  CommentlessRedeclChains[CanonicalD] = Redecl;
451  }
452 
453  if (OriginalDecl)
454  *OriginalDecl = nullptr;
455  return nullptr;
456 }
457 
// NOTE(review): the function header (original line 458, presumably
// "void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD," — confirm
// upstream) was lost in this extraction. Records that \p Comment belongs to
// \p OriginalD: caches the decl->comment mapping, marks the redecl chain as
// having a comment, and drops the chain from the "known commentless" cache.
 459  const RawComment &Comment) const {
 460  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
 461  DeclRawComments.try_emplace(&OriginalD, &Comment);
 462  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
 463  RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
 464  CommentlessRedeclChains.erase(CanonicalDecl);
 465 }
466 
// Collect Objective-C methods redeclared in class extensions of the interface
// that \p ObjCMethod's implementation belongs to, appending them to the
// output vector.
// NOTE(review): the second parameter line (original line 468, presumably
// "SmallVectorImpl<const NamedDecl *> &Redeclared) {" — confirm upstream)
// was lost in this extraction.
467 static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
 469  const DeclContext *DC = ObjCMethod->getDeclContext();
 470  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
 471  const ObjCInterfaceDecl *ID = IMD->getClassInterface();
 472  if (!ID)
 473  return;
 474  // Add redeclared method here.
 475  for (const auto *Ext : ID->known_extensions()) {
 476  if (ObjCMethodDecl *RedeclaredMethod =
 477  Ext->getMethod(ObjCMethod->getSelector(),
 478  ObjCMethod->isInstanceMethod()))
 479  Redeclared.push_back(RedeclaredMethod);
 480  }
 481  }
 482 }
483 
// NOTE(review): the function header (original line 484, presumably
// "void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,"
// — confirm upstream) was lost in this extraction. After a group of decls is
// parsed, this tries to attach any still-unattached comment in the same file
// to one of them, caching both the raw comment and its parsed FullComment.
 485  const Preprocessor *PP) {
 486  if (Comments.empty() || Decls.empty())
 487  return;
 488 
// All decls in the group are assumed to live in the same file; the first one
// with a valid location determines it.
 489  FileID File;
 490  for (Decl *D : Decls) {
 491  SourceLocation Loc = D->getLocation();
 492  if (Loc.isValid()) {
 493  // See if there are any new comments that are not attached to a decl.
 494  // The location doesn't have to be precise - we care only about the file.
 495  File = SourceMgr.getDecomposedLoc(Loc).first;
 496  break;
 497  }
 498  }
 499 
 500  if (File.isInvalid())
 501  return;
 502 
// Bail out early when the most recent comment in the file is already
// attached: then no unattached comment can be pending.
 503  auto CommentsInThisFile = Comments.getCommentsInFile(File);
 504  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
 505  CommentsInThisFile->rbegin()->second->isAttached())
 506  return;
 507 
 508  // There is at least one comment not attached to a decl.
 509  // Maybe it should be attached to one of Decls?
 510  //
 511  // Note that this way we pick up not only comments that precede the
 512  // declaration, but also comments that *follow* the declaration -- thanks to
 513  // the lookahead in the lexer: we've consumed the semicolon and looked
 514  // ahead through comments.
 515 
 516  for (const Decl *D : Decls) {
 517  assert(D);
 518  if (D->isInvalidDecl())
 519  continue;
 520 
 521  D = &adjustDeclToTemplate(*D);
 522 
 523  const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);
 524 
 525  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
 526  continue;
 527 
 528  if (DeclRawComments.count(D) > 0)
 529  continue;
 530 
// NOTE(review): original line 533 (presumably the
// "cacheRawCommentForDecl(*D, *DocComment);" call — confirm upstream) is
// missing from this extraction.
 531  if (RawComment *const DocComment =
 532  getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) {
 534  comments::FullComment *FC = DocComment->parse(*this, PP, D);
 535  ParsedComments[D->getCanonicalDecl()] = FC;
 536  }
 537  }
 538 }
539 
// NOTE(review): the function header (original line 540, presumably
// "comments::FullComment *ASTContext::cloneFullComment(comments::FullComment
// *FC," — confirm upstream) was lost in this extraction. Re-targets a parsed
// FullComment at a different declaration \p D: builds a fresh DeclInfo filled
// from D, then restores the comment's owning decl and inherits the original
// template-parameter info when D itself provides none.
 541  const Decl *D) const {
 542  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
 543  ThisDeclInfo->CommentDecl = D;
 544  ThisDeclInfo->IsFilled = false;
 545  ThisDeclInfo->fill();
 546  ThisDeclInfo->CommentDecl = FC->getDecl();
 547  if (!ThisDeclInfo->TemplateParameters)
 548  ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
 549  comments::FullComment *CFC =
 550  new (*this) comments::FullComment(FC->getBlocks(),
 551  ThisDeclInfo);
 552  return CFC;
 553 }
554 
// NOTE(review): the header and first statement of this function (original
// lines 555-556, presumably "comments::FullComment *ASTContext::
// getLocalCommentForDeclUncached(const Decl *D) const {" followed by the
// getRawCommentForDeclNoCache(D) call that initializes RC — confirm upstream)
// were lost in this extraction. Parses and returns the local raw comment for
// a decl without touching any cache.
 557  return RC ? RC->parse(*this, nullptr, D) : nullptr;
 558 }
559 
// NOTE(review): the function header (original line 560, presumably
// "comments::FullComment *ASTContext::getCommentForDecl(" — confirm upstream)
// was lost in this extraction, as were original lines 574 (presumably the
// "cloneFullComment(FC, D)" call that initializes CFC) and 585 (presumably
// the "SmallVector<const NamedDecl*, 8> Overridden;" declaration).
// Returns the parsed FullComment for \p D, consulting the ParsedComments
// cache; when D itself has no comment, the comment is inherited from related
// declarations (property decls, overridden methods, underlying tag types,
// Objective-C superclasses/categories, or public C++ base classes).
 561  const Decl *D,
 562  const Preprocessor *PP) const {
 563  if (!D || D->isInvalidDecl())
 564  return nullptr;
 565  D = &adjustDeclToTemplate(*D);
 566 
 567  const Decl *Canonical = D->getCanonicalDecl();
 568  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
 569  ParsedComments.find(Canonical);
 570 
 571  if (Pos != ParsedComments.end()) {
 572  if (Canonical != D) {
 573  comments::FullComment *FC = Pos->second;
 575  return CFC;
 576  }
 577  return Pos->second;
 578  }
 579 
 580  const Decl *OriginalDecl = nullptr;
 581 
 582  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
 583  if (!RC) {
 584  if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
 586  const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
 587  if (OMD && OMD->isPropertyAccessor())
 588  if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
 589  if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
 590  return cloneFullComment(FC, D);
 591  if (OMD)
 592  addRedeclaredMethods(OMD, Overridden);
 593  getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
 594  for (unsigned i = 0, e = Overridden.size(); i < e; i++)
 595  if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
 596  return cloneFullComment(FC, D);
 597  }
 598  else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
 599  // Attach any tag type's documentation to its typedef if latter
 600  // does not have one of its own.
 601  QualType QT = TD->getUnderlyingType();
 602  if (const auto *TT = QT->getAs<TagType>())
 603  if (const Decl *TD = TT->getDecl())
 604  if (comments::FullComment *FC = getCommentForDecl(TD, PP))
 605  return cloneFullComment(FC, D);
 606  }
 607  else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
 608  while (IC->getSuperClass()) {
 609  IC = IC->getSuperClass();
 610  if (comments::FullComment *FC = getCommentForDecl(IC, PP))
 611  return cloneFullComment(FC, D);
 612  }
 613  }
 614  else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
 615  if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
 616  if (comments::FullComment *FC = getCommentForDecl(IC, PP))
 617  return cloneFullComment(FC, D);
 618  }
 619  else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
 620  if (!(RD = RD->getDefinition()))
 621  return nullptr;
 622  // Check non-virtual bases.
 623  for (const auto &I : RD->bases()) {
 624  if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
 625  continue;
 626  QualType Ty = I.getType();
 627  if (Ty.isNull())
 628  continue;
 629  if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
 630  if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
 631  continue;
 632 
 633  if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
 634  return cloneFullComment(FC, D);
 635  }
 636  }
 637  // Check virtual bases.
 638  for (const auto &I : RD->vbases()) {
 639  if (I.getAccessSpecifier() != AS_public)
 640  continue;
 641  QualType Ty = I.getType();
 642  if (Ty.isNull())
 643  continue;
 644  if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
 645  if (!(VirtualBase= VirtualBase->getDefinition()))
 646  continue;
 647  if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
 648  return cloneFullComment(FC, D);
 649  }
 650  }
 651  }
 652  return nullptr;
 653  }
 654 
 655  // If the RawComment was attached to other redeclaration of this Decl, we
 656  // should parse the comment in context of that other Decl. This is important
 657  // because comments can contain references to parameter names which can be
 658  // different across redeclarations.
 659  if (D != OriginalDecl && OriginalDecl)
 660  return getCommentForDecl(OriginalDecl, PP);
 661 
 662  comments::FullComment *FC = RC->parse(*this, PP, D);
 663  ParsedComments[Canonical] = FC;
 664  return FC;
 665 }
666 
// Hashes a template template parameter (depth, position, pack-ness, and each
// inner parameter's kind, constraints and expansion info, plus any
// requires-clause) into \p ID so structurally identical parameters fold to
// one canonical node.
// NOTE(review): original lines 675, 677 and 686 are missing from this
// extraction (presumably the TemplateParameterList *Params initialization,
// the for-loop header over Params, and the TC Profile call — confirm against
// the upstream file).
667 void
668 ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
 669  const ASTContext &C,
 670  TemplateTemplateParmDecl *Parm) {
 671  ID.AddInteger(Parm->getDepth());
 672  ID.AddInteger(Parm->getPosition());
 673  ID.AddBoolean(Parm->isParameterPack());
 674 
 676  ID.AddInteger(Params->size());
 678  PEnd = Params->end();
 679  P != PEnd; ++P) {
// Kind tag 0: type template parameter.
 680  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
 681  ID.AddInteger(0);
 682  ID.AddBoolean(TTP->isParameterPack());
 683  const TypeConstraint *TC = TTP->getTypeConstraint();
 684  ID.AddBoolean(TC != nullptr);
 685  if (TC)
 687  /*Canonical=*/true);
 688  if (TTP->isExpandedParameterPack()) {
 689  ID.AddBoolean(true);
 690  ID.AddInteger(TTP->getNumExpansionParameters());
 691  } else
 692  ID.AddBoolean(false);
 693  continue;
 694  }
 695 
// Kind tag 1: non-type template parameter.
 696  if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
 697  ID.AddInteger(1);
 698  ID.AddBoolean(NTTP->isParameterPack());
 699  const Expr *TC = NTTP->getPlaceholderTypeConstraint();
 700  ID.AddBoolean(TC != nullptr);
 701  ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
 702  if (TC)
 703  TC->Profile(ID, C, /*Canonical=*/true);
 704  if (NTTP->isExpandedParameterPack()) {
 705  ID.AddBoolean(true);
 706  ID.AddInteger(NTTP->getNumExpansionTypes());
 707  for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
 708  QualType T = NTTP->getExpansionType(I);
 709  ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
 710  }
 711  } else
 712  ID.AddBoolean(false);
 713  continue;
 714  }
 715 
// Kind tag 2: nested template template parameter — recurse.
 716  auto *TTP = cast<TemplateTemplateParmDecl>(*P);
 717  ID.AddInteger(2);
 718  Profile(ID, C, TTP);
 719  }
 720  Expr *RequiresClause = Parm->getTemplateParameters()->getRequiresClause();
 721  ID.AddBoolean(RequiresClause != nullptr);
 722  if (RequiresClause)
 723  RequiresClause->Profile(ID, C, /*Canonical=*/true);
 724 }
725 
// Rebuilds the immediately-declared constraint expression of a constrained
// template parameter so it refers to \p ConstrainedType instead of the
// original parameter, preserving a surrounding fold-expression if present.
// NOTE(review): original lines 727, 736, 742, 764 and 766 are missing from
// this extraction (presumably the function name/parameter line, the
// "ConceptSpecializationExpr *CSE;" declaration, the NewConverted
// SmallVector declaration, and the ConceptSpecializationExpr::Create call
// that initializes NewIDC — confirm against the upstream file).
726 static Expr *
 728  QualType ConstrainedType) {
 729  // This is a bit ugly - we need to form a new immediately-declared
 730  // constraint that references the new parameter; this would ideally
 731  // require semantic analysis (e.g. template<C T> struct S {}; - the
 732  // converted arguments of C<T> could be an argument pack if C is
 733  // declared as template<typename... T> concept C = ...).
 734  // We don't have semantic analysis here so we dig deep into the
 735  // ready-made constraint expr and change the thing manually.
 737  if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC))
 738  CSE = cast<ConceptSpecializationExpr>(Fold->getLHS());
 739  else
 740  CSE = cast<ConceptSpecializationExpr>(IDC);
 741  ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments();
 743  NewConverted.reserve(OldConverted.size());
 744  if (OldConverted.front().getKind() == TemplateArgument::Pack) {
 745  // The case:
 746  // template<typename... T> concept C = true;
 747  // template<C<int> T> struct S; -> constraint is C<{T, int}>
 748  NewConverted.push_back(ConstrainedType);
 749  llvm::append_range(NewConverted,
 750  OldConverted.front().pack_elements().drop_front(1));
 751  TemplateArgument NewPack(NewConverted);
 752 
 753  NewConverted.clear();
 754  NewConverted.push_back(NewPack);
 755  assert(OldConverted.size() == 1 &&
 756  "Template parameter pack should be the last parameter");
 757  } else {
 758  assert(OldConverted.front().getKind() == TemplateArgument::Type &&
 759  "Unexpected first argument kind for immediately-declared "
 760  "constraint");
 761  NewConverted.push_back(ConstrainedType);
 762  llvm::append_range(NewConverted, OldConverted.drop_front(1));
 763  }
 765  C, CSE->getNamedConcept(), NewConverted, nullptr,
 767 
// Re-wrap the rebuilt concept-specialization in a '&&' fold when the
// original constraint was a fold over a parameter pack.
 768  if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC))
 769  NewIDC = new (C) CXXFoldExpr(
 770  OrigFold->getType(), /*Callee*/nullptr, SourceLocation(), NewIDC,
 771  BinaryOperatorKind::BO_LAnd, SourceLocation(), /*RHS=*/nullptr,
 772  SourceLocation(), /*NumExpansions=*/None);
 773  return NewIDC;
 774 }
775 
// NOTE(review): the return-type line (original line 776) and several interior
// lines (789, 792, 796-797, 804, 808, 818-819, 829, 839, 850-851, 867, 872 —
// mostly declarations/Create() calls for Params, NewTTP, NewIDC, TInfo,
// Param and CanonTTP) are missing from this extraction; confirm against the
// upstream file before editing this function.
// Computes (and memoizes in CanonTemplateTemplateParms) the canonical form
// of a template template parameter: every inner parameter is rebuilt with
// canonical types, canonical constraints and no names/locations, so
// structurally equal parameters share one declaration.
777 ASTContext::getCanonicalTemplateTemplateParmDecl(
 778  TemplateTemplateParmDecl *TTP) const {
 779  // Check if we already have a canonical template template parameter.
 780  llvm::FoldingSetNodeID ID;
 781  CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
 782  void *InsertPos = nullptr;
 783  CanonicalTemplateTemplateParm *Canonical
 784  = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
 785  if (Canonical)
 786  return Canonical->getParam();
 787 
 788  // Build a canonical template parameter list.
 790  SmallVector<NamedDecl *, 4> CanonParams;
 791  CanonParams.reserve(Params->size());
 793  PEnd = Params->end();
 794  P != PEnd; ++P) {
 795  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
 798  TTP->getDepth(), TTP->getIndex(), nullptr, false,
 799  TTP->isParameterPack(), TTP->hasTypeConstraint(),
 800  TTP->isExpandedParameterPack() ?
 801  llvm::Optional<unsigned>(TTP->getNumExpansionParameters()) : None);
 802  if (const auto *TC = TTP->getTypeConstraint()) {
 803  QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0);
 805  *this, TC->getImmediatelyDeclaredConstraint(),
 806  ParamAsArgument);
 807  NewTTP->setTypeConstraint(
 809  DeclarationNameInfo(TC->getNamedConcept()->getDeclName(),
 810  SourceLocation()), /*FoundDecl=*/nullptr,
 811  // Actually canonicalizing a TemplateArgumentLoc is difficult so we
 812  // simply omit the ArgsAsWritten
 813  TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC);
 814  }
 815  CanonParams.push_back(NewTTP);
 816  } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
 817  QualType T = getCanonicalType(NTTP->getType());
 820  if (NTTP->isExpandedParameterPack()) {
 821  SmallVector<QualType, 2> ExpandedTypes;
 822  SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
 823  for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
 824  ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
 825  ExpandedTInfos.push_back(
 826  getTrivialTypeSourceInfo(ExpandedTypes.back()));
 827  }
 828 
 830  SourceLocation(),
 831  SourceLocation(),
 832  NTTP->getDepth(),
 833  NTTP->getPosition(), nullptr,
 834  T,
 835  TInfo,
 836  ExpandedTypes,
 837  ExpandedTInfos);
 838  } else {
 840  SourceLocation(),
 841  SourceLocation(),
 842  NTTP->getDepth(),
 843  NTTP->getPosition(), nullptr,
 844  T,
 845  NTTP->isParameterPack(),
 846  TInfo);
 847  }
 848  if (AutoType *AT = T->getContainedAutoType()) {
 849  if (AT->isConstrained()) {
 852  *this, NTTP->getPlaceholderTypeConstraint(), T));
 853  }
 854  }
 855  CanonParams.push_back(Param);
 856 
 857  } else
// Nested template template parameters are canonicalized recursively.
 858  CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
 859  cast<TemplateTemplateParmDecl>(*P)));
 860  }
 861 
 862  Expr *CanonRequiresClause = nullptr;
 863  if (Expr *RequiresClause = TTP->getTemplateParameters()->getRequiresClause())
 864  CanonRequiresClause = RequiresClause;
 865 
 866  TemplateTemplateParmDecl *CanonTTP
 868  SourceLocation(), TTP->getDepth(),
 869  TTP->getPosition(),
 870  TTP->isParameterPack(),
 871  nullptr,
 873  SourceLocation(),
 874  CanonParams,
 875  SourceLocation(),
 876  CanonRequiresClause));
 877 
 878  // Get the new insert position for the node we care about.
 879  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
 880  assert(!Canonical && "Shouldn't be in the map!");
 881  (void)Canonical;
 882 
 883  // Create the canonical template template parameter entry.
 884  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
 885  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
 886  return CanonTTP;
 887 }
888 
// NOTE(review): the function header (original line 889, presumably
// "TargetCXXABI::Kind ASTContext::getCXXABIKind() const {" — confirm
// upstream) was lost in this extraction. Returns the C++ ABI kind: the
// -fc++-abi= override from LangOpts when present, otherwise the target's
// default ABI kind.
 890  auto Kind = getTargetInfo().getCXXABI().getKind();
 891  return getLangOpts().CXXABI.value_or(Kind);
 892 }
893 
894 CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
895  if (!LangOpts.CPlusPlus) return nullptr;
896 
897  switch (getCXXABIKind()) {
898  case TargetCXXABI::AppleARM64:
899  case TargetCXXABI::Fuchsia:
900  case TargetCXXABI::GenericARM: // Same as Itanium at this level
901  case TargetCXXABI::iOS:
902  case TargetCXXABI::WatchOS:
903  case TargetCXXABI::GenericAArch64:
904  case TargetCXXABI::GenericMIPS:
905  case TargetCXXABI::GenericItanium:
906  case TargetCXXABI::WebAssembly:
907  case TargetCXXABI::XL:
908  return CreateItaniumCXXABI(*this);
909  case TargetCXXABI::Microsoft:
910  return CreateMicrosoftCXXABI(*this);
911  }
912  llvm_unreachable("Invalid CXXABI type!");
913 }
914 
// NOTE(review): the function header (original line 915, presumably
// "interp::Context &ASTContext::getInterpContext() {" — confirm upstream)
// was lost in this extraction. Lazily constructs and returns the constexpr
// interpreter context for this ASTContext.
 916  if (!InterpContext) {
 917  InterpContext.reset(new interp::Context(*this));
 918  }
 919  return *InterpContext.get();
 920 }
921 
// NOTE(review): the function header (original line 922, presumably
// "ParentMapContext &ASTContext::getParentMapContext() {" — confirm upstream)
// was lost in this extraction. Lazily constructs and returns the
// ParentMapContext used for AST parent lookups.
 923  if (!ParentMapCtx)
 924  ParentMapCtx.reset(new ParentMapContext(*this));
 925  return *ParentMapCtx.get();
 926 }
927 
928 static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
929  const LangOptions &LOpts) {
930  if (LOpts.FakeAddressSpaceMap) {
931  // The fake address space map must have a distinct entry for each
932  // language-specific address space.
933  static const unsigned FakeAddrSpaceMap[] = {
934  0, // Default
935  1, // opencl_global
936  3, // opencl_local
937  2, // opencl_constant
938  0, // opencl_private
939  4, // opencl_generic
940  5, // opencl_global_device
941  6, // opencl_global_host
942  7, // cuda_device
943  8, // cuda_constant
944  9, // cuda_shared
945  1, // sycl_global
946  5, // sycl_global_device
947  6, // sycl_global_host
948  3, // sycl_local
949  0, // sycl_private
950  10, // ptr32_sptr
951  11, // ptr32_uptr
952  12 // ptr64
953  };
954  return &FakeAddrSpaceMap;
955  } else {
956  return &T.getAddressSpaceMap();
957  }
958 }
959 
// NOTE(review): the function header (original line 960, presumably
// "static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI," —
// confirm upstream) and the switch's case labels (original lines 963, 965,
// 967 — presumably LangOptions::ASMM_Target / ASMM_On / ASMM_Off) were lost
// in this extraction. Decides whether address-space-map mangling is enabled:
// deferred to the target, forced on, or forced off per the language option.
 961  const LangOptions &LangOpts) {
 962  switch (LangOpts.getAddressSpaceMapMangling()) {
 964  return TI.useAddressSpaceMapMangling();
 966  return true;
 968  return false;
 969  }
 970  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
 971 }
972 
// NOTE(review): the constructor's first line (original line 973, presumably
// "ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM," — confirm
// upstream) and original line 991 (a statement in the constructor body —
// confirm upstream) were lost in this extraction.
// The member-initializer list wires up the folding sets for canonical types,
// the sanitizer/XRay/profile special-case lists built from LangOpts files,
// and the identifier/selector/builtin/comment subsystems.
 974  IdentifierTable &idents, SelectorTable &sels,
 975  Builtin::Context &builtins, TranslationUnitKind TUKind)
 976  : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
 977  FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
 978  TemplateSpecializationTypes(this_()),
 979  DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
 980  SubstTemplateTemplateParmPacks(this_()),
 981  CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
 982  NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
 983  XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
 984  LangOpts.XRayNeverInstrumentFiles,
 985  LangOpts.XRayAttrListFiles, SM)),
 986  ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
 987  PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
 988  BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
 989  Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
 990  CompCategories(this_()), LastSDM(nullptr, 0) {
 992 }
993 
995  // Release the DenseMaps associated with DeclContext objects.
996  // FIXME: Is this the ideal solution?
997  ReleaseDeclContextMaps();
998 
999  // Call all of the deallocation functions on all of their targets.
1000  for (auto &Pair : Deallocations)
1001  (Pair.first)(Pair.second);
1002  Deallocations.clear();
1003 
1004  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
1005  // because they can contain DenseMaps.
1006  for (llvm::DenseMap<const ObjCContainerDecl*,
1007  const ASTRecordLayout*>::iterator
1008  I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
1009  // Increment in loop to prevent using deallocated memory.
1010  if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
1011  R->Destroy(*this);
1012  ObjCLayouts.clear();
1013 
1014  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
1015  I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
1016  // Increment in loop to prevent using deallocated memory.
1017  if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
1018  R->Destroy(*this);
1019  }
1020  ASTRecordLayouts.clear();
1021 
1022  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
1023  AEnd = DeclAttrs.end();
1024  A != AEnd; ++A)
1025  A->second->~AttrVec();
1026  DeclAttrs.clear();
1027 
1028  for (const auto &Value : ModuleInitializers)
1029  Value.second->~PerModuleInitializers();
1030  ModuleInitializers.clear();
1031 }
1032 
1034 
1035 void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
1036  TraversalScope = TopLevelDecls;
1038 }
1039 
1040 void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
1041  Deallocations.push_back({Callback, Data});
1042 }
1043 
1044 void
1046  ExternalSource = std::move(Source);
1047 }
1048 
1050  llvm::errs() << "\n*** AST Context Stats:\n";
1051  llvm::errs() << " " << Types.size() << " types total.\n";
1052 
1053  unsigned counts[] = {
1054 #define TYPE(Name, Parent) 0,
1055 #define ABSTRACT_TYPE(Name, Parent)
1056 #include "clang/AST/TypeNodes.inc"
1057  0 // Extra
1058  };
1059 
1060  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
1061  Type *T = Types[i];
1062  counts[(unsigned)T->getTypeClass()]++;
1063  }
1064 
1065  unsigned Idx = 0;
1066  unsigned TotalBytes = 0;
1067 #define TYPE(Name, Parent) \
1068  if (counts[Idx]) \
1069  llvm::errs() << " " << counts[Idx] << " " << #Name \
1070  << " types, " << sizeof(Name##Type) << " each " \
1071  << "(" << counts[Idx] * sizeof(Name##Type) \
1072  << " bytes)\n"; \
1073  TotalBytes += counts[Idx] * sizeof(Name##Type); \
1074  ++Idx;
1075 #define ABSTRACT_TYPE(Name, Parent)
1076 #include "clang/AST/TypeNodes.inc"
1077 
1078  llvm::errs() << "Total bytes = " << TotalBytes << "\n";
1079 
1080  // Implicit special member functions.
1081  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
1083  << " implicit default constructors created\n";
1084  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
1086  << " implicit copy constructors created\n";
1087  if (getLangOpts().CPlusPlus)
1088  llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
1090  << " implicit move constructors created\n";
1091  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
1093  << " implicit copy assignment operators created\n";
1094  if (getLangOpts().CPlusPlus)
1095  llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
1097  << " implicit move assignment operators created\n";
1098  llvm::errs() << NumImplicitDestructorsDeclared << "/"
1100  << " implicit destructors created\n";
1101 
1102  if (ExternalSource) {
1103  llvm::errs() << "\n";
1104  ExternalSource->PrintStats();
1105  }
1106 
1107  BumpAlloc.PrintStats();
1108 }
1109 
1111  bool NotifyListeners) {
1112  if (NotifyListeners)
1113  if (auto *Listener = getASTMutationListener())
1115 
1116  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
1117 }
1118 
1120  auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
1121  if (It == MergedDefModules.end())
1122  return;
1123 
1124  auto &Merged = It->second;
1126  for (Module *&M : Merged)
1127  if (!Found.insert(M).second)
1128  M = nullptr;
1129  llvm::erase_value(Merged, nullptr);
1130 }
1131 
1134  auto MergedIt =
1135  MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
1136  if (MergedIt == MergedDefModules.end())
1137  return None;
1138  return MergedIt->second;
1139 }
1140 
1141 void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
1142  if (LazyInitializers.empty())
1143  return;
1144 
1145  auto *Source = Ctx.getExternalSource();
1146  assert(Source && "lazy initializers but no external source");
1147 
1148  auto LazyInits = std::move(LazyInitializers);
1149  LazyInitializers.clear();
1150 
1151  for (auto ID : LazyInits)
1152  Initializers.push_back(Source->GetExternalDecl(ID));
1153 
1154  assert(LazyInitializers.empty() &&
1155  "GetExternalDecl for lazy module initializer added more inits");
1156 }
1157 
1159  // One special case: if we add a module initializer that imports another
1160  // module, and that module's only initializer is an ImportDecl, simplify.
1161  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
1162  auto It = ModuleInitializers.find(ID->getImportedModule());
1163 
1164  // Maybe the ImportDecl does nothing at all. (Common case.)
1165  if (It == ModuleInitializers.end())
1166  return;
1167 
1168  // Maybe the ImportDecl only imports another ImportDecl.
1169  auto &Imported = *It->second;
1170  if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
1171  Imported.resolve(*this);
1172  auto *OnlyDecl = Imported.Initializers.front();
1173  if (isa<ImportDecl>(OnlyDecl))
1174  D = OnlyDecl;
1175  }
1176  }
1177 
1178  auto *&Inits = ModuleInitializers[M];
1179  if (!Inits)
1180  Inits = new (*this) PerModuleInitializers;
1181  Inits->Initializers.push_back(D);
1182 }
1183 
1185  auto *&Inits = ModuleInitializers[M];
1186  if (!Inits)
1187  Inits = new (*this) PerModuleInitializers;
1188  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
1189  IDs.begin(), IDs.end());
1190 }
1191 
1193  auto It = ModuleInitializers.find(M);
1194  if (It == ModuleInitializers.end())
1195  return None;
1196 
1197  auto *Inits = It->second;
1198  Inits->resolve(*this);
1199  return Inits->Initializers;
1200 }
1201 
1203  if (!ExternCContext)
1204  ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());
1205 
1206  return ExternCContext;
1207 }
1208 
1211  const IdentifierInfo *II) const {
1212  auto *BuiltinTemplate =
1214  BuiltinTemplate->setImplicit();
1215  getTranslationUnitDecl()->addDecl(BuiltinTemplate);
1216 
1217  return BuiltinTemplate;
1218 }
1219 
1222  if (!MakeIntegerSeqDecl)
1223  MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
1225  return MakeIntegerSeqDecl;
1226 }
1227 
1230  if (!TypePackElementDecl)
1231  TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
1233  return TypePackElementDecl;
1234 }
1235 
1237  RecordDecl::TagKind TK) const {
1238  SourceLocation Loc;
1239  RecordDecl *NewDecl;
1240  if (getLangOpts().CPlusPlus)
1241  NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
1242  Loc, &Idents.get(Name));
1243  else
1244  NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
1245  &Idents.get(Name));
1246  NewDecl->setImplicit();
1247  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
1248  const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
1249  return NewDecl;
1250 }
1251 
1253  StringRef Name) const {
1255  TypedefDecl *NewDecl = TypedefDecl::Create(
1256  const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
1257  SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
1258  NewDecl->setImplicit();
1259  return NewDecl;
1260 }
1261 
1263  if (!Int128Decl)
1264  Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
1265  return Int128Decl;
1266 }
1267 
1269  if (!UInt128Decl)
1270  UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
1271  return UInt128Decl;
1272 }
1273 
1274 void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
1275  auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
1276  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
1277  Types.push_back(Ty);
1278 }
1279 
1281  const TargetInfo *AuxTarget) {
1282  assert((!this->Target || this->Target == &Target) &&
1283  "Incorrect target reinitialization");
1284  assert(VoidTy.isNull() && "Context reinitialized?");
1285 
1286  this->Target = &Target;
1287  this->AuxTarget = AuxTarget;
1288 
1289  ABI.reset(createCXXABI(Target));
1290  AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
1291  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);
1292 
1293  // C99 6.2.5p19.
1294  InitBuiltinType(VoidTy, BuiltinType::Void);
1295 
1296  // C99 6.2.5p2.
1297  InitBuiltinType(BoolTy, BuiltinType::Bool);
1298  // C99 6.2.5p3.
1299  if (LangOpts.CharIsSigned)
1300  InitBuiltinType(CharTy, BuiltinType::Char_S);
1301  else
1302  InitBuiltinType(CharTy, BuiltinType::Char_U);
1303  // C99 6.2.5p4.
1304  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
1305  InitBuiltinType(ShortTy, BuiltinType::Short);
1306  InitBuiltinType(IntTy, BuiltinType::Int);
1307  InitBuiltinType(LongTy, BuiltinType::Long);
1308  InitBuiltinType(LongLongTy, BuiltinType::LongLong);
1309 
1310  // C99 6.2.5p6.
1311  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
1312  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
1313  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
1314  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
1315  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);
1316 
1317  // C99 6.2.5p10.
1318  InitBuiltinType(FloatTy, BuiltinType::Float);
1319  InitBuiltinType(DoubleTy, BuiltinType::Double);
1320  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);
1321 
1322  // GNU extension, __float128 for IEEE quadruple precision
1323  InitBuiltinType(Float128Ty, BuiltinType::Float128);
1324 
1325  // __ibm128 for IBM extended precision
1326  InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128);
1327 
1328  // C11 extension ISO/IEC TS 18661-3
1329  InitBuiltinType(Float16Ty, BuiltinType::Float16);
1330 
1331  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
1332  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
1333  InitBuiltinType(AccumTy, BuiltinType::Accum);
1334  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
1335  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
1336  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
1337  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
1338  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
1339  InitBuiltinType(FractTy, BuiltinType::Fract);
1340  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
1341  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
1342  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
1343  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
1344  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
1345  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
1346  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
1347  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
1348  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
1349  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
1350  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
1351  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
1352  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
1353  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
1354  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
1355  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);
1356 
1357  // GNU extension, 128-bit integers.
1358  InitBuiltinType(Int128Ty, BuiltinType::Int128);
1359  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
1360 
1361  // C++ 3.9.1p5
1362  if (TargetInfo::isTypeSigned(Target.getWCharType()))
1363  InitBuiltinType(WCharTy, BuiltinType::WChar_S);
1364  else // -fshort-wchar makes wchar_t be unsigned.
1365  InitBuiltinType(WCharTy, BuiltinType::WChar_U);
1366  if (LangOpts.CPlusPlus && LangOpts.WChar)
1367  WideCharTy = WCharTy;
1368  else {
1369  // C99 (or C++ using -fno-wchar).
1370  WideCharTy = getFromTargetType(Target.getWCharType());
1371  }
1372 
1373  WIntTy = getFromTargetType(Target.getWIntType());
1374 
1375  // C++20 (proposed)
1376  InitBuiltinType(Char8Ty, BuiltinType::Char8);
1377 
1378  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1379  InitBuiltinType(Char16Ty, BuiltinType::Char16);
1380  else // C99
1381  Char16Ty = getFromTargetType(Target.getChar16Type());
1382 
1383  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1384  InitBuiltinType(Char32Ty, BuiltinType::Char32);
1385  else // C99
1386  Char32Ty = getFromTargetType(Target.getChar32Type());
1387 
1388  // Placeholder type for type-dependent expressions whose type is
1389  // completely unknown. No code should ever check a type against
1390  // DependentTy and users should never see it; however, it is here to
1391  // help diagnose failures to properly check for type-dependent
1392  // expressions.
1393  InitBuiltinType(DependentTy, BuiltinType::Dependent);
1394 
1395  // Placeholder type for functions.
1396  InitBuiltinType(OverloadTy, BuiltinType::Overload);
1397 
1398  // Placeholder type for bound members.
1399  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);
1400 
1401  // Placeholder type for pseudo-objects.
1402  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);
1403 
1404  // "any" type; useful for debugger-like clients.
1405  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);
1406 
1407  // Placeholder type for unbridged ARC casts.
1408  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);
1409 
1410  // Placeholder type for builtin functions.
1411  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
1412 
1413  // Placeholder type for OMP array sections.
1414  if (LangOpts.OpenMP) {
1415  InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
1416  InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
1417  InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
1418  }
1419  if (LangOpts.MatrixTypes)
1420  InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);
1421 
1422  // Builtin types for 'id', 'Class', and 'SEL'.
1423  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
1424  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
1425  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);
1426 
1427  if (LangOpts.OpenCL) {
1428 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
1429  InitBuiltinType(SingletonId, BuiltinType::Id);
1430 #include "clang/Basic/OpenCLImageTypes.def"
1431 
1432  InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
1433  InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
1434  InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
1435  InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
1436  InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);
1437 
1438 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
1439  InitBuiltinType(Id##Ty, BuiltinType::Id);
1440 #include "clang/Basic/OpenCLExtensionTypes.def"
1441  }
1442 
1443  if (Target.hasAArch64SVETypes()) {
1444 #define SVE_TYPE(Name, Id, SingletonId) \
1445  InitBuiltinType(SingletonId, BuiltinType::Id);
1446 #include "clang/Basic/AArch64SVEACLETypes.def"
1447  }
1448 
1449  if (Target.getTriple().isPPC64()) {
1450 #define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
1451  InitBuiltinType(Id##Ty, BuiltinType::Id);
1452 #include "clang/Basic/PPCTypes.def"
1453 #define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
1454  InitBuiltinType(Id##Ty, BuiltinType::Id);
1455 #include "clang/Basic/PPCTypes.def"
1456  }
1457 
1458  if (Target.hasRISCVVTypes()) {
1459 #define RVV_TYPE(Name, Id, SingletonId) \
1460  InitBuiltinType(SingletonId, BuiltinType::Id);
1461 #include "clang/Basic/RISCVVTypes.def"
1462  }
1463 
1464  // Builtin type for __objc_yes and __objc_no
1465  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
1466  SignedCharTy : BoolTy);
1467 
1468  ObjCConstantStringType = QualType();
1469 
1470  ObjCSuperType = QualType();
1471 
1472  // void * type
1473  if (LangOpts.OpenCLGenericAddressSpace) {
1474  auto Q = VoidTy.getQualifiers();
1478  } else {
1480  }
1481 
1482  // nullptr type (C++0x 2.14.7)
1483  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
1484 
1485  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
1486  InitBuiltinType(HalfTy, BuiltinType::Half);
1487 
1488  InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);
1489 
1490  // Builtin type used to help define __builtin_va_list.
1491  VaListTagDecl = nullptr;
1492 
1493  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
1494  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
1497  }
1498 }
1499 
1501  return SourceMgr.getDiagnostics();
1502 }
1503 
1505  AttrVec *&Result = DeclAttrs[D];
1506  if (!Result) {
1507  void *Mem = Allocate(sizeof(AttrVec));
1508  Result = new (Mem) AttrVec;
1509  }
1510 
1511  return *Result;
1512 }
1513 
1514 /// Erase the attributes corresponding to the given declaration.
1516  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
1517  if (Pos != DeclAttrs.end()) {
1518  Pos->second->~AttrVec();
1519  DeclAttrs.erase(Pos);
1520  }
1521 }
1522 
1523 // FIXME: Remove ?
1526  assert(Var->isStaticDataMember() && "Not a static data member");
1528  .dyn_cast<MemberSpecializationInfo *>();
1529 }
1530 
1533  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
1534  TemplateOrInstantiation.find(Var);
1535  if (Pos == TemplateOrInstantiation.end())
1536  return {};
1537 
1538  return Pos->second;
1539 }
1540 
1541 void
1544  SourceLocation PointOfInstantiation) {
1545  assert(Inst->isStaticDataMember() && "Not a static data member");
1546  assert(Tmpl->isStaticDataMember() && "Not a static data member");
1548  Tmpl, TSK, PointOfInstantiation));
1549 }
1550 
1551 void
1554  assert(!TemplateOrInstantiation[Inst] &&
1555  "Already noted what the variable was instantiated from");
1556  TemplateOrInstantiation[Inst] = TSI;
1557 }
1558 
1559 NamedDecl *
1561  auto Pos = InstantiatedFromUsingDecl.find(UUD);
1562  if (Pos == InstantiatedFromUsingDecl.end())
1563  return nullptr;
1564 
1565  return Pos->second;
1566 }
1567 
1568 void
1570  assert((isa<UsingDecl>(Pattern) ||
1571  isa<UnresolvedUsingValueDecl>(Pattern) ||
1572  isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
1573  "pattern decl is not a using decl");
1574  assert((isa<UsingDecl>(Inst) ||
1575  isa<UnresolvedUsingValueDecl>(Inst) ||
1576  isa<UnresolvedUsingTypenameDecl>(Inst)) &&
1577  "instantiation did not produce a using decl");
1578  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
1579  InstantiatedFromUsingDecl[Inst] = Pattern;
1580 }
1581 
1582 UsingEnumDecl *
1584  auto Pos = InstantiatedFromUsingEnumDecl.find(UUD);
1585  if (Pos == InstantiatedFromUsingEnumDecl.end())
1586  return nullptr;
1587 
1588  return Pos->second;
1589 }
1590 
1592  UsingEnumDecl *Pattern) {
1593  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
1594  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
1595 }
1596 
1599  llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
1600  = InstantiatedFromUsingShadowDecl.find(Inst);
1601  if (Pos == InstantiatedFromUsingShadowDecl.end())
1602  return nullptr;
1603 
1604  return Pos->second;
1605 }
1606 
1607 void
1609  UsingShadowDecl *Pattern) {
1610  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
1611  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
1612 }
1613 
1615  llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
1616  = InstantiatedFromUnnamedFieldDecl.find(Field);
1617  if (Pos == InstantiatedFromUnnamedFieldDecl.end())
1618  return nullptr;
1619 
1620  return Pos->second;
1621 }
1622 
1624  FieldDecl *Tmpl) {
1625  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
1626  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
1627  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1628  "Already noted what unnamed field was instantiated from");
1629 
1630  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1631 }
1632 
1635  return overridden_methods(Method).begin();
1636 }
1637 
1640  return overridden_methods(Method).end();
1641 }
1642 
1643 unsigned
1645  auto Range = overridden_methods(Method);
1646  return Range.end() - Range.begin();
1647 }
1648 
1651  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
1652  OverriddenMethods.find(Method->getCanonicalDecl());
1653  if (Pos == OverriddenMethods.end())
1654  return overridden_method_range(nullptr, nullptr);
1655  return overridden_method_range(Pos->second.begin(), Pos->second.end());
1656 }
1657 
1659  const CXXMethodDecl *Overridden) {
1660  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
1661  OverriddenMethods[Method].push_back(Overridden);
1662 }
1663 
1665  const NamedDecl *D,
1666  SmallVectorImpl<const NamedDecl *> &Overridden) const {
1667  assert(D);
1668 
1669  if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
1670  Overridden.append(overridden_methods_begin(CXXMethod),
1671  overridden_methods_end(CXXMethod));
1672  return;
1673  }
1674 
1675  const auto *Method = dyn_cast<ObjCMethodDecl>(D);
1676  if (!Method)
1677  return;
1678 
1680  Method->getOverriddenMethods(OverDecls);
1681  Overridden.append(OverDecls.begin(), OverDecls.end());
1682 }
1683 
1685  assert(!Import->getNextLocalImport() &&
1686  "Import declaration already in the chain");
1687  assert(!Import->isFromASTFile() && "Non-local import declaration");
1688  if (!FirstLocalImport) {
1689  FirstLocalImport = Import;
1690  LastLocalImport = Import;
1691  return;
1692  }
1693 
1694  LastLocalImport->setNextLocalImport(Import);
1695  LastLocalImport = Import;
1696 }
1697 
1698 //===----------------------------------------------------------------------===//
1699 // Type Sizing and Analysis
1700 //===----------------------------------------------------------------------===//
1701 
1702 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1703 /// scalar floating point type.
1704 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1705  switch (T->castAs<BuiltinType>()->getKind()) {
1706  default:
1707  llvm_unreachable("Not a floating point type!");
1708  case BuiltinType::BFloat16:
1709  return Target->getBFloat16Format();
1710  case BuiltinType::Float16:
1711  return Target->getHalfFormat();
1712  case BuiltinType::Half:
1713  // For HLSL, when the native half type is disabled, half will be treat as
1714  // float.
1715  if (getLangOpts().HLSL)
1716  if (getLangOpts().NativeHalfType)
1717  return Target->getHalfFormat();
1718  else
1719  return Target->getFloatFormat();
1720  else
1721  return Target->getHalfFormat();
1722  case BuiltinType::Float: return Target->getFloatFormat();
1723  case BuiltinType::Double: return Target->getDoubleFormat();
1724  case BuiltinType::Ibm128:
1725  return Target->getIbm128Format();
1726  case BuiltinType::LongDouble:
1727  if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
1728  return AuxTarget->getLongDoubleFormat();
1729  return Target->getLongDoubleFormat();
1730  case BuiltinType::Float128:
1731  if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
1732  return AuxTarget->getFloat128Format();
1733  return Target->getFloat128Format();
1734  }
1735 }
1736 
/// Compute the alignment, in character units, of the declaration \p D.
///
/// \param ForAlignof when true, compute the value alignof/_Alignof must
///        report: references are measured through to their referent type,
///        and target promotions that only affect how the object is emitted
///        (large-array alignment, minimum global alignment) are skipped.
/// \return the alignment of \p D in CharUnits.
CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  // Baseline: nothing is less aligned than a char.
  unsigned Align = Target->getCharWidth();

  bool UseAlignAttrOnly = false;
  if (unsigned AlignFromAttr = D->getMaxAlignment()) {
    Align = AlignFromAttr;

    // __attribute__((aligned)) can increase or decrease alignment
    // *except* on a struct or struct member, where it only increases
    // alignment unless 'packed' is also specified.
    //
    // It is an error for alignas to decrease alignment, so we can
    // ignore that possibility;  Sema should diagnose it.
    if (isa<FieldDecl>(D)) {
      UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
        cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
    } else {
      UseAlignAttrOnly = true;
    }
  }
  else if (isa<FieldDecl>(D))
    // No alignment attribute on the field itself; a packed field (or a field
    // of a packed record) still ignores its type's natural alignment.
    UseAlignAttrOnly =
      D->hasAttr<PackedAttr>() ||
      cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();

  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
    QualType T = VD->getType();
    if (const auto *RT = T->getAs<ReferenceType>()) {
      // alignof(reference) is the alignment of the referenced type;
      // otherwise a reference is laid out like a pointer to it.
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          // VLAs always get the large-array alignment; constant arrays only
          // when they meet the target's size threshold.
          if (isa<VariableArrayType>(arrayType))
            Align = std::max(Align, Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(arrayType) &&
                   MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
            Align = std::max(Align, Target->getLargeArrayAlign());
        }
      }
      Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
      // __unaligned drops the alignment back down to a single char.
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
      if (const auto *VD = dyn_cast<VarDecl>(D)) {
        if (VD->hasGlobalStorage() && !ForAlignof) {
          uint64_t TypeSize = getTypeSize(T.getTypePtr());
          Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
        }
      }
    }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // a max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(Align, FieldAlign);
      }
    }
  }

  // Some targets have hard limitation on the maximum requestable alignment in
  // aligned attribute for static variables.
  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
  const auto *VD = dyn_cast<VarDecl>(D);
  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
    Align = std::min(Align, MaxAlignedAttr);

  return toCharUnitsFromBits(Align);
}
1839 
1841  return toCharUnitsFromBits(Target->getExnObjectAlignment());
1842 }
1843 
1844 // getTypeInfoDataSizeInChars - Return the size of a type, in
1845 // chars. If the type is a record, its data size is returned. This is
1846 // the size of the memcpy that's performed when assigning this type
1847 // using a trivial copy/move assignment operator.
1850 
1851  // In C++, objects can sometimes be allocated into the tail padding
1852  // of a base-class subobject. We decide whether that's possible
1853  // during class layout, so here we can just trust the layout results.
1854  if (getLangOpts().CPlusPlus) {
1855  if (const auto *RT = T->getAs<RecordType>()) {
1856  const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
1857  Info.Width = layout.getDataSize();
1858  }
1859  }
1860 
1861  return Info;
1862 }
1863 
1864 /// getConstantArrayInfoInChars - Performing the computation in CharUnits
1865 /// instead of in bits prevents overflowing the uint64_t for some large arrays.
1868  const ConstantArrayType *CAT) {
1869  TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType());
1870  uint64_t Size = CAT->getSize().getZExtValue();
1871  assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=
1872  (uint64_t)(-1)/Size) &&
1873  "Overflow in array type char size evaluation");
1874  uint64_t Width = EltInfo.Width.getQuantity() * Size;
1875  unsigned Align = EltInfo.Align.getQuantity();
1876  if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
1877  Context.getTargetInfo().getPointerWidth(0) == 64)
1878  Width = llvm::alignTo(Width, Align);
1879  return TypeInfoChars(CharUnits::fromQuantity(Width),
1880  CharUnits::fromQuantity(Align),
1881  EltInfo.AlignRequirement);
1882 }
1883 
1885  if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
1886  return getConstantArrayInfoInChars(*this, CAT);
1887  TypeInfo Info = getTypeInfo(T);
1890 }
1891 
1893  return getTypeInfoInChars(T.getTypePtr());
1894 }
1895 
1898 }
1899 
1901  return isAlignmentRequired(T.getTypePtr());
1902 }
1903 
1905  bool NeedsPreferredAlignment) const {
1906  // An alignment on a typedef overrides anything else.
1907  if (const auto *TT = T->getAs<TypedefType>())
1908  if (unsigned Align = TT->getDecl()->getMaxAlignment())
1909  return Align;
1910 
1911  // If we have an (array of) complete type, we're done.
1912  T = getBaseElementType(T);
1913  if (!T->isIncompleteType())
1914  return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T);
1915 
1916  // If we had an array type, its element type might be a typedef
1917  // type with an alignment attribute.
1918  if (const auto *TT = T->getAs<TypedefType>())
1919  if (unsigned Align = TT->getDecl()->getMaxAlignment())
1920  return Align;
1921 
1922  // Otherwise, see if the declaration of the type had an attribute.
1923  if (const auto *TT = T->getAs<TagType>())
1924  return TT->getDecl()->getMaxAlignment();
1925 
1926  return 0;
1927 }
1928 
1930  TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
1931  if (I != MemoizedTypeInfo.end())
1932  return I->second;
1933 
1934  // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
1935  TypeInfo TI = getTypeInfoImpl(T);
1936  MemoizedTypeInfo[T] = TI;
1937  return TI;
1938 }
1939 
1940 /// getTypeInfoImpl - Return the size of the specified type, in bits. This
1941 /// method does not work on incomplete types.
1942 ///
1943 /// FIXME: Pointers into different addr spaces could have different sizes and
1944 /// alignment requirements: getPointerInfo should take an AddrSpace, this
1945 /// should take a QualType, &c.
1946 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
1947  uint64_t Width = 0;
1948  unsigned Align = 8;
1950  unsigned AS = 0;
1951  switch (T->getTypeClass()) {
1952 #define TYPE(Class, Base)
1953 #define ABSTRACT_TYPE(Class, Base)
1954 #define NON_CANONICAL_TYPE(Class, Base)
1955 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
1956 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
1957  case Type::Class: \
1958  assert(!T->isDependentType() && "should not see dependent types here"); \
1959  return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
1960 #include "clang/AST/TypeNodes.inc"
1961  llvm_unreachable("Should not see dependent types");
1962 
1963  case Type::FunctionNoProto:
1964  case Type::FunctionProto:
1965  // GCC extension: alignof(function) = 32 bits
1966  Width = 0;
1967  Align = 32;
1968  break;
1969 
1970  case Type::IncompleteArray:
1971  case Type::VariableArray:
1972  case Type::ConstantArray: {
1973  // Model non-constant sized arrays as size zero, but track the alignment.
1974  uint64_t Size = 0;
1975  if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
1976  Size = CAT->getSize().getZExtValue();
1977 
1978  TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType());
1979  assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
1980  "Overflow in array type bit size evaluation");
1981  Width = EltInfo.Width * Size;
1982  Align = EltInfo.Align;
1983  AlignRequirement = EltInfo.AlignRequirement;
1984  if (!getTargetInfo().getCXXABI().isMicrosoft() ||
1985  getTargetInfo().getPointerWidth(0) == 64)
1986  Width = llvm::alignTo(Width, Align);
1987  break;
1988  }
1989 
1990  case Type::ExtVector:
1991  case Type::Vector: {
1992  const auto *VT = cast<VectorType>(T);
1993  TypeInfo EltInfo = getTypeInfo(VT->getElementType());
1994  Width = VT->isExtVectorBoolType() ? VT->getNumElements()
1995  : EltInfo.Width * VT->getNumElements();
1996  // Enforce at least byte alignment.
1997  Align = std::max<unsigned>(8, Width);
1998 
1999  // If the alignment is not a power of 2, round up to the next power of 2.
2000  // This happens for non-power-of-2 length vectors.
2001  if (Align & (Align-1)) {
2002  Align = llvm::NextPowerOf2(Align);
2003  Width = llvm::alignTo(Width, Align);
2004  }
2005  // Adjust the alignment based on the target max.
2006  uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
2007  if (TargetVectorAlign && TargetVectorAlign < Align)
2008  Align = TargetVectorAlign;
2009  if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector)
2010  // Adjust the alignment for fixed-length SVE vectors. This is important
2011  // for non-power-of-2 vector lengths.
2012  Align = 128;
2013  else if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
2014  // Adjust the alignment for fixed-length SVE predicates.
2015  Align = 16;
2016  break;
2017  }
2018 
2019  case Type::ConstantMatrix: {
2020  const auto *MT = cast<ConstantMatrixType>(T);
2021  TypeInfo ElementInfo = getTypeInfo(MT->getElementType());
2022  // The internal layout of a matrix value is implementation defined.
2023  // Initially be ABI compatible with arrays with respect to alignment and
2024  // size.
2025  Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
2026  Align = ElementInfo.Align;
2027  break;
2028  }
2029 
2030  case Type::Builtin:
2031  switch (cast<BuiltinType>(T)->getKind()) {
2032  default: llvm_unreachable("Unknown builtin type!");
2033  case BuiltinType::Void:
2034  // GCC extension: alignof(void) = 8 bits.
2035  Width = 0;
2036  Align = 8;
2037  break;
2038  case BuiltinType::Bool:
2039  Width = Target->getBoolWidth();
2040  Align = Target->getBoolAlign();
2041  break;
2042  case BuiltinType::Char_S:
2043  case BuiltinType::Char_U:
2044  case BuiltinType::UChar:
2045  case BuiltinType::SChar:
2046  case BuiltinType::Char8:
2047  Width = Target->getCharWidth();
2048  Align = Target->getCharAlign();
2049  break;
2050  case BuiltinType::WChar_S:
2051  case BuiltinType::WChar_U:
2052  Width = Target->getWCharWidth();
2053  Align = Target->getWCharAlign();
2054  break;
2055  case BuiltinType::Char16:
2056  Width = Target->getChar16Width();
2057  Align = Target->getChar16Align();
2058  break;
2059  case BuiltinType::Char32:
2060  Width = Target->getChar32Width();
2061  Align = Target->getChar32Align();
2062  break;
2063  case BuiltinType::UShort:
2064  case BuiltinType::Short:
2065  Width = Target->getShortWidth();
2066  Align = Target->getShortAlign();
2067  break;
2068  case BuiltinType::UInt:
2069  case BuiltinType::Int:
2070  Width = Target->getIntWidth();
2071  Align = Target->getIntAlign();
2072  break;
2073  case BuiltinType::ULong:
2074  case BuiltinType::Long:
2075  Width = Target->getLongWidth();
2076  Align = Target->getLongAlign();
2077  break;
2078  case BuiltinType::ULongLong:
2079  case BuiltinType::LongLong:
2080  Width = Target->getLongLongWidth();
2081  Align = Target->getLongLongAlign();
2082  break;
2083  case BuiltinType::Int128:
2084  case BuiltinType::UInt128:
2085  Width = 128;
2086  Align = Target->getInt128Align();
2087  break;
2088  case BuiltinType::ShortAccum:
2089  case BuiltinType::UShortAccum:
2090  case BuiltinType::SatShortAccum:
2091  case BuiltinType::SatUShortAccum:
2092  Width = Target->getShortAccumWidth();
2093  Align = Target->getShortAccumAlign();
2094  break;
2095  case BuiltinType::Accum:
2096  case BuiltinType::UAccum:
2097  case BuiltinType::SatAccum:
2098  case BuiltinType::SatUAccum:
2099  Width = Target->getAccumWidth();
2100  Align = Target->getAccumAlign();
2101  break;
2102  case BuiltinType::LongAccum:
2103  case BuiltinType::ULongAccum:
2104  case BuiltinType::SatLongAccum:
2105  case BuiltinType::SatULongAccum:
2106  Width = Target->getLongAccumWidth();
2107  Align = Target->getLongAccumAlign();
2108  break;
2109  case BuiltinType::ShortFract:
2110  case BuiltinType::UShortFract:
2111  case BuiltinType::SatShortFract:
2112  case BuiltinType::SatUShortFract:
2113  Width = Target->getShortFractWidth();
2114  Align = Target->getShortFractAlign();
2115  break;
2116  case BuiltinType::Fract:
2117  case BuiltinType::UFract:
2118  case BuiltinType::SatFract:
2119  case BuiltinType::SatUFract:
2120  Width = Target->getFractWidth();
2121  Align = Target->getFractAlign();
2122  break;
2123  case BuiltinType::LongFract:
2124  case BuiltinType::ULongFract:
2125  case BuiltinType::SatLongFract:
2126  case BuiltinType::SatULongFract:
2127  Width = Target->getLongFractWidth();
2128  Align = Target->getLongFractAlign();
2129  break;
2130  case BuiltinType::BFloat16:
2131  if (Target->hasBFloat16Type()) {
2132  Width = Target->getBFloat16Width();
2133  Align = Target->getBFloat16Align();
2134  }
2135  break;
2136  case BuiltinType::Float16:
2137  case BuiltinType::Half:
2138  if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
2139  !getLangOpts().OpenMPIsDevice) {
2140  Width = Target->getHalfWidth();
2141  Align = Target->getHalfAlign();
2142  } else {
2143  assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
2144  "Expected OpenMP device compilation.");
2145  Width = AuxTarget->getHalfWidth();
2146  Align = AuxTarget->getHalfAlign();
2147  }
2148  break;
2149  case BuiltinType::Float:
2150  Width = Target->getFloatWidth();
2151  Align = Target->getFloatAlign();
2152  break;
2153  case BuiltinType::Double:
2154  Width = Target->getDoubleWidth();
2155  Align = Target->getDoubleAlign();
2156  break;
2157  case BuiltinType::Ibm128:
2158  Width = Target->getIbm128Width();
2159  Align = Target->getIbm128Align();
2160  break;
2161  case BuiltinType::LongDouble:
2162  if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
2163  (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
2164  Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
2165  Width = AuxTarget->getLongDoubleWidth();
2166  Align = AuxTarget->getLongDoubleAlign();
2167  } else {
2168  Width = Target->getLongDoubleWidth();
2169  Align = Target->getLongDoubleAlign();
2170  }
2171  break;
2172  case BuiltinType::Float128:
2173  if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
2174  !getLangOpts().OpenMPIsDevice) {
2175  Width = Target->getFloat128Width();
2176  Align = Target->getFloat128Align();
2177  } else {
2178  assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
2179  "Expected OpenMP device compilation.");
2180  Width = AuxTarget->getFloat128Width();
2181  Align = AuxTarget->getFloat128Align();
2182  }
2183  break;
2184  case BuiltinType::NullPtr:
2185  Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
2186  Align = Target->getPointerAlign(0); // == sizeof(void*)
2187  break;
2188  case BuiltinType::ObjCId:
2189  case BuiltinType::ObjCClass:
2190  case BuiltinType::ObjCSel:
2191  Width = Target->getPointerWidth(0);
2192  Align = Target->getPointerAlign(0);
2193  break;
2194  case BuiltinType::OCLSampler:
2195  case BuiltinType::OCLEvent:
2196  case BuiltinType::OCLClkEvent:
2197  case BuiltinType::OCLQueue:
2198  case BuiltinType::OCLReserveID:
2199 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2200  case BuiltinType::Id:
2201 #include "clang/Basic/OpenCLImageTypes.def"
2202 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
2203  case BuiltinType::Id:
2204 #include "clang/Basic/OpenCLExtensionTypes.def"
2205  AS = getTargetAddressSpace(
2207  Width = Target->getPointerWidth(AS);
2208  Align = Target->getPointerAlign(AS);
2209  break;
2210  // The SVE types are effectively target-specific. The length of an
2211  // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple
2212  // of 128 bits. There is one predicate bit for each vector byte, so the
2213  // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits.
2214  //
2215  // Because the length is only known at runtime, we use a dummy value
2216  // of 0 for the static length. The alignment values are those defined
2217  // by the Procedure Call Standard for the Arm Architecture.
2218 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
2219  IsSigned, IsFP, IsBF) \
2220  case BuiltinType::Id: \
2221  Width = 0; \
2222  Align = 128; \
2223  break;
2224 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
2225  case BuiltinType::Id: \
2226  Width = 0; \
2227  Align = 16; \
2228  break;
2229 #include "clang/Basic/AArch64SVEACLETypes.def"
2230 #define PPC_VECTOR_TYPE(Name, Id, Size) \
2231  case BuiltinType::Id: \
2232  Width = Size; \
2233  Align = Size; \
2234  break;
2235 #include "clang/Basic/PPCTypes.def"
2236 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \
2237  IsFP) \
2238  case BuiltinType::Id: \
2239  Width = 0; \
2240  Align = ElBits; \
2241  break;
2242 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \
2243  case BuiltinType::Id: \
2244  Width = 0; \
2245  Align = 8; \
2246  break;
2247 #include "clang/Basic/RISCVVTypes.def"
2248  }
2249  break;
2250  case Type::ObjCObjectPointer:
2251  Width = Target->getPointerWidth(0);
2252  Align = Target->getPointerAlign(0);
2253  break;
2254  case Type::BlockPointer:
2255  AS = getTargetAddressSpace(cast<BlockPointerType>(T)->getPointeeType());
2256  Width = Target->getPointerWidth(AS);
2257  Align = Target->getPointerAlign(AS);
2258  break;
2259  case Type::LValueReference:
2260  case Type::RValueReference:
2261  // alignof and sizeof should never enter this code path here, so we go
2262  // the pointer route.
2263  AS = getTargetAddressSpace(cast<ReferenceType>(T)->getPointeeType());
2264  Width = Target->getPointerWidth(AS);
2265  Align = Target->getPointerAlign(AS);
2266  break;
2267  case Type::Pointer:
2268  AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType());
2269  Width = Target->getPointerWidth(AS);
2270  Align = Target->getPointerAlign(AS);
2271  break;
2272  case Type::MemberPointer: {
2273  const auto *MPT = cast<MemberPointerType>(T);
2274  CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
2275  Width = MPI.Width;
2276  Align = MPI.Align;
2277  break;
2278  }
2279  case Type::Complex: {
2280  // Complex types have the same alignment as their elements, but twice the
2281  // size.
2282  TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
2283  Width = EltInfo.Width * 2;
2284  Align = EltInfo.Align;
2285  break;
2286  }
2287  case Type::ObjCObject:
2288  return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
2289  case Type::Adjusted:
2290  case Type::Decayed:
2291  return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
2292  case Type::ObjCInterface: {
2293  const auto *ObjCI = cast<ObjCInterfaceType>(T);
2294  if (ObjCI->getDecl()->isInvalidDecl()) {
2295  Width = 8;
2296  Align = 8;
2297  break;
2298  }
2299  const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2300  Width = toBits(Layout.getSize());
2301  Align = toBits(Layout.getAlignment());
2302  break;
2303  }
2304  case Type::BitInt: {
2305  const auto *EIT = cast<BitIntType>(T);
2306  Align =
2307  std::min(static_cast<unsigned>(std::max(
2308  getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))),
2309  Target->getLongLongAlign());
2310  Width = llvm::alignTo(EIT->getNumBits(), Align);
2311  break;
2312  }
2313  case Type::Record:
2314  case Type::Enum: {
2315  const auto *TT = cast<TagType>(T);
2316 
2317  if (TT->getDecl()->isInvalidDecl()) {
2318  Width = 8;
2319  Align = 8;
2320  break;
2321  }
2322 
2323  if (const auto *ET = dyn_cast<EnumType>(TT)) {
2324  const EnumDecl *ED = ET->getDecl();
2325  TypeInfo Info =
2327  if (unsigned AttrAlign = ED->getMaxAlignment()) {
2328  Info.Align = AttrAlign;
2330  }
2331  return Info;
2332  }
2333 
2334  const auto *RT = cast<RecordType>(TT);
2335  const RecordDecl *RD = RT->getDecl();
2336  const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2337  Width = toBits(Layout.getSize());
2338  Align = toBits(Layout.getAlignment());
2339  AlignRequirement = RD->hasAttr<AlignedAttr>()
2342  break;
2343  }
2344 
2345  case Type::SubstTemplateTypeParm:
2346  return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
2347  getReplacementType().getTypePtr());
2348 
2349  case Type::Auto:
2350  case Type::DeducedTemplateSpecialization: {
2351  const auto *A = cast<DeducedType>(T);
2352  assert(!A->getDeducedType().isNull() &&
2353  "cannot request the size of an undeduced or dependent auto type");
2354  return getTypeInfo(A->getDeducedType().getTypePtr());
2355  }
2356 
2357  case Type::Paren:
2358  return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
2359 
2360  case Type::MacroQualified:
2361  return getTypeInfo(
2362  cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr());
2363 
2364  case Type::ObjCTypeParam:
2365  return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());
2366 
2367  case Type::Using:
2368  return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr());
2369 
2370  case Type::Typedef: {
2371  const auto *TT = cast<TypedefType>(T);
2372  TypeInfo Info = getTypeInfo(TT->desugar().getTypePtr());
2373  // If the typedef has an aligned attribute on it, it overrides any computed
2374  // alignment we have. This violates the GCC documentation (which says that
2375  // attribute(aligned) can only round up) but matches its implementation.
2376  if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
2377  Align = AttrAlign;
2378  AlignRequirement = AlignRequirementKind::RequiredByTypedef;
2379  } else {
2380  Align = Info.Align;
2381  AlignRequirement = Info.AlignRequirement;
2382  }
2383  Width = Info.Width;
2384  break;
2385  }
2386 
2387  case Type::Elaborated:
2388  return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
2389 
2390  case Type::Attributed:
2391  return getTypeInfo(
2392  cast<AttributedType>(T)->getEquivalentType().getTypePtr());
2393 
2394  case Type::BTFTagAttributed:
2395  return getTypeInfo(
2396  cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());
2397 
2398  case Type::Atomic: {
2399  // Start with the base type information.
2400  TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
2401  Width = Info.Width;
2402  Align = Info.Align;
2403 
2404  if (!Width) {
2405  // An otherwise zero-sized type should still generate an
2406  // atomic operation.
2407  Width = Target->getCharWidth();
2408  assert(Align);
2409  } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2410  // If the size of the type doesn't exceed the platform's max
2411  // atomic promotion width, make the size and alignment more
2412  // favorable to atomic operations:
2413 
2414  // Round the size up to a power of 2.
2415  if (!llvm::isPowerOf2_64(Width))
2416  Width = llvm::NextPowerOf2(Width);
2417 
2418  // Set the alignment equal to the size.
2419  Align = static_cast<unsigned>(Width);
2420  }
2421  }
2422  break;
2423 
2424  case Type::Pipe:
2427  break;
2428  }
2429 
2430  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2431  return TypeInfo(Width, Align, AlignRequirement);
2432 }
2433 
2434 unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2435  UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
2436  if (I != MemoizedUnadjustedAlign.end())
2437  return I->second;
2438 
2439  unsigned UnadjustedAlign;
2440  if (const auto *RT = T->getAs<RecordType>()) {
2441  const RecordDecl *RD = RT->getDecl();
2442  const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2443  UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2444  } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
2445  const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2446  UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2447  } else {
2448  UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
2449  }
2450 
2451  MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2452  return UnadjustedAlign;
2453 }
2454 
2456  unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
2457  return SimdAlign;
2458 }
2459 
2460 /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2462  return CharUnits::fromQuantity(BitSize / getCharWidth());
2463 }
2464 
2465 /// toBits - Convert a size in characters to a size in bits.
2467  return CharSize.getQuantity() * getCharWidth();
2468 }
2469 
2470 /// getTypeSizeInChars - Return the size of the specified type, in characters.
2471 /// This method does not work on incomplete types.
2473  return getTypeInfoInChars(T).Width;
2474 }
2476  return getTypeInfoInChars(T).Width;
2477 }
2478 
2479 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2480 /// characters. This method does not work on incomplete types.
2482  return toCharUnitsFromBits(getTypeAlign(T));
2483 }
2485  return toCharUnitsFromBits(getTypeAlign(T));
2486 }
2487 
2488 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
2489 /// type, in characters, before alignment adjustments. This method does
2490 /// not work on incomplete types.
2493 }
2496 }
2497 
2498 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified
2499 /// type for the current target in bits. This can be different than the ABI
2500 /// alignment in cases where it is beneficial for performance or backwards
2501 /// compatibility preserving to overalign a data type. (Note: despite the name,
2502 /// the preferred alignment is ABI-impacting, and not an optimization.)
2503 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
2504  TypeInfo TI = getTypeInfo(T);
2505  unsigned ABIAlign = TI.Align;
2506 
2507  T = T->getBaseElementTypeUnsafe();
2508 
2509  // The preferred alignment of member pointers is that of a pointer.
2510  if (T->isMemberPointerType())
2511  return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
2512 
2513  if (!Target->allowsLargerPreferedTypeAlignment())
2514  return ABIAlign;
2515 
2516  if (const auto *RT = T->getAs<RecordType>()) {
2517  const RecordDecl *RD = RT->getDecl();
2518 
2519  // When used as part of a typedef, or together with a 'packed' attribute,
2520  // the 'aligned' attribute can be used to decrease alignment. Note that the
2521  // 'packed' case is already taken into consideration when computing the
2522  // alignment, we only need to handle the typedef case here.
2524  RD->isInvalidDecl())
2525  return ABIAlign;
2526 
2527  unsigned PreferredAlign = static_cast<unsigned>(
2528  toBits(getASTRecordLayout(RD).PreferredAlignment));
2529  assert(PreferredAlign >= ABIAlign &&
2530  "PreferredAlign should be at least as large as ABIAlign.");
2531  return PreferredAlign;
2532  }
2533 
2534  // Double (and, for targets supporting AIX `power` alignment, long double) and
2535  // long long should be naturally aligned (despite requiring less alignment) if
2536  // possible.
2537  if (const auto *CT = T->getAs<ComplexType>())
2538  T = CT->getElementType().getTypePtr();
2539  if (const auto *ET = T->getAs<EnumType>())
2540  T = ET->getDecl()->getIntegerType().getTypePtr();
2541  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
2542  T->isSpecificBuiltinType(BuiltinType::LongLong) ||
2543  T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2544  (T->isSpecificBuiltinType(BuiltinType::LongDouble) &&
2545  Target->defaultsToAIXPowerAlignment()))
2546  // Don't increase the alignment if an alignment attribute was specified on a
2547  // typedef declaration.
2548  if (!TI.isAlignRequired())
2549  return std::max(ABIAlign, (unsigned)getTypeSize(T));
2550 
2551  return ABIAlign;
2552 }
2553 
2554 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2555 /// for __attribute__((aligned)) on this target, to be used if no alignment
2556 /// value is specified.
2559 }
2560 
2561 /// getAlignOfGlobalVar - Return the alignment in bits that should be given
2562 /// to a global variable of the specified type.
2564  uint64_t TypeSize = getTypeSize(T.getTypePtr());
2565  return std::max(getPreferredTypeAlign(T),
2566  getTargetInfo().getMinGlobalAlign(TypeSize));
2567 }
2568 
2569 /// getAlignOfGlobalVarInChars - Return the alignment in characters that
2570 /// should be given to a global variable of the specified type.
2573 }
2574 
2577  const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
2578  while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2579  Offset += Layout->getBaseClassOffset(Base);
2580  Layout = &getASTRecordLayout(Base);
2581  }
2582  return Offset;
2583 }
2584 
2586  const ValueDecl *MPD = MP.getMemberPointerDecl();
2589  bool DerivedMember = MP.isMemberPointerToDerivedMember();
2590  const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
2591  for (unsigned I = 0, N = Path.size(); I != N; ++I) {
2592  const CXXRecordDecl *Base = RD;
2593  const CXXRecordDecl *Derived = Path[I];
2594  if (DerivedMember)
2595  std::swap(Base, Derived);
2597  RD = Path[I];
2598  }
2599  if (DerivedMember)
2601  return ThisAdjustment;
2602 }
2603 
2604 /// DeepCollectObjCIvars -
2605 /// This routine first collects all declared, but not synthesized, ivars in
2606 /// super class and then collects all ivars, including those synthesized for
2607 /// current class. This routine is used for implementation of current class
2608 /// when all ivars, declared and synthesized are known.
2610  bool leafClass,
2611  SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
2612  if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2613  DeepCollectObjCIvars(SuperClass, false, Ivars);
2614  if (!leafClass) {
2615  llvm::append_range(Ivars, OI->ivars());
2616  } else {
2617  auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2618  for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2619  Iv= Iv->getNextIvar())
2620  Ivars.push_back(Iv);
2621  }
2622 }
2623 
2624 /// CollectInheritedProtocols - Collect all protocols in current class and
2625 /// those inherited by it.
2628  if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
2629  // We can use protocol_iterator here instead of
2630  // all_referenced_protocol_iterator since we are walking all categories.
2631  for (auto *Proto : OI->all_referenced_protocols()) {
2632  CollectInheritedProtocols(Proto, Protocols);
2633  }
2634 
2635  // Categories of this Interface.
2636  for (const auto *Cat : OI->visible_categories())
2637  CollectInheritedProtocols(Cat, Protocols);
2638 
2639  if (ObjCInterfaceDecl *SD = OI->getSuperClass())
2640  while (SD) {
2641  CollectInheritedProtocols(SD, Protocols);
2642  SD = SD->getSuperClass();
2643  }
2644  } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
2645  for (auto *Proto : OC->protocols()) {
2646  CollectInheritedProtocols(Proto, Protocols);
2647  }
2648  } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
2649  // Insert the protocol.
2650  if (!Protocols.insert(
2651  const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
2652  return;
2653 
2654  for (auto *Proto : OP->protocols())
2655  CollectInheritedProtocols(Proto, Protocols);
2656  }
2657 }
2658 
2660  const RecordDecl *RD) {
2661  assert(RD->isUnion() && "Must be union type");
2662  CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());
2663 
2664  for (const auto *Field : RD->fields()) {
2665  if (!Context.hasUniqueObjectRepresentations(Field->getType()))
2666  return false;
2667  CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType());
2668  if (FieldSize != UnionSize)
2669  return false;
2670  }
2671  return !RD->field_empty();
2672 }
2673 
2675  const ASTContext &Context,
2676  const clang::ASTRecordLayout & /*Layout*/) {
2677  return Context.getFieldOffset(Field);
2678 }
2679 
2681  const ASTContext &Context,
2682  const clang::ASTRecordLayout &Layout) {
2683  return Context.toBits(Layout.getBaseClassOffset(RD));
2684 }
2685 
2688  const RecordDecl *RD);
2689 
2691 getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context) {
2692  if (Field->getType()->isRecordType()) {
2693  const RecordDecl *RD = Field->getType()->getAsRecordDecl();
2694  if (!RD->isUnion())
2695  return structHasUniqueObjectRepresentations(Context, RD);
2696  }
2697 
2698  // A _BitInt type may not be unique if it has padding bits
2699  // but if it is a bitfield the padding bits are not used.
2700  bool IsBitIntType = Field->getType()->isBitIntType();
2701  if (!Field->getType()->isReferenceType() && !IsBitIntType &&
2702  !Context.hasUniqueObjectRepresentations(Field->getType()))
2703  return llvm::None;
2704 
2705  int64_t FieldSizeInBits =
2706  Context.toBits(Context.getTypeSizeInChars(Field->getType()));
2707  if (Field->isBitField()) {
2708  int64_t BitfieldSize = Field->getBitWidthValue(Context);
2709  if (IsBitIntType) {
2710  if ((unsigned)BitfieldSize >
2711  cast<BitIntType>(Field->getType())->getNumBits())
2712  return llvm::None;
2713  } else if (BitfieldSize > FieldSizeInBits) {
2714  return llvm::None;
2715  }
2716  FieldSizeInBits = BitfieldSize;
2717  } else if (IsBitIntType &&
2718  !Context.hasUniqueObjectRepresentations(Field->getType())) {
2719  return llvm::None;
2720  }
2721  return FieldSizeInBits;
2722 }
2723 
2726  return structHasUniqueObjectRepresentations(Context, RD);
2727 }
2728 
2729 template <typename RangeT>
2731  const RangeT &Subobjects, int64_t CurOffsetInBits,
2732  const ASTContext &Context, const clang::ASTRecordLayout &Layout) {
2733  for (const auto *Subobject : Subobjects) {
2734  llvm::Optional<int64_t> SizeInBits =
2735  getSubobjectSizeInBits(Subobject, Context);
2736  if (!SizeInBits)
2737  return llvm::None;
2738  if (*SizeInBits != 0) {
2739  int64_t Offset = getSubobjectOffset(Subobject, Context, Layout);
2740  if (Offset != CurOffsetInBits)
2741  return llvm::None;
2742  CurOffsetInBits += *SizeInBits;
2743  }
2744  }
2745  return CurOffsetInBits;
2746 }
2747 
2750  const RecordDecl *RD) {
2751  assert(!RD->isUnion() && "Must be struct/class type");
2752  const auto &Layout = Context.getASTRecordLayout(RD);
2753 
2754  int64_t CurOffsetInBits = 0;
2755  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) {
2756  if (ClassDecl->isDynamicClass())
2757  return llvm::None;
2758 
2760  for (const auto &Base : ClassDecl->bases()) {
2761  // Empty types can be inherited from, and non-empty types can potentially
2762  // have tail padding, so just make sure there isn't an error.
2763  Bases.emplace_back(Base.getType()->getAsCXXRecordDecl());
2764  }
2765 
2766  llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
2767  return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R);
2768  });
2769 
2770  llvm::Optional<int64_t> OffsetAfterBases =
2771  structSubobjectsHaveUniqueObjectRepresentations(Bases, CurOffsetInBits,
2772  Context, Layout);
2773  if (!OffsetAfterBases)
2774  return llvm::None;
2775  CurOffsetInBits = *OffsetAfterBases;
2776  }
2777 
2778  llvm::Optional<int64_t> OffsetAfterFields =
2780  RD->fields(), CurOffsetInBits, Context, Layout);
2781  if (!OffsetAfterFields)
2782  return llvm::None;
2783  CurOffsetInBits = *OffsetAfterFields;
2784 
2785  return CurOffsetInBits;
2786 }
2787 
2789  // C++17 [meta.unary.prop]:
2790  // The predicate condition for a template specialization
2791  // has_unique_object_representations<T> shall be
2792  // satisfied if and only if:
2793  // (9.1) - T is trivially copyable, and
2794  // (9.2) - any two objects of type T with the same value have the same
2795  // object representation, where two objects
2796  // of array or non-union class type are considered to have the same value
2797  // if their respective sequences of
2798  // direct subobjects have the same values, and two objects of union type
2799  // are considered to have the same
2800  // value if they have the same active member and the corresponding members
2801  // have the same value.
2802  // The set of scalar types for which this condition holds is
2803  // implementation-defined. [ Note: If a type has padding
2804  // bits, the condition does not hold; otherwise, the condition holds true
2805  // for unsigned integral types. -- end note ]
2806  assert(!Ty.isNull() && "Null QualType sent to unique object rep check");
2807 
2808  // Arrays are unique only if their element type is unique.
2809  if (Ty->isArrayType())
2811 
2812  // (9.1) - T is trivially copyable...
2813  if (!Ty.isTriviallyCopyableType(*this))
2814  return false;
2815 
2816  // All integrals and enums are unique.
2817  if (Ty->isIntegralOrEnumerationType()) {
2818  // Except _BitInt types that have padding bits.
2819  if (const auto *BIT = dyn_cast<BitIntType>(Ty))
2820  return getTypeSize(BIT) == BIT->getNumBits();
2821 
2822  return true;
2823  }
2824 
2825  // All other pointers are unique.
2826  if (Ty->isPointerType())
2827  return true;
2828 
2829  if (Ty->isMemberPointerType()) {
2830  const auto *MPT = Ty->getAs<MemberPointerType>();
2831  return !ABI->getMemberPointerInfo(MPT).HasPadding;
2832  }
2833 
2834  if (Ty->isRecordType()) {
2835  const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl();
2836 
2837  if (Record->isInvalidDecl())
2838  return false;
2839 
2840  if (Record->isUnion())
2841  return unionHasUniqueObjectRepresentations(*this, Record);
2842 
2843  Optional<int64_t> StructSize =
2844  structHasUniqueObjectRepresentations(*this, Record);
2845 
2846  return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty));
2847  }
2848 
2849  // FIXME: More cases to handle here (list by rsmith):
2850  // vectors (careful about, eg, vector of 3 foo)
2851  // _Complex int and friends
2852  // _Atomic T
2853  // Obj-C block pointers
2854  // Obj-C object pointers
2855  // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
2856  // clk_event_t, queue_t, reserve_id_t)
2857  // There're also Obj-C class types and the Obj-C selector type, but I think it
2858  // makes sense for those to return false here.
2859 
2860  return false;
2861 }
2862 
2864  unsigned count = 0;
2865  // Count ivars declared in class extension.
2866  for (const auto *Ext : OI->known_extensions())
2867  count += Ext->ivar_size();
2868 
2869  // Count ivar defined in this class's implementation. This
2870  // includes synthesized ivars.
2871  if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
2872  count += ImplDecl->ivar_size();
2873 
2874  return count;
2875 }
2876 
2878  if (!E)
2879  return false;
2880 
2881  // nullptr_t is always treated as null.
2882  if (E->getType()->isNullPtrType()) return true;
2883 
2884  if (E->getType()->isAnyPointerType() &&
2887  return true;
2888 
2889  // Unfortunately, __null has type 'int'.
2890  if (isa<GNUNullExpr>(E)) return true;
2891 
2892  return false;
2893 }
2894 
2895 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none
2896 /// exists.
2898  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2899  I = ObjCImpls.find(D);
2900  if (I != ObjCImpls.end())
2901  return cast<ObjCImplementationDecl>(I->second);
2902  return nullptr;
2903 }
2904 
2905 /// Get the implementation of ObjCCategoryDecl, or nullptr if none
2906 /// exists.
2908  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2909  I = ObjCImpls.find(D);
2910  if (I != ObjCImpls.end())
2911  return cast<ObjCCategoryImplDecl>(I->second);
2912  return nullptr;
2913 }
2914 
2915 /// Set the implementation of ObjCInterfaceDecl.
2917  ObjCImplementationDecl *ImplD) {
2918  assert(IFaceD && ImplD && "Passed null params");
2919  ObjCImpls[IFaceD] = ImplD;
2920 }
2921 
2922 /// Set the implementation of ObjCCategoryDecl.
2924  ObjCCategoryImplDecl *ImplD) {
2925  assert(CatD && ImplD && "Passed null params");
2926  ObjCImpls[CatD] = ImplD;
2927 }
2928 
2929 const ObjCMethodDecl *
2931  return ObjCMethodRedecls.lookup(MD);
2932 }
2933 
2935  const ObjCMethodDecl *Redecl) {
2936  assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
2937  ObjCMethodRedecls[MD] = Redecl;
2938 }
2939 
2941  const NamedDecl *ND) const {
2942  if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
2943  return ID;
2944  if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
2945  return CD->getClassInterface();
2946  if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
2947  return IMD->getClassInterface();
2948 
2949  return nullptr;
2950 }
2951 
2952 /// Get the copy initialization expression of VarDecl, or nullptr if
2953 /// none exists.
2955  assert(VD && "Passed null params");
2956  assert(VD->hasAttr<BlocksAttr>() &&
2957  "getBlockVarCopyInits - not __block var");
2958  auto I = BlockVarCopyInits.find(VD);
2959  if (I != BlockVarCopyInits.end())
2960  return I->second;
2961  return {nullptr, false};
2962 }
2963 
2964 /// Set the copy initialization expression of a block var decl.
2966  bool CanThrow) {
2967  assert(VD && CopyExpr && "Passed null params");
2968  assert(VD->hasAttr<BlocksAttr>() &&
2969  "setBlockVarCopyInits - not __block var");
2970  BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
2971 }
2972 
2974  unsigned DataSize) const {
2975  if (!DataSize)
2976  DataSize = TypeLoc::getFullDataSizeForType(T);
2977  else
2978  assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
2979  "incorrect data size provided to CreateTypeSourceInfo!");
2980 
2981  auto *TInfo =
2982  (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
2983  new (TInfo) TypeSourceInfo(T);
2984  return TInfo;
2985 }
2986 
2988  SourceLocation L) const {
2990  DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
2991  return DI;
2992 }
2993 
2994 const ASTRecordLayout &
2996  return getObjCLayout(D, nullptr);
2997 }
2998 
2999 const ASTRecordLayout &
3001  const ObjCImplementationDecl *D) const {
3002  return getObjCLayout(D->getClassInterface(), D);
3003 }
3004 
3005 //===----------------------------------------------------------------------===//
3006 // Type creation/memoization methods
3007 //===----------------------------------------------------------------------===//
3008 
3009 QualType
3010 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
3011  unsigned fastQuals = quals.getFastQualifiers();
3012  quals.removeFastQualifiers();
3013 
3014  // Check if we've already instantiated this type.
3015  llvm::FoldingSetNodeID ID;
3016  ExtQuals::Profile(ID, baseType, quals);
3017  void *insertPos = nullptr;
3018  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
3019  assert(eq->getQualifiers() == quals);
3020  return QualType(eq, fastQuals);
3021  }
3022 
3023  // If the base type is not canonical, make the appropriate canonical type.
3024  QualType canon;
3025  if (!baseType->isCanonicalUnqualified()) {
3026  SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
3027  canonSplit.Quals.addConsistentQualifiers(quals);
3028  canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);
3029 
3030  // Re-find the insert position.
3031  (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
3032  }
3033 
3034  auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
3035  ExtQualNodes.InsertNode(eq, insertPos);
3036  return QualType(eq, fastQuals);
3037 }
3038 
3040  LangAS AddressSpace) const {
3041  QualType CanT = getCanonicalType(T);
3042  if (CanT.getAddressSpace() == AddressSpace)
3043  return T;
3044 
3045  // If we are composing extended qualifiers together, merge together
3046  // into one ExtQuals node.
3047  QualifierCollector Quals;
3048  const Type *TypeNode = Quals.strip(T);
3049 
3050  // If this type already has an address space specified, it cannot get
3051  // another one.
3052  assert(!Quals.hasAddressSpace() &&
3053  "Type cannot be in multiple addr spaces!");
3054  Quals.addAddressSpace(AddressSpace);
3055 
3056  return getExtQualType(TypeNode, Quals);
3057 }
3058 
3060  // If the type is not qualified with an address space, just return it
3061  // immediately.
3062  if (!T.hasAddressSpace())
3063  return T;
3064 
3065  // If we are composing extended qualifiers together, merge together
3066  // into one ExtQuals node.
3067  QualifierCollector Quals;
3068  const Type *TypeNode;
3069 
3070  while (T.hasAddressSpace()) {
3071  TypeNode = Quals.strip(T);
3072 
3073  // If the type no longer has an address space after stripping qualifiers,
3074  // jump out.
3075  if (!QualType(TypeNode, 0).hasAddressSpace())
3076  break;
3077 
3078  // There might be sugar in the way. Strip it and try again.
3079  T = T.getSingleStepDesugaredType(*this);
3080  }
3081 
3082  Quals.removeAddressSpace();
3083 
3084  // Removal of the address space can mean there are no longer any
3085  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
3086  // or required.
3087  if (Quals.hasNonFastQualifiers())
3088  return getExtQualType(TypeNode, Quals);
3089  else
3090  return QualType(TypeNode, Quals.getFastQualifiers());
3091 }
3092 
3094  Qualifiers::GC GCAttr) const {
3095  QualType CanT = getCanonicalType(T);
3096  if (CanT.getObjCGCAttr() == GCAttr)
3097  return T;
3098 
3099  if (const auto *ptr = T->getAs<PointerType>()) {
3100  QualType Pointee = ptr->getPointeeType();
3101  if (Pointee->isAnyPointerType()) {
3102  QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
3103  return getPointerType(ResultType);
3104  }
3105  }
3106 
3107  // If we are composing extended qualifiers together, merge together
3108  // into one ExtQuals node.
3109  QualifierCollector Quals;
3110  const Type *TypeNode = Quals.strip(T);
3111 
3112  // If this type already has an ObjCGC specified, it cannot get
3113  // another one.
3114  assert(!Quals.hasObjCGCAttr() &&
3115  "Type cannot have multiple ObjCGCs!");
3116  Quals.addObjCGCAttr(GCAttr);
3117 
3118  return getExtQualType(TypeNode, Quals);
3119 }
3120 
3122  if (const PointerType *Ptr = T->getAs<PointerType>()) {
3123  QualType Pointee = Ptr->getPointeeType();
3124  if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) {
3125  return getPointerType(removeAddrSpaceQualType(Pointee));
3126  }
3127  }
3128  return T;
3129 }
3130 
3132  FunctionType::ExtInfo Info) {
3133  if (T->getExtInfo() == Info)
3134  return T;
3135 
3136  QualType Result;
3137  if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
3138  Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
3139  } else {
3140  const auto *FPT = cast<FunctionProtoType>(T);
3141  FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
3142  EPI.ExtInfo = Info;
3143  Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
3144  }
3145 
3146  return cast<FunctionType>(Result.getTypePtr());
3147 }
3148 
3150  QualType ResultType) {
3151  FD = FD->getMostRecentDecl();
3152  while (true) {
3153  const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
3154  FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
3155  FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
3156  if (FunctionDecl *Next = FD->getPreviousDecl())
3157  FD = Next;
3158  else
3159  break;
3160  }
3162  L->DeducedReturnType(FD, ResultType);
3163 }
3164 
3165 /// Get a function type and produce the equivalent function type with the
3166 /// specified exception specification. Type sugar that can be present on a
3167 /// declaration of a function with an exception specification is permitted
3168 /// and preserved. Other type sugar (for instance, typedefs) is not.
3170  QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const {
3171  // Might have some parens.
3172  if (const auto *PT = dyn_cast<ParenType>(Orig))
3173  return getParenType(
3174  getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI));
3175 
3176  // Might be wrapped in a macro qualified type.
3177  if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig))
3178  return getMacroQualifiedType(
3179  getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI),
3180  MQT->getMacroIdentifier());
3181 
3182  // Might have a calling-convention attribute.
3183  if (const auto *AT = dyn_cast<AttributedType>(Orig))
3184  return getAttributedType(
3185  AT->getAttrKind(),
3186  getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI),
3187  getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI));
3188 
3189  // Anything else must be a function type. Rebuild it with the new exception
3190  // specification.
3191  const auto *Proto = Orig->castAs<FunctionProtoType>();
3192  return getFunctionType(
3193  Proto->getReturnType(), Proto->getParamTypes(),
3194  Proto->getExtProtoInfo().withExceptionSpec(ESI));
3195 }
3196 
3198  QualType U) const {
3199  return hasSameType(T, U) ||
3200  (getLangOpts().CPlusPlus17 &&
3203 }
3204 
3206  if (const auto *Proto = T->getAs<FunctionProtoType>()) {
3207  QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
3208  SmallVector<QualType, 16> Args(Proto->param_types().size());
3209  for (unsigned i = 0, n = Args.size(); i != n; ++i)
3210  Args[i] = removePtrSizeAddrSpace(Proto->param_types()[i]);
3211  return getFunctionType(RetTy, Args, Proto->getExtProtoInfo());
3212  }
3213 
3214  if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) {
3215  QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
3216  return getFunctionNoProtoType(RetTy, Proto->getExtInfo());
3217  }
3218 
3219  return T;
3220 }
3221 
3223  return hasSameType(T, U) ||
3226 }
3227 
3230  bool AsWritten) {
3231  // Update the type.
3232  QualType Updated =
3234  FD->setType(Updated);
3235 
3236  if (!AsWritten)
3237  return;
3238 
3239  // Update the type in the type source information too.
3240  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
3241  // If the type and the type-as-written differ, we may need to update
3242  // the type-as-written too.
3243  if (TSInfo->getType() != FD->getType())
3244  Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);
3245 
3246  // FIXME: When we get proper type location information for exceptions,
3247  // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
3248  // up the TypeSourceInfo;
3249  assert(TypeLoc::getFullDataSizeForType(Updated) ==
3250  TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
3251  "TypeLoc size mismatch from updating exception specification");
3252  TSInfo->overrideType(Updated);
3253  }
3254 }
3255 
3256 /// getComplexType - Return the uniqued reference to the type for a complex
3257 /// number with the specified element type.
3259  // Unique pointers, to guarantee there is only one pointer of a particular
3260  // structure.
3261  llvm::FoldingSetNodeID ID;
3263 
3264  void *InsertPos = nullptr;
3265  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
3266  return QualType(CT, 0);
3267 
3268  // If the pointee type isn't canonical, this won't be a canonical type either,
3269  // so fill in the canonical type field.
3270  QualType Canonical;
3271  if (!T.isCanonical()) {
3272  Canonical = getComplexType(getCanonicalType(T));
3273 
3274  // Get the new insert position for the node we care about.
3275  ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
3276  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3277  }
3278  auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
3279  Types.push_back(New);
3280  ComplexTypes.InsertNode(New, InsertPos);
3281  return QualType(New, 0);
3282 }
3283 
3284 /// getPointerType - Return the uniqued reference to the type for a pointer to
3285 /// the specified type.
3287  // Unique pointers, to guarantee there is only one pointer of a particular
3288  // structure.
3289  llvm::FoldingSetNodeID ID;
3291 
3292  void *InsertPos = nullptr;
3293  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3294  return QualType(PT, 0);
3295 
3296  // If the pointee type isn't canonical, this won't be a canonical type either,
3297  // so fill in the canonical type field.
3298  QualType Canonical;
3299  if (!T.isCanonical()) {
3300  Canonical = getPointerType(getCanonicalType(T));
3301 
3302  // Get the new insert position for the node we care about.
3303  PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3304  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3305  }
3306  auto *New = new (*this, TypeAlignment) PointerType(T, Canonical);
3307  Types.push_back(New);
3308  PointerTypes.InsertNode(New, InsertPos);
3309  return QualType(New, 0);
3310 }
3311 
3313  llvm::FoldingSetNodeID ID;
3314  AdjustedType::Profile(ID, Orig, New);
3315  void *InsertPos = nullptr;
3316  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3317  if (AT)
3318  return QualType(AT, 0);
3319 
3320  QualType Canonical = getCanonicalType(New);
3321 
3322  // Get the new insert position for the node we care about.
3323  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3324  assert(!AT && "Shouldn't be in the map!");
3325 
3326  AT = new (*this, TypeAlignment)
3327  AdjustedType(Type::Adjusted, Orig, New, Canonical);
3328  Types.push_back(AT);
3329  AdjustedTypes.InsertNode(AT, InsertPos);
3330  return QualType(AT, 0);
3331 }
3332 
3334  llvm::FoldingSetNodeID ID;
3335  AdjustedType::Profile(ID, Orig, Decayed);
3336  void *InsertPos = nullptr;
3337  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3338  if (AT)
3339  return QualType(AT, 0);
3340 
3341  QualType Canonical = getCanonicalType(Decayed);
3342 
3343  // Get the new insert position for the node we care about.
3344  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3345  assert(!AT && "Shouldn't be in the map!");
3346 
3347  AT = new (*this, TypeAlignment) DecayedType(Orig, Decayed, Canonical);
3348  Types.push_back(AT);
3349  AdjustedTypes.InsertNode(AT, InsertPos);
3350  return QualType(AT, 0);
3351 }
3352 
3354  assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
3355 
3356  QualType Decayed;
3357 
3358  // C99 6.7.5.3p7:
3359  // A declaration of a parameter as "array of type" shall be
3360  // adjusted to "qualified pointer to type", where the type
3361  // qualifiers (if any) are those specified within the [ and ] of
3362  // the array type derivation.
3363  if (T->isArrayType())
3364  Decayed = getArrayDecayedType(T);
3365 
3366  // C99 6.7.5.3p8:
3367  // A declaration of a parameter as "function returning type"
3368  // shall be adjusted to "pointer to function returning type", as
3369  // in 6.3.2.1.
3370  if (T->isFunctionType())
3371  Decayed = getPointerType(T);
3372 
3373  return getDecayedType(T, Decayed);
3374 }
3375 
3376 /// getBlockPointerType - Return the uniqued reference to the type for
3377 /// a pointer to the specified block.
3379  assert(T->isFunctionType() && "block of function types only");
3380  // Unique pointers, to guarantee there is only one block of a particular
3381  // structure.
3382  llvm::FoldingSetNodeID ID;
3384 
3385  void *InsertPos = nullptr;
3386  if (BlockPointerType *PT =
3387  BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3388  return QualType(PT, 0);
3389 
3390  // If the block pointee type isn't canonical, this won't be a canonical
3391  // type either so fill in the canonical type field.
3392  QualType Canonical;
3393  if (!T.isCanonical()) {
3394  Canonical = getBlockPointerType(getCanonicalType(T));
3395 
3396  // Get the new insert position for the node we care about.
3397  BlockPointerType *NewIP =
3398  BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3399  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3400  }
3401  auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
3402  Types.push_back(New);
3403  BlockPointerTypes.InsertNode(New, InsertPos);
3404  return QualType(New, 0);
3405 }
3406 
3407 /// getLValueReferenceType - Return the uniqued reference to the type for an
3408 /// lvalue reference to the specified type.
3409 QualType
3410 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
3411  assert((!T->isPlaceholderType() ||
3412  T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
3413  "Unresolved placeholder type");
3414 
3415  // Unique pointers, to guarantee there is only one pointer of a particular
3416  // structure.
3417  llvm::FoldingSetNodeID ID;
3418  ReferenceType::Profile(ID, T, SpelledAsLValue);
3419 
3420  void *InsertPos = nullptr;
3421  if (LValueReferenceType *RT =
3422  LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
3423  return QualType(RT, 0);
3424 
3425  const auto *InnerRef = T->getAs<ReferenceType>();
3426 
3427  // If the referencee type isn't canonical, this won't be a canonical type
3428  // either, so fill in the canonical type field.
3429  QualType Canonical;
3430  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
3431  QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
3432  Canonical = getLValueReferenceType(getCanonicalType(PointeeType));
3433 
3434  // Get the new insert position for the node we care about.
3435  LValueReferenceType *NewIP =
3436  LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
3437  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3438  }
3439 
3440  auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
3441  SpelledAsLValue);
3442  Types.push_back(New);
3443  LValueReferenceTypes.InsertNode(New, InsertPos);
3444 
3445  return QualType(New, 0);
3446 }
3447 
3448 /// getRValueReferenceType - Return the uniqued reference to the type for an
3449 /// rvalue reference to the specified type.
3451  assert((!T->isPlaceholderType() ||
3452  T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
3453  "Unresolved placeholder type");
3454 
3455  // Unique pointers, to guarantee there is only one pointer of a particular
3456  // structure.
3457  llvm::FoldingSetNodeID ID;
3458  ReferenceType::Profile(ID, T, false);
3459 
3460  void *InsertPos = nullptr;
3461  if (RValueReferenceType *RT =
3462  RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
3463  return QualType(RT, 0);
3464 
3465  const auto *InnerRef = T->getAs<ReferenceType>();
3466 
3467  // If the referencee type isn't canonical, this won't be a canonical type
3468  // either, so fill in the canonical type field.
3469  QualType Canonical;
3470  if (InnerRef || !T.isCanonical()) {
3471  QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
3472  Canonical = getRValueReferenceType(getCanonicalType(PointeeType));
3473 
3474  // Get the new insert position for the node we care about.
3475  RValueReferenceType *NewIP =
3476  RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
3477  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3478  }
3479 
3480  auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
3481  Types.push_back(New);
3482  RValueReferenceTypes.InsertNode(New, InsertPos);
3483  return QualType(New, 0);
3484 }
3485 
3486 /// getMemberPointerType - Return the uniqued reference to the type for a
3487 /// member pointer to the specified type, in the specified class.
3489  // Unique pointers, to guarantee there is only one pointer of a particular
3490  // structure.
3491  llvm::FoldingSetNodeID ID;
3492  MemberPointerType::Profile(ID, T, Cls);
3493 
3494  void *InsertPos = nullptr;
3495  if (MemberPointerType *PT =
3496  MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3497  return QualType(PT, 0);
3498 
3499  // If the pointee or class type isn't canonical, this won't be a canonical
3500  // type either, so fill in the canonical type field.
3501  QualType Canonical;
3502  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
3504 
3505  // Get the new insert position for the node we care about.
3506  MemberPointerType *NewIP =
3507  MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3508  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3509  }
3510  auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
3511  Types.push_back(New);
3512  MemberPointerTypes.InsertNode(New, InsertPos);
3513  return QualType(New, 0);
3514 }
3515 
3516 /// getConstantArrayType - Return the unique reference to the type for an
3517 /// array of the specified element type.
3519  const llvm::APInt &ArySizeIn,
3520  const Expr *SizeExpr,
3522  unsigned IndexTypeQuals) const {
3523  assert((EltTy->isDependentType() ||
3524  EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
3525  "Constant array of VLAs is illegal!");
3526 
3527  // We only need the size as part of the type if it's instantiation-dependent.
3528  if (SizeExpr && !SizeExpr->isInstantiationDependent())
3529  SizeExpr = nullptr;
3530 
3531  // Convert the array size into a canonical width matching the pointer size for
3532  // the target.
3533  llvm::APInt ArySize(ArySizeIn);
3534  ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());
3535 
3536  llvm::FoldingSetNodeID ID;
3537  ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM,
3538  IndexTypeQuals);
3539 
3540  void *InsertPos = nullptr;
3541  if (ConstantArrayType *ATP =
3542  ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
3543  return QualType(ATP, 0);
3544 
3545  // If the element type isn't canonical or has qualifiers, or the array bound
3546  // is instantiation-dependent, this won't be a canonical type either, so fill
3547  // in the canonical type field.
3548  QualType Canon;
3549  // FIXME: Check below should look for qualifiers behind sugar.
3550  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
3551  SplitQualType canonSplit = getCanonicalType(EltTy).split();
3552  Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr,
3553  ASM, IndexTypeQuals);
3554  Canon = getQualifiedType(Canon, canonSplit.Quals);
3555 
3556  // Get the new insert position for the node we care about.
3557  ConstantArrayType *NewIP =
3558  ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
3559  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3560  }
3561 
3562  void *Mem = Allocate(
3563  ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0),
3564  TypeAlignment);
3565  auto *New = new (Mem)
3566  ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals);
3567  ConstantArrayTypes.InsertNode(New, InsertPos);
3568  Types.push_back(New);
3569  return QualType(New, 0);
3570 }
3571 
3572 /// getVariableArrayDecayedType - Turns the given type, which may be
3573 /// variably-modified, into the corresponding type with all the known
3574 /// sizes replaced with [*].
3576  // Vastly most common case.
3577  if (!type->isVariablyModifiedType()) return type;
3578 
3579  QualType result;
3580 
3581  SplitQualType split = type.getSplitDesugaredType();
3582  const Type *ty = split.Ty;
3583  switch (ty->getTypeClass()) {
3584 #define TYPE(Class, Base)
3585 #define ABSTRACT_TYPE(Class, Base)
3586 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3587 #include "clang/AST/TypeNodes.inc"
3588  llvm_unreachable("didn't desugar past all non-canonical types?");
3589 
3590  // These types should never be variably-modified.
3591  case Type::Builtin:
3592  case Type::Complex:
3593  case Type::Vector:
3594  case Type::DependentVector:
3595  case Type::ExtVector:
3596  case Type::DependentSizedExtVector:
3597  case Type::ConstantMatrix:
3598  case Type::DependentSizedMatrix:
3599  case Type::DependentAddressSpace:
3600  case Type::ObjCObject:
3601  case Type::ObjCInterface:
3602  case Type::ObjCObjectPointer:
3603  case Type::Record:
3604  case Type::Enum:
3605  case Type::UnresolvedUsing:
3606  case Type::TypeOfExpr:
3607  case Type::TypeOf:
3608  case Type::Decltype:
3609  case Type::UnaryTransform:
3610  case Type::DependentName:
3611  case Type::InjectedClassName:
3612  case Type::TemplateSpecialization:
3613  case Type::DependentTemplateSpecialization:
3614  case Type::TemplateTypeParm:
3615  case Type::SubstTemplateTypeParmPack:
3616  case Type::Auto:
3617  case Type::DeducedTemplateSpecialization:
3618  case Type::PackExpansion:
3619  case Type::BitInt:
3620  case Type::DependentBitInt:
3621  llvm_unreachable("type should never be variably-modified");
3622 
3623  // These types can be variably-modified but should never need to
3624  // further decay.
3625  case Type::FunctionNoProto:
3626  case Type::FunctionProto:
3627  case Type::BlockPointer:
3628  case Type::MemberPointer:
3629  case Type::Pipe:
3630  return type;
3631 
3632  // These types can be variably-modified. All these modifications
3633  // preserve structure except as noted by comments.
3634  // TODO: if we ever care about optimizing VLAs, there are no-op
3635  // optimizations available here.
3636  case Type::Pointer:
3638  cast<PointerType>(ty)->getPointeeType()));
3639  break;
3640 
3641  case Type::LValueReference: {
3642  const auto *lv = cast<LValueReferenceType>(ty);
3643  result = getLValueReferenceType(
3644  getVariableArrayDecayedType(lv->getPointeeType()),
3645  lv->isSpelledAsLValue());
3646  break;
3647  }
3648 
3649  case Type::RValueReference: {
3650  const auto *lv = cast<RValueReferenceType>(ty);
3651  result = getRValueReferenceType(
3652  getVariableArrayDecayedType(lv->getPointeeType()));
3653  break;
3654  }
3655 
3656  case Type::Atomic: {
3657  const auto *at = cast<AtomicType>(ty);
3658  result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
3659  break;
3660  }
3661 
3662  case Type::ConstantArray: {
3663  const auto *cat = cast<ConstantArrayType>(ty);
3664  result = getConstantArrayType(
3665  getVariableArrayDecayedType(cat->getElementType()),
3666  cat->getSize(),
3667  cat->getSizeExpr(),
3668  cat->getSizeModifier(),
3669  cat->getIndexTypeCVRQualifiers());
3670  break;
3671  }
3672 
3673  case Type::DependentSizedArray: {
3674  const auto *dat = cast<DependentSizedArrayType>(ty);
3675  result = getDependentSizedArrayType(
3676  getVariableArrayDecayedType(dat->getElementType()),
3677  dat->getSizeExpr(),
3678  dat->getSizeModifier(),
3679  dat->getIndexTypeCVRQualifiers(),
3680  dat->getBracketsRange());
3681  break;
3682  }
3683 
3684  // Turn incomplete types into [*] types.
3685  case Type::IncompleteArray: {
3686  const auto *iat = cast<IncompleteArrayType>(ty);
3687  result = getVariableArrayType(
3688  getVariableArrayDecayedType(iat->getElementType()),
3689  /*size*/ nullptr,
3691  iat->getIndexTypeCVRQualifiers(),
3692  SourceRange());
3693  break;
3694  }
3695 
3696  // Turn VLA types into [*] types.
3697  case Type::VariableArray: {
3698  const auto *vat = cast<VariableArrayType>(ty);
3699  result = getVariableArrayType(
3700  getVariableArrayDecayedType(vat->getElementType()),
3701  /*size*/ nullptr,
3703  vat->getIndexTypeCVRQualifiers(),
3704  vat->getBracketsRange());
3705  break;
3706  }
3707  }
3708 
3709  // Apply the top-level qualifiers from the original.
3710  return getQualifiedType(result, split.Quals);
3711 }
3712 
3713 /// getVariableArrayType - Returns a non-unique reference to the type for a
3714 /// variable array of the specified element type.
3716  Expr *NumElts,
3718  unsigned IndexTypeQuals,
3719  SourceRange Brackets) const {
3720  // Since we don't unique expressions, it isn't possible to unique VLA's
3721  // that have an expression provided for their size.
3722  QualType Canon;
3723 
3724  // Be sure to pull qualifiers off the element type.
3725  // FIXME: Check below should look for qualifiers behind sugar.
3726  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
3727  SplitQualType canonSplit = getCanonicalType(EltTy).split();
3728  Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
3729  IndexTypeQuals, Brackets);
3730  Canon = getQualifiedType(Canon, canonSplit.Quals);
3731  }
3732 
3733  auto *New = new (*this, TypeAlignment)
3734  VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
3735 
3736  VariableArrayTypes.push_back(New);
3737  Types.push_back(New);
3738  return QualType(New, 0);
3739 }
3740 
/// getDependentSizedArrayType - Returns a non-unique reference to
/// the type for a dependently-sized array of the specified element
/// type.
///
/// "Non-unique": when the element type needs canonicalization, a fresh
/// sugared node is built on each call, all sharing one canonical
/// DependentSizedArrayType found via the folding set below.
// NOTE(review): the signature's opening line (first parameter) was lost in
// extraction; the remaining parameters are visible below.
                                          Expr *numElements,
                                          unsigned elementTypeQuals,
                                          SourceRange brackets) const {
  // A dependently-sized array must have a dependent (or absent) size.
  assert((!numElements || numElements->isTypeDependent() ||
          numElements->isValueDependent()) &&
         "Size must be type- or value-dependent!");

  // Dependently-sized array types that do not have a specified number
  // of elements will have their sizes deduced from a dependent
  // initializer. We do no canonicalization here at all, which is okay
  // because they can't be used in most locations.
  if (!numElements) {
    auto *newType
      = new (*this, TypeAlignment)
          DependentSizedArrayType(*this, elementType, QualType(),
                                  numElements, ASM, elementTypeQuals,
                                  brackets);
    Types.push_back(newType);
    return QualType(newType, 0);
  }

  // Otherwise, we actually build a new type every time, but we
  // also build a canonical type.

  SplitQualType canonElementType = getCanonicalType(elementType).split();

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  // NOTE(review): the Profile(...) call's opening line appears to be missing
  // here in this extraction.
       QualType(canonElementType.Ty, 0),
       ASM, elementTypeQuals, numElements);

  // Look for an existing type with these properties.
  DependentSizedArrayType *canonTy =
    DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);

  // If we don't have one, build one.
  if (!canonTy) {
    canonTy = new (*this, TypeAlignment)
      DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
                              QualType(), numElements, ASM, elementTypeQuals,
                              brackets);
    DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  // Apply qualifiers from the element type to the array.
  QualType canon = getQualifiedType(QualType(canonTy,0),
                                    canonElementType.Quals);

  // If we didn't need extra canonicalization for the element type or the size
  // expression, then just use that as our result.
  if (QualType(canonElementType.Ty, 0) == elementType &&
      canonTy->getSizeExpr() == numElements)
    return canon;

  // Otherwise, we need to build a type which follows the spelling
  // of the element type.
  auto *sugaredType
    = new (*this, TypeAlignment)
        DependentSizedArrayType(*this, elementType, canon, numElements,
                                ASM, elementTypeQuals, brackets);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}
3811 
// Returns the unique incomplete-array type (e.g. 'int[]') for the given
// element type and qualifiers, uniqued via the IncompleteArrayTypes set.
// NOTE(review): the signature's opening lines were lost in extraction; the
// parameters used below are elementType, ASM and elementTypeQuals.
                                            unsigned elementTypeQuals) const {
  // Incomplete array types are uniqued: profile and look up first.
  llvm::FoldingSetNodeID ID;
  IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);

  void *insertPos = nullptr;
  if (IncompleteArrayType *iat =
       IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
    return QualType(iat, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.  We also have to pull
  // qualifiers off the element type.
  QualType canon;

  // FIXME: Check below should look for qualifiers behind sugar.
  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(elementType).split();
    canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
                                   ASM, elementTypeQuals);
    // Re-apply the stripped qualifiers on top of the canonical array type.
    canon = getQualifiedType(canon, canonSplit.Quals);

    // Get the new insert position for the node we care about.
    IncompleteArrayType *existing =
      IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
    assert(!existing && "Shouldn't be in the map!"); (void) existing;
  }

  auto *newType = new (*this, TypeAlignment)
    IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);

  IncompleteArrayTypes.InsertNode(newType, insertPos);
  Types.push_back(newType);
  return QualType(newType, 0);
}
3848 
// Maps an SVE/RISC-V-V builtin vector type to a descriptor of
// {element type, scalable element count, number of vectors in the tuple}.
// NOTE(review): the function's signature lines were lost in extraction; the
// switch below is over Ty->getKind() for a BuiltinType *Ty.
//
// Helper macros build the returned aggregate for SVE types.
#define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \
  {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
   NUMVECTORS};

#define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \
  {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};

  switch (Ty->getKind()) {
  default:
    llvm_unreachable("Unsupported builtin vector type");
  // SVE 8-bit integer vectors: 16 elements per vector, 1-4 vector tuples.
  case BuiltinType::SveInt8:
    return SVE_INT_ELTTY(8, 16, true, 1);
  case BuiltinType::SveUint8:
    return SVE_INT_ELTTY(8, 16, false, 1);
  case BuiltinType::SveInt8x2:
    return SVE_INT_ELTTY(8, 16, true, 2);
  case BuiltinType::SveUint8x2:
    return SVE_INT_ELTTY(8, 16, false, 2);
  case BuiltinType::SveInt8x3:
    return SVE_INT_ELTTY(8, 16, true, 3);
  case BuiltinType::SveUint8x3:
    return SVE_INT_ELTTY(8, 16, false, 3);
  case BuiltinType::SveInt8x4:
    return SVE_INT_ELTTY(8, 16, true, 4);
  case BuiltinType::SveUint8x4:
    return SVE_INT_ELTTY(8, 16, false, 4);
  // SVE 16-bit integer vectors: 8 elements per vector.
  case BuiltinType::SveInt16:
    return SVE_INT_ELTTY(16, 8, true, 1);
  case BuiltinType::SveUint16:
    return SVE_INT_ELTTY(16, 8, false, 1);
  case BuiltinType::SveInt16x2:
    return SVE_INT_ELTTY(16, 8, true, 2);
  case BuiltinType::SveUint16x2:
    return SVE_INT_ELTTY(16, 8, false, 2);
  case BuiltinType::SveInt16x3:
    return SVE_INT_ELTTY(16, 8, true, 3);
  case BuiltinType::SveUint16x3:
    return SVE_INT_ELTTY(16, 8, false, 3);
  case BuiltinType::SveInt16x4:
    return SVE_INT_ELTTY(16, 8, true, 4);
  case BuiltinType::SveUint16x4:
    return SVE_INT_ELTTY(16, 8, false, 4);
  // SVE 32-bit integer vectors: 4 elements per vector.
  case BuiltinType::SveInt32:
    return SVE_INT_ELTTY(32, 4, true, 1);
  case BuiltinType::SveUint32:
    return SVE_INT_ELTTY(32, 4, false, 1);
  case BuiltinType::SveInt32x2:
    return SVE_INT_ELTTY(32, 4, true, 2);
  case BuiltinType::SveUint32x2:
    return SVE_INT_ELTTY(32, 4, false, 2);
  case BuiltinType::SveInt32x3:
    return SVE_INT_ELTTY(32, 4, true, 3);
  case BuiltinType::SveUint32x3:
    return SVE_INT_ELTTY(32, 4, false, 3);
  case BuiltinType::SveInt32x4:
    return SVE_INT_ELTTY(32, 4, true, 4);
  case BuiltinType::SveUint32x4:
    return SVE_INT_ELTTY(32, 4, false, 4);
  // SVE 64-bit integer vectors: 2 elements per vector.
  case BuiltinType::SveInt64:
    return SVE_INT_ELTTY(64, 2, true, 1);
  case BuiltinType::SveUint64:
    return SVE_INT_ELTTY(64, 2, false, 1);
  case BuiltinType::SveInt64x2:
    return SVE_INT_ELTTY(64, 2, true, 2);
  case BuiltinType::SveUint64x2:
    return SVE_INT_ELTTY(64, 2, false, 2);
  case BuiltinType::SveInt64x3:
    return SVE_INT_ELTTY(64, 2, true, 3);
  case BuiltinType::SveUint64x3:
    return SVE_INT_ELTTY(64, 2, false, 3);
  case BuiltinType::SveInt64x4:
    return SVE_INT_ELTTY(64, 2, true, 4);
  case BuiltinType::SveUint64x4:
    return SVE_INT_ELTTY(64, 2, false, 4);
  // SVE predicate: modeled as 16 bool elements.
  case BuiltinType::SveBool:
    return SVE_ELTTY(BoolTy, 16, 1);
  // SVE floating-point vectors (half/float/double/bfloat16).
  case BuiltinType::SveFloat16:
    return SVE_ELTTY(HalfTy, 8, 1);
  case BuiltinType::SveFloat16x2:
    return SVE_ELTTY(HalfTy, 8, 2);
  case BuiltinType::SveFloat16x3:
    return SVE_ELTTY(HalfTy, 8, 3);
  case BuiltinType::SveFloat16x4:
    return SVE_ELTTY(HalfTy, 8, 4);
  case BuiltinType::SveFloat32:
    return SVE_ELTTY(FloatTy, 4, 1);
  case BuiltinType::SveFloat32x2:
    return SVE_ELTTY(FloatTy, 4, 2);
  case BuiltinType::SveFloat32x3:
    return SVE_ELTTY(FloatTy, 4, 3);
  case BuiltinType::SveFloat32x4:
    return SVE_ELTTY(FloatTy, 4, 4);
  case BuiltinType::SveFloat64:
    return SVE_ELTTY(DoubleTy, 2, 1);
  case BuiltinType::SveFloat64x2:
    return SVE_ELTTY(DoubleTy, 2, 2);
  case BuiltinType::SveFloat64x3:
    return SVE_ELTTY(DoubleTy, 2, 3);
  case BuiltinType::SveFloat64x4:
    return SVE_ELTTY(DoubleTy, 2, 4);
  case BuiltinType::SveBFloat16:
    return SVE_ELTTY(BFloat16Ty, 8, 1);
  case BuiltinType::SveBFloat16x2:
    return SVE_ELTTY(BFloat16Ty, 8, 2);
  case BuiltinType::SveBFloat16x3:
    return SVE_ELTTY(BFloat16Ty, 8, 3);
  case BuiltinType::SveBFloat16x4:
    return SVE_ELTTY(BFloat16Ty, 8, 4);
// RISC-V V builtin vectors are generated from the .def file: each macro
// expands to one 'case' returning the matching descriptor.
#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \
                            IsSigned) \
  case BuiltinType::Id: \
    return {getIntTypeForBitwidth(ElBits, IsSigned), \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
  case BuiltinType::Id: \
    return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
  case BuiltinType::Id: \
    return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
#include "clang/Basic/RISCVVTypes.def"
  }
}
3974 
/// getScalableVectorType - Return the unique reference to a scalable vector
/// type of the specified element type and size. VectorType must be a built-in
/// type.
///
/// Returns a null QualType if no target scalable builtin matches.
// NOTE(review): the signature's opening line was lost in extraction; the
/// parameters used below are EltTy and NumElts.
                                           unsigned NumElts) const {
  if (Target->hasAArch64SVETypes()) {
    uint64_t EltTySize = getTypeSize(EltTy);
// Match (EltTy, NumElts) against each SVE builtin; bools map only to the
// predicate type, and bfloat16 is kept distinct from other FP element types.
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
                        IsSigned, IsFP, IsBF) \
  if (!EltTy->isBooleanType() && \
      ((EltTy->hasIntegerRepresentation() && \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
       (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \
        IsFP && !IsBF) || \
       (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \
        IsBF && !IsFP)) && \
      EltTySize == ElBits && NumElts == NumEls) { \
    return SingletonId; \
  }
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
  if (EltTy->isBooleanType() && NumElts == NumEls) \
    return SingletonId;
#include "clang/Basic/AArch64SVEACLETypes.def"
  } else if (Target->hasRISCVVTypes()) {
    uint64_t EltTySize = getTypeSize(EltTy);
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \
                        IsFP) \
  if (!EltTy->isBooleanType() && \
      ((EltTy->hasIntegerRepresentation() && \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
       (EltTy->hasFloatingRepresentation() && IsFP)) && \
      EltTySize == ElBits && NumElts == NumEls) \
    return SingletonId;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
  if (EltTy->isBooleanType() && NumElts == NumEls) \
    return SingletonId;
#include "clang/Basic/RISCVVTypes.def"
  }
  // No matching scalable builtin on this target.
  return QualType();
}
4015 
4016 /// getVectorType - Return the unique reference to a vector type of
4017 /// the specified element type and size. VectorType must be a built-in type.
4018 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
4019  VectorType::VectorKind VecKind) const {
4020  assert(vecType->isBuiltinType() ||
4021  (vecType->isBitIntType() &&
4022  // Only support _BitInt elements with byte-sized power of 2 NumBits.
4023  llvm::isPowerOf2_32(vecType->getAs<BitIntType>()->getNumBits()) &&
4024  vecType->getAs<BitIntType>()->getNumBits() >= 8));
4025 
4026  // Check if we've already instantiated a vector of this type.
4027  llvm::FoldingSetNodeID ID;
4028  VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
4029 
4030  void *InsertPos = nullptr;
4031  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
4032  return QualType(VTP, 0);
4033 
4034  // If the element type isn't canonical, this won't be a canonical type either,
4035  // so fill in the canonical type field.
4036  QualType Canonical;
4037  if (!vecType.isCanonical()) {
4038  Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);
4039 
4040  // Get the new insert position for the node we care about.
4041  VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4042  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4043  }
4044  auto *New = new (*this, TypeAlignment)
4045  VectorType(vecType, NumElts, Canonical, VecKind);
4046  VectorTypes.InsertNode(New, InsertPos);
4047  Types.push_back(New);
4048  return QualType(New, 0);
4049 }
4050 
// Returns a (possibly sugared) DependentVectorType for a vector whose size
// expression is dependent, sharing one canonical node per
// (canonical element, size expr, kind) key.
QualType
// NOTE(review): the signature's opening line was lost in extraction; the
// parameters used below are VecType, SizeExpr, AttrLoc and VecKind.
                                   SourceLocation AttrLoc,
                                   VectorType::VectorKind VecKind) const {
  llvm::FoldingSetNodeID ID;
  DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr,
                               VecKind);
  void *InsertPos = nullptr;
  DependentVectorType *Canon =
      DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentVectorType *New;

  if (Canon) {
    // Canonical node exists: build a sugared node pointing at it.
    New = new (*this, TypeAlignment) DependentVectorType(
        *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
  } else {
    QualType CanonVecTy = getCanonicalType(VecType);
    if (CanonVecTy == VecType) {
      // The request is itself canonical: build it and register it.
      New = new (*this, TypeAlignment) DependentVectorType(
          *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind);

      DependentVectorType *CanonCheck =
          DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck &&
             "Dependent-sized vector_size canonical type broken");
      (void)CanonCheck;
      DependentVectorTypes.InsertNode(New, InsertPos);
    } else {
      // Build the canonical node first, then sugar on top of it.
      QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr,
                                                SourceLocation(), VecKind);
      New = new (*this, TypeAlignment) DependentVectorType(
          *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
    }
  }

  Types.push_back(New);
  return QualType(New, 0);
}
4089 
/// getExtVectorType - Return the unique reference to an extended vector type
/// of the specified element type and size. VectorType must be a built-in type.
// NOTE(review): the signature's opening line was lost in extraction; the
// parameters used below are vecType and NumElts.
                                       unsigned NumElts) const {
  assert(vecType->isBuiltinType() || vecType->isDependentType() ||
         (vecType->isBitIntType() &&
          // Only support _BitInt elements with byte-sized power of 2 NumBits.
          llvm::isPowerOf2_32(vecType->getAs<BitIntType>()->getNumBits()) &&
          vecType->getAs<BitIntType>()->getNumBits() >= 8));

  // Check if we've already instantiated a vector of this type.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
  // NOTE(review): the Profile call's final argument line appears to be
  // missing here in this extraction.
  void *InsertPos = nullptr;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);

  // If the element type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);

    // Get the new insert position for the node we care about.
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment)
    ExtVectorType(vecType, NumElts, Canonical);
  VectorTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}
4124 
// Returns a (possibly sugared) DependentSizedExtVectorType for an
// ext_vector whose size expression is dependent.
QualType
// NOTE(review): the signature's opening line was lost in extraction; the
// parameters used below are vecType, SizeExpr and AttrLoc.
                                          Expr *SizeExpr,
                                          SourceLocation AttrLoc) const {
  llvm::FoldingSetNodeID ID;
  // NOTE(review): the Profile(...) call's opening line appears to be missing
  // here in this extraction.
                                       SizeExpr);

  void *InsertPos = nullptr;
  // NOTE(review): the declaration line for 'Canon' appears to be missing.
    = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  // NOTE(review): the declaration line for 'New' appears to be missing.
  if (Canon) {
    // We already have a canonical version of this array type; use it as
    // the canonical type for a newly-built type.
    New = new (*this, TypeAlignment)
      DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0),
                                  SizeExpr, AttrLoc);
  } else {
    QualType CanonVecTy = getCanonicalType(vecType);
    if (CanonVecTy == vecType) {
      // The request is itself canonical: build it and register it.
      New = new (*this, TypeAlignment)
        DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr,
                                    AttrLoc);

      DependentSizedExtVectorType *CanonCheck
        = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
      (void)CanonCheck;
      DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
    } else {
      // Canonicalize the element type first, then sugar on top of it.
      QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
                                                           SourceLocation());
      New = new (*this, TypeAlignment) DependentSizedExtVectorType(
          *this, vecType, CanonExtTy, SizeExpr, AttrLoc);
    }
  }

  Types.push_back(New);
  return QualType(New, 0);
}
4166 
// Returns the unique ConstantMatrixType for (ElementTy, NumRows, NumColumns).
// NOTE(review): the signature's opening lines were lost in extraction; the
// parameters used below are ElementTy, NumRows and NumColumns.
                                           unsigned NumColumns) const {
  // Matrix types are uniqued on (element type, rows, columns).
  llvm::FoldingSetNodeID ID;
  ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns,
                              Type::ConstantMatrix);

  assert(MatrixType::isValidElementType(ElementTy) &&
         "need a valid element type");
  assert(ConstantMatrixType::isDimensionValid(NumRows) &&
         // NOTE(review): the matching isDimensionValid(NumColumns) clause
         // appears to be missing here in this extraction.
         "need valid matrix dimensions");
  void *InsertPos = nullptr;
  if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(MTP, 0);

  // A non-canonical element type makes this node non-canonical too; build
  // the canonical matrix type first so we can point at it.
  QualType Canonical;
  if (!ElementTy.isCanonical()) {
    Canonical =
        getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns);

    ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Matrix type shouldn't already exist in the map");
    (void)NewIP;
  }

  auto *New = new (*this, TypeAlignment)
      ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
  MatrixTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}
4198 
4200  Expr *RowExpr,
4201  Expr *ColumnExpr,
4202  SourceLocation AttrLoc) const {
4203  QualType CanonElementTy = getCanonicalType(ElementTy);
4204  llvm::FoldingSetNodeID ID;
4205  DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr,
4206  ColumnExpr);
4207 
4208  void *InsertPos = nullptr;
4209  DependentSizedMatrixType *Canon =
4210  DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4211 
4212  if (!Canon) {
4213  Canon = new (*this, TypeAlignment) DependentSizedMatrixType(
4214  *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc);
4215 #ifndef NDEBUG
4216  DependentSizedMatrixType *CanonCheck =
4217  DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4218  assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
4219 #endif
4220  DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
4221  Types.push_back(Canon);
4222  }
4223 
4224  // Already have a canonical version of the matrix type
4225  //
4226  // If it exactly matches the requested type, use it directly.
4227  if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
4228  Canon->getRowExpr() == ColumnExpr)
4229  return QualType(Canon, 0);
4230 
4231  // Use Canon as the canonical type for newly-built type.
4232  DependentSizedMatrixType *New = new (*this, TypeAlignment)
4233  DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr,
4234  ColumnExpr, AttrLoc);
4235  Types.push_back(New);
4236  return QualType(New, 0);
4237 }
4238 
// Returns a (possibly sugared) DependentAddressSpaceType for a pointee type
// qualified with a dependent address-space expression.
// NOTE(review): the signature's opening line was lost in extraction; the
// parameters used below are PointeeType, AddrSpaceExpr and AttrLoc.
                                                  Expr *AddrSpaceExpr,
                                                  SourceLocation AttrLoc) const {
  // Only a dependent address-space expression warrants this node.
  assert(AddrSpaceExpr->isInstantiationDependent());

  QualType canonPointeeType = getCanonicalType(PointeeType);

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
                                     AddrSpaceExpr);

  DependentAddressSpaceType *canonTy =
    DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);

  // Build and register the canonical node if it doesn't exist yet.
  if (!canonTy) {
    canonTy = new (*this, TypeAlignment)
      DependentAddressSpaceType(*this, canonPointeeType,
                                QualType(), AddrSpaceExpr, AttrLoc);
    DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  // If the request was already canonical, reuse the canonical node directly.
  if (canonPointeeType == PointeeType &&
      canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
    return QualType(canonTy, 0);

  // Otherwise build a sugared node spelled like the request.
  auto *sugaredType
    = new (*this, TypeAlignment)
        DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0),
                                  AddrSpaceExpr, AttrLoc);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}
4273 
/// Determine whether \p T is canonical as the result type of a function.
// NOTE(review): this helper's signature line and the remainder of its return
// condition were lost in extraction; only the first clause is visible.
  return T.isCanonical() &&
}
4280 
/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
QualType
// NOTE(review): the signature's first parameter line was lost in extraction;
// ResultTy is the parameter used below.
                                  const FunctionType::ExtInfo &Info) const {
  // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
  // functionality creates a function without a prototype regardless of
  // language mode (so it makes them even in C++). Once the rewriter has been
  // fixed, this assertion can be enabled again.
  //assert(!LangOpts.requiresStrictPrototypes() &&
  //       "strict prototypes are disabled");

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionNoProtoType::Profile(ID, ResultTy, Info);

  void *InsertPos = nullptr;
  if (FunctionNoProtoType *FT =
        FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(FT, 0);

  // A non-canonical result type makes this node non-canonical too.
  QualType Canonical;
  if (!isCanonicalResultType(ResultTy)) {
    Canonical =
    // NOTE(review): the recursive call building the canonical sibling
    // appears to be missing here in this extraction.

    // Get the new insert position for the node we care about.
    FunctionNoProtoType *NewIP =
      FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, TypeAlignment)
    FunctionNoProtoType(ResultTy, Canonical, Info);
  Types.push_back(New);
  FunctionNoProtoTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
4319 
// Computes the canonical form of a function result type: the canonical type
// with any ObjC ARC lifetime qualifiers removed.
// NOTE(review): the signature lines were lost in extraction; ResultType is
// the parameter used below.
  CanQualType CanResultType = getCanonicalType(ResultType);

  // Canonical result types do not have ARC lifetime qualifiers.
  if (CanResultType.getQualifiers().hasObjCLifetime()) {
    Qualifiers Qs = CanResultType.getQualifiers();
    Qs.removeObjCLifetime();
    // NOTE(review): the opening of the return expression wrapping the call
    // below appears to be missing here in this extraction.
      getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
  }

  return CanResultType;
}
4334 
// Decides whether an exception specification is already in canonical form
// for use in a canonical FunctionProtoType.
// NOTE(review): the helper's signature opening line was lost in extraction.
    const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
  // No exception spec at all is trivially canonical.
  if (ESI.Type == EST_None)
    return true;
  // Before C++17 the exception spec is not part of the canonical type.
  if (!NoexceptInType)
    return false;

  // C++17 onwards: exception specification is part of the type, as a simple
  // boolean "can this function type throw".
  if (ESI.Type == EST_BasicNoexcept)
    return true;

  // A noexcept(expr) specification is (possibly) canonical if expr is
  // value-dependent.
  if (ESI.Type == EST_DependentNoexcept)
    return true;

  // A dynamic exception specification is canonical if it only contains pack
  // expansions (so we can't tell whether it's non-throwing) and all its
  // contained types are canonical.
  if (ESI.Type == EST_Dynamic) {
    bool AnyPackExpansions = false;
    for (QualType ET : ESI.Exceptions) {
      if (!ET.isCanonical())
        return false;
      if (ET->getAs<PackExpansionType>())
        AnyPackExpansions = true;
    }
    return AnyPackExpansions;
  }

  return false;
}
4368 
/// Build (or find) a FunctionProtoType for the given result type, parameter
/// types and extended prototype info, together with its canonical form.
/// When \p OnlyWantCanonical is set, the inputs must already be canonical.
QualType ASTContext::getFunctionTypeInternal(
    QualType ResultTy, ArrayRef<QualType> ArgArray,
    const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
  size_t NumArgs = ArgArray.size();

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
                             *this, true);

  QualType Canonical;
  bool Unique = false;

  void *InsertPos = nullptr;
  if (FunctionProtoType *FPT =
        FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
    QualType Existing = QualType(FPT, 0);

    // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
    // it so long as our exception specification doesn't contain a dependent
    // noexcept expression, or we're just looking for a canonical type.
    // Otherwise, we're going to need to create a type
    // sugar node to hold the concrete expression.
    if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
        EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
      return Existing;

    // We need a new type sugar node for this one, to hold the new noexcept
    // expression. We do no canonicalization here, but that's OK since we don't
    // expect to see the same noexcept expression much more than once.
    Canonical = getCanonicalType(Existing);
    Unique = true;
  }

  bool NoexceptInType = getLangOpts().CPlusPlus17;
  bool IsCanonicalExceptionSpec =
      isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);

  // Determine whether the type being created is already canonical or not.
  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
                     isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
    if (!ArgArray[i].isCanonicalAsParam())
      isCanonical = false;

  if (OnlyWantCanonical)
    assert(isCanonical &&
           "given non-canonical parameters constructing canonical type");

  // If this type isn't canonical, get the canonical version of it if we don't
  // already have it. The exception spec is only partially part of the
  // canonical type, and only in C++17 onwards.
  if (!isCanonical && Canonical.isNull()) {
    SmallVector<QualType, 16> CanonicalArgs;
    CanonicalArgs.reserve(NumArgs);
    for (unsigned i = 0; i != NumArgs; ++i)
      CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));

    llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
    FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
    CanonicalEPI.HasTrailingReturn = false;

    if (IsCanonicalExceptionSpec) {
      // Exception spec is already OK.
    } else if (NoexceptInType) {
      switch (EPI.ExceptionSpec.Type) {
      // NOTE(review): the case labels for the unevaluated/uninstantiated
      // exception-spec states appear to be missing here in this extraction.
      // We don't know yet. It shouldn't matter what we pick here; no-one
      // should ever look at this.
        [[fallthrough]];
      case EST_None: case EST_MSAny: case EST_NoexceptFalse:
        CanonicalEPI.ExceptionSpec.Type = EST_None;
        break;

      // A dynamic exception specification is almost always "not noexcept",
      // with the exception that a pack expansion might expand to no types.
      case EST_Dynamic: {
        bool AnyPacks = false;
        for (QualType ET : EPI.ExceptionSpec.Exceptions) {
          if (ET->getAs<PackExpansionType>())
            AnyPacks = true;
          ExceptionTypeStorage.push_back(getCanonicalType(ET));
        }
        if (!AnyPacks)
          CanonicalEPI.ExceptionSpec.Type = EST_None;
        else {
          CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
          CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
        }
        break;
      }

      case EST_DynamicNone:
      case EST_BasicNoexcept:
      case EST_NoexceptTrue:
      case EST_NoThrow:
        CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
        break;

      case EST_DependentNoexcept:
        llvm_unreachable("dependent noexcept is already canonical");
      }
    } else {
      // NOTE(review): the pre-C++17 branch body appears to be missing here
      // in this extraction.
    }

    // Adjust the canonical function result type.
    CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
    Canonical =
        getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);

    // Get the new insert position for the node we care about.
    FunctionProtoType *NewIP =
        FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  // Compute the needed size to hold this FunctionProtoType and the
  // various trailing objects.
  auto ESH = FunctionProtoType::getExceptionSpecSize(
      EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
  size_t Size = FunctionProtoType::totalSizeToAlloc<
      // NOTE(review): some template-argument lines appear to be missing here
      // in this extraction.
      FunctionProtoType::ExtParameterInfo, Qualifiers>(
      // NOTE(review): the first call-argument line appears to be missing
      // here in this extraction.
      ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
      EPI.ExtParameterInfos ? NumArgs : 0,
      EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0);

  auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment);
  FunctionProtoType::ExtProtoInfo newEPI = EPI;
  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
  Types.push_back(FTP);
  // A sugared node holding a distinct noexcept expr is deliberately NOT
  // inserted into the folding set (Unique == true).
  if (!Unique)
    FunctionProtoTypes.InsertNode(FTP, InsertPos);
  return QualType(FTP, 0);
}
4508 
4509 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
4510  llvm::FoldingSetNodeID ID;
4511  PipeType::Profile(ID, T, ReadOnly);
4512 
4513  void *InsertPos = nullptr;
4514  if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
4515  return QualType(PT, 0);
4516 
4517  // If the pipe element type isn't canonical, this won't be a canonical type
4518  // either, so fill in the canonical type field.
4519  QualType Canonical;
4520  if (!T.isCanonical()) {
4521  Canonical = getPipeType(getCanonicalType(T), ReadOnly);
4522 
4523  // Get the new insert position for the node we care about.
4524  PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
4525  assert(!NewIP && "Shouldn't be in the map!");
4526  (void)NewIP;
4527  }
4528  auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly);
4529  Types.push_back(New);
4530  PipeTypes.InsertNode(New, InsertPos);
4531  return QualType(New, 0);
4532 }
4533 
// NOTE(review): the signature line was lost in extraction; the body
// address-space-qualifies Ty for OpenCL string literals, else returns it
// unchanged.
  // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
  return LangOpts.OpenCL ? getAddrSpaceQualType(Ty, LangAS::opencl_constant)
                         : Ty;
}
4539 
// NOTE(review): signature line lost in extraction — builds a read-only pipe
// over T by delegating to getPipeType.
  return getPipeType(T, true);
}
4543 
// NOTE(review): signature line lost in extraction — builds a write-only pipe
// over T by delegating to getPipeType.
  return getPipeType(T, false);
}
4547 
4548 QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const {
4549  llvm::FoldingSetNodeID ID;
4550  BitIntType::Profile(ID, IsUnsigned, NumBits);
4551 
4552  void *InsertPos = nullptr;
4553  if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
4554  return QualType(EIT, 0);
4555 
4556  auto *New = new (*this, TypeAlignment) BitIntType(IsUnsigned, NumBits);
4557  BitIntTypes.InsertNode(New, InsertPos);
4558  Types.push_back(New);
4559  return QualType(New, 0);
4560 }
4561 
// Returns the unique dependent _BitInt type whose bit count is the given
// instantiation-dependent expression.
// NOTE(review): the signature's opening line was lost in extraction; the
// parameters used below are IsUnsigned and NumBitsExpr.
                                            Expr *NumBitsExpr) const {
  assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
  // Uniqued on (signedness, bit-count expression).
  llvm::FoldingSetNodeID ID;
  DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr);

  void *InsertPos = nullptr;
  if (DependentBitIntType *Existing =
          DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(Existing, 0);

  auto *New = new (*this, TypeAlignment)
      DependentBitIntType(*this, IsUnsigned, NumBitsExpr);
  DependentBitIntTypes.InsertNode(New, InsertPos);

  Types.push_back(New);
  return QualType(New, 0);
}
4580 
4581 #ifndef NDEBUG
// Debug-only predicate (wrapped in #ifndef NDEBUG): whether a record decl D
// should get an injected-class-name type rather than a plain record type.
// NOTE(review): the signature line was lost in extraction.
  // Only C++ classes can need an injected-class-name type.
  if (!isa<CXXRecordDecl>(D)) return false;
  const auto *RD = cast<CXXRecordDecl>(D);
  // Partial specializations always need one.
  if (isa<ClassTemplatePartialSpecializationDecl>(RD))
    return true;
  // So does the templated class of a class template (but not a full
  // specialization, which is an ordinary class).
  if (RD->getDescribedClassTemplate() &&
      !isa<ClassTemplateSpecializationDecl>(RD))
    return true;
  return false;
}
4592 #endif
4593 
/// getInjectedClassNameType - Return the unique reference to the
/// injected class name type for the specified templated declaration.
// NOTE(review): the signature's opening line was lost in extraction; the
// parameters used below are Decl and TST.
                                              QualType TST) const {
  // NOTE(review): an assertion line appears to be missing here.
  if (Decl->TypeForDecl) {
    // Already built for this declaration.
    assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
  } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
    // Share the type node computed for an earlier redeclaration.
    assert(PrevDecl->TypeForDecl && "previous declaration has no type");
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
  } else {
    // First sighting: build a fresh node and cache it on the declaration.
    Type *newType =
      new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
    Decl->TypeForDecl = newType;
    Types.push_back(newType);
  }
  return QualType(Decl->TypeForDecl, 0);
}
4613 
4614 /// getTypeDeclType - Return the unique reference to the type for the
4615 /// specified type declaration.
4616 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
4617  assert(Decl && "Passed null for Decl param");
4618  assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
4619 
4620  if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
4621  return getTypedefType(Typedef);
4622 
4623  assert(!isa<TemplateTypeParmDecl>(Decl) &&
4624  "Template type parameter types are always available.");
4625 
4626  if (const auto *Record = dyn_cast<RecordDecl>(Decl)) {
4627  assert(Record->isFirstDecl() && "struct/union has previous declaration");
4628  assert(!NeedsInjectedClassNameType(Record));
4629  return getRecordType(Record);
4630  } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) {
4631  assert(Enum->isFirstDecl() && "enum has previous declaration");
4632  return getEnumType(Enum);
4633  } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
4634  return getUnresolvedUsingType(Using);
4635  } else
4636  llvm_unreachable("TypeDecl without a type?");
4637 
4638  return QualType(Decl->TypeForDecl, 0);
4639 }
4640 
4641 /// getTypedefType - Return the unique reference to the type for the
4642 /// specified typedef name decl.
4644  QualType Underlying) const {
4645  if (!Decl->TypeForDecl) {
4646  if (Underlying.isNull())
4647  Underlying = Decl->getUnderlyingType();
4648  auto *NewType = new (*this, TypeAlignment) TypedefType(
4649  Type::Typedef, Decl, QualType(), getCanonicalType(Underlying));
4650  Decl->TypeForDecl = NewType;
4651  Types.push_back(NewType);
4652  return QualType(NewType, 0);
4653  }
4654  if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying)
4655  return QualType(Decl->TypeForDecl, 0);
4656  assert(hasSameType(Decl->getUnderlyingType(), Underlying));
4657 
4658  llvm::FoldingSetNodeID ID;
4659  TypedefType::Profile(ID, Decl, Underlying);
4660 
4661  void *InsertPos = nullptr;
4662  if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) {
4663  assert(!T->typeMatchesDecl() &&
4664  "non-divergent case should be handled with TypeDecl");
4665  return QualType(T, 0);
4666  }
4667 
4668  void *Mem =
4669  Allocate(TypedefType::totalSizeToAlloc<QualType>(true), TypeAlignment);
4670  auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying,
4671  getCanonicalType(Underlying));
4672  TypedefTypes.InsertNode(NewType, InsertPos);
4673  Types.push_back(NewType);
4674  return QualType(NewType, 0);
4675 }
4676 
4678  QualType Underlying) const {
4679  llvm::FoldingSetNodeID ID;
4680  UsingType::Profile(ID, Found, Underlying);
4681 
4682  void *InsertPos = nullptr;
4683  if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
4684  return QualType(T, 0);
4685 
4686  const Type *TypeForDecl =
4687  cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl();
4688 
4689  assert(!Underlying.hasLocalQualifiers());
4690  QualType Canon = Underlying->getCanonicalTypeInternal();
4691  assert(TypeForDecl->getCanonicalTypeInternal() == Canon);
4692 
4693  if (Underlying.getTypePtr() == TypeForDecl)
4694  Underlying = QualType();
4695  void *Mem =
4696  Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()),
4697  TypeAlignment);
4698  UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon);
4699  Types.push_back(NewType);
4700  UsingTypes.InsertNode(NewType, InsertPos);
4701  return QualType(NewType, 0);
4702 }
4703 
4705  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
4706 
4707  if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
4708  if (PrevDecl->TypeForDecl)
4709  return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
4710 
4711  auto *newType = new (*this, TypeAlignment) RecordType(Decl);
4712  Decl->TypeForDecl = newType;
4713  Types.push_back(newType);
4714  return QualType(newType, 0);
4715 }
4716 
4718  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
4719 
4720  if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
4721  if (PrevDecl->TypeForDecl)
4722  return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
4723 
4724  auto *newType = new (*this, TypeAlignment) EnumType(Decl);
4725  Decl->TypeForDecl = newType;
4726  Types.push_back(newType);
4727  return QualType(newType, 0);
4728 }
4729 
4731  const UnresolvedUsingTypenameDecl *Decl) const {
4732  if (Decl->TypeForDecl)
4733  return QualType(Decl->TypeForDecl, 0);
4734 
4735  if (const UnresolvedUsingTypenameDecl *CanonicalDecl =
4737  if (CanonicalDecl->TypeForDecl)
4738  return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0);
4739 
4740  Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Decl);
4741  Decl->TypeForDecl = newType;
4742  Types.push_back(newType);
4743  return QualType(newType, 0);
4744 }
4745 
4747  QualType modifiedType,
4748  QualType equivalentType) const {
4749  llvm::FoldingSetNodeID id;
4750  AttributedType::Profile(id, attrKind, modifiedType, equivalentType);
4751 
4752  void *insertPos = nullptr;
4753  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
4754  if (type) return QualType(type, 0);
4755 
4756  QualType canon = getCanonicalType(equivalentType);
4757  type = new (*this, TypeAlignment)
4758  AttributedType(canon, attrKind, modifiedType, equivalentType);
4759 
4760  Types.push_back(type);
4761  AttributedTypes.InsertNode(type, insertPos);
4762 
4763  return QualType(type, 0);
4764 }
4765 
4766 QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
4767  QualType Wrapped) {
4768  llvm::FoldingSetNodeID ID;
4769  BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr);
4770 
4771  void *InsertPos = nullptr;
4772  BTFTagAttributedType *Ty =
4773  BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
4774  if (Ty)
4775  return QualType(Ty, 0);
4776 
4777  QualType Canon = getCanonicalType(Wrapped);
4778  Ty = new (*this, TypeAlignment) BTFTagAttributedType(Canon, Wrapped, BTFAttr);
4779 
4780  Types.push_back(Ty);
4781  BTFTagAttributedTypes.InsertNode(Ty, InsertPos);
4782 
4783  return QualType(Ty, 0);
4784 }
4785 
4786 /// Retrieve a substitution-result type.
4787 QualType
4789  QualType Replacement,
4790  Optional<unsigned> PackIndex) const {
4791  llvm::FoldingSetNodeID ID;
4792  SubstTemplateTypeParmType::Profile(ID, Parm, Replacement, PackIndex);
4793  void *InsertPos = nullptr;
4794  SubstTemplateTypeParmType *SubstParm
4795  = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4796 
4797  if (!SubstParm) {
4798  void *Mem = Allocate(SubstTemplateTypeParmType::totalSizeToAlloc<QualType>(
4799  !Replacement.isCanonical()),
4800  TypeAlignment);
4801  SubstParm =
4802  new (Mem) SubstTemplateTypeParmType(Parm, Replacement, PackIndex);
4803  Types.push_back(SubstParm);
4804  SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
4805  }
4806 
4807  return QualType(SubstParm, 0);
4808 }
4809 
4810 /// Retrieve a
4812  const TemplateTypeParmType *Parm,
4813  const TemplateArgument &ArgPack) {
4814 #ifndef NDEBUG
4815  for (const auto &P : ArgPack.pack_elements()) {
4816  assert(P.getKind() == TemplateArgument::Type &&"Pack contains a non-type");
4817  assert(P.getAsType().isCanonical() && "Pack contains non-canonical type");
4818  }
4819 #endif
4820 
4821  llvm::FoldingSetNodeID ID;
4823  void *InsertPos = nullptr;
4824  if (SubstTemplateTypeParmPackType *SubstParm
4825  = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
4826  return QualType(SubstParm, 0);
4827 
4828  QualType Canon;
4829  if (!Parm->isCanonicalUnqualified()) {
4830  Canon = getCanonicalType(QualType(Parm, 0));
4831  Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
4832  ArgPack);
4833  SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
4834  }
4835 
4836  auto *SubstParm
4837  = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
4838  ArgPack);
4839  Types.push_back(SubstParm);
4840  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
4841  return QualType(SubstParm, 0);
4842 }
4843 
4844 /// Retrieve the template type parameter type for a template
4845 /// parameter or parameter pack with the given depth, index, and (optionally)
4846 /// name.
4848  bool ParameterPack,
4849  TemplateTypeParmDecl *TTPDecl) const {
4850  llvm::FoldingSetNodeID ID;
4851  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
4852  void *InsertPos = nullptr;
4853  TemplateTypeParmType *TypeParm
4854  = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4855 
4856  if (TypeParm)
4857  return QualType(TypeParm, 0);
4858 
4859  if (TTPDecl) {
4860  QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
4861  TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);
4862 
4863  TemplateTypeParmType *TypeCheck
4864  = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4865  assert(!TypeCheck && "Template type parameter canonical type broken");
4866  (void)TypeCheck;
4867  } else
4868  TypeParm = new (*this, TypeAlignment)
4869  TemplateTypeParmType(Depth, Index, ParameterPack);
4870 
4871  Types.push_back(TypeParm);
4872  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
4873 
4874  return QualType(TypeParm, 0);
4875 }
4876 
4879  SourceLocation NameLoc,
4880  const TemplateArgumentListInfo &Args,
4881  QualType Underlying) const {
4882  assert(!Name.getAsDependentTemplateName() &&
4883  "No dependent template names here!");
4884  QualType TST =
4885  getTemplateSpecializationType(Name, Args.arguments(), Underlying);
4886 
4891  TL.setTemplateNameLoc(NameLoc);
4892  TL.setLAngleLoc(Args.getLAngleLoc());
4893  TL.setRAngleLoc(Args.getRAngleLoc());
4894  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
4895  TL.setArgLocInfo(i, Args[i].getLocInfo());
4896  return DI;
4897 }
4898 
4899 QualType
4902  QualType Underlying) const {
4903  assert(!Template.getAsDependentTemplateName() &&
4904  "No dependent template names here!");
4905 
4907  ArgVec.reserve(Args.size());
4908  for (const TemplateArgumentLoc &Arg : Args)
4909  ArgVec.push_back(Arg.getArgument());
4910 
4911  return getTemplateSpecializationType(Template, ArgVec, Underlying);
4912 }
4913 
4914 #ifndef NDEBUG
4916  for (const TemplateArgument &Arg : Args)
4917  if (Arg.isPackExpansion())
4918  return true;
4919 
4920  return true;
4921 }
4922 #endif
4923 
4924 QualType
4927  QualType Underlying) const {
4928  assert(!Template.getAsDependentTemplateName() &&
4929  "No dependent template names here!");
4930  // Look through qualified template names.
4931  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4932  Template = QTN->getUnderlyingTemplate();
4933 
4934  const auto *TD = Template.getAsTemplateDecl();
4935  bool IsTypeAlias = TD && TD->isTypeAlias();
4936  QualType CanonType;
4937  if (!Underlying.isNull())
4938  CanonType = getCanonicalType(Underlying);
4939  else {
4940  // We can get here with an alias template when the specialization contains
4941  // a pack expansion that does not match up with a parameter pack.
4942  assert((!IsTypeAlias || hasAnyPackExpansions(Args)) &&
4943  "Caller must compute aliased type");
4944  IsTypeAlias = false;
4945  CanonType = getCanonicalTemplateSpecializationType(Template, Args);
4946  }
4947 
4948  // Allocate the (non-canonical) template specialization type, but don't
4949  // try to unique it: these types typically have location information that
4950  // we don't unique and don't want to lose.
4951  void *Mem = Allocate(sizeof(TemplateSpecializationType) +
4952  sizeof(TemplateArgument) * Args.size() +
4953  (IsTypeAlias? sizeof(QualType) : 0),
4954  TypeAlignment);
4955  auto *Spec
4956  = new (Mem) TemplateSpecializationType(Template, Args, CanonType,
4957  IsTypeAlias ? Underlying : QualType());
4958 
4959  Types.push_back(Spec);
4960  return QualType(Spec, 0);
4961 }
4962 
4963 static bool
4965  ArrayRef<TemplateArgument> OrigArgs,
4966  SmallVectorImpl<TemplateArgument> &CanonArgs) {
4967  bool AnyNonCanonArgs = false;
4968  unsigned NumArgs = OrigArgs.size();
4969  CanonArgs.resize(NumArgs);
4970  for (unsigned I = 0; I != NumArgs; ++I) {
4971  const TemplateArgument &OrigArg = OrigArgs[I];
4972  TemplateArgument &CanonArg = CanonArgs[I];
4973  CanonArg = C.getCanonicalTemplateArgument(OrigArg);
4974  if (!CanonArg.structurallyEquals(OrigArg))
4975  AnyNonCanonArgs = true;
4976  }
4977  return AnyNonCanonArgs;
4978 }
4979 
4981  TemplateName Template, ArrayRef<TemplateArgument> Args) const {
4982  assert(!Template.getAsDependentTemplateName() &&
4983  "No dependent template names here!");
4984 
4985  // Look through qualified template names.
4986  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4987  Template = TemplateName(QTN->getUnderlyingTemplate());
4988 
4989  // Build the canonical template specialization type.
4990  TemplateName CanonTemplate = getCanonicalTemplateName(Template);
4992  ::getCanonicalTemplateArguments(*this, Args, CanonArgs);
4993 
4994  // Determine whether this canonical template specialization type already
4995  // exists.
4996  llvm::FoldingSetNodeID ID;
4997  TemplateSpecializationType::Profile(ID, CanonTemplate,
4998  CanonArgs, *this);
4999 
5000  void *InsertPos = nullptr;
5002  = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
5003 
5004  if (!Spec) {
5005  // Allocate a new canonical template specialization type.
5006  void *Mem = Allocate((sizeof(TemplateSpecializationType) +
5007  sizeof(TemplateArgument) * CanonArgs.size()),
5008  TypeAlignment);
5009  Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
5010  CanonArgs,
5011  QualType(), QualType());
5012  Types.push_back(Spec);
5013  TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
5014  }
5015 
5016  assert(Spec->isDependentType() &&
5017  "Non-dependent template-id type must have a canonical type");
5018  return QualType(Spec, 0);
5019 }
5020 
5022  NestedNameSpecifier *NNS,
5023  QualType NamedType,
5024  TagDecl *OwnedTagDecl) const {
5025  llvm::FoldingSetNodeID ID;
5026  ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);
5027 
5028  void *InsertPos = nullptr;
5029  ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
5030  if (T)
5031  return QualType(T, 0);
5032 
5033  QualType Canon = NamedType;
5034  if (!Canon.isCanonical()) {
5035  Canon = getCanonicalType(NamedType);
5036  ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
5037  assert(!CheckT && "Elaborated canonical type broken");
5038  (void)CheckT;
5039  }
5040 
5041  void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl),
5042  TypeAlignment);
5043  T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);
5044 
5045  Types.push_back(T);
5046  ElaboratedTypes.InsertNode(T, InsertPos);
5047  return QualType(T, 0);
5048 }
5049 
5050 QualType
5052  llvm::FoldingSetNodeID ID;
5053  ParenType::Profile(ID, InnerType);
5054 
5055  void *InsertPos = nullptr;
5056  ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
5057  if (T)
5058  return QualType(T, 0);
5059 
5060  QualType Canon = InnerType;
5061  if (!Canon.isCanonical()) {
5062  Canon = getCanonicalType(InnerType);
5063  ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
5064  assert(!CheckT && "Paren canonical type broken");
5065  (void)CheckT;
5066  }
5067 
5068  T = new (*this, TypeAlignment) ParenType(InnerType, Canon);
5069  Types.push_back(T);
5070  ParenTypes.InsertNode(T, InsertPos);
5071  return QualType(T, 0);
5072 }
5073 
5074 QualType
5076  const IdentifierInfo *MacroII) const {
5077  QualType Canon = UnderlyingTy;
5078  if (!Canon.isCanonical())
5079  Canon = getCanonicalType(UnderlyingTy);
5080 
5081  auto *newType = new (*this, TypeAlignment)
5082  MacroQualifiedType(UnderlyingTy, Canon, MacroII);
5083  Types.push_back(newType);
5084  return QualType(newType, 0);
5085 }
5086 
5088  NestedNameSpecifier *NNS,
5089  const IdentifierInfo *Name,
5090  QualType Canon) const {
5091  if (Canon.isNull()) {
5093  if (CanonNNS != NNS)
5094  Canon = getDependentNameType(Keyword, CanonNNS, Name);
5095  }
5096 
5097  llvm::FoldingSetNodeID ID;
5098  DependentNameType::Profile(ID, Keyword, NNS, Name);
5099 
5100  void *InsertPos = nullptr;
5102  = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
5103  if (T)
5104  return QualType(T, 0);
5105 
5106  T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon);
5107  Types.push_back(T);
5108  DependentNameTypes.InsertNode(T, InsertPos);
5109  return QualType(T, 0);
5110 }
5111 
5112 QualType
5114  ElaboratedTypeKeyword Keyword,
5115  NestedNameSpecifier *NNS,
5116  const IdentifierInfo *Name,
5117  const TemplateArgumentListInfo &Args) const {
5118  // TODO: avoid this copy
5120  for (unsigned I = 0, E = Args.size(); I != E; ++I)
5121  ArgCopy.push_back(Args[I].getArgument());
5122  return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy);
5123 }
5124 
5125 QualType
5127  ElaboratedTypeKeyword Keyword,
5128  NestedNameSpecifier *NNS,
5129  const IdentifierInfo *Name,
5130  ArrayRef<TemplateArgument> Args) const {
5131  assert((!NNS || NNS->isDependent()) &&
5132  "nested-name-specifier must be dependent");
5133 
5134  llvm::FoldingSetNodeID ID;
5135  DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
5136  Name, Args);
5137 
5138  void *InsertPos = nullptr;
5140  = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
5141  if (T)
5142  return QualType(T, 0);
5143 
5145 
5146  ElaboratedTypeKeyword CanonKeyword = Keyword;
5147  if (Keyword == ETK_None) CanonKeyword = ETK_Typename;
5148 
5150  bool AnyNonCanonArgs =
5151  ::getCanonicalTemplateArguments(*this, Args, CanonArgs);
5152 
5153  QualType Canon;
5154  if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
5155  Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
5156  Name,
5157  CanonArgs);
5158 
5159  // Find the insert position again.
5160  [[maybe_unused]] auto *Nothing =
5161  DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
5162  assert(!Nothing && "canonical type broken");
5163  }
5164 
5165  void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
5166  sizeof(TemplateArgument) * Args.size()),
5167  TypeAlignment);
5168  T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
5169  Name, Args, Canon);
5170  Types.push_back(T);
5171  DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
5172  return QualType(T, 0);
5173 }
5174 
5176  TemplateArgument Arg;
5177  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
5178  QualType ArgType = getTypeDeclType(TTP);
5179  if (TTP->isParameterPack())
5180  ArgType = getPackExpansionType(ArgType, None);
5181 
5182  Arg = TemplateArgument(ArgType);
5183  } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
5184  QualType T =
5185  NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this);
5186  // For class NTTPs, ensure we include the 'const' so the type matches that
5187  // of a real template argument.
5188  // FIXME: It would be more faithful to model this as something like an
5189  // lvalue-to-rvalue conversion applied to a const-qualified lvalue.
5190  if (T->isRecordType())
5191  T.addConst();
5192  Expr *E = new (*this) DeclRefExpr(
5193  *this, NTTP, /*RefersToEnclosingVariableOrCapture*/ false, T,
5194  Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());
5195 
5196  if (NTTP->isParameterPack())
5197  E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(),
5198  None);
5199  Arg = TemplateArgument(E);
5200  } else {
5201  auto *TTP = cast<TemplateTemplateParmDecl>(Param);
5202  if (TTP->isParameterPack())
5204  else
5205  Arg = TemplateArgument(TemplateName(TTP));
5206  }
5207 
5208  if (Param->isTemplateParameterPack())
5209  Arg = TemplateArgument::CreatePackCopy(*this, Arg);
5210 
5211  return Arg;
5212 }
5213 
5214 void
5217  Args.reserve(Args.size() + Params->size());
5218 
5219  for (NamedDecl *Param : *Params)
5220  Args.push_back(getInjectedTemplateArg(Param));
5221 }
5222 
5224  Optional<unsigned> NumExpansions,
5225  bool ExpectPackInType) {
5226  assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) &&
5227  "Pack expansions must expand one or more parameter packs");
5228 
5229  llvm::FoldingSetNodeID ID;
5230  PackExpansionType::Profile(ID, Pattern, NumExpansions);
5231 
5232  void *InsertPos = nullptr;
5233  PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
5234  if (T)
5235  return QualType(T, 0);
5236 
5237  QualType Canon;
5238  if (!Pattern.isCanonical()) {
5239  Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions,
5240  /*ExpectPackInType=*/false);
5241 
5242  // Find the insert position again, in case we inserted an element into
5243  // PackExpansionTypes and invalidated our insert position.
5244  PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
5245  }
5246 
5247  T = new (*this, TypeAlignment)
5248  PackExpansionType(Pattern, Canon, NumExpansions);
5249  Types.push_back(T);
5250  PackExpansionTypes.InsertNode(T, InsertPos);
5251  return QualType(T, 0);
5252 }
5253 
5254 /// CmpProtocolNames - Comparison predicate for sorting protocols
5255 /// alphabetically.
5256 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
5257  ObjCProtocolDecl *const *RHS) {
5258  return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName());
5259 }
5260 
5262  if (Protocols.empty()) return true;
5263 
5264  if (Protocols[0]->getCanonicalDecl() != Protocols[0])
5265  return false;
5266 
5267  for (unsigned i = 1; i != Protocols.size(); ++i)
5268  if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
5269  Protocols[i]->getCanonicalDecl() != Protocols[i])
5270  return false;
5271  return true;
5272 }
5273 
5274 static void
5276  // Sort protocols, keyed by name.
5277  llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames);
5278 
5279  // Canonicalize.
5280  for (ObjCProtocolDecl *&P : Protocols)
5281  P = P->getCanonicalDecl();
5282 
5283  // Remove duplicates.
5284  auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end());
5285  Protocols.erase(ProtocolsEnd, Protocols.end());
5286 }
5287 
5289  ObjCProtocolDecl * const *Protocols,
5290  unsigned NumProtocols) const {
5291  return getObjCObjectType(BaseType, {},
5292  llvm::makeArrayRef(Protocols, NumProtocols),
5293  /*isKindOf=*/false);
5294 }
5295 
5297  QualType baseType,
5298  ArrayRef<QualType> typeArgs,
5299  ArrayRef<ObjCProtocolDecl *> protocols,
5300  bool isKindOf) const {
5301  // If the base type is an interface and there aren't any protocols or
5302  // type arguments to add, then the interface type will do just fine.
5303  if (typeArgs.empty() && protocols.empty() && !isKindOf &&
5304  isa<ObjCInterfaceType>(baseType))
5305  return baseType;
5306 
5307  // Look in the folding set for an existing type.
5308  llvm::FoldingSetNodeID ID;
5309  ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf);
5310  void *InsertPos = nullptr;
5311  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
5312  return QualType(QT, 0);
5313 
5314  // Determine the type arguments to be used for canonicalization,
5315  // which may be explicitly specified here or written on the base
5316  // type.
5317  ArrayRef<QualType> effectiveTypeArgs = typeArgs;
5318  if (effectiveTypeArgs.empty()) {
5319  if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
5320  effectiveTypeArgs = baseObject->getTypeArgs();
5321  }
5322 
5323  // Build the canonical type, which has the canonical base type and a
5324  // sorted-and-uniqued list of protocols and the type arguments
5325  // canonicalized.
5326  QualType canonical;
5327  bool typeArgsAreCanonical = llvm::all_of(
5328  effectiveTypeArgs, [&](QualType type) { return type.isCanonical(); });
5329  bool protocolsSorted = areSortedAndUniqued(protocols);
5330  if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
5331  // Determine the canonical type arguments.
5332  ArrayRef<QualType> canonTypeArgs;
5333  SmallVector<QualType, 4> canonTypeArgsVec;
5334  if (!typeArgsAreCanonical) {
5335  canonTypeArgsVec.reserve(effectiveTypeArgs.size());
5336  for (auto typeArg : effectiveTypeArgs)
5337  canonTypeArgsVec.push_back(getCanonicalType(typeArg));
5338  canonTypeArgs = canonTypeArgsVec;
5339  } else {
5340  canonTypeArgs = effectiveTypeArgs;
5341  }
5342 
5343  ArrayRef<ObjCProtocolDecl *> canonProtocols;
5344  SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
5345  if (!protocolsSorted) {
5346  canonProtocolsVec.append(protocols.begin(), protocols.end());
5347  SortAndUniqueProtocols(canonProtocolsVec);
5348  canonProtocols = canonProtocolsVec;
5349  } else {
5350  canonProtocols = protocols;
5351  }
5352 
5353  canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs,
5354  canonProtocols, isKindOf);
5355 
5356  // Regenerate InsertPos.
5357  ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
5358  }
5359 
5360  unsigned size = sizeof(ObjCObjectTypeImpl);
5361  size += typeArgs.size() * sizeof(QualType);
5362  size += protocols.size() * sizeof(ObjCProtocolDecl *);
5363  void *mem = Allocate(size, TypeAlignment);
5364  auto *T =
5365  new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
5366  isKindOf);
5367 
5368  Types.push_back(T);
5369  ObjCObjectTypes.InsertNode(T, InsertPos);
5370  return QualType(T, 0);
5371 }
5372 
5373 /// Apply Objective-C protocol qualifiers to the given type.
5374 /// If this is for the canonical type of a type parameter, we can apply
5375 /// protocol qualifiers on the ObjCObjectPointerType.
5376 QualType
5378  ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
5379  bool allowOnPointerType) const {
5380  hasError = false;
5381 
5382  if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) {
5383  return getObjCTypeParamType(objT->getDecl(), protocols);
5384  }
5385 
5386  // Apply protocol qualifiers to ObjCObjectPointerType.
5387  if (allowOnPointerType) {
5388  if (const auto *objPtr =
5389  dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) {
5390  const ObjCObjectType *objT = objPtr->getObjectType();
5391  // Merge protocol lists and construct ObjCObjectType.
5392  SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
5393  protocolsVec.append(objT->qual_begin(),
5394  objT->qual_end());
5395  protocolsVec.append(protocols.begin(), protocols.end());
5396  ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;