clang  17.0.0git
ASTContext.cpp
Go to the documentation of this file.
1 //===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the ASTContext interface.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "clang/AST/ASTContext.h"
14 #include "CXXABI.h"
15 #include "Interp/Context.h"
16 #include "clang/AST/APValue.h"
17 #include "clang/AST/ASTConcept.h"
20 #include "clang/AST/Attr.h"
21 #include "clang/AST/AttrIterator.h"
22 #include "clang/AST/CharUnits.h"
23 #include "clang/AST/Comment.h"
24 #include "clang/AST/Decl.h"
25 #include "clang/AST/DeclBase.h"
26 #include "clang/AST/DeclCXX.h"
28 #include "clang/AST/DeclObjC.h"
29 #include "clang/AST/DeclOpenMP.h"
30 #include "clang/AST/DeclTemplate.h"
33 #include "clang/AST/Expr.h"
34 #include "clang/AST/ExprCXX.h"
35 #include "clang/AST/ExprConcepts.h"
37 #include "clang/AST/Mangle.h"
42 #include "clang/AST/RecordLayout.h"
43 #include "clang/AST/Stmt.h"
44 #include "clang/AST/TemplateBase.h"
45 #include "clang/AST/TemplateName.h"
46 #include "clang/AST/Type.h"
47 #include "clang/AST/TypeLoc.h"
51 #include "clang/Basic/Builtins.h"
55 #include "clang/Basic/LLVM.h"
57 #include "clang/Basic/Linkage.h"
58 #include "clang/Basic/Module.h"
63 #include "clang/Basic/Specifiers.h"
65 #include "clang/Basic/TargetInfo.h"
66 #include "clang/Basic/XRayLists.h"
67 #include "llvm/ADT/APFixedPoint.h"
68 #include "llvm/ADT/APInt.h"
69 #include "llvm/ADT/APSInt.h"
70 #include "llvm/ADT/ArrayRef.h"
71 #include "llvm/ADT/DenseMap.h"
72 #include "llvm/ADT/DenseSet.h"
73 #include "llvm/ADT/FoldingSet.h"
74 #include "llvm/ADT/PointerUnion.h"
75 #include "llvm/ADT/STLExtras.h"
76 #include "llvm/ADT/SmallPtrSet.h"
77 #include "llvm/ADT/SmallVector.h"
78 #include "llvm/ADT/StringExtras.h"
79 #include "llvm/ADT/StringRef.h"
80 #include "llvm/ADT/Triple.h"
81 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
82 #include "llvm/Support/Capacity.h"
83 #include "llvm/Support/Casting.h"
84 #include "llvm/Support/Compiler.h"
85 #include "llvm/Support/ErrorHandling.h"
86 #include "llvm/Support/MD5.h"
87 #include "llvm/Support/MathExtras.h"
88 #include "llvm/Support/raw_ostream.h"
89 #include <algorithm>
90 #include <cassert>
91 #include <cstddef>
92 #include <cstdint>
93 #include <cstdlib>
94 #include <map>
95 #include <memory>
96 #include <optional>
97 #include <string>
98 #include <tuple>
99 #include <utility>
100 
101 using namespace clang;
102 
112 };
113 
114 /// \returns location that is relevant when searching for Doc comments related
115 /// to \p D.
117  SourceManager &SourceMgr) {
118  assert(D);
119 
120  // User can not attach documentation to implicit declarations.
121  if (D->isImplicit())
122  return {};
123 
124  // User can not attach documentation to implicit instantiations.
125  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
126  if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
127  return {};
128  }
129 
130  if (const auto *VD = dyn_cast<VarDecl>(D)) {
131  if (VD->isStaticDataMember() &&
132  VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
133  return {};
134  }
135 
136  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
137  if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
138  return {};
139  }
140 
141  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
142  TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
143  if (TSK == TSK_ImplicitInstantiation ||
144  TSK == TSK_Undeclared)
145  return {};
146  }
147 
148  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
149  if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
150  return {};
151  }
152  if (const auto *TD = dyn_cast<TagDecl>(D)) {
153  // When tag declaration (but not definition!) is part of the
154  // decl-specifier-seq of some other declaration, it doesn't get comment
155  if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
156  return {};
157  }
158  // TODO: handle comments for function parameters properly.
159  if (isa<ParmVarDecl>(D))
160  return {};
161 
162  // TODO: we could look up template parameter documentation in the template
163  // documentation.
164  if (isa<TemplateTypeParmDecl>(D) ||
165  isa<NonTypeTemplateParmDecl>(D) ||
166  isa<TemplateTemplateParmDecl>(D))
167  return {};
168 
169  // Find declaration location.
170  // For Objective-C declarations we generally don't expect to have multiple
171  // declarators, thus use declaration starting location as the "declaration
172  // location".
173  // For all other declarations multiple declarators are used quite frequently,
174  // so we use the location of the identifier as the "declaration location".
175  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
176  isa<ObjCPropertyDecl>(D) ||
177  isa<RedeclarableTemplateDecl>(D) ||
178  isa<ClassTemplateSpecializationDecl>(D) ||
179  // Allow association with Y across {} in `typedef struct X {} Y`.
180  isa<TypedefDecl>(D))
181  return D->getBeginLoc();
182 
183  const SourceLocation DeclLoc = D->getLocation();
184  if (DeclLoc.isMacroID()) {
185  // There are (at least) three types of macros we care about here.
186  //
187  // 1. Macros that are used in the definition of a type outside the macro,
188  // with a comment attached at the macro call site.
189  // ```
190  // #define MAKE_NAME(Foo) Name##Foo
191  //
192  // /// Comment is here, where we use the macro.
193  // struct MAKE_NAME(Foo) {
194  // int a;
195  // int b;
196  // };
197  // ```
198  // 2. Macros that define whole things along with the comment.
199  // ```
200  // #define MAKE_METHOD(name) \
201  // /** Comment is here, inside the macro. */ \
202  // void name() {}
203  //
204  // struct S {
205  // MAKE_METHOD(f)
206  // }
207  // ```
208  // 3. Macros that both declare a type and name a decl outside the macro.
209  // ```
210  // /// Comment is here, where we use the macro.
211  // typedef NS_ENUM(NSInteger, Size) {
212  // SizeWidth,
213  // SizeHeight
214  // };
215  // ```
216  // In this case NS_ENUM declares am enum type, and uses the same name for
217  // the typedef declaration that appears outside the macro. The comment
218  // here should be applied to both declarations inside and outside the
219  // macro.
220  //
221  // We have found a Decl name that comes from inside a macro, but
222  // Decl::getLocation() returns the place where the macro is being called.
223  // If the declaration (and not just the name) resides inside the macro,
224  // then we want to map Decl::getLocation() into the macro to where the
225  // declaration and its attached comment (if any) were written.
226  //
227  // This mapping into the macro is done by mapping the location to its
228  // spelling location, however even if the declaration is inside a macro,
229  // the name's spelling can come from a macro argument (case 2 above). In
230  // this case mapping the location to the spelling location finds the
231  // argument's position (at `f` in MAKE_METHOD(`f`) above), which is not
232  // where the declaration and its comment are located.
233  //
234  // To avoid this issue, we make use of Decl::getBeginLocation() instead.
235  // While the declaration's position is where the name is written, the
236  // comment is always attached to the begining of the declaration, not to
237  // the name.
238  //
239  // In the first case, the begin location of the decl is outside the macro,
240  // at the location of `typedef`. This is where the comment is found as
241  // well. The begin location is not inside a macro, so it's spelling
242  // location is the same.
243  //
244  // In the second case, the begin location of the decl is the call to the
245  // macro, at `MAKE_METHOD`. However its spelling location is inside the
246  // the macro at the location of `void`. This is where the comment is found
247  // again.
248  //
249  // In the third case, there's no correct single behaviour. We want to use
250  // the comment outside the macro for the definition that's inside the macro.
251  // There is also a definition outside the macro, and we want the comment to
252  // apply to both. The cases we care about here is NS_ENUM() and
253  // NS_OPTIONS(). In general, if an enum is defined inside a macro, we should
254  // try to find the comment there.
255 
256  // This is handling case 3 for NS_ENUM() and NS_OPTIONS(), which define
257  // enum types inside the macro.
258  if (isa<EnumDecl>(D)) {
259  SourceLocation MacroCallLoc = SourceMgr.getExpansionLoc(DeclLoc);
260  if (auto BufferRef =
261  SourceMgr.getBufferOrNone(SourceMgr.getFileID(MacroCallLoc));
262  BufferRef.has_value()) {
263  llvm::StringRef buffer = BufferRef->getBuffer().substr(
264  SourceMgr.getFileOffset(MacroCallLoc));
265  if (buffer.starts_with("NS_ENUM(") ||
266  buffer.starts_with("NS_OPTIONS(")) {
267  // We want to use the comment on the call to NS_ENUM and NS_OPTIONS
268  // macros for the types defined inside the macros, which is at the
269  // expansion location.
270  return MacroCallLoc;
271  }
272  }
273  }
274  return SourceMgr.getSpellingLoc(D->getBeginLoc());
275  }
276 
277  return DeclLoc;
278 }
279 
281  const Decl *D, const SourceLocation RepresentativeLocForDecl,
282  const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
283  // If the declaration doesn't map directly to a location in a file, we
284  // can't find the comment.
285  if (RepresentativeLocForDecl.isInvalid() ||
286  !RepresentativeLocForDecl.isFileID())
287  return nullptr;
288 
289  // If there are no comments anywhere, we won't find anything.
290  if (CommentsInTheFile.empty())
291  return nullptr;
292 
293  // Decompose the location for the declaration and find the beginning of the
294  // file buffer.
295  const std::pair<FileID, unsigned> DeclLocDecomp =
296  SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);
297 
298  // Slow path.
299  auto OffsetCommentBehindDecl =
300  CommentsInTheFile.lower_bound(DeclLocDecomp.second);
301 
302  // First check whether we have a trailing comment.
303  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
304  RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
305  if ((CommentBehindDecl->isDocumentation() ||
306  LangOpts.CommentOpts.ParseAllComments) &&
307  CommentBehindDecl->isTrailingComment() &&
308  (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
309  isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {
310 
311  // Check that Doxygen trailing comment comes after the declaration, starts
312  // on the same line and in the same file as the declaration.
313  if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
314  Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
315  OffsetCommentBehindDecl->first)) {
316  return CommentBehindDecl;
317  }
318  }
319  }
320 
321  // The comment just after the declaration was not a trailing comment.
322  // Let's look at the previous comment.
323  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
324  return nullptr;
325 
326  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
327  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;
328 
329  // Check that we actually have a non-member Doxygen comment.
330  if (!(CommentBeforeDecl->isDocumentation() ||
331  LangOpts.CommentOpts.ParseAllComments) ||
332  CommentBeforeDecl->isTrailingComment())
333  return nullptr;
334 
335  // Decompose the end of the comment.
336  const unsigned CommentEndOffset =
337  Comments.getCommentEndOffset(CommentBeforeDecl);
338 
339  // Get the corresponding buffer.
340  bool Invalid = false;
341  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
342  &Invalid).data();
343  if (Invalid)
344  return nullptr;
345 
346  // Extract text between the comment and declaration.
347  StringRef Text(Buffer + CommentEndOffset,
348  DeclLocDecomp.second - CommentEndOffset);
349 
350  // There should be no other declarations or preprocessor directives between
351  // comment and declaration.
352  if (Text.find_first_of(";{}#@") != StringRef::npos)
353  return nullptr;
354 
355  return CommentBeforeDecl;
356 }
357 
359  const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);
360 
361  // If the declaration doesn't map directly to a location in a file, we
362  // can't find the comment.
363  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
364  return nullptr;
365 
366  if (ExternalSource && !CommentsLoaded) {
367  ExternalSource->ReadComments();
368  CommentsLoaded = true;
369  }
370 
371  if (Comments.empty())
372  return nullptr;
373 
374  const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
375  if (!File.isValid()) {
376  return nullptr;
377  }
378  const auto CommentsInThisFile = Comments.getCommentsInFile(File);
379  if (!CommentsInThisFile || CommentsInThisFile->empty())
380  return nullptr;
381 
382  return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile);
383 }
384 
386  assert(LangOpts.RetainCommentsFromSystemHeaders ||
387  !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
388  Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
389 }
390 
391 /// If we have a 'templated' declaration for a template, adjust 'D' to
392 /// refer to the actual template.
393 /// If we have an implicit instantiation, adjust 'D' to refer to template.
394 static const Decl &adjustDeclToTemplate(const Decl &D) {
395  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
396  // Is this function declaration part of a function template?
397  if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
398  return *FTD;
399 
400  // Nothing to do if function is not an implicit instantiation.
401  if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
402  return D;
403 
404  // Function is an implicit instantiation of a function template?
405  if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
406  return *FTD;
407 
408  // Function is instantiated from a member definition of a class template?
409  if (const FunctionDecl *MemberDecl =
411  return *MemberDecl;
412 
413  return D;
414  }
415  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
416  // Static data member is instantiated from a member definition of a class
417  // template?
418  if (VD->isStaticDataMember())
419  if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
420  return *MemberDecl;
421 
422  return D;
423  }
424  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
425  // Is this class declaration part of a class template?
426  if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
427  return *CTD;
428 
429  // Class is an implicit instantiation of a class template or partial
430  // specialization?
431  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
432  if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
433  return D;
434  llvm::PointerUnion<ClassTemplateDecl *,
436  PU = CTSD->getSpecializedTemplateOrPartial();
437  return PU.is<ClassTemplateDecl *>()
438  ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
439  : *static_cast<const Decl *>(
441  }
442 
443  // Class is instantiated from a member definition of a class template?
444  if (const MemberSpecializationInfo *Info =
445  CRD->getMemberSpecializationInfo())
446  return *Info->getInstantiatedFrom();
447 
448  return D;
449  }
450  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
451  // Enum is instantiated from a member definition of a class template?
452  if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
453  return *MemberDecl;
454 
455  return D;
456  }
457  // FIXME: Adjust alias templates?
458  return D;
459 }
460 
462  const Decl *D,
463  const Decl **OriginalDecl) const {
464  if (!D) {
465  if (OriginalDecl)
466  OriginalDecl = nullptr;
467  return nullptr;
468  }
469 
470  D = &adjustDeclToTemplate(*D);
471 
472  // Any comment directly attached to D?
473  {
474  auto DeclComment = DeclRawComments.find(D);
475  if (DeclComment != DeclRawComments.end()) {
476  if (OriginalDecl)
477  *OriginalDecl = D;
478  return DeclComment->second;
479  }
480  }
481 
482  // Any comment attached to any redeclaration of D?
483  const Decl *CanonicalD = D->getCanonicalDecl();
484  if (!CanonicalD)
485  return nullptr;
486 
487  {
488  auto RedeclComment = RedeclChainComments.find(CanonicalD);
489  if (RedeclComment != RedeclChainComments.end()) {
490  if (OriginalDecl)
491  *OriginalDecl = RedeclComment->second;
492  auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
493  assert(CommentAtRedecl != DeclRawComments.end() &&
494  "This decl is supposed to have comment attached.");
495  return CommentAtRedecl->second;
496  }
497  }
498 
499  // Any redeclarations of D that we haven't checked for comments yet?
500  // We can't use DenseMap::iterator directly since it'd get invalid.
501  auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
502  auto LookupRes = CommentlessRedeclChains.find(CanonicalD);
503  if (LookupRes != CommentlessRedeclChains.end())
504  return LookupRes->second;
505  return nullptr;
506  }();
507 
508  for (const auto Redecl : D->redecls()) {
509  assert(Redecl);
510  // Skip all redeclarations that have been checked previously.
511  if (LastCheckedRedecl) {
512  if (LastCheckedRedecl == Redecl) {
513  LastCheckedRedecl = nullptr;
514  }
515  continue;
516  }
517  const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
518  if (RedeclComment) {
519  cacheRawCommentForDecl(*Redecl, *RedeclComment);
520  if (OriginalDecl)
521  *OriginalDecl = Redecl;
522  return RedeclComment;
523  }
524  CommentlessRedeclChains[CanonicalD] = Redecl;
525  }
526 
527  if (OriginalDecl)
528  *OriginalDecl = nullptr;
529  return nullptr;
530 }
531 
533  const RawComment &Comment) const {
534  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
535  DeclRawComments.try_emplace(&OriginalD, &Comment);
536  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
537  RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
538  CommentlessRedeclChains.erase(CanonicalDecl);
539 }
540 
541 static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
543  const DeclContext *DC = ObjCMethod->getDeclContext();
544  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
545  const ObjCInterfaceDecl *ID = IMD->getClassInterface();
546  if (!ID)
547  return;
548  // Add redeclared method here.
549  for (const auto *Ext : ID->known_extensions()) {
550  if (ObjCMethodDecl *RedeclaredMethod =
551  Ext->getMethod(ObjCMethod->getSelector(),
552  ObjCMethod->isInstanceMethod()))
553  Redeclared.push_back(RedeclaredMethod);
554  }
555  }
556 }
557 
559  const Preprocessor *PP) {
560  if (Comments.empty() || Decls.empty())
561  return;
562 
563  FileID File;
564  for (Decl *D : Decls) {
565  SourceLocation Loc = D->getLocation();
566  if (Loc.isValid()) {
567  // See if there are any new comments that are not attached to a decl.
568  // The location doesn't have to be precise - we care only about the file.
569  File = SourceMgr.getDecomposedLoc(Loc).first;
570  break;
571  }
572  }
573 
574  if (File.isInvalid())
575  return;
576 
577  auto CommentsInThisFile = Comments.getCommentsInFile(File);
578  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
579  CommentsInThisFile->rbegin()->second->isAttached())
580  return;
581 
582  // There is at least one comment not attached to a decl.
583  // Maybe it should be attached to one of Decls?
584  //
585  // Note that this way we pick up not only comments that precede the
586  // declaration, but also comments that *follow* the declaration -- thanks to
587  // the lookahead in the lexer: we've consumed the semicolon and looked
588  // ahead through comments.
589 
590  for (const Decl *D : Decls) {
591  assert(D);
592  if (D->isInvalidDecl())
593  continue;
594 
595  D = &adjustDeclToTemplate(*D);
596 
597  const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);
598 
599  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
600  continue;
601 
602  if (DeclRawComments.count(D) > 0)
603  continue;
604 
605  if (RawComment *const DocComment =
606  getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) {
608  comments::FullComment *FC = DocComment->parse(*this, PP, D);
609  ParsedComments[D->getCanonicalDecl()] = FC;
610  }
611  }
612 }
613 
615  const Decl *D) const {
616  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
617  ThisDeclInfo->CommentDecl = D;
618  ThisDeclInfo->IsFilled = false;
619  ThisDeclInfo->fill();
620  ThisDeclInfo->CommentDecl = FC->getDecl();
621  if (!ThisDeclInfo->TemplateParameters)
622  ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
623  comments::FullComment *CFC =
624  new (*this) comments::FullComment(FC->getBlocks(),
625  ThisDeclInfo);
626  return CFC;
627 }
628 
631  return RC ? RC->parse(*this, nullptr, D) : nullptr;
632 }
633 
635  const Decl *D,
636  const Preprocessor *PP) const {
637  if (!D || D->isInvalidDecl())
638  return nullptr;
639  D = &adjustDeclToTemplate(*D);
640 
641  const Decl *Canonical = D->getCanonicalDecl();
642  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
643  ParsedComments.find(Canonical);
644 
645  if (Pos != ParsedComments.end()) {
646  if (Canonical != D) {
647  comments::FullComment *FC = Pos->second;
649  return CFC;
650  }
651  return Pos->second;
652  }
653 
654  const Decl *OriginalDecl = nullptr;
655 
656  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
657  if (!RC) {
658  if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
660  const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
661  if (OMD && OMD->isPropertyAccessor())
662  if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
663  if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
664  return cloneFullComment(FC, D);
665  if (OMD)
666  addRedeclaredMethods(OMD, Overridden);
667  getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
668  for (unsigned i = 0, e = Overridden.size(); i < e; i++)
669  if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
670  return cloneFullComment(FC, D);
671  }
672  else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
673  // Attach any tag type's documentation to its typedef if latter
674  // does not have one of its own.
675  QualType QT = TD->getUnderlyingType();
676  if (const auto *TT = QT->getAs<TagType>())
677  if (const Decl *TD = TT->getDecl())
678  if (comments::FullComment *FC = getCommentForDecl(TD, PP))
679  return cloneFullComment(FC, D);
680  }
681  else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
682  while (IC->getSuperClass()) {
683  IC = IC->getSuperClass();
684  if (comments::FullComment *FC = getCommentForDecl(IC, PP))
685  return cloneFullComment(FC, D);
686  }
687  }
688  else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
689  if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
690  if (comments::FullComment *FC = getCommentForDecl(IC, PP))
691  return cloneFullComment(FC, D);
692  }
693  else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
694  if (!(RD = RD->getDefinition()))
695  return nullptr;
696  // Check non-virtual bases.
697  for (const auto &I : RD->bases()) {
698  if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
699  continue;
700  QualType Ty = I.getType();
701  if (Ty.isNull())
702  continue;
703  if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
704  if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
705  continue;
706 
707  if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
708  return cloneFullComment(FC, D);
709  }
710  }
711  // Check virtual bases.
712  for (const auto &I : RD->vbases()) {
713  if (I.getAccessSpecifier() != AS_public)
714  continue;
715  QualType Ty = I.getType();
716  if (Ty.isNull())
717  continue;
718  if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
719  if (!(VirtualBase= VirtualBase->getDefinition()))
720  continue;
721  if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
722  return cloneFullComment(FC, D);
723  }
724  }
725  }
726  return nullptr;
727  }
728 
729  // If the RawComment was attached to other redeclaration of this Decl, we
730  // should parse the comment in context of that other Decl. This is important
731  // because comments can contain references to parameter names which can be
732  // different across redeclarations.
733  if (D != OriginalDecl && OriginalDecl)
734  return getCommentForDecl(OriginalDecl, PP);
735 
736  comments::FullComment *FC = RC->parse(*this, PP, D);
737  ParsedComments[Canonical] = FC;
738  return FC;
739 }
740 
741 void
742 ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
743  const ASTContext &C,
744  TemplateTemplateParmDecl *Parm) {
745  ID.AddInteger(Parm->getDepth());
746  ID.AddInteger(Parm->getPosition());
747  ID.AddBoolean(Parm->isParameterPack());
748 
750  ID.AddInteger(Params->size());
752  PEnd = Params->end();
753  P != PEnd; ++P) {
754  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
755  ID.AddInteger(0);
756  ID.AddBoolean(TTP->isParameterPack());
757  const TypeConstraint *TC = TTP->getTypeConstraint();
758  ID.AddBoolean(TC != nullptr);
759  if (TC)
761  /*Canonical=*/true);
762  if (TTP->isExpandedParameterPack()) {
763  ID.AddBoolean(true);
764  ID.AddInteger(TTP->getNumExpansionParameters());
765  } else
766  ID.AddBoolean(false);
767  continue;
768  }
769 
770  if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
771  ID.AddInteger(1);
772  ID.AddBoolean(NTTP->isParameterPack());
773  const Expr *TC = NTTP->getPlaceholderTypeConstraint();
774  ID.AddBoolean(TC != nullptr);
775  ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
776  if (TC)
777  TC->Profile(ID, C, /*Canonical=*/true);
778  if (NTTP->isExpandedParameterPack()) {
779  ID.AddBoolean(true);
780  ID.AddInteger(NTTP->getNumExpansionTypes());
781  for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
782  QualType T = NTTP->getExpansionType(I);
783  ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
784  }
785  } else
786  ID.AddBoolean(false);
787  continue;
788  }
789 
790  auto *TTP = cast<TemplateTemplateParmDecl>(*P);
791  ID.AddInteger(2);
792  Profile(ID, C, TTP);
793  }
794  Expr *RequiresClause = Parm->getTemplateParameters()->getRequiresClause();
795  ID.AddBoolean(RequiresClause != nullptr);
796  if (RequiresClause)
797  RequiresClause->Profile(ID, C, /*Canonical=*/true);
798 }
799 
800 static Expr *
802  QualType ConstrainedType) {
803  // This is a bit ugly - we need to form a new immediately-declared
804  // constraint that references the new parameter; this would ideally
805  // require semantic analysis (e.g. template<C T> struct S {}; - the
806  // converted arguments of C<T> could be an argument pack if C is
807  // declared as template<typename... T> concept C = ...).
808  // We don't have semantic analysis here so we dig deep into the
809  // ready-made constraint expr and change the thing manually.
811  if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC))
812  CSE = cast<ConceptSpecializationExpr>(Fold->getLHS());
813  else
814  CSE = cast<ConceptSpecializationExpr>(IDC);
815  ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments();
817  NewConverted.reserve(OldConverted.size());
818  if (OldConverted.front().getKind() == TemplateArgument::Pack) {
819  // The case:
820  // template<typename... T> concept C = true;
821  // template<C<int> T> struct S; -> constraint is C<{T, int}>
822  NewConverted.push_back(ConstrainedType);
823  llvm::append_range(NewConverted,
824  OldConverted.front().pack_elements().drop_front(1));
825  TemplateArgument NewPack(NewConverted);
826 
827  NewConverted.clear();
828  NewConverted.push_back(NewPack);
829  assert(OldConverted.size() == 1 &&
830  "Template parameter pack should be the last parameter");
831  } else {
832  assert(OldConverted.front().getKind() == TemplateArgument::Type &&
833  "Unexpected first argument kind for immediately-declared "
834  "constraint");
835  NewConverted.push_back(ConstrainedType);
836  llvm::append_range(NewConverted, OldConverted.drop_front(1));
837  }
839  C, CSE->getNamedConcept()->getDeclContext(),
840  CSE->getNamedConcept()->getLocation(), NewConverted);
841 
843  C, CSE->getNamedConcept(), CSD, nullptr, CSE->isInstantiationDependent(),
845 
846  if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC))
847  NewIDC = new (C) CXXFoldExpr(
848  OrigFold->getType(), /*Callee*/ nullptr, SourceLocation(), NewIDC,
849  BinaryOperatorKind::BO_LAnd, SourceLocation(), /*RHS=*/nullptr,
850  SourceLocation(), /*NumExpansions=*/std::nullopt);
851  return NewIDC;
852 }
853 
855 ASTContext::getCanonicalTemplateTemplateParmDecl(
856  TemplateTemplateParmDecl *TTP) const {
857  // Check if we already have a canonical template template parameter.
858  llvm::FoldingSetNodeID ID;
859  CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
860  void *InsertPos = nullptr;
861  CanonicalTemplateTemplateParm *Canonical
862  = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
863  if (Canonical)
864  return Canonical->getParam();
865 
866  // Build a canonical template parameter list.
868  SmallVector<NamedDecl *, 4> CanonParams;
869  CanonParams.reserve(Params->size());
871  PEnd = Params->end();
872  P != PEnd; ++P) {
873  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
876  TTP->getDepth(), TTP->getIndex(), nullptr, false,
877  TTP->isParameterPack(), TTP->hasTypeConstraint(),
879  ? std::optional<unsigned>(TTP->getNumExpansionParameters())
880  : std::nullopt);
881  if (const auto *TC = TTP->getTypeConstraint()) {
882  QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0);
884  *this, TC->getImmediatelyDeclaredConstraint(),
885  ParamAsArgument);
886  NewTTP->setTypeConstraint(
888  DeclarationNameInfo(TC->getNamedConcept()->getDeclName(),
889  SourceLocation()), /*FoundDecl=*/nullptr,
890  // Actually canonicalizing a TemplateArgumentLoc is difficult so we
891  // simply omit the ArgsAsWritten
892  TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC);
893  }
894  CanonParams.push_back(NewTTP);
895  } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
896  QualType T = getCanonicalType(NTTP->getType());
899  if (NTTP->isExpandedParameterPack()) {
900  SmallVector<QualType, 2> ExpandedTypes;
901  SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
902  for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
903  ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
904  ExpandedTInfos.push_back(
905  getTrivialTypeSourceInfo(ExpandedTypes.back()));
906  }
907 
909  SourceLocation(),
910  SourceLocation(),
911  NTTP->getDepth(),
912  NTTP->getPosition(), nullptr,
913  T,
914  TInfo,
915  ExpandedTypes,
916  ExpandedTInfos);
917  } else {
919  SourceLocation(),
920  SourceLocation(),
921  NTTP->getDepth(),
922  NTTP->getPosition(), nullptr,
923  T,
924  NTTP->isParameterPack(),
925  TInfo);
926  }
927  if (AutoType *AT = T->getContainedAutoType()) {
928  if (AT->isConstrained()) {
931  *this, NTTP->getPlaceholderTypeConstraint(), T));
932  }
933  }
934  CanonParams.push_back(Param);
935 
936  } else
937  CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
938  cast<TemplateTemplateParmDecl>(*P)));
939  }
940 
941  Expr *CanonRequiresClause = nullptr;
942  if (Expr *RequiresClause = TTP->getTemplateParameters()->getRequiresClause())
943  CanonRequiresClause = RequiresClause;
944 
945  TemplateTemplateParmDecl *CanonTTP
947  SourceLocation(), TTP->getDepth(),
948  TTP->getPosition(),
949  TTP->isParameterPack(),
950  nullptr,
952  SourceLocation(),
953  CanonParams,
954  SourceLocation(),
955  CanonRequiresClause));
956 
957  // Get the new insert position for the node we care about.
958  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
959  assert(!Canonical && "Shouldn't be in the map!");
960  (void)Canonical;
961 
962  // Create the canonical template template parameter entry.
963  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
964  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
965  return CanonTTP;
966 }
967 
969  auto Kind = getTargetInfo().getCXXABI().getKind();
970  return getLangOpts().CXXABI.value_or(Kind);
971 }
972 
973 CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
974  if (!LangOpts.CPlusPlus) return nullptr;
975 
976  switch (getCXXABIKind()) {
977  case TargetCXXABI::AppleARM64:
978  case TargetCXXABI::Fuchsia:
979  case TargetCXXABI::GenericARM: // Same as Itanium at this level
980  case TargetCXXABI::iOS:
981  case TargetCXXABI::WatchOS:
982  case TargetCXXABI::GenericAArch64:
983  case TargetCXXABI::GenericMIPS:
984  case TargetCXXABI::GenericItanium:
985  case TargetCXXABI::WebAssembly:
986  case TargetCXXABI::XL:
987  return CreateItaniumCXXABI(*this);
988  case TargetCXXABI::Microsoft:
989  return CreateMicrosoftCXXABI(*this);
990  }
991  llvm_unreachable("Invalid CXXABI type!");
992 }
993 
995  if (!InterpContext) {
996  InterpContext.reset(new interp::Context(*this));
997  }
998  return *InterpContext.get();
999 }
1000 
1002  if (!ParentMapCtx)
1003  ParentMapCtx.reset(new ParentMapContext(*this));
1004  return *ParentMapCtx.get();
1005 }
1006 
1008  const LangOptions &LangOpts) {
1009  switch (LangOpts.getAddressSpaceMapMangling()) {
1011  return TI.useAddressSpaceMapMangling();
1012  case LangOptions::ASMM_On:
1013  return true;
1014  case LangOptions::ASMM_Off:
1015  return false;
1016  }
1017  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
1018 }
1019 
1021  IdentifierTable &idents, SelectorTable &sels,
1022  Builtin::Context &builtins, TranslationUnitKind TUKind)
1023  : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
1024  FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
1025  TemplateSpecializationTypes(this_()),
1026  DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
1027  SubstTemplateTemplateParmPacks(this_()),
1028  CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
1029  NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
1030  XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
1031  LangOpts.XRayNeverInstrumentFiles,
1032  LangOpts.XRayAttrListFiles, SM)),
1033  ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
1034  PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
1035  BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
1036  Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
1037  CompCategories(this_()), LastSDM(nullptr, 0) {
1039 }
1040 
1042  // Release the DenseMaps associated with DeclContext objects.
1043  // FIXME: Is this the ideal solution?
1044  ReleaseDeclContextMaps();
1045 
1046  // Call all of the deallocation functions on all of their targets.
1047  for (auto &Pair : Deallocations)
1048  (Pair.first)(Pair.second);
1049  Deallocations.clear();
1050 
1051  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
1052  // because they can contain DenseMaps.
1053  for (llvm::DenseMap<const ObjCContainerDecl*,
1054  const ASTRecordLayout*>::iterator
1055  I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
1056  // Increment in loop to prevent using deallocated memory.
1057  if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
1058  R->Destroy(*this);
1059  ObjCLayouts.clear();
1060 
1061  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
1062  I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
1063  // Increment in loop to prevent using deallocated memory.
1064  if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
1065  R->Destroy(*this);
1066  }
1067  ASTRecordLayouts.clear();
1068 
1069  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
1070  AEnd = DeclAttrs.end();
1071  A != AEnd; ++A)
1072  A->second->~AttrVec();
1073  DeclAttrs.clear();
1074 
1075  for (const auto &Value : ModuleInitializers)
1076  Value.second->~PerModuleInitializers();
1077  ModuleInitializers.clear();
1078 }
1079 
1081 
1082 void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
1083  TraversalScope = TopLevelDecls;
1085 }
1086 
1087 void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
1088  Deallocations.push_back({Callback, Data});
1089 }
1090 
1091 void
1093  ExternalSource = std::move(Source);
1094 }
1095 
1097  llvm::errs() << "\n*** AST Context Stats:\n";
1098  llvm::errs() << " " << Types.size() << " types total.\n";
1099 
1100  unsigned counts[] = {
1101 #define TYPE(Name, Parent) 0,
1102 #define ABSTRACT_TYPE(Name, Parent)
1103 #include "clang/AST/TypeNodes.inc"
1104  0 // Extra
1105  };
1106 
1107  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
1108  Type *T = Types[i];
1109  counts[(unsigned)T->getTypeClass()]++;
1110  }
1111 
1112  unsigned Idx = 0;
1113  unsigned TotalBytes = 0;
1114 #define TYPE(Name, Parent) \
1115  if (counts[Idx]) \
1116  llvm::errs() << " " << counts[Idx] << " " << #Name \
1117  << " types, " << sizeof(Name##Type) << " each " \
1118  << "(" << counts[Idx] * sizeof(Name##Type) \
1119  << " bytes)\n"; \
1120  TotalBytes += counts[Idx] * sizeof(Name##Type); \
1121  ++Idx;
1122 #define ABSTRACT_TYPE(Name, Parent)
1123 #include "clang/AST/TypeNodes.inc"
1124 
1125  llvm::errs() << "Total bytes = " << TotalBytes << "\n";
1126 
1127  // Implicit special member functions.
1128  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
1130  << " implicit default constructors created\n";
1131  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
1133  << " implicit copy constructors created\n";
1134  if (getLangOpts().CPlusPlus)
1135  llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
1137  << " implicit move constructors created\n";
1138  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
1140  << " implicit copy assignment operators created\n";
1141  if (getLangOpts().CPlusPlus)
1142  llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
1144  << " implicit move assignment operators created\n";
1145  llvm::errs() << NumImplicitDestructorsDeclared << "/"
1147  << " implicit destructors created\n";
1148 
1149  if (ExternalSource) {
1150  llvm::errs() << "\n";
1151  ExternalSource->PrintStats();
1152  }
1153 
1154  BumpAlloc.PrintStats();
1155 }
1156 
1158  bool NotifyListeners) {
1159  if (NotifyListeners)
1160  if (auto *Listener = getASTMutationListener())
1162 
1163  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
1164 }
1165 
1167  auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
1168  if (It == MergedDefModules.end())
1169  return;
1170 
1171  auto &Merged = It->second;
1173  for (Module *&M : Merged)
1174  if (!Found.insert(M).second)
1175  M = nullptr;
1176  llvm::erase_value(Merged, nullptr);
1177 }
1178 
1181  auto MergedIt =
1182  MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
1183  if (MergedIt == MergedDefModules.end())
1184  return std::nullopt;
1185  return MergedIt->second;
1186 }
1187 
1188 void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
1189  if (LazyInitializers.empty())
1190  return;
1191 
1192  auto *Source = Ctx.getExternalSource();
1193  assert(Source && "lazy initializers but no external source");
1194 
1195  auto LazyInits = std::move(LazyInitializers);
1196  LazyInitializers.clear();
1197 
1198  for (auto ID : LazyInits)
1199  Initializers.push_back(Source->GetExternalDecl(ID));
1200 
1201  assert(LazyInitializers.empty() &&
1202  "GetExternalDecl for lazy module initializer added more inits");
1203 }
1204 
1206  // One special case: if we add a module initializer that imports another
1207  // module, and that module's only initializer is an ImportDecl, simplify.
1208  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
1209  auto It = ModuleInitializers.find(ID->getImportedModule());
1210 
1211  // Maybe the ImportDecl does nothing at all. (Common case.)
1212  if (It == ModuleInitializers.end())
1213  return;
1214 
1215  // Maybe the ImportDecl only imports another ImportDecl.
1216  auto &Imported = *It->second;
1217  if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
1218  Imported.resolve(*this);
1219  auto *OnlyDecl = Imported.Initializers.front();
1220  if (isa<ImportDecl>(OnlyDecl))
1221  D = OnlyDecl;
1222  }
1223  }
1224 
1225  auto *&Inits = ModuleInitializers[M];
1226  if (!Inits)
1227  Inits = new (*this) PerModuleInitializers;
1228  Inits->Initializers.push_back(D);
1229 }
1230 
1232  auto *&Inits = ModuleInitializers[M];
1233  if (!Inits)
1234  Inits = new (*this) PerModuleInitializers;
1235  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
1236  IDs.begin(), IDs.end());
1237 }
1238 
1240  auto It = ModuleInitializers.find(M);
1241  if (It == ModuleInitializers.end())
1242  return std::nullopt;
1243 
1244  auto *Inits = It->second;
1245  Inits->resolve(*this);
1246  return Inits->Initializers;
1247 }
1248 
1250  if (!ExternCContext)
1251  ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());
1252 
1253  return ExternCContext;
1254 }
1255 
1258  const IdentifierInfo *II) const {
1259  auto *BuiltinTemplate =
1261  BuiltinTemplate->setImplicit();
1262  getTranslationUnitDecl()->addDecl(BuiltinTemplate);
1263 
1264  return BuiltinTemplate;
1265 }
1266 
1269  if (!MakeIntegerSeqDecl)
1270  MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
1272  return MakeIntegerSeqDecl;
1273 }
1274 
1277  if (!TypePackElementDecl)
1278  TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
1280  return TypePackElementDecl;
1281 }
1282 
1284  RecordDecl::TagKind TK) const {
1285  SourceLocation Loc;
1286  RecordDecl *NewDecl;
1287  if (getLangOpts().CPlusPlus)
1288  NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
1289  Loc, &Idents.get(Name));
1290  else
1291  NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
1292  &Idents.get(Name));
1293  NewDecl->setImplicit();
1294  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
1295  const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
1296  return NewDecl;
1297 }
1298 
1300  StringRef Name) const {
1302  TypedefDecl *NewDecl = TypedefDecl::Create(
1303  const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
1304  SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
1305  NewDecl->setImplicit();
1306  return NewDecl;
1307 }
1308 
1310  if (!Int128Decl)
1311  Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
1312  return Int128Decl;
1313 }
1314 
1316  if (!UInt128Decl)
1317  UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
1318  return UInt128Decl;
1319 }
1320 
1321 void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
1322  auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
1323  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
1324  Types.push_back(Ty);
1325 }
1326 
1328  const TargetInfo *AuxTarget) {
1329  assert((!this->Target || this->Target == &Target) &&
1330  "Incorrect target reinitialization");
1331  assert(VoidTy.isNull() && "Context reinitialized?");
1332 
1333  this->Target = &Target;
1334  this->AuxTarget = AuxTarget;
1335 
1336  ABI.reset(createCXXABI(Target));
1337  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);
1338 
1339  // C99 6.2.5p19.
1340  InitBuiltinType(VoidTy, BuiltinType::Void);
1341 
1342  // C99 6.2.5p2.
1343  InitBuiltinType(BoolTy, BuiltinType::Bool);
1344  // C99 6.2.5p3.
1345  if (LangOpts.CharIsSigned)
1346  InitBuiltinType(CharTy, BuiltinType::Char_S);
1347  else
1348  InitBuiltinType(CharTy, BuiltinType::Char_U);
1349  // C99 6.2.5p4.
1350  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
1351  InitBuiltinType(ShortTy, BuiltinType::Short);
1352  InitBuiltinType(IntTy, BuiltinType::Int);
1353  InitBuiltinType(LongTy, BuiltinType::Long);
1354  InitBuiltinType(LongLongTy, BuiltinType::LongLong);
1355 
1356  // C99 6.2.5p6.
1357  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
1358  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
1359  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
1360  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
1361  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);
1362 
1363  // C99 6.2.5p10.
1364  InitBuiltinType(FloatTy, BuiltinType::Float);
1365  InitBuiltinType(DoubleTy, BuiltinType::Double);
1366  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);
1367 
1368  // GNU extension, __float128 for IEEE quadruple precision
1369  InitBuiltinType(Float128Ty, BuiltinType::Float128);
1370 
1371  // __ibm128 for IBM extended precision
1372  InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128);
1373 
1374  // C11 extension ISO/IEC TS 18661-3
1375  InitBuiltinType(Float16Ty, BuiltinType::Float16);
1376 
1377  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
1378  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
1379  InitBuiltinType(AccumTy, BuiltinType::Accum);
1380  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
1381  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
1382  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
1383  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
1384  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
1385  InitBuiltinType(FractTy, BuiltinType::Fract);
1386  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
1387  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
1388  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
1389  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
1390  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
1391  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
1392  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
1393  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
1394  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
1395  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
1396  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
1397  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
1398  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
1399  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
1400  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
1401  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);
1402 
1403  // GNU extension, 128-bit integers.
1404  InitBuiltinType(Int128Ty, BuiltinType::Int128);
1405  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
1406 
1407  // C++ 3.9.1p5
1408  if (TargetInfo::isTypeSigned(Target.getWCharType()))
1409  InitBuiltinType(WCharTy, BuiltinType::WChar_S);
1410  else // -fshort-wchar makes wchar_t be unsigned.
1411  InitBuiltinType(WCharTy, BuiltinType::WChar_U);
1412  if (LangOpts.CPlusPlus && LangOpts.WChar)
1413  WideCharTy = WCharTy;
1414  else {
1415  // C99 (or C++ using -fno-wchar).
1416  WideCharTy = getFromTargetType(Target.getWCharType());
1417  }
1418 
1419  WIntTy = getFromTargetType(Target.getWIntType());
1420 
1421  // C++20 (proposed)
1422  InitBuiltinType(Char8Ty, BuiltinType::Char8);
1423 
1424  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1425  InitBuiltinType(Char16Ty, BuiltinType::Char16);
1426  else // C99
1427  Char16Ty = getFromTargetType(Target.getChar16Type());
1428 
1429  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1430  InitBuiltinType(Char32Ty, BuiltinType::Char32);
1431  else // C99
1432  Char32Ty = getFromTargetType(Target.getChar32Type());
1433 
1434  // Placeholder type for type-dependent expressions whose type is
1435  // completely unknown. No code should ever check a type against
1436  // DependentTy and users should never see it; however, it is here to
1437  // help diagnose failures to properly check for type-dependent
1438  // expressions.
1439  InitBuiltinType(DependentTy, BuiltinType::Dependent);
1440 
1441  // Placeholder type for functions.
1442  InitBuiltinType(OverloadTy, BuiltinType::Overload);
1443 
1444  // Placeholder type for bound members.
1445  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);
1446 
1447  // Placeholder type for pseudo-objects.
1448  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);
1449 
1450  // "any" type; useful for debugger-like clients.
1451  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);
1452 
1453  // Placeholder type for unbridged ARC casts.
1454  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);
1455 
1456  // Placeholder type for builtin functions.
1457  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
1458 
1459  // Placeholder type for OMP array sections.
1460  if (LangOpts.OpenMP) {
1461  InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
1462  InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
1463  InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
1464  }
1465  if (LangOpts.MatrixTypes)
1466  InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);
1467 
1468  // Builtin types for 'id', 'Class', and 'SEL'.
1469  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
1470  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
1471  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);
1472 
1473  if (LangOpts.OpenCL) {
1474 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
1475  InitBuiltinType(SingletonId, BuiltinType::Id);
1476 #include "clang/Basic/OpenCLImageTypes.def"
1477 
1478  InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
1479  InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
1480  InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
1481  InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
1482  InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);
1483 
1484 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
1485  InitBuiltinType(Id##Ty, BuiltinType::Id);
1486 #include "clang/Basic/OpenCLExtensionTypes.def"
1487  }
1488 
1489  if (Target.hasAArch64SVETypes()) {
1490 #define SVE_TYPE(Name, Id, SingletonId) \
1491  InitBuiltinType(SingletonId, BuiltinType::Id);
1492 #include "clang/Basic/AArch64SVEACLETypes.def"
1493  }
1494 
1495  if (Target.getTriple().isPPC64()) {
1496 #define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
1497  InitBuiltinType(Id##Ty, BuiltinType::Id);
1498 #include "clang/Basic/PPCTypes.def"
1499 #define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
1500  InitBuiltinType(Id##Ty, BuiltinType::Id);
1501 #include "clang/Basic/PPCTypes.def"
1502  }
1503 
1504  if (Target.hasRISCVVTypes()) {
1505 #define RVV_TYPE(Name, Id, SingletonId) \
1506  InitBuiltinType(SingletonId, BuiltinType::Id);
1507 #include "clang/Basic/RISCVVTypes.def"
1508  }
1509 
1510  // Builtin type for __objc_yes and __objc_no
1511  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
1512  SignedCharTy : BoolTy);
1513 
1514  ObjCConstantStringType = QualType();
1515 
1516  ObjCSuperType = QualType();
1517 
1518  // void * type
1519  if (LangOpts.OpenCLGenericAddressSpace) {
1520  auto Q = VoidTy.getQualifiers();
1524  } else {
1526  }
1527 
1528  // nullptr type (C++0x 2.14.7)
1529  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
1530 
1531  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
1532  InitBuiltinType(HalfTy, BuiltinType::Half);
1533 
1534  InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);
1535 
1536  // Builtin type used to help define __builtin_va_list.
1537  VaListTagDecl = nullptr;
1538 
1539  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
1540  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
1543  }
1544 }
1545 
1547  return SourceMgr.getDiagnostics();
1548 }
1549 
1551  AttrVec *&Result = DeclAttrs[D];
1552  if (!Result) {
1553  void *Mem = Allocate(sizeof(AttrVec));
1554  Result = new (Mem) AttrVec;
1555  }
1556 
1557  return *Result;
1558 }
1559 
1560 /// Erase the attributes corresponding to the given declaration.
1562  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
1563  if (Pos != DeclAttrs.end()) {
1564  Pos->second->~AttrVec();
1565  DeclAttrs.erase(Pos);
1566  }
1567 }
1568 
1569 // FIXME: Remove ?
1572  assert(Var->isStaticDataMember() && "Not a static data member");
1574  .dyn_cast<MemberSpecializationInfo *>();
1575 }
1576 
1579  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
1580  TemplateOrInstantiation.find(Var);
1581  if (Pos == TemplateOrInstantiation.end())
1582  return {};
1583 
1584  return Pos->second;
1585 }
1586 
1587 void
1590  SourceLocation PointOfInstantiation) {
1591  assert(Inst->isStaticDataMember() && "Not a static data member");
1592  assert(Tmpl->isStaticDataMember() && "Not a static data member");
1594  Tmpl, TSK, PointOfInstantiation));
1595 }
1596 
1597 void
1600  assert(!TemplateOrInstantiation[Inst] &&
1601  "Already noted what the variable was instantiated from");
1602  TemplateOrInstantiation[Inst] = TSI;
1603 }
1604 
1605 NamedDecl *
1607  auto Pos = InstantiatedFromUsingDecl.find(UUD);
1608  if (Pos == InstantiatedFromUsingDecl.end())
1609  return nullptr;
1610 
1611  return Pos->second;
1612 }
1613 
1614 void
1616  assert((isa<UsingDecl>(Pattern) ||
1617  isa<UnresolvedUsingValueDecl>(Pattern) ||
1618  isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
1619  "pattern decl is not a using decl");
1620  assert((isa<UsingDecl>(Inst) ||
1621  isa<UnresolvedUsingValueDecl>(Inst) ||
1622  isa<UnresolvedUsingTypenameDecl>(Inst)) &&
1623  "instantiation did not produce a using decl");
1624  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
1625  InstantiatedFromUsingDecl[Inst] = Pattern;
1626 }
1627 
1628 UsingEnumDecl *
1630  auto Pos = InstantiatedFromUsingEnumDecl.find(UUD);
1631  if (Pos == InstantiatedFromUsingEnumDecl.end())
1632  return nullptr;
1633 
1634  return Pos->second;
1635 }
1636 
1638  UsingEnumDecl *Pattern) {
1639  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
1640  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
1641 }
1642 
1645  llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
1646  = InstantiatedFromUsingShadowDecl.find(Inst);
1647  if (Pos == InstantiatedFromUsingShadowDecl.end())
1648  return nullptr;
1649 
1650  return Pos->second;
1651 }
1652 
1653 void
1655  UsingShadowDecl *Pattern) {
1656  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
1657  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
1658 }
1659 
1661  llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
1662  = InstantiatedFromUnnamedFieldDecl.find(Field);
1663  if (Pos == InstantiatedFromUnnamedFieldDecl.end())
1664  return nullptr;
1665 
1666  return Pos->second;
1667 }
1668 
1670  FieldDecl *Tmpl) {
1671  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
1672  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
1673  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1674  "Already noted what unnamed field was instantiated from");
1675 
1676  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1677 }
1678 
1681  return overridden_methods(Method).begin();
1682 }
1683 
1686  return overridden_methods(Method).end();
1687 }
1688 
1689 unsigned
1691  auto Range = overridden_methods(Method);
1692  return Range.end() - Range.begin();
1693 }
1694 
1697  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
1698  OverriddenMethods.find(Method->getCanonicalDecl());
1699  if (Pos == OverriddenMethods.end())
1700  return overridden_method_range(nullptr, nullptr);
1701  return overridden_method_range(Pos->second.begin(), Pos->second.end());
1702 }
1703 
1705  const CXXMethodDecl *Overridden) {
1706  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
1707  OverriddenMethods[Method].push_back(Overridden);
1708 }
1709 
1711  const NamedDecl *D,
1712  SmallVectorImpl<const NamedDecl *> &Overridden) const {
1713  assert(D);
1714 
1715  if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
1716  Overridden.append(overridden_methods_begin(CXXMethod),
1717  overridden_methods_end(CXXMethod));
1718  return;
1719  }
1720 
1721  const auto *Method = dyn_cast<ObjCMethodDecl>(D);
1722  if (!Method)
1723  return;
1724 
1726  Method->getOverriddenMethods(OverDecls);
1727  Overridden.append(OverDecls.begin(), OverDecls.end());
1728 }
1729 
1731  assert(!Import->getNextLocalImport() &&
1732  "Import declaration already in the chain");
1733  assert(!Import->isFromASTFile() && "Non-local import declaration");
1734  if (!FirstLocalImport) {
1735  FirstLocalImport = Import;
1736  LastLocalImport = Import;
1737  return;
1738  }
1739 
1740  LastLocalImport->setNextLocalImport(Import);
1741  LastLocalImport = Import;
1742 }
1743 
1744 //===----------------------------------------------------------------------===//
1745 // Type Sizing and Analysis
1746 //===----------------------------------------------------------------------===//
1747 
1748 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1749 /// scalar floating point type.
1750 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1751  switch (T->castAs<BuiltinType>()->getKind()) {
1752  default:
1753  llvm_unreachable("Not a floating point type!");
1754  case BuiltinType::BFloat16:
1755  return Target->getBFloat16Format();
1756  case BuiltinType::Float16:
1757  return Target->getHalfFormat();
1758  case BuiltinType::Half:
1759  // For HLSL, when the native half type is disabled, half will be treat as
1760  // float.
1761  if (getLangOpts().HLSL)
1762  if (getLangOpts().NativeHalfType)
1763  return Target->getHalfFormat();
1764  else
1765  return Target->getFloatFormat();
1766  else
1767  return Target->getHalfFormat();
1768  case BuiltinType::Float: return Target->getFloatFormat();
1769  case BuiltinType::Double: return Target->getDoubleFormat();
1770  case BuiltinType::Ibm128:
1771  return Target->getIbm128Format();
1772  case BuiltinType::LongDouble:
1773  if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
1774  return AuxTarget->getLongDoubleFormat();
1775  return Target->getLongDoubleFormat();
1776  case BuiltinType::Float128:
1777  if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
1778  return AuxTarget->getFloat128Format();
1779  return Target->getFloat128Format();
1780  }
1781 }
1782 
/// Return the alignment of declaration \p D, in character units.
///
/// Starts from the target's char width (in bits), folds in any alignment
/// attribute on the declaration, then — unless the attribute alone governs —
/// the preferred alignment of the declared type, target large-array
/// alignment, minimum global alignment, and, for fields, the alignment the
/// field actually receives inside its record layout. \p ForAlignof selects
/// alignof-style semantics: a reference is treated as its referenced type
/// rather than as a pointer, and the large-array / global-storage
/// adjustments are skipped.
CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  // Minimum possible alignment: one character (value is in bits until the
  // final toCharUnitsFromBits conversion).
  unsigned Align = Target->getCharWidth();

  bool UseAlignAttrOnly = false;
  if (unsigned AlignFromAttr = D->getMaxAlignment()) {
    Align = AlignFromAttr;

    // __attribute__((aligned)) can increase or decrease alignment
    // *except* on a struct or struct member, where it only increases
    // alignment unless 'packed' is also specified.
    //
    // It is an error for alignas to decrease alignment, so we can
    // ignore that possibility; Sema should diagnose it.
    if (isa<FieldDecl>(D)) {
      UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
        cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
    } else {
      UseAlignAttrOnly = true;
    }
  }
  else if (isa<FieldDecl>(D))
    // No alignment attribute at all: a packed field (or a field of a packed
    // record) still ignores its type's natural alignment below.
    UseAlignAttrOnly =
      D->hasAttr<PackedAttr>() ||
      cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();

  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
    QualType T = VD->getType();
    if (const auto *RT = T->getAs<ReferenceType>()) {
      // alignof looks through the reference to the referenced type;
      // otherwise a reference is laid out like a pointer to it.
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          // VLAs always count as "large"; constant arrays only when their
          // size reaches the target's threshold.
          if (isa<VariableArrayType>(arrayType))
            Align = std::max(Align, Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(arrayType) &&
                   MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
            Align = std::max(Align, Target->getLargeArrayAlign());
        }
      }
      Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
      // The __unaligned qualifier forces the minimum (char) alignment.
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
      if (const auto *VD = dyn_cast<VarDecl>(D)) {
        if (VD->hasGlobalStorage() && !ForAlignof) {
          uint64_t TypeSize = getTypeSize(T.getTypePtr());
          Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
        }
      }
    }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // a max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(Align, FieldAlign);
      }
    }
  }

  // Some targets have hard limitation on the maximum requestable alignment in
  // aligned attribute for static variables.
  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
  const auto *VD = dyn_cast<VarDecl>(D);
  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
    Align = std::min(Align, MaxAlignedAttr);

  return toCharUnitsFromBits(Align);
}
1885 
1887  return toCharUnitsFromBits(Target->getExnObjectAlignment());
1888 }
1889 
1890 // getTypeInfoDataSizeInChars - Return the size of a type, in
1891 // chars. If the type is a record, its data size is returned. This is
1892 // the size of the memcpy that's performed when assigning this type
1893 // using a trivial copy/move assignment operator.
1896 
1897  // In C++, objects can sometimes be allocated into the tail padding
1898  // of a base-class subobject. We decide whether that's possible
1899  // during class layout, so here we can just trust the layout results.
1900  if (getLangOpts().CPlusPlus) {
1901  if (const auto *RT = T->getAs<RecordType>()) {
1902  const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
1903  Info.Width = layout.getDataSize();
1904  }
1905  }
1906 
1907  return Info;
1908 }
1909 
1910 /// getConstantArrayInfoInChars - Performing the computation in CharUnits
1911 /// instead of in bits prevents overflowing the uint64_t for some large arrays.
1914  const ConstantArrayType *CAT) {
1915  TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType());
1916  uint64_t Size = CAT->getSize().getZExtValue();
1917  assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=
1918  (uint64_t)(-1)/Size) &&
1919  "Overflow in array type char size evaluation");
1920  uint64_t Width = EltInfo.Width.getQuantity() * Size;
1921  unsigned Align = EltInfo.Align.getQuantity();
1922  if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
1924  Width = llvm::alignTo(Width, Align);
1925  return TypeInfoChars(CharUnits::fromQuantity(Width),
1926  CharUnits::fromQuantity(Align),
1927  EltInfo.AlignRequirement);
1928 }
1929 
1931  if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
1932  return getConstantArrayInfoInChars(*this, CAT);
1933  TypeInfo Info = getTypeInfo(T);
1936 }
1937 
1939  return getTypeInfoInChars(T.getTypePtr());
1940 }
1941 
1943  // HLSL doesn't promote all small integer types to int, it
1944  // just uses the rank-based promotion rules for all types.
1945  if (getLangOpts().HLSL)
1946  return false;
1947 
1948  if (const auto *BT = T->getAs<BuiltinType>())
1949  switch (BT->getKind()) {
1950  case BuiltinType::Bool:
1951  case BuiltinType::Char_S:
1952  case BuiltinType::Char_U:
1953  case BuiltinType::SChar:
1954  case BuiltinType::UChar:
1955  case BuiltinType::Short:
1956  case BuiltinType::UShort:
1957  case BuiltinType::WChar_S:
1958  case BuiltinType::WChar_U:
1959  case BuiltinType::Char8:
1960  case BuiltinType::Char16:
1961  case BuiltinType::Char32:
1962  return true;
1963  default:
1964  return false;
1965  }
1966 
1967  // Enumerated types are promotable to their compatible integer types
1968  // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
1969  if (const auto *ET = T->getAs<EnumType>()) {
1970  if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() ||
1971  ET->getDecl()->isScoped())
1972  return false;
1973 
1974  return true;
1975  }
1976 
1977  return false;
1978 }
1979 
1982 }
1983 
1985  return isAlignmentRequired(T.getTypePtr());
1986 }
1987 
1989  bool NeedsPreferredAlignment) const {
1990  // An alignment on a typedef overrides anything else.
1991  if (const auto *TT = T->getAs<TypedefType>())
1992  if (unsigned Align = TT->getDecl()->getMaxAlignment())
1993  return Align;
1994 
1995  // If we have an (array of) complete type, we're done.
1996  T = getBaseElementType(T);
1997  if (!T->isIncompleteType())
1998  return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T);
1999 
2000  // If we had an array type, its element type might be a typedef
2001  // type with an alignment attribute.
2002  if (const auto *TT = T->getAs<TypedefType>())
2003  if (unsigned Align = TT->getDecl()->getMaxAlignment())
2004  return Align;
2005 
2006  // Otherwise, see if the declaration of the type had an attribute.
2007  if (const auto *TT = T->getAs<TagType>())
2008  return TT->getDecl()->getMaxAlignment();
2009 
2010  return 0;
2011 }
2012 
2014  TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
2015  if (I != MemoizedTypeInfo.end())
2016  return I->second;
2017 
2018  // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
2019  TypeInfo TI = getTypeInfoImpl(T);
2020  MemoizedTypeInfo[T] = TI;
2021  return TI;
2022 }
2023 
2024 /// getTypeInfoImpl - Return the size of the specified type, in bits. This
2025 /// method does not work on incomplete types.
2026 ///
2027 /// FIXME: Pointers into different addr spaces could have different sizes and
2028 /// alignment requirements: getPointerInfo should take an AddrSpace, this
2029 /// should take a QualType, &c.
2030 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
2031  uint64_t Width = 0;
2032  unsigned Align = 8;
2034  LangAS AS = LangAS::Default;
2035  switch (T->getTypeClass()) {
2036 #define TYPE(Class, Base)
2037 #define ABSTRACT_TYPE(Class, Base)
2038 #define NON_CANONICAL_TYPE(Class, Base)
2039 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
2040 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
2041  case Type::Class: \
2042  assert(!T->isDependentType() && "should not see dependent types here"); \
2043  return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
2044 #include "clang/AST/TypeNodes.inc"
2045  llvm_unreachable("Should not see dependent types");
2046 
2047  case Type::FunctionNoProto:
2048  case Type::FunctionProto:
2049  // GCC extension: alignof(function) = 32 bits
2050  Width = 0;
2051  Align = 32;
2052  break;
2053 
2054  case Type::IncompleteArray:
2055  case Type::VariableArray:
2056  case Type::ConstantArray: {
2057  // Model non-constant sized arrays as size zero, but track the alignment.
2058  uint64_t Size = 0;
2059  if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
2060  Size = CAT->getSize().getZExtValue();
2061 
2062  TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType());
2063  assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
2064  "Overflow in array type bit size evaluation");
2065  Width = EltInfo.Width * Size;
2066  Align = EltInfo.Align;
2067  AlignRequirement = EltInfo.AlignRequirement;
2068  if (!getTargetInfo().getCXXABI().isMicrosoft() ||
2069  getTargetInfo().getPointerWidth(LangAS::Default) == 64)
2070  Width = llvm::alignTo(Width, Align);
2071  break;
2072  }
2073 
2074  case Type::ExtVector:
2075  case Type::Vector: {
2076  const auto *VT = cast<VectorType>(T);
2077  TypeInfo EltInfo = getTypeInfo(VT->getElementType());
2078  Width = VT->isExtVectorBoolType() ? VT->getNumElements()
2079  : EltInfo.Width * VT->getNumElements();
2080  // Enforce at least byte size and alignment.
2081  Width = std::max<unsigned>(8, Width);
2082  Align = std::max<unsigned>(8, Width);
2083 
2084  // If the alignment is not a power of 2, round up to the next power of 2.
2085  // This happens for non-power-of-2 length vectors.
2086  if (Align & (Align-1)) {
2087  Align = llvm::NextPowerOf2(Align);
2088  Width = llvm::alignTo(Width, Align);
2089  }
2090  // Adjust the alignment based on the target max.
2091  uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
2092  if (TargetVectorAlign && TargetVectorAlign < Align)
2093  Align = TargetVectorAlign;
2094  if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector)
2095  // Adjust the alignment for fixed-length SVE vectors. This is important
2096  // for non-power-of-2 vector lengths.
2097  Align = 128;
2098  else if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
2099  // Adjust the alignment for fixed-length SVE predicates.
2100  Align = 16;
2101  break;
2102  }
2103 
2104  case Type::ConstantMatrix: {
2105  const auto *MT = cast<ConstantMatrixType>(T);
2106  TypeInfo ElementInfo = getTypeInfo(MT->getElementType());
2107  // The internal layout of a matrix value is implementation defined.
2108  // Initially be ABI compatible with arrays with respect to alignment and
2109  // size.
2110  Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
2111  Align = ElementInfo.Align;
2112  break;
2113  }
2114 
2115  case Type::Builtin:
2116  switch (cast<BuiltinType>(T)->getKind()) {
2117  default: llvm_unreachable("Unknown builtin type!");
2118  case BuiltinType::Void:
2119  // GCC extension: alignof(void) = 8 bits.
2120  Width = 0;
2121  Align = 8;
2122  break;
2123  case BuiltinType::Bool:
2124  Width = Target->getBoolWidth();
2125  Align = Target->getBoolAlign();
2126  break;
2127  case BuiltinType::Char_S:
2128  case BuiltinType::Char_U:
2129  case BuiltinType::UChar:
2130  case BuiltinType::SChar:
2131  case BuiltinType::Char8:
2132  Width = Target->getCharWidth();
2133  Align = Target->getCharAlign();
2134  break;
2135  case BuiltinType::WChar_S:
2136  case BuiltinType::WChar_U:
2137  Width = Target->getWCharWidth();
2138  Align = Target->getWCharAlign();
2139  break;
2140  case BuiltinType::Char16:
2141  Width = Target->getChar16Width();
2142  Align = Target->getChar16Align();
2143  break;
2144  case BuiltinType::Char32:
2145  Width = Target->getChar32Width();
2146  Align = Target->getChar32Align();
2147  break;
2148  case BuiltinType::UShort:
2149  case BuiltinType::Short:
2150  Width = Target->getShortWidth();
2151  Align = Target->getShortAlign();
2152  break;
2153  case BuiltinType::UInt:
2154  case BuiltinType::Int:
2155  Width = Target->getIntWidth();
2156  Align = Target->getIntAlign();
2157  break;
2158  case BuiltinType::ULong:
2159  case BuiltinType::Long:
2160  Width = Target->getLongWidth();
2161  Align = Target->getLongAlign();
2162  break;
2163  case BuiltinType::ULongLong:
2164  case BuiltinType::LongLong:
2165  Width = Target->getLongLongWidth();
2166  Align = Target->getLongLongAlign();
2167  break;
2168  case BuiltinType::Int128:
2169  case BuiltinType::UInt128:
2170  Width = 128;
2171  Align = Target->getInt128Align();
2172  break;
2173  case BuiltinType::ShortAccum:
2174  case BuiltinType::UShortAccum:
2175  case BuiltinType::SatShortAccum:
2176  case BuiltinType::SatUShortAccum:
2177  Width = Target->getShortAccumWidth();
2178  Align = Target->getShortAccumAlign();
2179  break;
2180  case BuiltinType::Accum:
2181  case BuiltinType::UAccum:
2182  case BuiltinType::SatAccum:
2183  case BuiltinType::SatUAccum:
2184  Width = Target->getAccumWidth();
2185  Align = Target->getAccumAlign();
2186  break;
2187  case BuiltinType::LongAccum:
2188  case BuiltinType::ULongAccum:
2189  case BuiltinType::SatLongAccum:
2190  case BuiltinType::SatULongAccum:
2191  Width = Target->getLongAccumWidth();
2192  Align = Target->getLongAccumAlign();
2193  break;
2194  case BuiltinType::ShortFract:
2195  case BuiltinType::UShortFract:
2196  case BuiltinType::SatShortFract:
2197  case BuiltinType::SatUShortFract:
2198  Width = Target->getShortFractWidth();
2199  Align = Target->getShortFractAlign();
2200  break;
2201  case BuiltinType::Fract:
2202  case BuiltinType::UFract:
2203  case BuiltinType::SatFract:
2204  case BuiltinType::SatUFract:
2205  Width = Target->getFractWidth();
2206  Align = Target->getFractAlign();
2207  break;
2208  case BuiltinType::LongFract:
2209  case BuiltinType::ULongFract:
2210  case BuiltinType::SatLongFract:
2211  case BuiltinType::SatULongFract:
2212  Width = Target->getLongFractWidth();
2213  Align = Target->getLongFractAlign();
2214  break;
2215  case BuiltinType::BFloat16:
2216  if (Target->hasBFloat16Type()) {
2217  Width = Target->getBFloat16Width();
2218  Align = Target->getBFloat16Align();
2219  } else if ((getLangOpts().SYCLIsDevice ||
2220  (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)) &&
2221  AuxTarget->hasBFloat16Type()) {
2222  Width = AuxTarget->getBFloat16Width();
2223  Align = AuxTarget->getBFloat16Align();
2224  }
2225  break;
2226  case BuiltinType::Float16:
2227  case BuiltinType::Half:
2228  if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
2229  !getLangOpts().OpenMPIsDevice) {
2230  Width = Target->getHalfWidth();
2231  Align = Target->getHalfAlign();
2232  } else {
2233  assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
2234  "Expected OpenMP device compilation.");
2235  Width = AuxTarget->getHalfWidth();
2236  Align = AuxTarget->getHalfAlign();
2237  }
2238  break;
2239  case BuiltinType::Float:
2240  Width = Target->getFloatWidth();
2241  Align = Target->getFloatAlign();
2242  break;
2243  case BuiltinType::Double:
2244  Width = Target->getDoubleWidth();
2245  Align = Target->getDoubleAlign();
2246  break;
2247  case BuiltinType::Ibm128:
2248  Width = Target->getIbm128Width();
2249  Align = Target->getIbm128Align();
2250  break;
2251  case BuiltinType::LongDouble:
2252  if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
2253  (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
2254  Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
2255  Width = AuxTarget->getLongDoubleWidth();
2256  Align = AuxTarget->getLongDoubleAlign();
2257  } else {
2258  Width = Target->getLongDoubleWidth();
2259  Align = Target->getLongDoubleAlign();
2260  }
2261  break;
2262  case BuiltinType::Float128:
2263  if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
2264  !getLangOpts().OpenMPIsDevice) {
2265  Width = Target->getFloat128Width();
2266  Align = Target->getFloat128Align();
2267  } else {
2268  assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
2269  "Expected OpenMP device compilation.");
2270  Width = AuxTarget->getFloat128Width();
2271  Align = AuxTarget->getFloat128Align();
2272  }
2273  break;
2274  case BuiltinType::NullPtr:
2275  // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*)
2276  Width = Target->getPointerWidth(LangAS::Default);
2277  Align = Target->getPointerAlign(LangAS::Default);
2278  break;
2279  case BuiltinType::ObjCId:
2280  case BuiltinType::ObjCClass:
2281  case BuiltinType::ObjCSel:
2282  Width = Target->getPointerWidth(LangAS::Default);
2283  Align = Target->getPointerAlign(LangAS::Default);
2284  break;
2285  case BuiltinType::OCLSampler:
2286  case BuiltinType::OCLEvent:
2287  case BuiltinType::OCLClkEvent:
2288  case BuiltinType::OCLQueue:
2289  case BuiltinType::OCLReserveID:
2290 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2291  case BuiltinType::Id:
2292 #include "clang/Basic/OpenCLImageTypes.def"
2293 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
2294  case BuiltinType::Id:
2295 #include "clang/Basic/OpenCLExtensionTypes.def"
2296  AS = Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T));
2297  Width = Target->getPointerWidth(AS);
2298  Align = Target->getPointerAlign(AS);
2299  break;
2300  // The SVE types are effectively target-specific. The length of an
2301  // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple
2302  // of 128 bits. There is one predicate bit for each vector byte, so the
2303  // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits.
2304  //
2305  // Because the length is only known at runtime, we use a dummy value
2306  // of 0 for the static length. The alignment values are those defined
2307  // by the Procedure Call Standard for the Arm Architecture.
2308 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
2309  IsSigned, IsFP, IsBF) \
2310  case BuiltinType::Id: \
2311  Width = 0; \
2312  Align = 128; \
2313  break;
2314 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
2315  case BuiltinType::Id: \
2316  Width = 0; \
2317  Align = 16; \
2318  break;
2319 #include "clang/Basic/AArch64SVEACLETypes.def"
2320 #define PPC_VECTOR_TYPE(Name, Id, Size) \
2321  case BuiltinType::Id: \
2322  Width = Size; \
2323  Align = Size; \
2324  break;
2325 #include "clang/Basic/PPCTypes.def"
2326 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \
2327  IsFP) \
2328  case BuiltinType::Id: \
2329  Width = 0; \
2330  Align = ElBits; \
2331  break;
2332 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \
2333  case BuiltinType::Id: \
2334  Width = 0; \
2335  Align = 8; \
2336  break;
2337 #include "clang/Basic/RISCVVTypes.def"
2338  }
2339  break;
2340  case Type::ObjCObjectPointer:
2341  Width = Target->getPointerWidth(LangAS::Default);
2342  Align = Target->getPointerAlign(LangAS::Default);
2343  break;
2344  case Type::BlockPointer:
2345  AS = cast<BlockPointerType>(T)->getPointeeType().getAddressSpace();
2346  Width = Target->getPointerWidth(AS);
2347  Align = Target->getPointerAlign(AS);
2348  break;
2349  case Type::LValueReference:
2350  case Type::RValueReference:
2351  // alignof and sizeof should never enter this code path here, so we go
2352  // the pointer route.
2353  AS = cast<ReferenceType>(T)->getPointeeType().getAddressSpace();
2354  Width = Target->getPointerWidth(AS);
2355  Align = Target->getPointerAlign(AS);
2356  break;
2357  case Type::Pointer:
2358  AS = cast<PointerType>(T)->getPointeeType().getAddressSpace();
2359  Width = Target->getPointerWidth(AS);
2360  Align = Target->getPointerAlign(AS);
2361  break;
2362  case Type::MemberPointer: {
2363  const auto *MPT = cast<MemberPointerType>(T);
2364  CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
2365  Width = MPI.Width;
2366  Align = MPI.Align;
2367  break;
2368  }
2369  case Type::Complex: {
2370  // Complex types have the same alignment as their elements, but twice the
2371  // size.
2372  TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
2373  Width = EltInfo.Width * 2;
2374  Align = EltInfo.Align;
2375  break;
2376  }
2377  case Type::ObjCObject:
2378  return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
2379  case Type::Adjusted:
2380  case Type::Decayed:
2381  return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
2382  case Type::ObjCInterface: {
2383  const auto *ObjCI = cast<ObjCInterfaceType>(T);
2384  if (ObjCI->getDecl()->isInvalidDecl()) {
2385  Width = 8;
2386  Align = 8;
2387  break;
2388  }
2389  const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2390  Width = toBits(Layout.getSize());
2391  Align = toBits(Layout.getAlignment());
2392  break;
2393  }
2394  case Type::BitInt: {
2395  const auto *EIT = cast<BitIntType>(T);
2396  Align =
2397  std::min(static_cast<unsigned>(std::max(
2398  getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))),
2399  Target->getLongLongAlign());
2400  Width = llvm::alignTo(EIT->getNumBits(), Align);
2401  break;
2402  }
2403  case Type::Record:
2404  case Type::Enum: {
2405  const auto *TT = cast<TagType>(T);
2406 
2407  if (TT->getDecl()->isInvalidDecl()) {
2408  Width = 8;
2409  Align = 8;
2410  break;
2411  }
2412 
2413  if (const auto *ET = dyn_cast<EnumType>(TT)) {
2414  const EnumDecl *ED = ET->getDecl();
2415  TypeInfo Info =
2417  if (unsigned AttrAlign = ED->getMaxAlignment()) {
2418  Info.Align = AttrAlign;
2420  }
2421  return Info;
2422  }
2423 
2424  const auto *RT = cast<RecordType>(TT);
2425  const RecordDecl *RD = RT->getDecl();
2426  const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2427  Width = toBits(Layout.getSize());
2428  Align = toBits(Layout.getAlignment());
2429  AlignRequirement = RD->hasAttr<AlignedAttr>()
2432  break;
2433  }
2434 
2435  case Type::SubstTemplateTypeParm:
2436  return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
2437  getReplacementType().getTypePtr());
2438 
2439  case Type::Auto:
2440  case Type::DeducedTemplateSpecialization: {
2441  const auto *A = cast<DeducedType>(T);
2442  assert(!A->getDeducedType().isNull() &&
2443  "cannot request the size of an undeduced or dependent auto type");
2444  return getTypeInfo(A->getDeducedType().getTypePtr());
2445  }
2446 
2447  case Type::Paren:
2448  return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
2449 
2450  case Type::MacroQualified:
2451  return getTypeInfo(
2452  cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr());
2453 
2454  case Type::ObjCTypeParam:
2455  return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());
2456 
2457  case Type::Using:
2458  return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr());
2459 
2460  case Type::Typedef: {
2461  const auto *TT = cast<TypedefType>(T);
2462  TypeInfo Info = getTypeInfo(TT->desugar().getTypePtr());
2463  // If the typedef has an aligned attribute on it, it overrides any computed
2464  // alignment we have. This violates the GCC documentation (which says that
2465  // attribute(aligned) can only round up) but matches its implementation.
2466  if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
2467  Align = AttrAlign;
2468  AlignRequirement = AlignRequirementKind::RequiredByTypedef;
2469  } else {
2470  Align = Info.Align;
2471  AlignRequirement = Info.AlignRequirement;
2472  }
2473  Width = Info.Width;
2474  break;
2475  }
2476 
2477  case Type::Elaborated:
2478  return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
2479 
2480  case Type::Attributed:
2481  return getTypeInfo(
2482  cast<AttributedType>(T)->getEquivalentType().getTypePtr());
2483 
2484  case Type::BTFTagAttributed:
2485  return getTypeInfo(
2486  cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());
2487 
2488  case Type::Atomic: {
2489  // Start with the base type information.
2490  TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
2491  Width = Info.Width;
2492  Align = Info.Align;
2493 
2494  if (!Width) {
2495  // An otherwise zero-sized type should still generate an
2496  // atomic operation.
2497  Width = Target->getCharWidth();
2498  assert(Align);
2499  } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2500  // If the size of the type doesn't exceed the platform's max
2501  // atomic promotion width, make the size and alignment more
2502  // favorable to atomic operations:
2503 
2504  // Round the size up to a power of 2.
2505  if (!llvm::isPowerOf2_64(Width))
2506  Width = llvm::NextPowerOf2(Width);
2507 
2508  // Set the alignment equal to the size.
2509  Align = static_cast<unsigned>(Width);
2510  }
2511  }
2512  break;
2513 
2514  case Type::Pipe:
2515  Width = Target->getPointerWidth(LangAS::opencl_global);
2516  Align = Target->getPointerAlign(LangAS::opencl_global);
2517  break;
2518  }
2519 
2520  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2521  return TypeInfo(Width, Align, AlignRequirement);
2522 }
2523 
2524 unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2525  UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
2526  if (I != MemoizedUnadjustedAlign.end())
2527  return I->second;
2528 
2529  unsigned UnadjustedAlign;
2530  if (const auto *RT = T->getAs<RecordType>()) {
2531  const RecordDecl *RD = RT->getDecl();
2532  const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2533  UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2534  } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
2535  const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2536  UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2537  } else {
2538  UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
2539  }
2540 
2541  MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2542  return UnadjustedAlign;
2543 }
2544 
2546  unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
2547  getTargetInfo().getTriple(), Target->getTargetOpts().FeatureMap);
2548  return SimdAlign;
2549 }
2550 
2551 /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2553  return CharUnits::fromQuantity(BitSize / getCharWidth());
2554 }
2555 
 2556 /// toBits - Convert a size in characters to a size in bits.
2558  return CharSize.getQuantity() * getCharWidth();
2559 }
2560 
2561 /// getTypeSizeInChars - Return the size of the specified type, in characters.
2562 /// This method does not work on incomplete types.
2564  return getTypeInfoInChars(T).Width;
2565 }
2567  return getTypeInfoInChars(T).Width;
2568 }
2569 
2570 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2571 /// characters. This method does not work on incomplete types.
2573  return toCharUnitsFromBits(getTypeAlign(T));
2574 }
2576  return toCharUnitsFromBits(getTypeAlign(T));
2577 }
2578 
2579 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
2580 /// type, in characters, before alignment adjustments. This method does
2581 /// not work on incomplete types.
2584 }
2587 }
2588 
2589 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified
2590 /// type for the current target in bits. This can be different than the ABI
2591 /// alignment in cases where it is beneficial for performance or backwards
2592 /// compatibility preserving to overalign a data type. (Note: despite the name,
2593 /// the preferred alignment is ABI-impacting, and not an optimization.)
2594 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
2595  TypeInfo TI = getTypeInfo(T);
2596  unsigned ABIAlign = TI.Align;
2597 
2598  T = T->getBaseElementTypeUnsafe();
2599 
2600  // The preferred alignment of member pointers is that of a pointer.
2601  if (T->isMemberPointerType())
2602  return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
2603 
2604  if (!Target->allowsLargerPreferedTypeAlignment())
2605  return ABIAlign;
2606 
2607  if (const auto *RT = T->getAs<RecordType>()) {
2608  const RecordDecl *RD = RT->getDecl();
2609 
2610  // When used as part of a typedef, or together with a 'packed' attribute,
2611  // the 'aligned' attribute can be used to decrease alignment. Note that the
2612  // 'packed' case is already taken into consideration when computing the
2613  // alignment, we only need to handle the typedef case here.
2615  RD->isInvalidDecl())
2616  return ABIAlign;
2617 
2618  unsigned PreferredAlign = static_cast<unsigned>(
2619  toBits(getASTRecordLayout(RD).PreferredAlignment));
2620  assert(PreferredAlign >= ABIAlign &&
2621  "PreferredAlign should be at least as large as ABIAlign.");
2622  return PreferredAlign;
2623  }
2624 
2625  // Double (and, for targets supporting AIX `power` alignment, long double) and
2626  // long long should be naturally aligned (despite requiring less alignment) if
2627  // possible.
2628  if (const auto *CT = T->getAs<ComplexType>())
2629  T = CT->getElementType().getTypePtr();
2630  if (const auto *ET = T->getAs<EnumType>())
2631  T = ET->getDecl()->getIntegerType().getTypePtr();
2632  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
2633  T->isSpecificBuiltinType(BuiltinType::LongLong) ||
2634  T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2635  (T->isSpecificBuiltinType(BuiltinType::LongDouble) &&
2636  Target->defaultsToAIXPowerAlignment()))
2637  // Don't increase the alignment if an alignment attribute was specified on a
2638  // typedef declaration.
2639  if (!TI.isAlignRequired())
2640  return std::max(ABIAlign, (unsigned)getTypeSize(T));
2641 
2642  return ABIAlign;
2643 }
2644 
2645 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2646 /// for __attribute__((aligned)) on this target, to be used if no alignment
2647 /// value is specified.
2650 }
2651 
2652 /// getAlignOfGlobalVar - Return the alignment in bits that should be given
2653 /// to a global variable of the specified type.
2655  uint64_t TypeSize = getTypeSize(T.getTypePtr());
2656  return std::max(getPreferredTypeAlign(T),
2657  getTargetInfo().getMinGlobalAlign(TypeSize));
2658 }
2659 
2660 /// getAlignOfGlobalVarInChars - Return the alignment in characters that
2661 /// should be given to a global variable of the specified type.
2664 }
2665 
2668  const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
2669  while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2670  Offset += Layout->getBaseClassOffset(Base);
2671  Layout = &getASTRecordLayout(Base);
2672  }
2673  return Offset;
2674 }
2675 
2677  const ValueDecl *MPD = MP.getMemberPointerDecl();
2680  bool DerivedMember = MP.isMemberPointerToDerivedMember();
2681  const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
2682  for (unsigned I = 0, N = Path.size(); I != N; ++I) {
2683  const CXXRecordDecl *Base = RD;
2684  const CXXRecordDecl *Derived = Path[I];
2685  if (DerivedMember)
2686  std::swap(Base, Derived);
2688  RD = Path[I];
2689  }
2690  if (DerivedMember)
2692  return ThisAdjustment;
2693 }
2694 
2695 /// DeepCollectObjCIvars -
2696 /// This routine first collects all declared, but not synthesized, ivars in
2697 /// super class and then collects all ivars, including those synthesized for
2698 /// current class. This routine is used for implementation of current class
2699 /// when all ivars, declared and synthesized are known.
2701  bool leafClass,
2702  SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
2703  if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2704  DeepCollectObjCIvars(SuperClass, false, Ivars);
2705  if (!leafClass) {
2706  llvm::append_range(Ivars, OI->ivars());
2707  } else {
2708  auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2709  for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2710  Iv= Iv->getNextIvar())
2711  Ivars.push_back(Iv);
2712  }
2713 }
2714 
2715 /// CollectInheritedProtocols - Collect all protocols in current class and
2716 /// those inherited by it.
2719  if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
2720  // We can use protocol_iterator here instead of
2721  // all_referenced_protocol_iterator since we are walking all categories.
2722  for (auto *Proto : OI->all_referenced_protocols()) {
2723  CollectInheritedProtocols(Proto, Protocols);
2724  }
2725 
2726  // Categories of this Interface.
2727  for (const auto *Cat : OI->visible_categories())
2728  CollectInheritedProtocols(Cat, Protocols);
2729 
2730  if (ObjCInterfaceDecl *SD = OI->getSuperClass())
2731  while (SD) {
2732  CollectInheritedProtocols(SD, Protocols);
2733  SD = SD->getSuperClass();
2734  }
2735  } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
2736  for (auto *Proto : OC->protocols()) {
2737  CollectInheritedProtocols(Proto, Protocols);
2738  }
2739  } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
2740  // Insert the protocol.
2741  if (!Protocols.insert(
2742  const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
2743  return;
2744 
2745  for (auto *Proto : OP->protocols())
2746  CollectInheritedProtocols(Proto, Protocols);
2747  }
2748 }
2749 
2751  const RecordDecl *RD) {
2752  assert(RD->isUnion() && "Must be union type");
2753  CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());
2754 
2755  for (const auto *Field : RD->fields()) {
2756  if (!Context.hasUniqueObjectRepresentations(Field->getType()))
2757  return false;
2758  CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType());
2759  if (FieldSize != UnionSize)
2760  return false;
2761  }
2762  return !RD->field_empty();
2763 }
2764 
2766  const ASTContext &Context,
2767  const clang::ASTRecordLayout & /*Layout*/) {
2768  return Context.getFieldOffset(Field);
2769 }
2770 
2772  const ASTContext &Context,
2773  const clang::ASTRecordLayout &Layout) {
2774  return Context.toBits(Layout.getBaseClassOffset(RD));
2775 }
2776 
2777 static std::optional<int64_t>
2779  const RecordDecl *RD);
2780 
2781 static std::optional<int64_t>
2782 getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context) {
2783  if (Field->getType()->isRecordType()) {
2784  const RecordDecl *RD = Field->getType()->getAsRecordDecl();
2785  if (!RD->isUnion())
2786  return structHasUniqueObjectRepresentations(Context, RD);
2787  }
2788 
2789  // A _BitInt type may not be unique if it has padding bits
2790  // but if it is a bitfield the padding bits are not used.
2791  bool IsBitIntType = Field->getType()->isBitIntType();
2792  if (!Field->getType()->isReferenceType() && !IsBitIntType &&
2793  !Context.hasUniqueObjectRepresentations(Field->getType()))
2794  return std::nullopt;
2795 
2796  int64_t FieldSizeInBits =
2797  Context.toBits(Context.getTypeSizeInChars(Field->getType()));
2798  if (Field->isBitField()) {
2799  int64_t BitfieldSize = Field->getBitWidthValue(Context);
2800  if (IsBitIntType) {
2801  if ((unsigned)BitfieldSize >
2802  cast<BitIntType>(Field->getType())->getNumBits())
2803  return std::nullopt;
2804  } else if (BitfieldSize > FieldSizeInBits) {
2805  return std::nullopt;
2806  }
2807  FieldSizeInBits = BitfieldSize;
2808  } else if (IsBitIntType &&
2809  !Context.hasUniqueObjectRepresentations(Field->getType())) {
2810  return std::nullopt;
2811  }
2812  return FieldSizeInBits;
2813 }
2814 
/// Size contribution of a base-class subobject, or std::nullopt if the base
/// may contain padding.
/// NOTE(review): signature line restored from upstream clang; verify.
static std::optional<int64_t>
getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context) {
  return structHasUniqueObjectRepresentations(Context, RD);
}
2819 
/// Walk \p Subobjects (a range of fields or of base classes) in order,
/// checking that they are laid out contiguously starting at
/// \p CurOffsetInBits.  Returns the offset just past the last subobject, or
/// std::nullopt if padding or a non-unique subobject was found.
/// NOTE(review): function-name line restored from upstream clang; verify.
template <typename RangeT>
static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations(
    const RangeT &Subobjects, int64_t CurOffsetInBits,
    const ASTContext &Context, const clang::ASTRecordLayout &Layout) {
  for (const auto *Subobject : Subobjects) {
    std::optional<int64_t> SizeInBits =
        getSubobjectSizeInBits(Subobject, Context);
    if (!SizeInBits)
      return std::nullopt;
    if (*SizeInBits != 0) {
      // A gap before this subobject means padding bits in the record.
      int64_t Offset = getSubobjectOffset(Subobject, Context, Layout);
      if (Offset != CurOffsetInBits)
        return std::nullopt;
      CurOffsetInBits += *SizeInBits;
    }
  }
  return CurOffsetInBits;
}
2838 
/// Returns the number of contiguous value bits of struct/class \p RD, or
/// std::nullopt if any base or field makes the representation non-unique.
/// Dynamic classes (vptr) never qualify.
/// NOTE(review): signature and the `Bases` declaration line restored from
/// upstream clang; lost in extraction — verify.
static std::optional<int64_t>
structHasUniqueObjectRepresentations(const ASTContext &Context,
                                     const RecordDecl *RD) {
  assert(!RD->isUnion() && "Must be struct/class type");
  const auto &Layout = Context.getASTRecordLayout(RD);

  int64_t CurOffsetInBits = 0;
  if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) {
    if (ClassDecl->isDynamicClass())
      return std::nullopt;

    SmallVector<CXXRecordDecl *, 4> Bases;
    for (const auto &Base : ClassDecl->bases()) {
      // Empty types can be inherited from, and non-empty types can potentially
      // have tail padding, so just make sure there isn't an error.
      Bases.emplace_back(Base.getType()->getAsCXXRecordDecl());
    }

    // Check bases in layout order, not declaration order.
    llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
      return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R);
    });

    std::optional<int64_t> OffsetAfterBases =
        structSubobjectsHaveUniqueObjectRepresentations(Bases, CurOffsetInBits,
                                                        Context, Layout);
    if (!OffsetAfterBases)
      return std::nullopt;
    CurOffsetInBits = *OffsetAfterBases;
  }

  // Fields are laid out after all bases.
  std::optional<int64_t> OffsetAfterFields =
      structSubobjectsHaveUniqueObjectRepresentations(
          RD->fields(), CurOffsetInBits, Context, Layout);
  if (!OffsetAfterFields)
    return std::nullopt;
  CurOffsetInBits = *OffsetAfterFields;

  return CurOffsetInBits;
}
2878 
/// Implements the compiler side of std::has_unique_object_representations.
/// NOTE(review): signature and the array-recursion line restored from
/// upstream clang; lost in extraction — verify.
bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const {
  // C++17 [meta.unary.prop]:
  //   The predicate condition for a template specialization
  //   has_unique_object_representations<T> shall be
  //   satisfied if and only if:
  //     (9.1) - T is trivially copyable, and
  //     (9.2) - any two objects of type T with the same value have the same
  //     object representation, where two objects
  //   of array or non-union class type are considered to have the same value
  //   if their respective sequences of
  //   direct subobjects have the same values, and two objects of union type
  //   are considered to have the same
  //   value if they have the same active member and the corresponding members
  //   have the same value.
  //   The set of scalar types for which this condition holds is
  //   implementation-defined. [ Note: If a type has padding
  //   bits, the condition does not hold; otherwise, the condition holds true
  //   for unsigned integral types. -- end note ]
  assert(!Ty.isNull() && "Null QualType sent to unique object rep check");

  // Arrays are unique only if their element type is unique.
  if (Ty->isArrayType())
    return hasUniqueObjectRepresentations(getBaseElementType(Ty));

  // (9.1) - T is trivially copyable...
  if (!Ty.isTriviallyCopyableType(*this))
    return false;

  // All integrals and enums are unique.
  if (Ty->isIntegralOrEnumerationType()) {
    // Except _BitInt types that have padding bits.
    if (const auto *BIT = dyn_cast<BitIntType>(Ty))
      return getTypeSize(BIT) == BIT->getNumBits();

    return true;
  }

  // All other pointers are unique.
  if (Ty->isPointerType())
    return true;

  if (Ty->isMemberPointerType()) {
    // Ask the C++ ABI whether this member-pointer representation has padding.
    const auto *MPT = Ty->getAs<MemberPointerType>();
    return !ABI->getMemberPointerInfo(MPT).HasPadding;
  }

  if (Ty->isRecordType()) {
    const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl();

    if (Record->isInvalidDecl())
      return false;

    if (Record->isUnion())
      return unionHasUniqueObjectRepresentations(*this, Record);

    // A struct is unique iff its value bits are contiguous and exactly fill
    // the type's size (i.e. no internal or tail padding).
    std::optional<int64_t> StructSize =
        structHasUniqueObjectRepresentations(*this, Record);

    return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty));
  }

  // FIXME: More cases to handle here (list by rsmith):
  // vectors (careful about, eg, vector of 3 foo)
  // _Complex int and friends
  // _Atomic T
  // Obj-C block pointers
  // Obj-C object pointers
  // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
  // clk_event_t, queue_t, reserve_id_t)
  // There're also Obj-C class types and the Obj-C selector type, but I think it
  // makes sense for those to return false here.

  return false;
}
2953 
/// Count the ivars of \p OI that are not declared in the primary class
/// interface: those from class extensions plus those (including synthesized
/// ones) defined only in the implementation.
/// NOTE(review): signature line restored from upstream clang; verify.
unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
  unsigned count = 0;
  // Count ivars declared in class extension.
  for (const auto *Ext : OI->known_extensions())
    count += Ext->ivar_size();

  // Count ivar defined in this class's implementation.  This
  // includes synthesized ivars.
  if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
    count += ImplDecl->ivar_size();

  return count;
}
2967 
/// Returns true if \p E can serve as the terminating null of a
/// sentinel-attributed argument list.
/// NOTE(review): signature and the isNullPointerConstant call lines restored
/// from upstream clang; lost in extraction — verify wrapping/arguments.
bool ASTContext::isSentinelNullExpr(const Expr *E) {
  if (!E)
    return false;

  // nullptr_t is always treated as null.
  if (E->getType()->isNullPtrType()) return true;

  // Any pointer-typed null pointer constant counts as a sentinel.
  if (E->getType()->isAnyPointerType() &&
      E->IgnoreParenCasts()->isNullPointerConstant(
          *this, Expr::NPC_ValueDependentIsNull))
    return true;

  // Unfortunately, __null has type 'int'.
  if (isa<GNUNullExpr>(E)) return true;

  return false;
}
2985 
/// Get the implementation of ObjCInterfaceDecl, or nullptr if none
/// exists.
/// NOTE(review): signature line restored from upstream clang; verify.
ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
    I = ObjCImpls.find(D);
  if (I != ObjCImpls.end())
    // Interfaces and categories share the ObjCImpls map; the cast recovers
    // the static type for interface keys.
    return cast<ObjCImplementationDecl>(I->second);
  return nullptr;
}
2995 
/// Get the implementation of ObjCCategoryDecl, or nullptr if none
/// exists.
/// NOTE(review): signature line restored from upstream clang; verify.
ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
    I = ObjCImpls.find(D);
  if (I != ObjCImpls.end())
    // Category keys always map to category implementations.
    return cast<ObjCCategoryImplDecl>(I->second);
  return nullptr;
}
3005 
/// Set the implementation of ObjCInterfaceDecl.
/// NOTE(review): signature lines restored from upstream clang; verify.
void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
                                       ObjCImplementationDecl *ImplD) {
  assert(IFaceD && ImplD && "Passed null params");
  ObjCImpls[IFaceD] = ImplD;
}

/// Set the implementation of ObjCCategoryDecl.
void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
                                       ObjCCategoryImplDecl *ImplD) {
  assert(CatD && ImplD && "Passed null params");
  ObjCImpls[CatD] = ImplD;
}
3019 
/// Look up the recorded redeclaration of \p MD, or nullptr if none was set.
/// NOTE(review): signature lines restored from upstream clang; verify.
const ObjCMethodDecl *
ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
  return ObjCMethodRedecls.lookup(MD);
}

/// Record \p Redecl as the (single) redeclaration of \p MD.
void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
                                            const ObjCMethodDecl *Redecl) {
  assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
  ObjCMethodRedecls[MD] = Redecl;
}
3030 
/// Return the ObjC interface that lexically contains \p ND — directly, via a
/// category, or via an implementation — or nullptr if there is none.
/// NOTE(review): signature line restored from upstream clang; verify.
const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
    const NamedDecl *ND) const {
  if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
    return ID;
  if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
    return CD->getClassInterface();
  if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
    return IMD->getClassInterface();

  return nullptr;
}
3042 
/// Get the copy initialization expression of VarDecl, or nullptr if
/// none exists.
/// NOTE(review): signature line restored from upstream clang; verify.
BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const {
  assert(VD && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "getBlockVarCopyInits - not __block var");
  auto I = BlockVarCopyInits.find(VD);
  if (I != BlockVarCopyInits.end())
    return I->second;
  // Not recorded: no copy expression, and it cannot throw.
  return {nullptr, false};
}
3054 
/// Set the copy initialization expression of a block var decl.
/// \p CanThrow records whether evaluating that expression can throw.
/// NOTE(review): signature line restored from upstream clang; verify.
void ASTContext::setBlockVarCopyInit(const VarDecl *VD, Expr *CopyExpr,
                                     bool CanThrow) {
  assert(VD && CopyExpr && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "setBlockVarCopyInits - not __block var");
  BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
}
3063 
/// Allocate an uninitialized TypeSourceInfo for \p T.  \p DataSize, if
/// non-zero, must equal the full TypeLoc data size for \p T.
/// NOTE(review): signature line restored from upstream clang; verify.
TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
                                                 unsigned DataSize) const {
  if (!DataSize)
    DataSize = TypeLoc::getFullDataSizeForType(T);
  else
    assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
           "incorrect data size provided to CreateTypeSourceInfo!");

  // The TypeLoc data is allocated immediately after the object itself.
  auto *TInfo =
    (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
  new (TInfo) TypeSourceInfo(T);
  return TInfo;
}
3077 
/// Create a TypeSourceInfo for \p T with every location set to \p L.
/// NOTE(review): signature and CreateTypeSourceInfo call lines restored from
/// upstream clang; verify.
TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
                                                     SourceLocation L) const {
  TypeSourceInfo *DI = CreateTypeSourceInfo(T);
  DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
  return DI;
}
3084 
/// Layout of an ObjC interface by itself, without any implementation.
/// NOTE(review): signature lines restored from upstream clang; verify.
const ASTRecordLayout &
ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
  // No implementation: synthesized ivars are not included.
  return getObjCLayout(D, nullptr);
}

/// Layout of an ObjC class as defined by its implementation (includes
/// synthesized ivars).
const ASTRecordLayout &
ASTContext::getASTObjCImplementationLayout(
    const ObjCImplementationDecl *D) const {
  return getObjCLayout(D->getClassInterface(), D);
}
3095 
/// Canonicalize \p Args; \p AnyNonCanonArgs is OR'ed with whether any
/// argument was changed by canonicalization.
/// NOTE(review): signature lines restored from upstream clang; verify.
static SmallVector<TemplateArgument, 16>
getCanonicalTemplateArguments(const ASTContext &C,
                              ArrayRef<TemplateArgument> Args,
                              bool &AnyNonCanonArgs) {
  SmallVector<TemplateArgument, 16> CanonArgs(Args);
  for (auto &Arg : CanonArgs) {
    TemplateArgument OrigArg = Arg;
    Arg = C.getCanonicalTemplateArgument(Arg);
    // Structural comparison detects whether canonicalization changed it.
    AnyNonCanonArgs |= !Arg.structurallyEquals(OrigArg);
  }
  return CanonArgs;
}
3107 
3108 //===----------------------------------------------------------------------===//
3109 // Type creation/memoization methods
3110 //===----------------------------------------------------------------------===//
3111 
3112 QualType
3113 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
3114  unsigned fastQuals = quals.getFastQualifiers();
3115  quals.removeFastQualifiers();
3116 
3117  // Check if we've already instantiated this type.
3118  llvm::FoldingSetNodeID ID;
3119  ExtQuals::Profile(ID, baseType, quals);
3120  void *insertPos = nullptr;
3121  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
3122  assert(eq->getQualifiers() == quals);
3123  return QualType(eq, fastQuals);
3124  }
3125 
3126  // If the base type is not canonical, make the appropriate canonical type.
3127  QualType canon;
3128  if (!baseType->isCanonicalUnqualified()) {
3129  SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
3130  canonSplit.Quals.addConsistentQualifiers(quals);
3131  canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);
3132 
3133  // Re-find the insert position.
3134  (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
3135  }
3136 
3137  auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
3138  ExtQualNodes.InsertNode(eq, insertPos);
3139  return QualType(eq, fastQuals);
3140 }
3141 
/// Return \p T qualified with address space \p AddressSpace; a no-op when
/// the canonical type already lives in that address space.
/// NOTE(review): signature line restored from upstream clang; verify.
QualType ASTContext::getAddrSpaceQualType(QualType T,
                                          LangAS AddressSpace) const {
  QualType CanT = getCanonicalType(T);
  if (CanT.getAddressSpace() == AddressSpace)
    return T;

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(T);

  // If this type already has an address space specified, it cannot get
  // another one.
  assert(!Quals.hasAddressSpace() &&
         "Type cannot be in multiple addr spaces!");
  Quals.addAddressSpace(AddressSpace);

  return getExtQualType(TypeNode, Quals);
}
3161 
/// Strip the address-space qualifier (possibly hidden behind sugar) from
/// \p T, preserving all other qualifiers.
/// NOTE(review): signature line restored from upstream clang; verify.
QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
  // If the type is not qualified with an address space, just return it
  // immediately.
  if (!T.hasAddressSpace())
    return T;

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode;

  while (T.hasAddressSpace()) {
    TypeNode = Quals.strip(T);

    // If the type no longer has an address space after stripping qualifiers,
    // jump out.
    if (!QualType(TypeNode, 0).hasAddressSpace())
      break;

    // There might be sugar in the way. Strip it and try again.
    T = T.getSingleStepDesugaredType(*this);
  }

  Quals.removeAddressSpace();

  // Removal of the address space can mean there are no longer any
  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
  // or required.
  if (Quals.hasNonFastQualifiers())
    return getExtQualType(TypeNode, Quals);
  else
    return QualType(TypeNode, Quals.getFastQualifiers());
}
3195 
/// Return \p T qualified with the given ObjC GC attribute; for pointers to
/// pointers, the attribute is applied to the pointee recursively.
/// NOTE(review): signature line restored from upstream clang; verify.
QualType ASTContext::getObjCGCQualType(QualType T,
                                       Qualifiers::GC GCAttr) const {
  QualType CanT = getCanonicalType(T);
  if (CanT.getObjCGCAttr() == GCAttr)
    return T;

  if (const auto *ptr = T->getAs<PointerType>()) {
    QualType Pointee = ptr->getPointeeType();
    if (Pointee->isAnyPointerType()) {
      QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
      return getPointerType(ResultType);
    }
  }

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(T);

  // If this type already has an ObjCGC specified, it cannot get
  // another one.
  assert(!Quals.hasObjCGCAttr() &&
         "Type cannot have multiple ObjCGCs!");
  Quals.addObjCGCAttr(GCAttr);

  return getExtQualType(TypeNode, Quals);
}
3223 
/// If \p T is a pointer into a pointer-sized address space, return the
/// equivalent pointer with that address space removed; otherwise return \p T.
/// NOTE(review): signature line restored from upstream clang; verify.
QualType ASTContext::removePtrSizeAddrSpace(QualType T) const {
  if (const PointerType *Ptr = T->getAs<PointerType>()) {
    QualType Pointee = Ptr->getPointeeType();
    if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) {
      return getPointerType(removeAddrSpaceQualType(Pointee));
    }
  }
  return T;
}
3233 
/// Return a function type identical to \p T except for its ExtInfo.
/// NOTE(review): signature line restored from upstream clang; verify.
const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
                                                   FunctionType::ExtInfo Info) {
  if (T->getExtInfo() == Info)
    return T;

  QualType Result;
  if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
    Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
  } else {
    // Prototyped function: rebuild with the same prototype but new ExtInfo.
    const auto *FPT = cast<FunctionProtoType>(T);
    FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
    EPI.ExtInfo = Info;
    Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
  }

  return cast<FunctionType>(Result.getTypePtr());
}
3251 
/// Rewrite the type of \p FD (and every prior redeclaration) to use the
/// deduced return type \p ResultType, then notify the mutation listener.
/// NOTE(review): signature and listener lines restored from upstream clang;
/// verify.
void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
                                                 QualType ResultType) {
  // Walk the redeclaration chain from newest to oldest.
  FD = FD->getMostRecentDecl();
  while (true) {
    const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
    FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
    FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
    if (FunctionDecl *Next = FD->getPreviousDecl())
      FD = Next;
    else
      break;
  }
  if (ASTMutationListener *L = getASTMutationListener())
    L->DeducedReturnType(FD, ResultType);
}
3267 
/// Get a function type and produce the equivalent function type with the
/// specified exception specification. Type sugar that can be present on a
/// declaration of a function with an exception specification is permitted
/// and preserved. Other type sugar (for instance, typedefs) is not.
/// NOTE(review): signature line restored from upstream clang; verify.
QualType ASTContext::getFunctionTypeWithExceptionSpec(
    QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const {
  // Might have some parens.
  if (const auto *PT = dyn_cast<ParenType>(Orig))
    return getParenType(
        getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI));

  // Might be wrapped in a macro qualified type.
  if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig))
    return getMacroQualifiedType(
        getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI),
        MQT->getMacroIdentifier());

  // Might have a calling-convention attribute.
  if (const auto *AT = dyn_cast<AttributedType>(Orig))
    return getAttributedType(
        AT->getAttrKind(),
        getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI),
        getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI));

  // Anything else must be a function type. Rebuild it with the new exception
  // specification.
  const auto *Proto = Orig->castAs<FunctionProtoType>();
  return getFunctionType(
      Proto->getReturnType(), Proto->getParamTypes(),
      Proto->getExtProtoInfo().withExceptionSpec(ESI));
}
3299 
/// Compare two function types ignoring their exception specifications; in
/// C++17 and later the exception specification is part of the type, so both
/// sides are normalized to EST_None before comparing.
/// NOTE(review): signature and the normalized-comparison lines restored from
/// upstream clang; lost in extraction — verify.
bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
                                                          QualType U) const {
  return hasSameType(T, U) ||
         (getLangOpts().CPlusPlus17 &&
          hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None),
                      getFunctionTypeWithExceptionSpec(U, EST_None)));
}
3307 
/// Rebuild a function type with pointer-sized address spaces removed from
/// its return and parameter types.
/// NOTE(review): signature line restored from upstream clang; verify.
QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) {
  if (const auto *Proto = T->getAs<FunctionProtoType>()) {
    QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
    SmallVector<QualType, 16> Args(Proto->param_types().size());
    for (unsigned i = 0, n = Args.size(); i != n; ++i)
      Args[i] = removePtrSizeAddrSpace(Proto->param_types()[i]);
    return getFunctionType(RetTy, Args, Proto->getExtProtoInfo());
  }

  if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) {
    QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
    return getFunctionNoProtoType(RetTy, Proto->getExtInfo());
  }

  return T;
}
3324 
/// Compare two function types while ignoring pointer-sized address-space
/// qualifiers on their return and parameter types.
/// NOTE(review): signature and comparison lines restored from upstream
/// clang; lost in extraction — verify.
bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) {
  return hasSameType(T, U) ||
         hasSameType(getFunctionTypeWithoutPtrSizes(T),
                     getFunctionTypeWithoutPtrSizes(U));
}
3330 
/// Replace \p FD's exception specification with \p ESI; when \p AsWritten is
/// set, also patch the declaration's TypeSourceInfo.
/// NOTE(review): signature and the Updated-initializer lines restored from
/// upstream clang; verify.
void ASTContext::adjustExceptionSpec(
    FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
    bool AsWritten) {
  // Update the type.
  QualType Updated =
      getFunctionTypeWithExceptionSpec(FD->getType(), ESI);
  FD->setType(Updated);

  if (!AsWritten)
    return;

  // Update the type in the type source information too.
  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
    // If the type and the type-as-written differ, we may need to update
    // the type-as-written too.
    if (TSInfo->getType() != FD->getType())
      Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);

    // FIXME: When we get proper type location information for exceptions,
    // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
    // up the TypeSourceInfo;
    assert(TypeLoc::getFullDataSizeForType(Updated) ==
               TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
           "TypeLoc size mismatch from updating exception specification");
    TSInfo->overrideType(Updated);
  }
}
3358 
/// getComplexType - Return the uniqued reference to the type for a complex
/// number with the specified element type.
/// NOTE(review): signature and Profile lines restored from upstream clang;
/// verify.
QualType ASTContext::getComplexType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ComplexType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(CT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getComplexType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
  Types.push_back(New);
  ComplexTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
3386 
/// getPointerType - Return the uniqued reference to the type for a pointer to
/// the specified type.
/// NOTE(review): signature and Profile lines restored from upstream clang;
/// verify.
QualType ASTContext::getPointerType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  PointerType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment) PointerType(T, Canonical);
  Types.push_back(New);
  PointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
3414 
/// Return a uniqued AdjustedType recording that \p Orig was adjusted to
/// \p New (e.g. by parameter-type adjustment).
/// NOTE(review): signature line restored from upstream clang; verify.
QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, New);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  // The canonical form of an adjusted type is that of the adjusted-to type.
  QualType Canonical = getCanonicalType(New);

  // Get the new insert position for the node we care about.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, TypeAlignment)
      AdjustedType(Type::Adjusted, Orig, New, Canonical);
  Types.push_back(AT);
  AdjustedTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}
3435 
/// Return a uniqued DecayedType recording that \p Orig decayed to
/// \p Decayed.  DecayedType shares the AdjustedTypes folding set.
/// NOTE(review): signature line restored from upstream clang; verify.
QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const {
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, Decayed);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  QualType Canonical = getCanonicalType(Decayed);

  // Get the new insert position for the node we care about.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, TypeAlignment) DecayedType(Orig, Decayed, Canonical);
  Types.push_back(AT);
  AdjustedTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}
3455 
/// Compute the decayed form of \p T (array-to-pointer or
/// function-to-pointer) and wrap it in a DecayedType node.
/// NOTE(review): signature line restored from upstream clang; verify.
QualType ASTContext::getDecayedType(QualType T) const {
  assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");

  QualType Decayed;

  // C99 6.7.5.3p7:
  //   A declaration of a parameter as "array of type" shall be
  //   adjusted to "qualified pointer to type", where the type
  //   qualifiers (if any) are those specified within the [ and ] of
  //   the array type derivation.
  if (T->isArrayType())
    Decayed = getArrayDecayedType(T);

  // C99 6.7.5.3p8:
  //   A declaration of a parameter as "function returning type"
  //   shall be adjusted to "pointer to function returning type", as
  //   in 6.3.2.1.
  if (T->isFunctionType())
    Decayed = getPointerType(T);

  return getDecayedType(T, Decayed);
}
3478 
/// getBlockPointerType - Return the uniqued reference to the type for
/// a pointer to the specified block.
/// NOTE(review): signature and Profile lines restored from upstream clang;
/// verify.
QualType ASTContext::getBlockPointerType(QualType T) const {
  assert(T->isFunctionType() && "block of function types only");
  // Unique pointers, to guarantee there is only one block of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  BlockPointerType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (BlockPointerType *PT =
        BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the block pointee type isn't canonical, this won't be a canonical
  // type either so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getBlockPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    BlockPointerType *NewIP =
      BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
  Types.push_back(New);
  BlockPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
3509 
3510 /// getLValueReferenceType - Return the uniqued reference to the type for an
3511 /// lvalue reference to the specified type.
3512 QualType
3513 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
3514  assert((!T->isPlaceholderType() ||
3515  T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
3516  "Unresolved placeholder type");
3517 
3518  // Unique pointers, to guarantee there is only one pointer of a particular
3519  // structure.
3520  llvm::FoldingSetNodeID ID;
3521  ReferenceType::Profile(ID, T, SpelledAsLValue);
3522 
3523  void *InsertPos = nullptr;
3524  if (LValueReferenceType *RT =
3525  LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
3526  return QualType(RT, 0);
3527 
3528  const auto *InnerRef = T->getAs<ReferenceType>();
3529 
3530  // If the referencee type isn't canonical, this won't be a canonical type
3531  // either, so fill in the canonical type field.
3532  QualType Canonical;
3533  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
3534  QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
3535  Canonical = getLValueReferenceType(getCanonicalType(PointeeType));
3536 
3537  // Get the new insert position for the node we care about.
3538  LValueReferenceType *NewIP =
3539  LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
3540  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3541  }
3542 
3543  auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
3544  SpelledAsLValue);
3545  Types.push_back(New);
3546  LValueReferenceTypes.InsertNode(New, InsertPos);
3547 
3548  return QualType(New, 0);
3549 }
3550 
/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
/// NOTE(review): signature line restored from upstream clang; verify.
QualType ASTContext::getRValueReferenceType(QualType T) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, false);

  void *InsertPos = nullptr;
  if (RValueReferenceType *RT =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.  An inner reference also
  // requires canonicalization (reference collapsing).
  QualType Canonical;
  if (InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getRValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about.
    RValueReferenceType *NewIP =
      RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
  Types.push_back(New);
  RValueReferenceTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
3588 
/// getMemberPointerType - Return the uniqued reference to the type for a
/// member pointer to the specified type, in the specified class.
/// NOTE(review): signature and the canonicalization line restored from
/// upstream clang; lost in extraction — verify.
QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  MemberPointerType::Profile(ID, T, Cls);

  void *InsertPos = nullptr;
  if (MemberPointerType *PT =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee or class type isn't canonical, this won't be a canonical
  // type either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
    Canonical =
        getMemberPointerType(getCanonicalType(T), getCanonicalType(Cls));

    // Get the new insert position for the node we care about.
    MemberPointerType *NewIP =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
  Types.push_back(New);
  MemberPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
3618 
/// getConstantArrayType - Return the unique reference to the type for an
/// array of the specified element type.
/// NOTE(review): signature lines (including the ArraySizeModifier parameter)
/// restored from upstream clang; lost in extraction — verify.
QualType ASTContext::getConstantArrayType(QualType EltTy,
                                          const llvm::APInt &ArySizeIn,
                                          const Expr *SizeExpr,
                                          ArrayType::ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals) const {
  assert((EltTy->isDependentType() ||
          EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
         "Constant array of VLAs is illegal!");

  // We only need the size as part of the type if it's instantiation-dependent.
  if (SizeExpr && !SizeExpr->isInstantiationDependent())
    SizeExpr = nullptr;

  // Convert the array size into a canonical width matching the pointer size for
  // the target.
  llvm::APInt ArySize(ArySizeIn);
  ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());

  llvm::FoldingSetNodeID ID;
  ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM,
                             IndexTypeQuals);

  void *InsertPos = nullptr;
  if (ConstantArrayType *ATP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(ATP, 0);

  // If the element type isn't canonical or has qualifiers, or the array bound
  // is instantiation-dependent, this won't be a canonical type either, so fill
  // in the canonical type field.
  QualType Canon;
  // FIXME: Check below should look for qualifiers behind sugar.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr,
                                 ASM, IndexTypeQuals);
    Canon = getQualifiedType(Canon, canonSplit.Quals);

    // Get the new insert position for the node we care about.
    ConstantArrayType *NewIP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  // The size expression (if kept) is tail-allocated after the node.
  void *Mem = Allocate(
      ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0),
      TypeAlignment);
  auto *New = new (Mem)
      ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals);
  ConstantArrayTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}
3674 
3675 /// getVariableArrayDecayedType - Turns the given type, which may be
3676 /// variably-modified, into the corresponding type with all the known
3677 /// sizes replaced with [*].
3679  // Vastly most common case.
3680  if (!type->isVariablyModifiedType()) return type;
3681 
3682  QualType result;
3683 
  // Split off the top-level qualifiers; they are re-applied to the result at
  // the bottom of this function via getQualifiedType.
3684  SplitQualType split = type.getSplitDesugaredType();
3685  const Type *ty = split.Ty;
  // 'ty' is fully desugared here, so only canonical type classes can appear
  // in the switch; every non-canonical class lands on the unreachable below.
3686  switch (ty->getTypeClass()) {
3687 #define TYPE(Class, Base)
3688 #define ABSTRACT_TYPE(Class, Base)
3689 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3690 #include "clang/AST/TypeNodes.inc"
3691  llvm_unreachable("didn't desugar past all non-canonical types?");
3692 
3693  // These types should never be variably-modified.
3694  case Type::Builtin:
3695  case Type::Complex:
3696  case Type::Vector:
3697  case Type::DependentVector:
3698  case Type::ExtVector:
3699  case Type::DependentSizedExtVector:
3700  case Type::ConstantMatrix:
3701  case Type::DependentSizedMatrix:
3702  case Type::DependentAddressSpace:
3703  case Type::ObjCObject:
3704  case Type::ObjCInterface:
3705  case Type::ObjCObjectPointer:
3706  case Type::Record:
3707  case Type::Enum:
3708  case Type::UnresolvedUsing:
3709  case Type::TypeOfExpr:
3710  case Type::TypeOf:
3711  case Type::Decltype:
3712  case Type::UnaryTransform:
3713  case Type::DependentName:
3714  case Type::InjectedClassName:
3715  case Type::TemplateSpecialization:
3716  case Type::DependentTemplateSpecialization:
3717  case Type::TemplateTypeParm:
3718  case Type::SubstTemplateTypeParmPack:
3719  case Type::Auto:
3720  case Type::DeducedTemplateSpecialization:
3721  case Type::PackExpansion:
3722  case Type::BitInt:
3723  case Type::DependentBitInt:
3724  llvm_unreachable("type should never be variably-modified");
3725 
3726  // These types can be variably-modified but should never need to
3727  // further decay.
3728  case Type::FunctionNoProto:
3729  case Type::FunctionProto:
3730  case Type::BlockPointer:
3731  case Type::MemberPointer:
3732  case Type::Pipe:
3733  return type;
3734 
3735  // These types can be variably-modified. All these modifications
3736  // preserve structure except as noted by comments.
3737  // TODO: if we ever care about optimizing VLAs, there are no-op
3738  // optimizations available here.
3739  case Type::Pointer:
3741  cast<PointerType>(ty)->getPointeeType()));
3742  break;
3743 
3744  case Type::LValueReference: {
3745  const auto *lv = cast<LValueReferenceType>(ty);
3746  result = getLValueReferenceType(
3747  getVariableArrayDecayedType(lv->getPointeeType()),
3748  lv->isSpelledAsLValue());
3749  break;
3750  }
3751 
3752  case Type::RValueReference: {
3753  const auto *lv = cast<RValueReferenceType>(ty);
3754  result = getRValueReferenceType(
3755  getVariableArrayDecayedType(lv->getPointeeType()));
3756  break;
3757  }
3758 
3759  case Type::Atomic: {
3760  const auto *at = cast<AtomicType>(ty);
3761  result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
3762  break;
3763  }
3764 
3765  case Type::ConstantArray: {
3766  const auto *cat = cast<ConstantArrayType>(ty);
  // Recurse into the element type only; the constant dimensions themselves
  // are preserved as-is.
3767  result = getConstantArrayType(
3768  getVariableArrayDecayedType(cat->getElementType()),
3769  cat->getSize(),
3770  cat->getSizeExpr(),
3771  cat->getSizeModifier(),
3772  cat->getIndexTypeCVRQualifiers());
3773  break;
3774  }
3775 
3776  case Type::DependentSizedArray: {
3777  const auto *dat = cast<DependentSizedArrayType>(ty);
3778  result = getDependentSizedArrayType(
3779  getVariableArrayDecayedType(dat->getElementType()),
3780  dat->getSizeExpr(),
3781  dat->getSizeModifier(),
3782  dat->getIndexTypeCVRQualifiers(),
3783  dat->getBracketsRange());
3784  break;
3785  }
3786 
3787  // Turn incomplete types into [*] types.
3788  case Type::IncompleteArray: {
3789  const auto *iat = cast<IncompleteArrayType>(ty);
  // A null size expression is what marks a VariableArrayType as '[*]'.
3790  result = getVariableArrayType(
3791  getVariableArrayDecayedType(iat->getElementType()),
3792  /*size*/ nullptr,
3794  iat->getIndexTypeCVRQualifiers(),
3795  SourceRange());
3796  break;
3797  }
3798 
3799  // Turn VLA types into [*] types.
3800  case Type::VariableArray: {
3801  const auto *vat = cast<VariableArrayType>(ty);
3802  result = getVariableArrayType(
3803  getVariableArrayDecayedType(vat->getElementType()),
3804  /*size*/ nullptr,
3806  vat->getIndexTypeCVRQualifiers(),
3807  vat->getBracketsRange());
3808  break;
3809  }
3810  }
3811 
3812  // Apply the top-level qualifiers from the original.
3813  return getQualifiedType(result, split.Quals);
3814 }
3815 
3816 /// getVariableArrayType - Returns a non-unique reference to the type for a
3817 /// variable array of the specified element type.
3819  Expr *NumElts,
3821  unsigned IndexTypeQuals,
3822  SourceRange Brackets) const {
3823  // Since we don't unique expressions, it isn't possible to unique VLA's
3824  // that have an expression provided for their size.
3825  QualType Canon;
3826 
3827  // Be sure to pull qualifiers off the element type.
3828  // FIXME: Check below should look for qualifiers behind sugar.
3829  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
  // Build the canonical VLA over the unqualified canonical element type,
  // then hoist the element qualifiers onto the array type itself.
3830  SplitQualType canonSplit = getCanonicalType(EltTy).split();
3831  Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
3832  IndexTypeQuals, Brackets);
3833  Canon = getQualifiedType(Canon, canonSplit.Quals);
3834  }
3835 
3836  auto *New = new (*this, TypeAlignment)
3837  VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
3838 
  // VLAs are recorded in a plain list (VariableArrayTypes), not a uniquing
  // set, consistent with the "not possible to unique" note above.
3839  VariableArrayTypes.push_back(New);
3840  Types.push_back(New);
3841  return QualType(New, 0);
3842 }
3843 
3844 /// getDependentSizedArrayType - Returns a non-unique reference to
3845 /// the type for a dependently-sized array of the specified element
3846 /// type.
3848  Expr *numElements,
3850  unsigned elementTypeQuals,
3851  SourceRange brackets) const {
3852  assert((!numElements || numElements->isTypeDependent() ||
3853  numElements->isValueDependent()) &&
3854  "Size must be type- or value-dependent!");
3855 
3856  // Dependently-sized array types that do not have a specified number
3857  // of elements will have their sizes deduced from a dependent
3858  // initializer. We do no canonicalization here at all, which is okay
3859  // because they can't be used in most locations.
3860  if (!numElements) {
3861  auto *newType
3862  = new (*this, TypeAlignment)
3863  DependentSizedArrayType(*this, elementType, QualType(),
3864  numElements, ASM, elementTypeQuals,
3865  brackets);
3866  Types.push_back(newType);
3867  return QualType(newType, 0);
3868  }
3869 
3870  // Otherwise, we actually build a new type every time, but we
3871  // also build a canonical type.
3872 
3873  SplitQualType canonElementType = getCanonicalType(elementType).split();
3874 
  // Profile against the unqualified canonical element type so spelling
  // differences in the element type do not defeat uniquing of the canonical
  // node.
3875  void *insertPos = nullptr;
3876  llvm::FoldingSetNodeID ID;
3878  QualType(canonElementType.Ty, 0),
3879  ASM, elementTypeQuals, numElements);
3880 
3881  // Look for an existing type with these properties.
3882  DependentSizedArrayType *canonTy =
3883  DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);
3884 
3885  // If we don't have one, build one.
3886  if (!canonTy) {
3887  canonTy = new (*this, TypeAlignment)
3888  DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
3889  QualType(), numElements, ASM, elementTypeQuals,
3890  brackets);
3891  DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
3892  Types.push_back(canonTy);
3893  }
3894 
3895  // Apply qualifiers from the element type to the array.
3896  QualType canon = getQualifiedType(QualType(canonTy,0),
3897  canonElementType.Quals);
3898 
3899  // If we didn't need extra canonicalization for the element type or the size
3900  // expression, then just use that as our result.
3901  if (QualType(canonElementType.Ty, 0) == elementType &&
3902  canonTy->getSizeExpr() == numElements)
3903  return canon;
3904 
3905  // Otherwise, we need to build a type which follows the spelling
3906  // of the element type.
3907  auto *sugaredType
3908  = new (*this, TypeAlignment)
3909  DependentSizedArrayType(*this, elementType, canon, numElements,
3910  ASM, elementTypeQuals, brackets);
3911  Types.push_back(sugaredType);
3912  return QualType(sugaredType, 0);
3913 }
3914 
3917  unsigned elementTypeQuals) const {
  // Incomplete (unsized) array types are uniqued via IncompleteArrayTypes.
3918  llvm::FoldingSetNodeID ID;
3919  IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);
3920 
3921  void *insertPos = nullptr;
3922  if (IncompleteArrayType *iat =
3923  IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
3924  return QualType(iat, 0);
3925 
3926  // If the element type isn't canonical, this won't be a canonical type
3927  // either, so fill in the canonical type field. We also have to pull
3928  // qualifiers off the element type.
3929  QualType canon;
3930 
3931  // FIXME: Check below should look for qualifiers behind sugar.
3932  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
3933  SplitQualType canonSplit = getCanonicalType(elementType).split();
3934  canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
3935  ASM, elementTypeQuals);
3936  canon = getQualifiedType(canon, canonSplit.Quals);
3937 
3938  // Get the new insert position for the node we care about.
  // The recursive call above may have modified the FoldingSet, invalidating
  // insertPos, so it must be recomputed before InsertNode below.
3939  IncompleteArrayType *existing =
3940  IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
3941  assert(!existing && "Shouldn't be in the map!"); (void) existing;
3942  }
3943 
3944  auto *newType = new (*this, TypeAlignment)
3945  IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
3946 
3947  IncompleteArrayTypes.InsertNode(newType, insertPos);
3948  Types.push_back(newType);
3949  return QualType(newType, 0);
3950 }
3951 
3954 #define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \
3955  {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
3956  NUMVECTORS};
3957 
3958 #define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \
3959  {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};
3960 
  // Each case yields {element QualType, scalable element count, number of
  // vectors} for the given SVE/RVV builtin vector type.
3961  switch (Ty->getKind()) {
3962  default:
3963  llvm_unreachable("Unsupported builtin vector type");
3964  case BuiltinType::SveInt8:
3965  return SVE_INT_ELTTY(8, 16, true, 1);
3966  case BuiltinType::SveUint8:
3967  return SVE_INT_ELTTY(8, 16, false, 1);
3968  case BuiltinType::SveInt8x2:
3969  return SVE_INT_ELTTY(8, 16, true, 2);
3970  case BuiltinType::SveUint8x2:
3971  return SVE_INT_ELTTY(8, 16, false, 2);
3972  case BuiltinType::SveInt8x3:
3973  return SVE_INT_ELTTY(8, 16, true, 3);
3974  case BuiltinType::SveUint8x3:
3975  return SVE_INT_ELTTY(8, 16, false, 3);
3976  case BuiltinType::SveInt8x4:
3977  return SVE_INT_ELTTY(8, 16, true, 4);
3978  case BuiltinType::SveUint8x4:
3979  return SVE_INT_ELTTY(8, 16, false, 4);
3980  case BuiltinType::SveInt16:
3981  return SVE_INT_ELTTY(16, 8, true, 1);
3982  case BuiltinType::SveUint16:
3983  return SVE_INT_ELTTY(16, 8, false, 1);
3984  case BuiltinType::SveInt16x2:
3985  return SVE_INT_ELTTY(16, 8, true, 2);
3986  case BuiltinType::SveUint16x2:
3987  return SVE_INT_ELTTY(16, 8, false, 2);
3988  case BuiltinType::SveInt16x3:
3989  return SVE_INT_ELTTY(16, 8, true, 3);
3990  case BuiltinType::SveUint16x3:
3991  return SVE_INT_ELTTY(16, 8, false, 3);
3992  case BuiltinType::SveInt16x4:
3993  return SVE_INT_ELTTY(16, 8, true, 4);
3994  case BuiltinType::SveUint16x4:
3995  return SVE_INT_ELTTY(16, 8, false, 4);
3996  case BuiltinType::SveInt32:
3997  return SVE_INT_ELTTY(32, 4, true, 1);
3998  case BuiltinType::SveUint32:
3999  return SVE_INT_ELTTY(32, 4, false, 1);
4000  case BuiltinType::SveInt32x2:
4001  return SVE_INT_ELTTY(32, 4, true, 2);
4002  case BuiltinType::SveUint32x2:
4003  return SVE_INT_ELTTY(32, 4, false, 2);
4004  case BuiltinType::SveInt32x3:
4005  return SVE_INT_ELTTY(32, 4, true, 3);
4006  case BuiltinType::SveUint32x3:
4007  return SVE_INT_ELTTY(32, 4, false, 3);
4008  case BuiltinType::SveInt32x4:
4009  return SVE_INT_ELTTY(32, 4, true, 4);
4010  case BuiltinType::SveUint32x4:
4011  return SVE_INT_ELTTY(32, 4, false, 4);
4012  case BuiltinType::SveInt64:
4013  return SVE_INT_ELTTY(64, 2, true, 1);
4014  case BuiltinType::SveUint64:
4015  return SVE_INT_ELTTY(64, 2, false, 1);
4016  case BuiltinType::SveInt64x2:
4017  return SVE_INT_ELTTY(64, 2, true, 2);
4018  case BuiltinType::SveUint64x2:
4019  return SVE_INT_ELTTY(64, 2, false, 2);
4020  case BuiltinType::SveInt64x3:
4021  return SVE_INT_ELTTY(64, 2, true, 3);
4022  case BuiltinType::SveUint64x3:
4023  return SVE_INT_ELTTY(64, 2, false, 3);
4024  case BuiltinType::SveInt64x4:
4025  return SVE_INT_ELTTY(64, 2, true, 4);
4026  case BuiltinType::SveUint64x4:
4027  return SVE_INT_ELTTY(64, 2, false, 4);
4028  case BuiltinType::SveBool:
4029  return SVE_ELTTY(BoolTy, 16, 1);
4030  case BuiltinType::SveFloat16:
4031  return SVE_ELTTY(HalfTy, 8, 1);
4032  case BuiltinType::SveFloat16x2:
4033  return SVE_ELTTY(HalfTy, 8, 2);
4034  case BuiltinType::SveFloat16x3:
4035  return SVE_ELTTY(HalfTy, 8, 3);
4036  case BuiltinType::SveFloat16x4:
4037  return SVE_ELTTY(HalfTy, 8, 4);
4038  case BuiltinType::SveFloat32:
4039  return SVE_ELTTY(FloatTy, 4, 1);
4040  case BuiltinType::SveFloat32x2:
4041  return SVE_ELTTY(FloatTy, 4, 2);
4042  case BuiltinType::SveFloat32x3:
4043  return SVE_ELTTY(FloatTy, 4, 3);
4044  case BuiltinType::SveFloat32x4:
4045  return SVE_ELTTY(FloatTy, 4, 4);
4046  case BuiltinType::SveFloat64:
4047  return SVE_ELTTY(DoubleTy, 2, 1);
4048  case BuiltinType::SveFloat64x2:
4049  return SVE_ELTTY(DoubleTy, 2, 2);
4050  case BuiltinType::SveFloat64x3:
4051  return SVE_ELTTY(DoubleTy, 2, 3);
4052  case BuiltinType::SveFloat64x4:
4053  return SVE_ELTTY(DoubleTy, 2, 4);
4054  case BuiltinType::SveBFloat16:
4055  return SVE_ELTTY(BFloat16Ty, 8, 1);
4056  case BuiltinType::SveBFloat16x2:
4057  return SVE_ELTTY(BFloat16Ty, 8, 2);
4058  case BuiltinType::SveBFloat16x3:
4059  return SVE_ELTTY(BFloat16Ty, 8, 3);
4060  case BuiltinType::SveBFloat16x4:
4061  return SVE_ELTTY(BFloat16Ty, 8, 4);
  // RISC-V vector builtins are generated from the .def file rather than
  // enumerated by hand like the SVE cases above.
4062 #define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \
4063  IsSigned) \
4064  case BuiltinType::Id: \
4065  return {getIntTypeForBitwidth(ElBits, IsSigned), \
4066  llvm::ElementCount::getScalable(NumEls), NF};
4067 #define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
4068  case BuiltinType::Id: \
4069  return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \
4070  llvm::ElementCount::getScalable(NumEls), NF};
4071 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
4072  case BuiltinType::Id: \
4073  return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
4074 #include "clang/Basic/RISCVVTypes.def"
4075  }
4076 }
4077 
4078 /// getScalableVectorType - Return the unique reference to a scalable vector
4079 /// type of the specified element type and size. VectorType must be a built-in
4080 /// type.
4082  unsigned NumElts) const {
4083  if (Target->hasAArch64SVETypes()) {
4084  uint64_t EltTySize = getTypeSize(EltTy);
  // Match element signedness / FP-ness / bfloat-ness, element bit-width, and
  // lane count against each SVE ACLE type; return its singleton on a match.
4085 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
4086  IsSigned, IsFP, IsBF) \
4087  if (!EltTy->isBooleanType() && \
4088  ((EltTy->hasIntegerRepresentation() && \
4089  EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
4090  (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \
4091  IsFP && !IsBF) || \
4092  (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \
4093  IsBF && !IsFP)) && \
4094  EltTySize == ElBits && NumElts == NumEls) { \
4095  return SingletonId; \
4096  }
4097 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
4098  if (EltTy->isBooleanType() && NumElts == NumEls) \
4099  return SingletonId;
4100 #include "clang/Basic/AArch64SVEACLETypes.def"
4101  } else if (Target->hasRISCVVTypes()) {
4102  uint64_t EltTySize = getTypeSize(EltTy);
4103 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \
4104  IsFP) \
4105  if (!EltTy->isBooleanType() && \
4106  ((EltTy->hasIntegerRepresentation() && \
4107  EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
4108  (EltTy->hasFloatingRepresentation() && IsFP)) && \
4109  EltTySize == ElBits && NumElts == NumEls) \
4110  return SingletonId;
4111 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
4112  if (EltTy->isBooleanType() && NumElts == NumEls) \
4113  return SingletonId;
4114 #include "clang/Basic/RISCVVTypes.def"
4115  }
  // No target scalable-vector support, or no matching builtin: return null.
4116  return QualType();
4117 }
4118 
4119 /// getVectorType - Return the unique reference to a vector type of
4120 /// the specified element type and size. VectorType must be a built-in type.
4121 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
4122  VectorType::VectorKind VecKind) const {
  // _BitInt element types are permitted only for byte-sized power-of-two
  // widths of at least 8 bits, per the assert below.
4123  assert(vecType->isBuiltinType() ||
4124  (vecType->isBitIntType() &&
4125  // Only support _BitInt elements with byte-sized power of 2 NumBits.
4126  llvm::isPowerOf2_32(vecType->getAs<BitIntType>()->getNumBits()) &&
4127  vecType->getAs<BitIntType>()->getNumBits() >= 8));
4128 
4129  // Check if we've already instantiated a vector of this type.
4130  llvm::FoldingSetNodeID ID;
4131  VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
4132 
4133  void *InsertPos = nullptr;
4134  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
4135  return QualType(VTP, 0);
4136 
4137  // If the element type isn't canonical, this won't be a canonical type either,
4138  // so fill in the canonical type field.
4139  QualType Canonical;
4140  if (!vecType.isCanonical()) {
4141  Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);
4142 
4143  // Get the new insert position for the node we care about.
  // The recursive call may have touched the FoldingSet; InsertPos must be
  // refreshed before the InsertNode below.
4144  VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4145  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4146  }
4147  auto *New = new (*this, TypeAlignment)
4148  VectorType(vecType, NumElts, Canonical, VecKind);
4149  VectorTypes.InsertNode(New, InsertPos);
4150  Types.push_back(New);
4151  return QualType(New, 0);
4152 }
4153 
4154 QualType
4156  SourceLocation AttrLoc,
4157  VectorType::VectorKind VecKind) const {
  // Profile with the canonical element type so all sugared spellings share
  // one canonical DependentVectorType node.
4158  llvm::FoldingSetNodeID ID;
4159  DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr,
4160  VecKind);
4161  void *InsertPos = nullptr;
4162  DependentVectorType *Canon =
4163  DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4164  DependentVectorType *New;
4165 
4166  if (Canon) {
  // Canonical node exists: the new node is sugar pointing at it.
4167  New = new (*this, TypeAlignment) DependentVectorType(
4168  *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
4169  } else {
4170  QualType CanonVecTy = getCanonicalType(VecType);
4171  if (CanonVecTy == VecType) {
  // The requested type is itself canonical; build and register it.
4172  New = new (*this, TypeAlignment) DependentVectorType(
4173  *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind);
4174 
4175  DependentVectorType *CanonCheck =
4176  DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4177  assert(!CanonCheck &&
4178  "Dependent-sized vector_size canonical type broken");
4179  (void)CanonCheck;
4180  DependentVectorTypes.InsertNode(New, InsertPos);
4181  } else {
  // Build the canonical node first (recursively), then the sugared node.
4182  QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr,
4183  SourceLocation(), VecKind);
4184  New = new (*this, TypeAlignment) DependentVectorType(
4185  *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
4186  }
4187  }
4188 
  // Record the node; only the canonical node was inserted into the
  // uniquing set above.
4189  Types.push_back(New);
4190  return QualType(New, 0);
4191 }
4192 
4193 /// getExtVectorType - Return the unique reference to an extended vector type of
4194 /// the specified element type and size. VectorType must be a built-in type.
4196  unsigned NumElts) const {
  // Unlike getVectorType, ext_vector also accepts dependent element types.
4197  assert(vecType->isBuiltinType() || vecType->isDependentType() ||
4198  (vecType->isBitIntType() &&
4199  // Only support _BitInt elements with byte-sized power of 2 NumBits.
4200  llvm::isPowerOf2_32(vecType->getAs<BitIntType>()->getNumBits()) &&
4201  vecType->getAs<BitIntType>()->getNumBits() >= 8));
4202 
4203  // Check if we've already instantiated a vector of this type.
4204  llvm::FoldingSetNodeID ID;
4205  VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
4207  void *InsertPos = nullptr;
4208  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
4209  return QualType(VTP, 0);
4210 
4211  // If the element type isn't canonical, this won't be a canonical type either,
4212  // so fill in the canonical type field.
4213  QualType Canonical;
4214  if (!vecType.isCanonical()) {
4215  Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);
4216 
4217  // Get the new insert position for the node we care about.
  // The recursive call may have touched the FoldingSet; refresh InsertPos.
4218  VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4219  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4220  }
4221  auto *New = new (*this, TypeAlignment)
4222  ExtVectorType(vecType, NumElts, Canonical);
4223  VectorTypes.InsertNode(New, InsertPos);
4224  Types.push_back(New);
4225  return QualType(New, 0);
4226 }
4227 
4228 QualType
4230  Expr *SizeExpr,
4231  SourceLocation AttrLoc) const {
4232  llvm::FoldingSetNodeID ID;
4234  SizeExpr);
4235 
4236  void *InsertPos = nullptr;
4238  = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4240  if (Canon) {
4241  // We already have a canonical version of this array type; use it as
4242  // the canonical type for a newly-built type.
4243  New = new (*this, TypeAlignment)
4244  DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0),
4245  SizeExpr, AttrLoc);
4246  } else {
4247  QualType CanonVecTy = getCanonicalType(vecType);
4248  if (CanonVecTy == vecType) {
  // The requested type is itself canonical; build it and register it in
  // the uniquing set.
4249  New = new (*this, TypeAlignment)
4250  DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr,
4251  AttrLoc);
4252 
4253  DependentSizedExtVectorType *CanonCheck
4254  = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4255  assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
4256  (void)CanonCheck;
4257  DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
4258  } else {
  // Build the canonical node first (recursively), then sugar over it.
4259  QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
4260  SourceLocation());
4261  New = new (*this, TypeAlignment) DependentSizedExtVectorType(
4262  *this, vecType, CanonExtTy, SizeExpr, AttrLoc);
4263  }
4264  }
4265 
4266  Types.push_back(New);
4267  return QualType(New, 0);
4268 }
4269 
4271  unsigned NumColumns) const {
4272  llvm::FoldingSetNodeID ID;
4273  ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns,
4274  Type::ConstantMatrix);
4275 
4276  assert(MatrixType::isValidElementType(ElementTy) &&
4277  "need a valid element type");
4278  assert(ConstantMatrixType::isDimensionValid(NumRows) &&
4280  "need valid matrix dimensions");
4281  void *InsertPos = nullptr;
4282  if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
4283  return QualType(MTP, 0);
4284 
4285  QualType Canonical;
4286  if (!ElementTy.isCanonical()) {
4287  Canonical =
4288  getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns);
4289 
4290  ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4291  assert(!NewIP && "Matrix type shouldn't already exist in the map");
4292  (void)NewIP;
4293  }
4294 
4295  auto *New = new (*this, TypeAlignment)
4296  ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
4297  MatrixTypes.InsertNode(New, InsertPos);
4298  Types.push_back(New);
4299  return QualType(New, 0);
4300 }
4301 
4303  Expr *RowExpr,
4304  Expr *ColumnExpr,
4305  SourceLocation AttrLoc) const {
  // Build (or reuse) a dependently-sized matrix type. The canonical node is
  // keyed on the canonical element type plus the row and column expressions.
4306  QualType CanonElementTy = getCanonicalType(ElementTy);
4307  llvm::FoldingSetNodeID ID;
4308  DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr,
4309  ColumnExpr);
4310 
4311  void *InsertPos = nullptr;
4312  DependentSizedMatrixType *Canon =
4313  DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4314 
4315  if (!Canon) {
4316  Canon = new (*this, TypeAlignment) DependentSizedMatrixType(
4317  *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc);
4318 #ifndef NDEBUG
4319  DependentSizedMatrixType *CanonCheck =
4320  DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4321  assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
4322 #endif
4323  DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
4324  Types.push_back(Canon);
4325  }
4326 
4327  // Already have a canonical version of the matrix type
4328  //
4329  // If it exactly matches the requested type, use it directly.
  // Fix: compare the *column* expression against ColumnExpr. The previous
  // code compared getRowExpr() against ColumnExpr (copy-paste error), so this
  // fast path only fired when the row and column shared the same Expr node,
  // and a redundant sugared type was built in every other exact-match case.
4330  if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
4331  Canon->getColumnExpr() == ColumnExpr)
4332  return QualType(Canon, 0);
4333 
4334  // Use Canon as the canonical type for newly-built type.
4335  DependentSizedMatrixType *New = new (*this, TypeAlignment)
4336  DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr,
4337  ColumnExpr, AttrLoc);
4338  Types.push_back(New);
4339  return QualType(New, 0);
4340 }
4341 
4343  Expr *AddrSpaceExpr,
4344  SourceLocation AttrLoc) const {
4345  assert(AddrSpaceExpr->isInstantiationDependent());
4346 
4347  QualType canonPointeeType = getCanonicalType(PointeeType);
4348 
  // Unique on the canonical pointee type plus the address-space expression.
4349  void *insertPos = nullptr;
4350  llvm::FoldingSetNodeID ID;
4351  DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
4352  AddrSpaceExpr);
4353 
4354  DependentAddressSpaceType *canonTy =
4355  DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);
4356 
4357  if (!canonTy) {
4358  canonTy = new (*this, TypeAlignment)
4359  DependentAddressSpaceType(*this, canonPointeeType,
4360  QualType(), AddrSpaceExpr, AttrLoc);
4361  DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
4362  Types.push_back(canonTy);
4363  }
4364 
  // If the request matches the canonical node exactly, reuse it; otherwise
  // build a sugared node whose canonical type points at canonTy.
4365  if (canonPointeeType == PointeeType &&
4366  canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
4367  return QualType(canonTy, 0);
4368 
4369  auto *sugaredType
4370  = new (*this, TypeAlignment)
4371  DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0),
4372  AddrSpaceExpr, AttrLoc);
4373  Types.push_back(sugaredType);
4374  return QualType(sugaredType, 0);
4375 }
4376 
4377 /// Determine whether \p T is canonical as the result type of a function.
  // NOTE(review): besides T.isCanonical(), this predicate also constrains the
  // qualifiers on T (continuation of the condition not shown here) — see
  // getCanonicalFunctionResultType, which strips ObjC lifetime qualifiers.
4379  return T.isCanonical() &&
4382 }
4383 
4384 /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
4385 QualType
4387  const FunctionType::ExtInfo &Info) const {
4388  // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
4389  // functionality creates a function without a prototype regardless of
4390  // language mode (so it makes them even in C++). Once the rewriter has been
4391  // fixed, this assertion can be enabled again.
4392  //assert(!LangOpts.requiresStrictPrototypes() &&
4393  // "strict prototypes are disabled");
4394 
4395  // Unique functions, to guarantee there is only one function of a particular
4396  // structure.
4397  llvm::FoldingSetNodeID ID;
4398  FunctionNoProtoType::Profile(ID, ResultTy, Info);
4399 
4400  void *InsertPos = nullptr;
4401  if (FunctionNoProtoType *FT =
4402  FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
4403  return QualType(FT, 0);
4404 
  // A non-canonical result type means this node is sugar; build the canonical
  // node first via a recursive call.
4405  QualType Canonical;
4406  if (!isCanonicalResultType(ResultTy)) {
4407  Canonical =
4409 
4410  // Get the new insert position for the node we care about.
  // The recursive call may have touched the FoldingSet; refresh InsertPos.
4411  FunctionNoProtoType *NewIP =
4412  FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
4413  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4414  }
4415 
4416  auto *New = new (*this, TypeAlignment)
4417  FunctionNoProtoType(ResultTy, Canonical, Info);
4418  Types.push_back(New);
4419  FunctionNoProtoTypes.InsertNode(New, InsertPos);
4420  return QualType(New, 0);
4421 }
4422 
4425  CanQualType CanResultType = getCanonicalType(ResultType);
4426 
4427  // Canonical result types do not have ARC lifetime qualifiers.
4428  if (CanResultType.getQualifiers().hasObjCLifetime()) {
  // Rebuild the qualified type with the ObjC lifetime qualifier removed.
4429  Qualifiers Qs = CanResultType.getQualifiers();
4430  Qs.removeObjCLifetime();
4432  getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
4433  }
4434 
  // No lifetime qualifier: the canonical type is already acceptable as-is.
4435  return CanResultType;
4436 }
4437 
4439  const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
  // An absent exception specification is trivially canonical.
4440  if (ESI.Type == EST_None)
4441  return true;
  // Before C++17 the exception spec is not part of the type, so any
  // non-empty spec is non-canonical sugar.
4442  if (!NoexceptInType)
4443  return false;
4444 
4445  // C++17 onwards: exception specification is part of the type, as a simple
4446  // boolean "can this function type throw".
4447  if (ESI.Type == EST_BasicNoexcept)
4448  return true;
4449 
4450  // A noexcept(expr) specification is (possibly) canonical if expr is
4451  // value-dependent.
4452  if (ESI.Type == EST_DependentNoexcept)
4453  return true;
4454 
4455  // A dynamic exception specification is canonical if it only contains pack
4456  // expansions (so we can't tell whether it's non-throwing) and all its
4457  // contained types are canonical.
4458  if (ESI.Type == EST_Dynamic) {
4459  bool AnyPackExpansions = false;
4460  for (QualType ET : ESI.Exceptions) {
4461  if (!ET.isCanonical())
4462  return false;
4463  if (ET->getAs<PackExpansionType>())
4464  AnyPackExpansions = true;
4465  }
4466  return AnyPackExpansions;
4467  }
4468 
4469  return false;
4470 }
4471 
4472 QualType ASTContext::getFunctionTypeInternal(
4473  QualType ResultTy, ArrayRef<QualType> ArgArray,
4474  const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
4475  size_t NumArgs = ArgArray.size();
4476 
4477  // Unique functions, to guarantee there is only one function of a particular
4478  // structure.
4479  llvm::FoldingSetNodeID ID;
4480  FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
4481  *this, true);
4482 
4483  QualType Canonical;
4484  bool Unique = false;
4485 
4486  void *InsertPos = nullptr;
4487  if (FunctionProtoType *FPT =
4488  FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
4489  QualType Existing = QualType(FPT, 0);
4490 
4491  // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
4492  // it so long as our exception specification doesn't contain a dependent
4493  // noexcept expression, or we're just looking for a canonical type.
4494  // Otherwise, we're going to need to create a type
4495  // sugar node to hold the concrete expression.
4496  if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
4497  EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
4498  return Existing;
4499 
4500  // We need a new type sugar node for this one, to hold the new noexcept
4501  // expression. We do no canonicalization here, but that's OK since we don't
4502  // expect to see the same noexcept expression much more than once.
4503  Canonical = getCanonicalType(Existing);
4504  Unique = true;
4505  }
4506 
4507  bool NoexceptInType = getLangOpts().CPlusPlus17;
4508  bool IsCanonicalExceptionSpec =
4509  isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);
4510 
4511  // Determine whether the type being created is already canonical or not.
4512  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
4513  isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
4514  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
4515  if (!ArgArray[i].isCanonicalAsParam())
4516  isCanonical = false;
4517 
4518  if (OnlyWantCanonical)
4519  assert(isCanonical &&
4520  "given non-canonical parameters constructing canonical type");
4521 
4522  // If this type isn't canonical, get the canonical version of it if we don't
4523  // already have it. The exception spec is only partially part of the
4524  // canonical type, and only in C++17 onwards.
4525  if (!isCanonical && Canonical.isNull()) {
4526  SmallVector<QualType, 16> CanonicalArgs;
4527  CanonicalArgs.reserve(NumArgs);
4528  for (unsigned i = 0; i != NumArgs; ++i)
4529  CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
4530 
4531  llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
4532  FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
4533  CanonicalEPI.HasTrailingReturn = false;
4534 
  // Canonicalize the exception specification per the rules that
  // isCanonicalExceptionSpecification checks.
4535  if (IsCanonicalExceptionSpec) {
4536  // Exception spec is already OK.
4537  } else if (NoexceptInType) {
4538  switch (EPI.ExceptionSpec.Type) {
4540  // We don't know yet. It shouldn't matter what we pick here; no-one
4541  // should ever look at this.
4542  [[fallthrough]];
4543  case EST_None: case EST_MSAny: case EST_NoexceptFalse:
4544  CanonicalEPI.ExceptionSpec.Type = EST_None;
4545  break;
4546 
4547  // A dynamic exception specification is almost always "not noexcept",
4548  // with the exception that a pack expansion might expand to no types.
4549  case EST_Dynamic: {
4550  bool AnyPacks = false;
4551  for (QualType ET : EPI.ExceptionSpec.Exceptions) {
4552  if (ET->getAs<PackExpansionType>())
4553  AnyPacks = true;
4554  ExceptionTypeStorage.push_back(getCanonicalType(ET));
4555  }
4556  if (!AnyPacks)
4557  CanonicalEPI.ExceptionSpec.Type = EST_None;
4558  else {
4559  CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
4560  CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
4561  }
4562  break;
4563  }
4564 
4565  case EST_DynamicNone:
4566  case EST_BasicNoexcept:
4567  case EST_NoexceptTrue:
4568  case EST_NoThrow:
4569  CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
4570  break;
4571 
4572  case EST_DependentNoexcept:
4573  llvm_unreachable("dependent noexcept is already canonical");
4574  }
4575  } else {
4577  }
4578 
4579  // Adjust the canonical function result type.
4580  CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
4581  Canonical =
4582  getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);
4583 
4584  // Get the new insert position for the node we care about.
  // The recursive call may have touched the FoldingSet; refresh InsertPos.
4585  FunctionProtoType *NewIP =
4586  FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
4587  assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4588  }
4589 
4590  // Compute the needed size to hold this FunctionProtoType and the
4591  // various trailing objects.
  // Size accounts for the trailing objects: parameter types, exception-spec
  // storage, ext-parameter infos, and non-fast qualifiers.
4592  auto ESH = FunctionProtoType::getExceptionSpecSize(
4593  EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
4594  size_t Size = FunctionProtoType::totalSizeToAlloc<
4597  FunctionProtoType::ExtParameterInfo, Qualifiers>(
4599  ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
4600  EPI.ExtParameterInfos ? NumArgs : 0,
4601  EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0);
4602 
4603  auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment);
4604  FunctionProtoType::ExtProtoInfo newEPI = EPI;
4605  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
4606  Types.push_back(FTP);
  // Nodes built only to carry a distinct noexcept expression (Unique) are
  // deliberately kept out of the uniquing set.
4607  if (!Unique)
4608  FunctionProtoTypes.InsertNode(FTP, InsertPos);
4609  return QualType(FTP, 0);
4610 }
4611 
4612 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
4613  llvm::FoldingSetNodeID ID;
4614  PipeType::Profile(ID, T, ReadOnly);
4615 
4616  void *InsertPos = nullptr;
4617  if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
4618  return QualType(PT, 0);
4619 
4620  // If the pipe element type isn't canonical, this won't be a canonical type
4621  // either, so fill in the canonical type field.
4622  QualType Canonical;
4623  if (!T.isCanonical()) {
4624  Canonical = getPipeType(getCanonicalType(T), ReadOnly);
4625 
4626  // Get the new insert position for the node we care about.
4627  PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
4628  assert(!NewIP && "Shouldn't be in the map!");
4629  (void)NewIP;
4630  }
4631  auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly);
4632  Types.push_back(New);
4633  PipeTypes.InsertNode(New, InsertPos);
4634  return QualType(New, 0);
4635 }
4636 
  // NOTE(review): the function signature line was elided in this listing.
  // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
  // Outside OpenCL mode the type is returned unchanged.
  return LangOpts.OpenCL ? getAddrSpaceQualType(Ty, LangAS::opencl_constant)
                         : Ty;
}
4642 
  // A read-only pipe is a PipeType with ReadOnly = true.
  return getPipeType(T, true);
}
4646 
  // A write-only pipe is a PipeType with ReadOnly = false.
  return getPipeType(T, false);
}
4650 
4651 QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const {
4652  llvm::FoldingSetNodeID ID;
4653  BitIntType::Profile(ID, IsUnsigned, NumBits);
4654 
4655  void *InsertPos = nullptr;
4656  if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
4657  return QualType(EIT, 0);
4658 
4659  auto *New = new (*this, TypeAlignment) BitIntType(IsUnsigned, NumBits);
4660  BitIntTypes.InsertNode(New, InsertPos);
4661  Types.push_back(New);
4662  return QualType(New, 0);
4663 }
4664 
                                             Expr *NumBitsExpr) const {
  // Unique a _BitInt type whose bit-width is a dependent expression.
  assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
  llvm::FoldingSetNodeID ID;
  DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr);

  // Reuse an existing node with the same profile if one was created before.
  void *InsertPos = nullptr;
  if (DependentBitIntType *Existing =
          DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(Existing, 0);

  auto *New = new (*this, TypeAlignment)
      DependentBitIntType(*this, IsUnsigned, NumBitsExpr);
  DependentBitIntTypes.InsertNode(New, InsertPos);

  Types.push_back(New);
  return QualType(New, 0);
}
4683 
#ifndef NDEBUG
// Debug-only predicate: returns true when D is a class template pattern or a
// class template partial specialization, i.e. a record whose type should be
// an InjectedClassNameType rather than a plain RecordType.
// NOTE(review): the function signature line was elided in this listing.
  if (!isa<CXXRecordDecl>(D)) return false;
  const auto *RD = cast<CXXRecordDecl>(D);
  if (isa<ClassTemplatePartialSpecializationDecl>(RD))
    return true;
  // A record that describes a class template (and is not itself a
  // specialization) is the template's pattern.
  if (RD->getDescribedClassTemplate() &&
      !isa<ClassTemplateSpecializationDecl>(RD))
    return true;
  return false;
}
#endif
4696 
/// getInjectedClassNameType - Return the unique reference to the
/// injected class name type for the specified templated declaration.
                                              QualType TST) const {
  if (Decl->TypeForDecl) {
    // Already cached on this declaration.
    assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
  } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
    // Share the type node with the previous declaration.
    assert(PrevDecl->TypeForDecl && "previous declaration has no type");
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
  } else {
    // First declaration: create and cache the injected-class-name type.
    Type *newType =
        new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
    Decl->TypeForDecl = newType;
    Types.push_back(newType);
  }
  return QualType(Decl->TypeForDecl, 0);
}
4716 
4717 /// getTypeDeclType - Return the unique reference to the type for the
4718 /// specified type declaration.
4719 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
4720  assert(Decl && "Passed null for Decl param");
4721  assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
4722 
4723  if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
4724  return getTypedefType(Typedef);
4725 
4726  assert(!isa<TemplateTypeParmDecl>(Decl) &&
4727  "Template type parameter types are always available.");
4728 
4729  if (const auto *Record = dyn_cast<RecordDecl>(Decl)) {
4730  assert(Record->isFirstDecl() && "struct/union has previous declaration");
4731  assert(!NeedsInjectedClassNameType(Record));
4732  return getRecordType(Record);
4733  } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) {
4734  assert(Enum->isFirstDecl() && "enum has previous declaration");
4735  return getEnumType(Enum);
4736  } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
4737  return getUnresolvedUsingType(Using);
4738  } else
4739  llvm_unreachable("TypeDecl without a type?");
4740 
4741  return QualType(Decl->TypeForDecl, 0);
4742 }
4743 
4744 /// getTypedefType - Return the unique reference to the type for the
4745 /// specified typedef name decl.
                                    QualType Underlying) const {
  // First type created for this typedef: cache it on the declaration.
  if (!Decl->TypeForDecl) {
    if (Underlying.isNull())
      Underlying = Decl->getUnderlyingType();
    auto *NewType = new (*this, TypeAlignment) TypedefType(
        Type::Typedef, Decl, QualType(), getCanonicalType(Underlying));
    Decl->TypeForDecl = NewType;
    Types.push_back(NewType);
    return QualType(NewType, 0);
  }
  // If the requested underlying type is the decl's own, reuse the cached node.
  if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying)
    return QualType(Decl->TypeForDecl, 0);
  assert(hasSameType(Decl->getUnderlyingType(), Underlying));

  // Divergent case: same typedef with a differently-written (but same)
  // underlying type; unique those in a folding set.
  llvm::FoldingSetNodeID ID;
  TypedefType::Profile(ID, Decl, Underlying);

  void *InsertPos = nullptr;
  if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) {
    assert(!T->typeMatchesDecl() &&
           "non-divergent case should be handled with TypeDecl");
    return QualType(T, 0);
  }

  // Allocate with trailing storage for the divergent underlying type.
  void *Mem =
      Allocate(TypedefType::totalSizeToAlloc<QualType>(true), TypeAlignment);
  auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying,
                                        getCanonicalType(Underlying));
  TypedefTypes.InsertNode(NewType, InsertPos);
  Types.push_back(NewType);
  return QualType(NewType, 0);
}
4779 
                                  QualType Underlying) const {
  llvm::FoldingSetNodeID ID;
  UsingType::Profile(ID, Found, Underlying);

  void *InsertPos = nullptr;
  if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  const Type *TypeForDecl =
      cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl();

  assert(!Underlying.hasLocalQualifiers());
  QualType Canon = Underlying->getCanonicalTypeInternal();
  assert(TypeForDecl->getCanonicalTypeInternal() == Canon);

  // Only allocate trailing storage for the underlying type when it differs
  // from the target declaration's own type node.
  if (Underlying.getTypePtr() == TypeForDecl)
    Underlying = QualType();
  void *Mem =
      Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()),
               TypeAlignment);
  UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon);
  Types.push_back(NewType);
  UsingTypes.InsertNode(NewType, InsertPos);
  return QualType(NewType, 0);
}
4806 
  // Cached on the declaration after first use.
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  // Share the type node created for a previous declaration, if any.
  if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
    if (PrevDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);

  auto *newType = new (*this, TypeAlignment) RecordType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}
4819 
  // Cached on the declaration after first use.
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  // Share the type node created for a previous declaration, if any.
  if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
    if (PrevDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);

  auto *newType = new (*this, TypeAlignment) EnumType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}
4832 
                                 const UnresolvedUsingTypenameDecl *Decl) const {
  // Cached on the declaration after first use.
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);

  // Share the type node already created for the canonical declaration.
  // NOTE(review): the initializer of CanonicalDecl was elided in this
  // listing (it is Decl->getCanonicalDecl() upstream).
  if (const UnresolvedUsingTypenameDecl *CanonicalDecl =
    if (CanonicalDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0);

  Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}
4848 
                                      QualType modifiedType,
                                      QualType equivalentType) const {
  llvm::FoldingSetNodeID id;
  AttributedType::Profile(id, attrKind, modifiedType, equivalentType);

  void *insertPos = nullptr;
  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
  if (type) return QualType(type, 0);

  // The canonical type is the equivalent type's canonical type; the
  // attribute itself is not part of the canonical form.
  QualType canon = getCanonicalType(equivalentType);
  type = new (*this, TypeAlignment)
      AttributedType(canon, attrKind, modifiedType, equivalentType);

  Types.push_back(type);
  AttributedTypes.InsertNode(type, insertPos);

  return QualType(type, 0);
}
4868 
4869 QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
4870  QualType Wrapped) {
4871  llvm::FoldingSetNodeID ID;
4872  BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr);
4873 
4874  void *InsertPos = nullptr;
4875  BTFTagAttributedType *Ty =
4876  BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
4877  if (Ty)
4878  return QualType(Ty, 0);
4879 
4880  QualType Canon = getCanonicalType(Wrapped);
4881  Ty = new (*this, TypeAlignment) BTFTagAttributedType(Canon, Wrapped, BTFAttr);
4882 
4883  Types.push_back(Ty);
4884  BTFTagAttributedTypes.InsertNode(Ty, InsertPos);
4885 
4886  return QualType(Ty, 0);
4887 }
4888 
/// Retrieve a substitution-result type.
    QualType Replacement, Decl *AssociatedDecl, unsigned Index,
    std::optional<unsigned> PackIndex) const {
  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index,
                                     PackIndex);
  void *InsertPos = nullptr;
  SubstTemplateTypeParmType *SubstParm =
      SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!SubstParm) {
    // Trailing storage for the replacement type is only allocated when the
    // replacement is non-canonical.
    void *Mem = Allocate(SubstTemplateTypeParmType::totalSizeToAlloc<QualType>(
                             !Replacement.isCanonical()),
                         TypeAlignment);
    SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl,
                                                    Index, PackIndex);
    Types.push_back(SubstParm);
    SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
  }

  return QualType(SubstParm, 0);
}
4912 
/// Retrieve a substitution-result type representing the substitution of an
/// entire template argument pack for a template type parameter pack.
QualType
                                          unsigned Index, bool Final,
                                          const TemplateArgument &ArgPack) {
#ifndef NDEBUG
  // Every element of the pack must itself be a type argument.
  for (const auto &P : ArgPack.pack_elements())
    assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
#endif

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final,
                                         ArgPack);
  void *InsertPos = nullptr;
  if (SubstTemplateTypeParmPackType *SubstParm =
          SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(SubstParm, 0);

  // Compute the canonical form when the associated decl or the pack is not
  // already canonical.
  QualType Canon;
  {
    TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack);
    if (!AssociatedDecl->isCanonicalDecl() ||
        !CanonArgPack.structurallyEquals(ArgPack)) {
      // NOTE(review): the assignment head was elided in this listing
      // (Canon = getSubstTemplateTypeParmPackType( upstream).
          AssociatedDecl->getCanonicalDecl(), Index, Final, CanonArgPack);
      // Recursion may have touched the folding set; verify our node is
      // still absent.
      [[maybe_unused]] const auto *Nothing =
          SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!Nothing);
    }
  }

  auto *SubstParm = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(
      Canon, AssociatedDecl, Index, Final, ArgPack);
  Types.push_back(SubstParm);
  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
  return QualType(SubstParm, 0);
}
4950 
/// Retrieve the template type parameter type for a template
/// parameter or parameter pack with the given depth, index, and (optionally)
/// name.
                                             bool ParameterPack,
                                             TemplateTypeParmDecl *TTPDecl) const {
  llvm::FoldingSetNodeID ID;
  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
  void *InsertPos = nullptr;
  TemplateTypeParmType *TypeParm
    = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (TypeParm)
    return QualType(TypeParm, 0);

  if (TTPDecl) {
    // A named parameter's canonical type is the unnamed form with the same
    // depth/index/pack flag.
    QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
    TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);

    // The recursive call above may have modified the folding set; our node
    // must still be absent.
    TemplateTypeParmType *TypeCheck
      = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!TypeCheck && "Template type parameter canonical type broken");
    (void)TypeCheck;
  } else
    TypeParm = new (*this, TypeAlignment)
        TemplateTypeParmType(Depth, Index, ParameterPack);

  Types.push_back(TypeParm);
  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);

  return QualType(TypeParm, 0);
}
4983 
                                           SourceLocation NameLoc,
                                           const TemplateArgumentListInfo &Args,
                                           QualType Underlying) const {
  assert(!Name.getAsDependentTemplateName() &&
         "No dependent template names here!");
  QualType TST =
      getTemplateSpecializationType(Name, Args.arguments(), Underlying);

  // Fill in source-location information for the specialization.
  // NOTE(review): the lines creating the TypeSourceInfo (DI) and its
  // TemplateSpecializationTypeLoc (TL) were elided in this listing.
  TL.setTemplateNameLoc(NameLoc);
  TL.setLAngleLoc(Args.getLAngleLoc());
  TL.setRAngleLoc(Args.getRAngleLoc());
  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
    TL.setArgLocInfo(i, Args[i].getLocInfo());
  return DI;
}
5005 
QualType
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");

  // Strip location info off each argument and delegate to the
  // TemplateArgument-based overload.
  // NOTE(review): the declaration of ArgVec was elided in this listing.
  ArgVec.reserve(Args.size());
  for (const TemplateArgumentLoc &Arg : Args)
    ArgVec.push_back(Arg.getArgument());

  return getTemplateSpecializationType(Template, ArgVec, Underlying);
}
5020 
5021 #ifndef NDEBUG
5023  for (const TemplateArgument &Arg : Args)
5024  if (Arg.isPackExpansion())
5025  return true;
5026 
5027  return true;
5028 }
5029 #endif
5030 
QualType
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");
  // Look through qualified template names.
  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
    Template = QTN->getUnderlyingTemplate();

  const auto *TD = Template.getAsTemplateDecl();
  bool IsTypeAlias = TD && TD->isTypeAlias();
  QualType CanonType;
  if (!Underlying.isNull())
    CanonType = getCanonicalType(Underlying);
  else {
    // We can get here with an alias template when the specialization contains
    // a pack expansion that does not match up with a parameter pack.
    assert((!IsTypeAlias || hasAnyPackExpansions(Args)) &&
           "Caller must compute aliased type");
    IsTypeAlias = false;
    CanonType = getCanonicalTemplateSpecializationType(Template, Args);
  }

  // Allocate the (non-canonical) template specialization type, but don't
  // try to unique it: these types typically have location information that
  // we don't unique and don't want to lose.
  void *Mem = Allocate(sizeof(TemplateSpecializationType) +
                           sizeof(TemplateArgument) * Args.size() +
                           (IsTypeAlias? sizeof(QualType) : 0),
                       TypeAlignment);
  auto *Spec
    = new (Mem) TemplateSpecializationType(Template, Args, CanonType,
                                           IsTypeAlias ? Underlying : QualType());

  Types.push_back(Spec);
  return QualType(Spec, 0);
}
5069 
5071  TemplateName Template, ArrayRef<TemplateArgument> Args) const {
5072  assert(!Template.getAsDependentTemplateName() &&
5073  "No dependent template names here!");
5074 
5075  // Look through qualified template names.
5076  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
5077  Template = TemplateName(QTN->getUnderlyingTemplate());
5078 
5079  // Build the canonical template specialization type.
5080  TemplateName CanonTemplate = getCanonicalTemplateName(Template);
5081  bool AnyNonCanonArgs = false;
5082  auto CanonArgs =
5083  ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs);
5084 
5085  // Determine whether this canonical template specialization type already
5086  // exists.
5087  llvm::FoldingSetNodeID ID;
5088  TemplateSpecializationType::Profile(ID, CanonTemplate,
5089  CanonArgs, *this);
5090 
5091  void *InsertPos = nullptr;
5093  = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
5094 
5095  if (!Spec) {
5096  // Allocate a new canonical template specialization type.
5097  void *Mem = Allocate((sizeof(TemplateSpecializationType) +
5098  sizeof(TemplateArgument) * CanonArgs.size()),
5099  TypeAlignment);
5100  Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
5101  CanonArgs,
5102  QualType(), QualType());
5103  Types.push_back(Spec);
5104  TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
5105  }
5106 
5107  assert(Spec->isDependentType() &&
5108  "Non-dependent template-id type must have a canonical type");
5109  return QualType(Spec, 0);
5110 }
5111 
                                 NestedNameSpecifier *NNS,
                                 QualType NamedType,
                                 TagDecl *OwnedTagDecl) const {
  llvm::FoldingSetNodeID ID;
  ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);

  void *InsertPos = nullptr;
  ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  // The elaboration is not part of the canonical form: canonicalize down to
  // the named type's canonical type.
  QualType Canon = NamedType;
  if (!Canon.isCanonical()) {
    Canon = getCanonicalType(NamedType);
    ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckT && "Elaborated canonical type broken");
    (void)CheckT;
  }

  // Trailing storage holds the owned tag decl only when one is present.
  void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl),
                       TypeAlignment);
  T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);

  Types.push_back(T);
  ElaboratedTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
5140 
QualType
  llvm::FoldingSetNodeID ID;
  ParenType::Profile(ID, InnerType);

  void *InsertPos = nullptr;
  ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  // Parentheses are not part of the canonical form: canonicalize down to the
  // inner type's canonical type.
  QualType Canon = InnerType;
  if (!Canon.isCanonical()) {
    Canon = getCanonicalType(InnerType);
    ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckT && "Paren canonical type broken");
    (void)CheckT;
  }

  T = new (*this, TypeAlignment) ParenType(InnerType, Canon);
  Types.push_back(T);
  ParenTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
5164 
QualType
                                       const IdentifierInfo *MacroII) const {
  // The macro qualifier is not part of the canonical form.
  QualType Canon = UnderlyingTy;
  if (!Canon.isCanonical())
    Canon = getCanonicalType(UnderlyingTy);

  // MacroQualifiedTypes are not uniqued: each request creates a new node.
  auto *newType = new (*this, TypeAlignment)
      MacroQualifiedType(UnderlyingTy, Canon, MacroII);
  Types.push_back(newType);
  return QualType(newType, 0);
}
5177 
                                          NestedNameSpecifier *NNS,
                                          const IdentifierInfo *Name,
                                          QualType Canon) const {
  // When no canonical type was supplied, compute one from the canonical
  // nested-name-specifier.
  // NOTE(review): the declaration of CanonNNS was elided in this listing
  // (it is getCanonicalNestedNameSpecifier(NNS) upstream).
  if (Canon.isNull()) {
    if (CanonNNS != NNS)
      Canon = getDependentNameType(Keyword, CanonNNS, Name);
  }

  llvm::FoldingSetNodeID ID;
  DependentNameType::Profile(ID, Keyword, NNS, Name);

  void *InsertPos = nullptr;
  // NOTE(review): the declaration of T was elided in this listing.
    = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon);
  Types.push_back(T);
  DependentNameTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
5202 
    const IdentifierInfo *Name, ArrayRef<TemplateArgumentLoc> Args) const {
  // Strip location info off each argument and delegate to the
  // TemplateArgument-based overload.
  // TODO: avoid this copy
  // NOTE(review): the declaration of ArgCopy was elided in this listing.
  for (unsigned I = 0, E = Args.size(); I != E; ++I)
    ArgCopy.push_back(Args[I].getArgument());
  return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy);
}
5212 
QualType
                                 ElaboratedTypeKeyword Keyword,
                                 NestedNameSpecifier *NNS,
                                 const IdentifierInfo *Name,
                                 ArrayRef<TemplateArgument> Args) const {
  assert((!NNS || NNS->isDependent()) &&
         "nested-name-specifier must be dependent");

  llvm::FoldingSetNodeID ID;
  DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
                                               Name, Args);

  void *InsertPos = nullptr;
  // NOTE(review): the declaration of T was elided in this listing.
    = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  // NOTE(review): the declaration of CanonNNS was elided in this listing
  // (it is getCanonicalNestedNameSpecifier(NNS) upstream).

  // In the canonical form an unspecified keyword becomes 'typename'.
  ElaboratedTypeKeyword CanonKeyword = Keyword;
  if (Keyword == ETK_None) CanonKeyword = ETK_Typename;

  bool AnyNonCanonArgs = false;
  auto CanonArgs =
      ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs);

  // Build a distinct canonical node only when something differs from the
  // canonical form.
  QualType Canon;
  if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
    Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
                                                   Name,
                                                   CanonArgs);

    // Find the insert position again.
    [[maybe_unused]] auto *Nothing =
        DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!Nothing && "canonical type broken");
  }

  void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
                        sizeof(TemplateArgument) * Args.size()),
                       TypeAlignment);
  T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
                                                    Name, Args, Canon);
  Types.push_back(T);
  DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
5262 
  // Build the template argument a template parameter produces when it names
  // itself: a type for type parameters, a DeclRefExpr for non-type
  // parameters, and a template name for template template parameters.
  // NOTE(review): the function signature line was elided in this listing.
  TemplateArgument Arg;
  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
    QualType ArgType = getTypeDeclType(TTP);
    if (TTP->isParameterPack())
      ArgType = getPackExpansionType(ArgType, std::nullopt);

    Arg = TemplateArgument(ArgType);
  } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
    QualType T =
        NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this);
    // For class NTTPs, ensure we include the 'const' so the type matches that
    // of a real template argument.
    // FIXME: It would be more faithful to model this as something like an
    // lvalue-to-rvalue conversion applied to a const-qualified lvalue.
    if (T->isRecordType())
      T.addConst();
    Expr *E = new (*this) DeclRefExpr(
        *this, NTTP, /*RefersToEnclosingVariableOrCapture*/ false, T,
        Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());

    if (NTTP->isParameterPack())
      E = new (*this)
          PackExpansionExpr(DependentTy, E, NTTP->getLocation(), std::nullopt);
    Arg = TemplateArgument(E);
  } else {
    auto *TTP = cast<TemplateTemplateParmDecl>(Param);
    if (TTP->isParameterPack())
      Arg = TemplateArgument(TemplateName(TTP), std::optional<unsigned>());
    else
      Arg = TemplateArgument(TemplateName(TTP));
  }

  // Parameter packs produce a one-element argument pack.
  if (Param->isTemplateParameterPack())
    Arg = TemplateArgument::CreatePackCopy(*this, Arg);

  return Arg;
}
5301 
void
  // Append one injected argument per parameter in the list.
  // NOTE(review): the signature lines (parameter list) were elided in this
  // listing.
  Args.reserve(Args.size() + Params->size());

  for (NamedDecl *Param : *Params)
    Args.push_back(getInjectedTemplateArg(Param));
}
5310 
                                          std::optional<unsigned> NumExpansions,
                                          bool ExpectPackInType) {
  assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) &&
         "Pack expansions must expand one or more parameter packs");

  llvm::FoldingSetNodeID ID;
  PackExpansionType::Profile(ID, Pattern, NumExpansions);

  void *InsertPos = nullptr;
  PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  // A pack expansion over a non-canonical pattern gets the expansion over
  // the canonical pattern as its canonical type.
  QualType Canon;
  if (!Pattern.isCanonical()) {
    Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions,
                                 /*ExpectPackInType=*/false);

    // Find the insert position again, in case we inserted an element into
    // PackExpansionTypes and invalidated our insert position.
    PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  T = new (*this, TypeAlignment)
      PackExpansionType(Pattern, Canon, NumExpansions);
  Types.push_back(T);
  PackExpansionTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
5341 
5342 /// CmpProtocolNames - Comparison predicate for sorting protocols
5343 /// alphabetically.
5344 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
5345  ObjCProtocolDecl *const *RHS) {
5346  return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName());
5347 }
5348 
  // Returns true when the protocol list is already in canonical form:
  // name-sorted, duplicate-free, and containing only canonical decls.
  // NOTE(review): the function signature line was elided in this listing.
  if (Protocols.empty()) return true;

  if (Protocols[0]->getCanonicalDecl() != Protocols[0])
    return false;

  // Each element must be strictly greater than its predecessor (sorted and
  // unique) and must be a canonical declaration.
  for (unsigned i = 1; i != Protocols.size(); ++i)
    if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
        Protocols[i]->getCanonicalDecl() != Protocols[i])
      return false;
  return true;
}
5361 
static void
  // Bring a protocol list into canonical form: sorted by name, canonical
  // decls only, duplicates removed.
  // NOTE(review): the signature line (parameter list) was elided in this
  // listing.
  // Sort protocols, keyed by name.
  llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames);

  // Canonicalize.
  for (ObjCProtocolDecl *&P : Protocols)
    P = P->getCanonicalDecl();

  // Remove duplicates.
  auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end());
  Protocols.erase(ProtocolsEnd, Protocols.end());
}
5375 
                                          ObjCProtocolDecl * const *Protocols,
                                          unsigned NumProtocols) const {
  // Legacy pointer+count overload: forward to the ArrayRef-based overload
  // with no type arguments and isKindOf = false.
  return getObjCObjectType(BaseType, {},
                           llvm::ArrayRef(Protocols, NumProtocols),
                           /*isKindOf=*/false);
}
5383 
5385  QualType baseType,
5386  ArrayRef<QualType> typeArgs,
5387  ArrayRef<ObjCProtocolDecl *> protocols,
5388  bool isKindOf) const {
5389  // If the base type is an interface and there aren't any protocols or
5390  // type arguments to add, then the interface type will do just fine.
5391  if (typeArgs.empty() && protocols.empty() && !isKindOf &&
5392  isa<ObjCInterfaceType>(baseType))
5393  return baseType;
5394 
5395  // Look in the folding set for an existing type.
5396  llvm::FoldingSetNodeID ID;
5397  ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf);
5398  void *InsertPos = nullptr;
5399  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
5400  return QualType(QT, 0);
5401 
5402  // Determine the type arguments to be used for canonicalization,