//===- ASTContext.cpp - Context to hold long-lived AST nodes -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "CXXABI.h"
#include "Interp/Context.h"
#include "clang/AST/APValue.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Comment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/Linkage.h"
#include "clang/Basic/Module.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;

/// \returns The locations that are relevant when searching for Doc comments
/// related to \p D.
static SmallVector<SourceLocation, 2>
getDeclLocsForCommentSearch(const Decl *D, SourceManager &SourceMgr) {
  assert(D);

  // The user cannot attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // The user cannot attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(D)) {
    // When a tag declaration (but not its definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get a comment.
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(D) ||
      isa<NonTypeTemplateParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return {};

  SmallVector<SourceLocation, 2> Locations;
  // Find the declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use the declaration starting location as the
  // "declaration location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  SourceLocation BaseLocation;
  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
      isa<ObjCPropertyDecl>(D) || isa<RedeclarableTemplateDecl>(D) ||
      isa<ClassTemplateSpecializationDecl>(D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(D))
    BaseLocation = D->getBeginLoc();
  else
    BaseLocation = D->getLocation();

  if (!D->getLocation().isMacroID()) {
    Locations.emplace_back(BaseLocation);
  } else {
    const auto *DeclCtx = D->getDeclContext();

    // When encountering definitions generated from a macro (that are not
    // contained by another declaration in the macro) we need to try and find
    // the comment at the location of the expansion, but if there is no comment
    // there we should retry to see if there is a comment inside the macro as
    // well. To this end we return the BaseLocation first, to look at the
    // expansion site; the second value is the spelling location of the
    // beginning of the declaration defined inside the macro.
    if (!(DeclCtx &&
          Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) {
      Locations.emplace_back(SourceMgr.getExpansionLoc(BaseLocation));
    }

    // We use Decl::getBeginLoc() and not just BaseLocation here to ensure
    // that we don't refer to the macro argument location at the expansion
    // site (this can happen if the name's spelling is provided via macro
    // argument), and always to the declaration itself.
    Locations.emplace_back(SourceMgr.getSpellingLoc(D->getBeginLoc()));
  }

  return Locations;
}
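
// Example (illustrative, not part of the upstream file): given
//
//   /// Doc at the expansion site.
//   #define DECLARE_COUNTER(Name) /** Doc inside the macro. */ int Name;
//   DECLARE_COUNTER(Counter)
//
// the returned list holds the expansion location first and the spelling
// location inside the macro second, so both comments are search candidates,
// in that order.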

RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const std::pair<FileID, unsigned> DeclLocDecomp =
      SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);

  // Slow path.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
         isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {

      // Check that the Doxygen trailing comment comes after the declaration
      // and starts on the same line and in the same file as the declaration.
      if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
                                       OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
                                               &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract the text between the comment and the declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // the comment and the declaration.
  if (Text.find_last_of(";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}
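
// Example (illustrative, not part of the upstream file): in
//
//   /// Doc for f.
//   void f();
//   int x; ///< Trailing doc for x.
//
// the lookup for `x` accepts the trailing comment because it starts on the
// same line as the declaration, while the lookup for `f` walks back to the
// preceding comment and would reject it only if a ';', '{', '}', '#' or '@'
// appeared between the comment and the declaration.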

RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

  for (const auto DeclLoc : DeclLocs) {
    // If the declaration doesn't map directly to a location in a file, we
    // can't find the comment.
    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    // Load comments from the external AST source, if we haven't yet.
    if (ExternalSource && !CommentsLoaded) {
      ExternalSource->ReadComments();
      CommentsLoaded = true;
    }

    if (Comments.empty())
      continue;

    const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
    if (!File.isValid())
      continue;

    const auto CommentsInThisFile = Comments.getCommentsInFile(File);
    if (!CommentsInThisFile || CommentsInThisFile->empty())
      continue;

    if (RawComment *Comment =
            getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile))
      return Comment;
  }

  return nullptr;
}

void ASTContext::addComment(const RawComment &RC) {
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
}

/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to the template.
static const Decl &adjustDeclToTemplate(const Decl &D) {
  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return *FTD;

    // Nothing to do if the function is not an implicit instantiation.
    if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
      return D;

    // Is the function an implicit instantiation of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
      return *FTD;

    // Is the function instantiated from a member definition of a class
    // template?
    if (const FunctionDecl *MemberDecl =
            FD->getInstantiatedFromMemberFunction())
      return *MemberDecl;

    return D;
  }
  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
    // Is the static data member instantiated from a member definition of a
    // class template?
    if (VD->isStaticDataMember())
      if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
        return *MemberDecl;

    return D;
  }
  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
    // Is this class declaration part of a class template?
    if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
      return *CTD;

    // Is the class an implicit instantiation of a class template or partial
    // specialization?
    if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
      if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
        return D;
      llvm::PointerUnion<ClassTemplateDecl *,
                         ClassTemplatePartialSpecializationDecl *>
          PU = CTSD->getSpecializedTemplateOrPartial();
      return PU.is<ClassTemplateDecl *>()
                 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
                 : *static_cast<const Decl *>(
                       PU.get<ClassTemplatePartialSpecializationDecl *>());
    }

    // Is the class instantiated from a member definition of a class template?
    if (const MemberSpecializationInfo *Info =
            CRD->getMemberSpecializationInfo())
      return *Info->getInstantiatedFrom();

    return D;
  }
  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
    // Is the enum instantiated from a member definition of a class template?
    if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
      return *MemberDecl;

    return D;
  }
  // FIXME: Adjust alias templates?
  return D;
}
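
// Example (illustrative, not part of the upstream file): for
//
//   template <typename T> struct S { void f() {} };
//   S<int> SI;  // implicit instantiation
//
// adjustDeclToTemplate() maps the instantiation `S<int>` back to the class
// template `S`, so a doc comment written on the template is also found when
// the instantiation is queried.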

const RawComment *ASTContext::getRawCommentForAnyRedecl(
    const Decl *D,
    const Decl **OriginalDecl) const {
  if (!D) {
    if (OriginalDecl)
      *OriginalDecl = nullptr;
    return nullptr;
  }

  D = &adjustDeclToTemplate(*D);

  // Any comment directly attached to D?
  {
    auto DeclComment = DeclRawComments.find(D);
    if (DeclComment != DeclRawComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = D;
      return DeclComment->second;
    }
  }

  // Any comment attached to any redeclaration of D?
  const Decl *CanonicalD = D->getCanonicalDecl();
  if (!CanonicalD)
    return nullptr;

  {
    auto RedeclComment = RedeclChainComments.find(CanonicalD);
    if (RedeclComment != RedeclChainComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = RedeclComment->second;
      auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
      assert(CommentAtRedecl != DeclRawComments.end() &&
             "This decl is supposed to have a comment attached.");
      return CommentAtRedecl->second;
    }
  }

  // Any redeclarations of D that we haven't checked for comments yet?
  // We can't use a DenseMap::iterator directly, since it'd get invalidated.
  auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
    return CommentlessRedeclChains.lookup(CanonicalD);
  }();

  for (const auto Redecl : D->redecls()) {
    assert(Redecl);
    // Skip all redeclarations that have been checked previously.
    if (LastCheckedRedecl) {
      if (LastCheckedRedecl == Redecl) {
        LastCheckedRedecl = nullptr;
      }
      continue;
    }
    const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
    if (RedeclComment) {
      cacheRawCommentForDecl(*Redecl, *RedeclComment);
      if (OriginalDecl)
        *OriginalDecl = Redecl;
      return RedeclComment;
    }
    CommentlessRedeclChains[CanonicalD] = Redecl;
  }

  if (OriginalDecl)
    *OriginalDecl = nullptr;
  return nullptr;
}
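
// Example (illustrative sketch with a hypothetical caller; `Ctx` and `D` are
// assumed to be in scope):
//
//   const Decl *Owner = nullptr;
//   if (const RawComment *RC = Ctx.getRawCommentForAnyRedecl(D, &Owner))
//     llvm::errs() << RC->getRawText(Ctx.getSourceManager()) << "\n";
//
// `Owner` is set to the redeclaration the comment is physically attached to,
// which may differ from `D`.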

void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
                                        const RawComment &Comment) const {
  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
  DeclRawComments.try_emplace(&OriginalD, &Comment);
  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
  RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
  CommentlessRedeclChains.erase(CanonicalDecl);
}

static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
                                 SmallVectorImpl<const NamedDecl *> &Redeclared) {
  const DeclContext *DC = ObjCMethod->getDeclContext();
  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
    const ObjCInterfaceDecl *ID = IMD->getClassInterface();
    if (!ID)
      return;
    // Add any redeclared methods here.
    for (const auto *Ext : ID->known_extensions()) {
      if (ObjCMethodDecl *RedeclaredMethod =
              Ext->getMethod(ObjCMethod->getSelector(),
                             ObjCMethod->isInstanceMethod()))
        Redeclared.push_back(RedeclaredMethod);
    }
  }
}

void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  FileID File;
  for (Decl *D : Decls) {
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.
  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(*D);

    if (DeclRawComments.count(D) > 0)
      continue;

    const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

    for (const auto DeclLoc : DeclLocs) {
      if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
        continue;

      if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl(
              D, DeclLoc, *CommentsInThisFile)) {
        cacheRawCommentForDecl(*D, *DocComment);
        comments::FullComment *FC = DocComment->parse(*this, PP, D);
        ParsedComments[D->getCanonicalDecl()] = FC;
        break;
      }
    }
  }
}
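
// Example (illustrative, not part of the upstream file): for
// `int x;  ///< doc for x`, the semicolon has already been consumed and the
// lexer's lookahead has seen the trailing comment by the time this runs, so
// the comment is present in `Comments` and gets attached to `x` here.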

comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  ThisDeclInfo->CommentDecl = FC->getDecl();
  if (!ThisDeclInfo->TemplateParameters)
    ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
  comments::FullComment *CFC =
      new (*this) comments::FullComment(FC->getBlocks(),
                                        ThisDeclInfo);
  return CFC;
}

comments::FullComment *ASTContext::getLocalCommentForDeclUncached(
    const Decl *D) const {
  const RawComment *RC = getRawCommentForDeclNoCache(D);
  return RC ? RC->parse(*this, nullptr, D) : nullptr;
}

comments::FullComment *ASTContext::getCommentForDecl(
                                              const Decl *D,
                                              const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(*D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if the latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    }
    else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase = NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl(NonVirtualBase, PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase = VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl(VirtualBase, PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to another redeclaration of this Decl, we
  // should parse the comment in the context of that other Decl. This is
  // important because comments can contain references to parameter names
  // which can be different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(*this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}
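
// Example (illustrative, not part of the upstream file): an undocumented
// override can inherit a comment from the method it overrides:
//
//   struct Base {
//     /// Frobnicates the widget.
//     virtual void frob();
//   };
//   struct Derived : Base {
//     void frob() override;  // getCommentForDecl() falls back to Base::frob
//   };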

void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                                   const ASTContext &C,
                                                   TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(Parm->getDepth());
  ID.AddInteger(Parm->getPosition());
  ID.AddBoolean(Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      ID.AddInteger(0);
      ID.AddBoolean(TTP->isParameterPack());
      if (TTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(TTP->getNumExpansionParameters());
      } else
        ID.AddBoolean(false);
      continue;
    }

    if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      ID.AddInteger(1);
      ID.AddBoolean(NTTP->isParameterPack());
      ID.AddPointer(C.getUnconstrainedType(C.getCanonicalType(NTTP->getType()))
                        .getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(false);
      continue;
    }

    auto *TTP = cast<TemplateTemplateParmDecl>(*P);
    ID.AddInteger(2);
    Profile(ID, C, TTP);
  }
}
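
// Note: the integers 0, 1 and 2 added above discriminate the kind of each
// inner parameter (type, non-type, template), so parameter lists that differ
// only in parameter kind never produce the same folding-set ID.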

TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
    TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    // Note that, per C++20 [temp.over.link]/6, when determining whether
    // template-parameters are equivalent, constraints are ignored.
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
          *this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
          TTP->getDepth(), TTP->getIndex(), nullptr, false,
          TTP->isParameterPack(), /*HasTypeConstraint=*/false,
          TTP->isExpandedParameterPack()
              ? std::optional<unsigned>(TTP->getNumExpansionParameters())
              : std::nullopt);
      CanonParams.push_back(NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      QualType T = getUnconstrainedType(getCanonicalType(NTTP->getType()));
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
              getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      CanonParams.push_back(Param);
    } else
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
          cast<TemplateTemplateParmDecl>(*P)));
  }

  TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(
      *this, getTranslationUnitDecl(), SourceLocation(), TTP->getDepth(),
      TTP->getPosition(), TTP->isParameterPack(), nullptr,
      TemplateParameterList::Create(*this, SourceLocation(), SourceLocation(),
                                    CanonParams, SourceLocation(),
                                    /*RequiresClause=*/nullptr));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}
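
// Example (illustrative, not part of the upstream file): the template
// template parameters of
//
//   template <template <typename> class A> struct P;
//   template <template <class U> class B> struct Q;
//
// profile identically (same depth, position, and parameter shape), so both
// share a single canonical TemplateTemplateParmDecl.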

TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
  auto Kind = getTargetInfo().getCXXABI().getKind();
  return getLangOpts().CXXABI.value_or(Kind);
}

CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return nullptr;

  switch (getCXXABIKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericARM: // Same as Itanium at this level.
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(*this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(*this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}

interp::Context &ASTContext::getInterpContext() {
  if (!InterpContext) {
    InterpContext.reset(new interp::Context(*this));
  }
  return *InterpContext.get();
}

ParentMapContext &ASTContext::getParentMapContext() {
  if (!ParentMapCtx)
    ParentMapCtx.reset(new ParentMapContext(*this));
  return *ParentMapCtx.get();
}

static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
                                          const LangOptions &LangOpts) {
  switch (LangOpts.getAddressSpaceMapMangling()) {
  case LangOptions::ASMM_Target:
    return TI.useAddressSpaceMapMangling();
  case LangOptions::ASMM_On:
    return true;
  case LangOptions::ASMM_Off:
    return false;
  }
  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
}

ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
      DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()),
      DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()),
      DependentSizedMatrixTypes(this_()),
      FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
      DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()),
      TemplateSpecializationTypes(this_()),
      DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
      DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  addTranslationUnitDecl();
}

ASTContext::~ASTContext() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl*,
       const ASTRecordLayout*>::iterator
       I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  ObjCLayouts.clear();

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  }
  ASTRecordLayouts.clear();

  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
                                                       AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();
}

void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
  TraversalScope = TopLevelDecls;
  getParentMapContext().clear();
}

void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
  Deallocations.push_back({Callback, Data});
}

void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}

void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                              \
  if (counts[Idx])                                                      \
    llvm::errs() << "    " << counts[Idx] << " " << #Name               \
                 << " types, " << sizeof(Name##Type) << " each "        \
                 << "(" << counts[Idx] * sizeof(Name##Type)             \
                 << " bytes)\n";                                        \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                       \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}

void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
                                           bool NotifyListeners) {
  if (NotifyListeners)
    if (auto *Listener = getASTMutationListener())
      Listener->RedefinedHiddenDefinition(ND, M);

  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
}

void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
  auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
  if (It == MergedDefModules.end())
    return;

  auto &Merged = It->second;
  llvm::DenseSet<Module *> Found;
  for (Module *&M : Merged)
    if (!Found.insert(M).second)
      M = nullptr;
  llvm::erase(Merged, nullptr);
}

ArrayRef<Module *>
ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
  auto MergedIt =
      MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
  if (MergedIt == MergedDefModules.end())
    return std::nullopt;
  return MergedIt->second;
}

void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
  if (LazyInitializers.empty())
    return;

  auto *Source = Ctx.getExternalSource();
  assert(Source && "lazy initializers but no external source");

  auto LazyInits = std::move(LazyInitializers);
  LazyInitializers.clear();

  for (auto ID : LazyInits)
    Initializers.push_back(Source->GetExternalDecl(ID));

  assert(LazyInitializers.empty() &&
         "GetExternalDecl for lazy module initializer added more inits");
}

void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
    auto It = ModuleInitializers.find(ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      Imported.resolve(*this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(OnlyDecl))
        D = OnlyDecl;
    }
  }

  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(D);
}

void ASTContext::addLazyModuleInitializers(Module *M,
                                           ArrayRef<uint32_t> IDs) {
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
                                 IDs.begin(), IDs.end());
}

ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
  auto It = ModuleInitializers.find(M);
  if (It == ModuleInitializers.end())
    return std::nullopt;

  auto *Inits = It->second;
  Inits->resolve(*this);
  return Inits->Initializers;
}

void ASTContext::setCurrentNamedModule(Module *M) {
  assert(M->isNamedModule());
  assert(!CurrentCXXNamedModule &&
         "The current named module should be set only once");
  CurrentCXXNamedModule = M;
}

ExternCContextDecl *ASTContext::getExternCContextDecl() const {
  if (!ExternCContext)
    ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());

  return ExternCContext;
}

BuiltinTemplateDecl *
ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
                                     const IdentifierInfo *II) const {
  auto *BuiltinTemplate =
      BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK);
  BuiltinTemplate->setImplicit();
  getTranslationUnitDecl()->addDecl(BuiltinTemplate);

  return BuiltinTemplate;
}

BuiltinTemplateDecl *ASTContext::getMakeIntegerSeqDecl() const {
  if (!MakeIntegerSeqDecl)
    MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
                                                  getMakeIntegerSeqName());
  return MakeIntegerSeqDecl;
}

BuiltinTemplateDecl *ASTContext::getTypePackElementDecl() const {
  if (!TypePackElementDecl)
    TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
                                                   getTypePackElementName());
  return TypePackElementDecl;
}

RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
                                            RecordDecl::TagKind TK) const {
  SourceLocation Loc;
  RecordDecl *NewDecl;
  if (getLangOpts().CPlusPlus)
    NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
                                    Loc, &Idents.get(Name));
  else
    NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
                                 &Idents.get(Name));
  NewDecl->setImplicit();
  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
      const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
  return NewDecl;
}

TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
                                              StringRef Name) const {
  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
  auto *NewDecl = TypedefDecl::Create(
      const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
      SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
  NewDecl->setImplicit();
  return NewDecl;
}

TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl)
    Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
  return Int128Decl;
}

TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl)
    UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
  return UInt128Decl;
}

void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K);
  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
  Types.push_back(Ty);
}

void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  ABI.reset(createCXXABI(Target));
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision.
  InitBuiltinType(Float128Ty, BuiltinType::Float128);

  // __ibm128 for IBM extended precision.
  InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3.
  InitBuiltinType(Float16Ty, BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 extension (fixed-point types).
  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
  InitBuiltinType(AccumTy, BuiltinType::Accum);
  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
  InitBuiltinType(FractTy, BuiltinType::Fract);
  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(Target.getWCharType()))
    InitBuiltinType(WCharTy, BuiltinType::WChar_S);
  else // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Target.getWCharType());
  }

  WIntTy = getFromTargetType(Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(Char8Ty, BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);

  // Placeholder types for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
    InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
    InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);

  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
    InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
    InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
    InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
    InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Target.hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Target.getTriple().isWasm() && Target.hasFeature("reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no.
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(getCanonicalType(
        getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
  } else {
    VoidPtrTy = getPointerType(VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);

  InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord("_GUID");
    getTranslationUnitDecl()->addDecl(MSGuidTagDecl);
  }
}
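
// Example (illustrative, not part of the upstream file): whether plain `char`
// maps to Char_S or Char_U above is target- and flag-dependent. x86-64 Linux
// defaults to a signed char and AArch64 Linux to an unsigned one, and
// -fsigned-char / -funsigned-char override the default via
// LangOpts.CharIsSigned.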

DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}

AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    void *Mem = Allocate(sizeof(AttrVec));
    Result = new (Mem) AttrVec;
  }

  return *Result;
}

/// Erase the attributes corresponding to the given declaration.
void ASTContext::eraseDeclAttrs(const Decl *D) {
  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
  if (Pos != DeclAttrs.end()) {
    Pos->second->~AttrVec();
    DeclAttrs.erase(Pos);
  }
}

// FIXME: Remove ?
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  return getTemplateOrSpecializationInfo(Var)
      .dyn_cast<MemberSpecializationInfo *>();
}

ASTContext::TemplateOrSpecializationInfo
ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
      TemplateOrInstantiation.find(Var);
  if (Pos == TemplateOrInstantiation.end())
    return {};

  return Pos->second;
}

void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                                TemplateSpecializationKind TSK,
                                          SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
                                            Tmpl, TSK, PointOfInstantiation));
}

void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}

NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
  return InstantiatedFromUsingDecl.lookup(UUD);
}

void
ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert((isa<UsingDecl>(Inst) ||
          isa<UnresolvedUsingValueDecl>(Inst) ||
          isa<UnresolvedUsingTypenameDecl>(Inst)) &&
         "instantiation did not produce a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}

UsingEnumDecl *
ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
  return InstantiatedFromUsingEnumDecl.lookup(UUD);
}

void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
                                                  UsingEnumDecl *Pattern) {
  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
}

UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  return InstantiatedFromUsingShadowDecl.lookup(Inst);
}

void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}

FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
  return InstantiatedFromUnnamedFieldDecl.lookup(Field);
}

void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
                                                     FieldDecl *Tmpl) {
  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
         "Already noted what unnamed field was instantiated from");

  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).begin();
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).end();
}

unsigned
ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
  auto Range = overridden_methods(Method);
  return Range.end() - Range.begin();
}

ASTContext::overridden_method_range
ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
      OverriddenMethods.find(Method->getCanonicalDecl());
  if (Pos == OverriddenMethods.end())
    return overridden_method_range(nullptr, nullptr);
  return overridden_method_range(Pos->second.begin(), Pos->second.end());
}

void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
                                     const CXXMethodDecl *Overridden) {
  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
  OverriddenMethods[Method].push_back(Overridden);
}

void ASTContext::getOverriddenMethods(
    const NamedDecl *D,
    SmallVectorImpl<const NamedDecl *> &Overridden) const {
  assert(D);

  if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
    Overridden.append(overridden_methods_begin(CXXMethod),
                      overridden_methods_end(CXXMethod));
    return;
  }

  const auto *Method = dyn_cast<ObjCMethodDecl>(D);
  if (!Method)
    return;

  SmallVector<const ObjCMethodDecl *, 8> OverDecls;
  Method->getOverriddenMethods(OverDecls);
  Overridden.append(OverDecls.begin(), OverDecls.end());
}

void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
  assert(!Import->getNextLocalImport() &&
         "Import declaration already in the chain");
  assert(!Import->isFromASTFile() && "Non-local import declaration");
  if (!FirstLocalImport) {
    FirstLocalImport = Import;
    LastLocalImport = Import;
    return;
  }

  LastLocalImport->setNextLocalImport(Import);
  LastLocalImport = Import;
}

//===----------------------------------------------------------------------===//
// Type Sizing and Analysis
//===----------------------------------------------------------------------===//

/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
/// scalar floating point type.
const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
  switch (T->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a floating point type!");
  case BuiltinType::BFloat16:
    return Target->getBFloat16Format();
  case BuiltinType::Float16:
    return Target->getHalfFormat();
  case BuiltinType::Half:
    // For HLSL, when the native half type is disabled, half is treated as
    // float.
    if (getLangOpts().HLSL)
      if (getLangOpts().NativeHalfType)
        return Target->getHalfFormat();
      else
        return Target->getFloatFormat();
    else
      return Target->getHalfFormat();
  case BuiltinType::Float:   return Target->getFloatFormat();
  case BuiltinType::Double:  return Target->getDoubleFormat();
  case BuiltinType::Ibm128:
    return Target->getIbm128Format();
  case BuiltinType::LongDouble:
    if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
      return AuxTarget->getLongDoubleFormat();
    return Target->getLongDoubleFormat();
  case BuiltinType::Float128:
    if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
      return AuxTarget->getFloat128Format();
    return Target->getFloat128Format();
  }
}

CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  unsigned Align = Target->getCharWidth();

  const unsigned AlignFromAttr = D->getMaxAlignment();
  if (AlignFromAttr)
    Align = AlignFromAttr;

  // __attribute__((aligned)) can increase or decrease alignment
  // *except* on a struct or struct member, where it only increases
  // alignment unless 'packed' is also specified.
  //
  // It is an error for alignas to decrease alignment, so we can
  // ignore that possibility; Sema should diagnose it.
  bool UseAlignAttrOnly;
  if (const FieldDecl *FD = dyn_cast<FieldDecl>(D))
    UseAlignAttrOnly =
        FD->hasAttr<PackedAttr>() || FD->getParent()->hasAttr<PackedAttr>();
  else
    UseAlignAttrOnly = AlignFromAttr != 0;
  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // Do nothing.
  } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
    QualType T = VD->getType();
    if (const auto *RT = T->getAs<ReferenceType>()) {
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          if (isa<VariableArrayType>(arrayType))
            Align = std::max(Align, Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(arrayType) &&
                   MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
            Align = std::max(Align, Target->getLargeArrayAlign());
        }
      }
      Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
    }

    // Ensure minimum alignment for global variables.
    if (const auto *VD = dyn_cast<VarDecl>(D))
      if (VD->hasGlobalStorage() && !ForAlignof) {
        uint64_t TypeSize =
            !BaseT->isIncompleteType() ? getTypeSize(T.getTypePtr()) : 0;
        Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
      }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(Align, FieldAlign);
      }
    }
  }

  // Some targets have a hard limit on the maximum requestable alignment in
  // an aligned attribute for static variables.
  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
  const auto *VD = dyn_cast<VarDecl>(D);
  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
    Align = std::min(Align, MaxAlignedAttr);

  return toCharUnitsFromBits(Align);
}
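
// Example (illustrative, not part of the upstream file): for a record aligned
// to 128 bits with a field at bit offset 96, the lowest set bit of 96 is 32
// (96 & -96 == 32), so the field's guaranteed alignment drops to 32 bits.
// This matches gcd(128, 96) because the record alignment is a power of two.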

CharUnits ASTContext::getExnObjectAlignment() const {
  return toCharUnitsFromBits(Target->getExnObjectAlignment());
}

// getTypeInfoDataSizeInChars - Return the size of a type, in
// chars. If the type is a record, its data size is returned. This is
// the size of the memcpy that's performed when assigning this type
// using a trivial copy/move assignment operator.
TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
  TypeInfoChars Info = getTypeInfoInChars(T);

  // In C++, objects can sometimes be allocated into the tail padding
  // of a base-class subobject. We decide whether that's possible
  // during class layout, so here we can just trust the layout results.
  if (getLangOpts().CPlusPlus) {
    if (const auto *RT = T->getAs<RecordType>()) {
      const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
      Info.Width = layout.getDataSize();
    }
  }

  return Info;
}

/// getConstantArrayInfoInChars - Performing the computation in CharUnits
/// instead of in bits prevents overflowing the uint64_t for some large arrays.
static TypeInfoChars getConstantArrayInfoInChars(const ASTContext &Context,
                                                 const ConstantArrayType *CAT) {
  TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType());
  uint64_t Size = CAT->getSize().getZExtValue();
  assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=
          (uint64_t)(-1)/Size) &&
         "Overflow in array type char size evaluation");
  uint64_t Width = EltInfo.Width.getQuantity() * Size;
  unsigned Align = EltInfo.Align.getQuantity();
  if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
      Context.getTargetInfo().getPointerWidth(LangAS::Default) == 64)
    Width = llvm::alignTo(Width, Align);
  return TypeInfoChars(CharUnits::fromQuantity(Width),
                       CharUnits::fromQuantity(Align),
                       EltInfo.AlignRequirement);
}
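
// Example (illustrative, not part of the upstream file): for
// `char A[1ULL << 61]` the size in bits (1 << 64) would overflow uint64_t,
// while the size in chars (1 << 61) still fits, which is why this helper
// computes in CharUnits.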

TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const {
  if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
    return getConstantArrayInfoInChars(*this, CAT);
  TypeInfo Info = getTypeInfo(T);
  return TypeInfoChars(toCharUnitsFromBits(Info.Width),
                       toCharUnitsFromBits(Info.Align), Info.AlignRequirement);
}

TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const {
  return getTypeInfoInChars(T.getTypePtr());
}

bool ASTContext::isPromotableIntegerType(QualType T) const {
  // HLSL doesn't promote all small integer types to int; it
  // just uses the rank-based promotion rules for all types.
  if (getLangOpts().HLSL)
    return false;

  if (const auto *BT = T->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Bool:
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
      return true;
    default:
      return false;
    }

  // Enumerated types are promotable to their compatible integer types
  // (C99 6.3.1.1), a.k.a. their underlying type (C++ [conv.prom]p2).
  if (const auto *ET = T->getAs<EnumType>()) {
    if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() ||
        ET->getDecl()->isScoped())
      return false;

    return true;
  }

  return false;
}
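
// Example (illustrative, not part of the upstream file): in C and C++,
// `short` promotes to `int` in arithmetic, so isPromotableIntegerType() is
// true for ShortTy; a scoped enumeration such as `enum class E : short` is
// excluded above because it never undergoes integral promotion.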
1819
1822}
1823
1824bool ASTContext::isAlignmentRequired(QualType T) const {
1825 return isAlignmentRequired(T.getTypePtr());
1826}
1827
1828unsigned ASTContext::getTypeAlignIfKnown(QualType T,
1829 bool NeedsPreferredAlignment) const {
1830 // An alignment on a typedef overrides anything else.
1831 if (const auto *TT = T->getAs<TypedefType>())
1832 if (unsigned Align = TT->getDecl()->getMaxAlignment())
1833 return Align;
1834
1835 // If we have an (array of) complete type, we're done.
1836 T = getBaseElementType(T);
1837 if (!T->isIncompleteType())
1838 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T);
1839
1840 // If we had an array type, its element type might be a typedef
1841 // type with an alignment attribute.
1842 if (const auto *TT = T->getAs<TypedefType>())
1843 if (unsigned Align = TT->getDecl()->getMaxAlignment())
1844 return Align;
1845
1846 // Otherwise, see if the declaration of the type had an attribute.
1847 if (const auto *TT = T->getAs<TagType>())
1848 return TT->getDecl()->getMaxAlignment();
1849
1850 return 0;
1851}
1852
1853TypeInfo ASTContext::getTypeInfo(const Type *T) const {
1854 TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
1855 if (I != MemoizedTypeInfo.end())
1856 return I->second;
1857
1858 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
1859 TypeInfo TI = getTypeInfoImpl(T);
1860 MemoizedTypeInfo[T] = TI;
1861 return TI;
1862}
1863
1864/// getTypeInfoImpl - Return the size of the specified type, in bits. This
1865/// method does not work on incomplete types.
1866///
1867/// FIXME: Pointers into different addr spaces could have different sizes and
1868/// alignment requirements: getPointerInfo should take an AddrSpace, this
1869/// should take a QualType, &c.
1870TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
1871 uint64_t Width = 0;
1872 unsigned Align = 8;
1873 AlignRequirementKind AlignRequirement = AlignRequirementKind::None;
1874 LangAS AS = LangAS::Default;
1875 switch (T->getTypeClass()) {
1876#define TYPE(Class, Base)
1877#define ABSTRACT_TYPE(Class, Base)
1878#define NON_CANONICAL_TYPE(Class, Base)
1879#define DEPENDENT_TYPE(Class, Base) case Type::Class:
1880#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
1881 case Type::Class: \
1882 assert(!T->isDependentType() && "should not see dependent types here"); \
1883 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
1884#include "clang/AST/TypeNodes.inc"
1885 llvm_unreachable("Should not see dependent types");
1886
1887 case Type::FunctionNoProto:
1888 case Type::FunctionProto:
1889 // GCC extension: alignof(function) = 32 bits
1890 Width = 0;
1891 Align = 32;
1892 break;
1893
1894 case Type::IncompleteArray:
1895 case Type::VariableArray:
1896 case Type::ConstantArray: {
1897 // Model non-constant sized arrays as size zero, but track the alignment.
1898 uint64_t Size = 0;
1899 if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
1900 Size = CAT->getSize().getZExtValue();
1901
1902 TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType());
1903 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
1904 "Overflow in array type bit size evaluation");
1905 Width = EltInfo.Width * Size;
1906 Align = EltInfo.Align;
1907 AlignRequirement = EltInfo.AlignRequirement;
1908 if (!getTargetInfo().getCXXABI().isMicrosoft() ||
1909 getTargetInfo().getPointerWidth(LangAS::Default) == 64)
1910 Width = llvm::alignTo(Width, Align);
1911 break;
1912 }
1913
1914 case Type::ExtVector:
1915 case Type::Vector: {
1916 const auto *VT = cast<VectorType>(T);
1917 TypeInfo EltInfo = getTypeInfo(VT->getElementType());
1918 Width = VT->isExtVectorBoolType() ? VT->getNumElements()
1919 : EltInfo.Width * VT->getNumElements();
1920 // Enforce at least byte size and alignment.
1921 Width = std::max<unsigned>(8, Width);
1922 Align = std::max<unsigned>(8, Width);
1923
1924 // If the alignment is not a power of 2, round up to the next power of 2.
1925 // This happens for non-power-of-2 length vectors.
1926 if (Align & (Align-1)) {
1927 Align = llvm::bit_ceil(Align);
1928 Width = llvm::alignTo(Width, Align);
1929 }
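// E.g. a vector of 3 floats starts out with Width == Align == 96 bits;
// 96 is not a power of 2, so Align becomes 128 and Width is padded to 128,
// giving the familiar sizeof == 16 for float3 (illustrative, before the
// target-specific adjustments below).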
1930 // Adjust the alignment based on the target max.
1931 uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
1932 if (TargetVectorAlign && TargetVectorAlign < Align)
1933 Align = TargetVectorAlign;
1934 if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
1935 // Adjust the alignment for fixed-length SVE vectors. This is important
1936 // for non-power-of-2 vector lengths.
1937 Align = 128;
1938 else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
1939 // Adjust the alignment for fixed-length SVE predicates.
1940 Align = 16;
1941 else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData)
1942 // Adjust the alignment for fixed-length RVV vectors.
1943 Align = std::min<unsigned>(64, Width);
1944 break;
1945 }
1946
1947 case Type::ConstantMatrix: {
1948 const auto *MT = cast<ConstantMatrixType>(T);
1949 TypeInfo ElementInfo = getTypeInfo(MT->getElementType());
1950 // The internal layout of a matrix value is implementation defined.
1951 // Initially be ABI compatible with arrays with respect to alignment and
1952 // size.
1953 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
1954 Align = ElementInfo.Align;
1955 break;
1956 }
1957
1958 case Type::Builtin:
1959 switch (cast<BuiltinType>(T)->getKind()) {
1960 default: llvm_unreachable("Unknown builtin type!");
1961 case BuiltinType::Void:
1962 // GCC extension: alignof(void) = 8 bits.
1963 Width = 0;
1964 Align = 8;
1965 break;
1966 case BuiltinType::Bool:
1967 Width = Target->getBoolWidth();
1968 Align = Target->getBoolAlign();
1969 break;
1970 case BuiltinType::Char_S:
1971 case BuiltinType::Char_U:
1972 case BuiltinType::UChar:
1973 case BuiltinType::SChar:
1974 case BuiltinType::Char8:
1975 Width = Target->getCharWidth();
1976 Align = Target->getCharAlign();
1977 break;
1978 case BuiltinType::WChar_S:
1979 case BuiltinType::WChar_U:
1980 Width = Target->getWCharWidth();
1981 Align = Target->getWCharAlign();
1982 break;
1983 case BuiltinType::Char16:
1984 Width = Target->getChar16Width();
1985 Align = Target->getChar16Align();
1986 break;
1987 case BuiltinType::Char32:
1988 Width = Target->getChar32Width();
1989 Align = Target->getChar32Align();
1990 break;
1991 case BuiltinType::UShort:
1992 case BuiltinType::Short:
1993 Width = Target->getShortWidth();
1994 Align = Target->getShortAlign();
1995 break;
1996 case BuiltinType::UInt:
1997 case BuiltinType::Int:
1998 Width = Target->getIntWidth();
1999 Align = Target->getIntAlign();
2000 break;
2001 case BuiltinType::ULong:
2002 case BuiltinType::Long:
2003 Width = Target->getLongWidth();
2004 Align = Target->getLongAlign();
2005 break;
2006 case BuiltinType::ULongLong:
2007 case BuiltinType::LongLong:
2008 Width = Target->getLongLongWidth();
2009 Align = Target->getLongLongAlign();
2010 break;
2011 case BuiltinType::Int128:
2012 case BuiltinType::UInt128:
2013 Width = 128;
2014 Align = Target->getInt128Align();
2015 break;
2016 case BuiltinType::ShortAccum:
2017 case BuiltinType::UShortAccum:
2018 case BuiltinType::SatShortAccum:
2019 case BuiltinType::SatUShortAccum:
2020 Width = Target->getShortAccumWidth();
2021 Align = Target->getShortAccumAlign();
2022 break;
2023 case BuiltinType::Accum:
2024 case BuiltinType::UAccum:
2025 case BuiltinType::SatAccum:
2026 case BuiltinType::SatUAccum:
2027 Width = Target->getAccumWidth();
2028 Align = Target->getAccumAlign();
2029 break;
2030 case BuiltinType::LongAccum:
2031 case BuiltinType::ULongAccum:
2032 case BuiltinType::SatLongAccum:
2033 case BuiltinType::SatULongAccum:
2034 Width = Target->getLongAccumWidth();
2035 Align = Target->getLongAccumAlign();
2036 break;
2037 case BuiltinType::ShortFract:
2038 case BuiltinType::UShortFract:
2039 case BuiltinType::SatShortFract:
2040 case BuiltinType::SatUShortFract:
2041 Width = Target->getShortFractWidth();
2042 Align = Target->getShortFractAlign();
2043 break;
2044 case BuiltinType::Fract:
2045 case BuiltinType::UFract:
2046 case BuiltinType::SatFract:
2047 case BuiltinType::SatUFract:
2048 Width = Target->getFractWidth();
2049 Align = Target->getFractAlign();
2050 break;
2051 case BuiltinType::LongFract:
2052 case BuiltinType::ULongFract:
2053 case BuiltinType::SatLongFract:
2054 case BuiltinType::SatULongFract:
2055 Width = Target->getLongFractWidth();
2056 Align = Target->getLongFractAlign();
2057 break;
2058 case BuiltinType::BFloat16:
2059 if (Target->hasBFloat16Type()) {
2060 Width = Target->getBFloat16Width();
2061 Align = Target->getBFloat16Align();
2062 } else if ((getLangOpts().SYCLIsDevice ||
2063 (getLangOpts().OpenMP &&
2064 getLangOpts().OpenMPIsTargetDevice)) &&
2065 AuxTarget->hasBFloat16Type()) {
2066 Width = AuxTarget->getBFloat16Width();
2067 Align = AuxTarget->getBFloat16Align();
2068 }
2069 break;
2070 case BuiltinType::Float16:
2071 case BuiltinType::Half:
2072 if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
2073 !getLangOpts().OpenMPIsTargetDevice) {
2074 Width = Target->getHalfWidth();
2075 Align = Target->getHalfAlign();
2076 } else {
2077 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2078 "Expected OpenMP device compilation.");
2079 Width = AuxTarget->getHalfWidth();
2080 Align = AuxTarget->getHalfAlign();
2081 }
2082 break;
2083 case BuiltinType::Float:
2084 Width = Target->getFloatWidth();
2085 Align = Target->getFloatAlign();
2086 break;
2087 case BuiltinType::Double:
2088 Width = Target->getDoubleWidth();
2089 Align = Target->getDoubleAlign();
2090 break;
2091 case BuiltinType::Ibm128:
2092 Width = Target->getIbm128Width();
2093 Align = Target->getIbm128Align();
2094 break;
2095 case BuiltinType::LongDouble:
2096 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2097 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
2098 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
2099 Width = AuxTarget->getLongDoubleWidth();
2100 Align = AuxTarget->getLongDoubleAlign();
2101 } else {
2102 Width = Target->getLongDoubleWidth();
2103 Align = Target->getLongDoubleAlign();
2104 }
2105 break;
2106 case BuiltinType::Float128:
2107 if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
2108 !getLangOpts().OpenMPIsTargetDevice) {
2109 Width = Target->getFloat128Width();
2110 Align = Target->getFloat128Align();
2111 } else {
2112 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2113 "Expected OpenMP device compilation.");
2114 Width = AuxTarget->getFloat128Width();
2115 Align = AuxTarget->getFloat128Align();
2116 }
2117 break;
2118 case BuiltinType::NullPtr:
2119 // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*)
2120 Width = Target->getPointerWidth(LangAS::Default);
2121 Align = Target->getPointerAlign(LangAS::Default);
2122 break;
2123 case BuiltinType::ObjCId:
2124 case BuiltinType::ObjCClass:
2125 case BuiltinType::ObjCSel:
2126 Width = Target->getPointerWidth(LangAS::Default);
2127 Align = Target->getPointerAlign(LangAS::Default);
2128 break;
2129 case BuiltinType::OCLSampler:
2130 case BuiltinType::OCLEvent:
2131 case BuiltinType::OCLClkEvent:
2132 case BuiltinType::OCLQueue:
2133 case BuiltinType::OCLReserveID:
2134#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2135 case BuiltinType::Id:
2136#include "clang/Basic/OpenCLImageTypes.def"
2137#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
2138 case BuiltinType::Id:
2139#include "clang/Basic/OpenCLExtensionTypes.def"
2140 AS = Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T));
2141 Width = Target->getPointerWidth(AS);
2142 Align = Target->getPointerAlign(AS);
2143 break;
2144 // The SVE types are effectively target-specific. The length of an
2145 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple
2146 // of 128 bits. There is one predicate bit for each vector byte, so the
2147 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits.
2148 //
2149 // Because the length is only known at runtime, we use a dummy value
2150 // of 0 for the static length. The alignment values are those defined
2151 // by the Procedure Call Standard for the Arm Architecture.
2152#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
2153 IsSigned, IsFP, IsBF) \
2154 case BuiltinType::Id: \
2155 Width = 0; \
2156 Align = 128; \
2157 break;
2158#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
2159 case BuiltinType::Id: \
2160 Width = 0; \
2161 Align = 16; \
2162 break;
2163#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \
2164 case BuiltinType::Id: \
2165 Width = 0; \
2166 Align = 16; \
2167 break;
2168#include "clang/Basic/AArch64SVEACLETypes.def"
2169#define PPC_VECTOR_TYPE(Name, Id, Size) \
2170 case BuiltinType::Id: \
2171 Width = Size; \
2172 Align = Size; \
2173 break;
2174#include "clang/Basic/PPCTypes.def"
2175#define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \
2176 IsFP, IsBF) \
2177 case BuiltinType::Id: \
2178 Width = 0; \
2179 Align = ElBits; \
2180 break;
2181#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \
2182 case BuiltinType::Id: \
2183 Width = 0; \
2184 Align = 8; \
2185 break;
2186#include "clang/Basic/RISCVVTypes.def"
2187#define WASM_TYPE(Name, Id, SingletonId) \
2188 case BuiltinType::Id: \
2189 Width = 0; \
2190 Align = 8; \
2191 break;
2192#include "clang/Basic/WebAssemblyReferenceTypes.def"
2193 }
2194 break;
2195 case Type::ObjCObjectPointer:
2196 Width = Target->getPointerWidth(LangAS::Default);
2197 Align = Target->getPointerAlign(LangAS::Default);
2198 break;
2199 case Type::BlockPointer:
2200 AS = cast<BlockPointerType>(T)->getPointeeType().getAddressSpace();
2201 Width = Target->getPointerWidth(AS);
2202 Align = Target->getPointerAlign(AS);
2203 break;
2204 case Type::LValueReference:
2205 case Type::RValueReference:
2206 // alignof and sizeof should never enter this code path here, so we go
2207 // the pointer route.
2208 AS = cast<ReferenceType>(T)->getPointeeType().getAddressSpace();
2209 Width = Target->getPointerWidth(AS);
2210 Align = Target->getPointerAlign(AS);
2211 break;
2212 case Type::Pointer:
2213 AS = cast<PointerType>(T)->getPointeeType().getAddressSpace();
2214 Width = Target->getPointerWidth(AS);
2215 Align = Target->getPointerAlign(AS);
2216 break;
2217 case Type::MemberPointer: {
2218 const auto *MPT = cast<MemberPointerType>(T);
2219 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
2220 Width = MPI.Width;
2221 Align = MPI.Align;
2222 break;
2223 }
2224 case Type::Complex: {
2225 // Complex types have the same alignment as their elements, but twice the
2226 // size.
2227 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
2228 Width = EltInfo.Width * 2;
2229 Align = EltInfo.Align;
2230 break;
2231 }
2232 case Type::ObjCObject:
2233 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
2234 case Type::Adjusted:
2235 case Type::Decayed:
2236 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
2237 case Type::ObjCInterface: {
2238 const auto *ObjCI = cast<ObjCInterfaceType>(T);
2239 if (ObjCI->getDecl()->isInvalidDecl()) {
2240 Width = 8;
2241 Align = 8;
2242 break;
2243 }
2244 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2245 Width = toBits(Layout.getSize());
2246 Align = toBits(Layout.getAlignment());
2247 break;
2248 }
2249 case Type::BitInt: {
2250 const auto *EIT = cast<BitIntType>(T);
2251 Align = std::clamp<unsigned>(llvm::PowerOf2Ceil(EIT->getNumBits()),
2252 getCharWidth(), Target->getLongLongAlign());
2253 Width = llvm::alignTo(EIT->getNumBits(), Align);
2254 break;
2255 }
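// Worked example (illustrative): for _BitInt(33) on a typical 64-bit
// target, PowerOf2Ceil(33) == 64, which clamps to Align == 64, and
// Width == alignTo(33, 64) == 64 bits, i.e. one 8-byte unit.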
2256 case Type::Record:
2257 case Type::Enum: {
2258 const auto *TT = cast<TagType>(T);
2259
2260 if (TT->getDecl()->isInvalidDecl()) {
2261 Width = 8;
2262 Align = 8;
2263 break;
2264 }
2265
2266 if (const auto *ET = dyn_cast<EnumType>(TT)) {
2267 const EnumDecl *ED = ET->getDecl();
2268 TypeInfo Info =
2269 getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType());
2270 if (unsigned AttrAlign = ED->getMaxAlignment()) {
2271 Info.Align = AttrAlign;
2272 Info.AlignRequirement = AlignRequirementKind::RequiredByEnum;
2273 }
2274 return Info;
2275 }
2276
2277 const auto *RT = cast<RecordType>(TT);
2278 const RecordDecl *RD = RT->getDecl();
2279 const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2280 Width = toBits(Layout.getSize());
2281 Align = toBits(Layout.getAlignment());
2282 AlignRequirement = RD->hasAttr<AlignedAttr>()
2283 ? AlignRequirementKind::RequiredByRecord
2284 : AlignRequirementKind::None;
2285 break;
2286 }
2287
2288 case Type::SubstTemplateTypeParm:
2289 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
2290 getReplacementType().getTypePtr());
2291
2292 case Type::Auto:
2293 case Type::DeducedTemplateSpecialization: {
2294 const auto *A = cast<DeducedType>(T);
2295 assert(!A->getDeducedType().isNull() &&
2296 "cannot request the size of an undeduced or dependent auto type");
2297 return getTypeInfo(A->getDeducedType().getTypePtr());
2298 }
2299
2300 case Type::Paren:
2301 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
2302
2303 case Type::MacroQualified:
2304 return getTypeInfo(
2305 cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr());
2306
2307 case Type::ObjCTypeParam:
2308 return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());
2309
2310 case Type::Using:
2311 return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr());
2312
2313 case Type::Typedef: {
2314 const auto *TT = cast<TypedefType>(T);
2315 TypeInfo Info = getTypeInfo(TT->desugar().getTypePtr());
2316 // If the typedef has an aligned attribute on it, it overrides any computed
2317 // alignment we have. This violates the GCC documentation (which says that
2318 // attribute(aligned) can only round up) but matches its implementation.
2319 if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
2320 Align = AttrAlign;
2321 AlignRequirement = AlignRequirementKind::RequiredByTypedef;
2322 } else {
2323 Align = Info.Align;
2324 AlignRequirement = Info.AlignRequirement;
2325 }
2326 Width = Info.Width;
2327 break;
2328 }
2329
2330 case Type::Elaborated:
2331 return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
2332
2333 case Type::Attributed:
2334 return getTypeInfo(
2335 cast<AttributedType>(T)->getEquivalentType().getTypePtr());
2336
2337 case Type::BTFTagAttributed:
2338 return getTypeInfo(
2339 cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());
2340
2341 case Type::Atomic: {
2342 // Start with the base type information.
2343 TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
2344 Width = Info.Width;
2345 Align = Info.Align;
2346
2347 if (!Width) {
2348 // An otherwise zero-sized type should still generate an
2349 // atomic operation.
2350 Width = Target->getCharWidth();
2351 assert(Align);
2352 } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2353 // If the size of the type doesn't exceed the platform's max
2354 // atomic promotion width, make the size and alignment more
2355 // favorable to atomic operations:
2356
2357 // Round the size up to a power of 2.
2358 Width = llvm::bit_ceil(Width);
2359
2360 // Set the alignment equal to the size.
2361 Align = static_cast<unsigned>(Width);
2362 }
2363 }
2364 break;
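// Illustrative example: an atomic of a 3-byte struct (Width == 24) on a
// target with MaxAtomicPromoteWidth >= 32 is promoted to Width == Align ==
// 32 bits, so it can be handled by a single 32-bit atomic instruction.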
2365
2366 case Type::Pipe:
2367 Width = Target->getPointerWidth(LangAS::opencl_global);
2368 Align = Target->getPointerAlign(LangAS::opencl_global);
2369 break;
2370 }
2371
2372 assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2373 return TypeInfo(Width, Align, AlignRequirement);
2374}
2375
2376unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2377 UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
2378 if (I != MemoizedUnadjustedAlign.end())
2379 return I->second;
2380
2381 unsigned UnadjustedAlign;
2382 if (const auto *RT = T->getAs<RecordType>()) {
2383 const RecordDecl *RD = RT->getDecl();
2384 const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2385 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2386 } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
2387 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2388 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2389 } else {
2390 UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
2391 }
2392
2393 MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2394 return UnadjustedAlign;
2395}
2396
2397unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
2398 unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
2399 getTargetInfo().getTriple(), Target->getTargetOpts().FeatureMap);
2400 return SimdAlign;
2401}
2402
2403/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2404CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
2405 return CharUnits::fromQuantity(BitSize / getCharWidth());
2406}
2407
2408/// toBits - Convert a size in characters to a size in bits.
2409int64_t ASTContext::toBits(CharUnits CharSize) const {
2410 return CharSize.getQuantity() * getCharWidth();
2411}
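// With the usual 8-bit char, toCharUnitsFromBits(64) yields 8 CharUnits and
// toBits(CharUnits::fromQuantity(8)) yields 64 again; note that the
// bit-to-char direction truncates when BitSize is not a multiple of the
// char width.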
2412
2413/// getTypeSizeInChars - Return the size of the specified type, in characters.
2414/// This method does not work on incomplete types.
2415CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
2416 return getTypeInfoInChars(T).Width;
2417}
2418CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
2419 return getTypeInfoInChars(T).Width;
2420}
2421
2422/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2423/// characters. This method does not work on incomplete types.
2424CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
2425 return toCharUnitsFromBits(getTypeAlign(T));
2426}
2427CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
2428 return toCharUnitsFromBits(getTypeAlign(T));
2429}
2430
2431/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
2432/// type, in characters, before alignment adjustments. This method does
2433/// not work on incomplete types.
2434CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
2435 return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
2436}
2437CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
2438 return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
2439}
2440
2441/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
2442/// type for the current target in bits. This can be different from the ABI
2443/// alignment when it is beneficial for performance or backwards
2444/// compatibility to overalign a data type. (Note: despite the name,
2445/// the preferred alignment is ABI-impacting, and not an optimization.)
2446unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
2447 TypeInfo TI = getTypeInfo(T);
2448 unsigned ABIAlign = TI.Align;
2449
2450 T = T->getBaseElementTypeUnsafe();
2451
2452 // The preferred alignment of member pointers is that of a pointer.
2453 if (T->isMemberPointerType())
2454 return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
2455
2456 if (!Target->allowsLargerPreferedTypeAlignment())
2457 return ABIAlign;
2458
2459 if (const auto *RT = T->getAs<RecordType>()) {
2460 const RecordDecl *RD = RT->getDecl();
2461
2462 // When used as part of a typedef, or together with a 'packed' attribute,
2463 // the 'aligned' attribute can be used to decrease alignment. Note that the
2464 // 'packed' case is already taken into consideration when computing the
2465 // alignment, we only need to handle the typedef case here.
2466 if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef ||
2467 RD->isInvalidDecl())
2468 return ABIAlign;
2469
2470 unsigned PreferredAlign = static_cast<unsigned>(
2471 toBits(getASTRecordLayout(RD).PreferredAlignment));
2472 assert(PreferredAlign >= ABIAlign &&
2473 "PreferredAlign should be at least as large as ABIAlign.");
2474 return PreferredAlign;
2475 }
2476
2477 // Double (and, for targets supporting AIX `power` alignment, long double) and
2478 // long long should be naturally aligned (despite requiring less alignment) if
2479 // possible.
2480 if (const auto *CT = T->getAs<ComplexType>())
2481 T = CT->getElementType().getTypePtr();
2482 if (const auto *ET = T->getAs<EnumType>())
2483 T = ET->getDecl()->getIntegerType().getTypePtr();
2484 if (T->isSpecificBuiltinType(BuiltinType::Double) ||
2485 T->isSpecificBuiltinType(BuiltinType::LongLong) ||
2486 T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2487 (T->isSpecificBuiltinType(BuiltinType::LongDouble) &&
2488 Target->defaultsToAIXPowerAlignment()))
2489 // Don't increase the alignment if an alignment attribute was specified on a
2490 // typedef declaration.
2491 if (!TI.isAlignRequired())
2492 return std::max(ABIAlign, (unsigned)getTypeSize(T));
2493
2494 return ABIAlign;
2495}
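// Illustrative example: on 32-bit x86 (SysV), 'double' has a 4-byte ABI
// alignment but is preferably 8-byte aligned; the code above then returns
// max(32, 64) == 64 bits, unless a typedef with an 'aligned' attribute
// pinned the alignment (TI.isAlignRequired()).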
2496
2497/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2498/// for __attribute__((aligned)) on this target, to be used if no alignment
2499/// value is specified.
2500unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
2501 return getTargetInfo().getDefaultAlignForAttributeAligned();
2502}
2503
2504/// getAlignOfGlobalVar - Return the alignment in bits that should be given
2505/// to a global variable of the specified type.
2506unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
2507 uint64_t TypeSize = getTypeSize(T.getTypePtr());
2508 return std::max(getPreferredTypeAlign(T),
2509 getTargetInfo().getMinGlobalAlign(TypeSize));
2510}
2511
2512/// getAlignOfGlobalVarInChars - Return the alignment in characters that
2513/// should be given to a global variable of the specified type.
2514CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const {
2515 return toCharUnitsFromBits(getAlignOfGlobalVar(T));
2516}
2517
2518CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
2519 CharUnits Offset = CharUnits::Zero();
2520 const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
2521 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2522 Offset += Layout->getBaseClassOffset(Base);
2523 Layout = &getASTRecordLayout(Base);
2524 }
2525 return Offset;
2526}
2527
2528CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const {
2529 const ValueDecl *MPD = MP.getMemberPointerDecl();
2530 CharUnits ThisAdjustment = CharUnits::Zero();
2531 ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
2532 bool DerivedMember = MP.isMemberPointerToDerivedMember();
2533 const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
2534 for (unsigned I = 0, N = Path.size(); I != N; ++I) {
2535 const CXXRecordDecl *Base = RD;
2536 const CXXRecordDecl *Derived = Path[I];
2537 if (DerivedMember)
2538 std::swap(Base, Derived);
2539 ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base);
2540 RD = Path[I];
2541 }
2542 if (DerivedMember)
2543 ThisAdjustment = -ThisAdjustment;
2544 return ThisAdjustment;
2545}
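// Illustrative example: converting an 'int Base::*' to 'int Derived::*'
// walks the path above and accumulates the offset of Base inside Derived;
// for a pointer to a member of a derived class the sign is flipped, since
// 'this' must then be adjusted in the opposite direction.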
2546
2547/// DeepCollectObjCIvars -
2548/// This routine first collects all declared, but not synthesized, ivars in
2549/// the superclass and then collects all ivars, including those synthesized
2550/// for the current class. It is used for the implementation of the current
2551/// class, when all ivars, declared and synthesized, are known.
2552void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
2553 bool leafClass,
2554 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
2555 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2556 DeepCollectObjCIvars(SuperClass, false, Ivars);
2557 if (!leafClass) {
2558 llvm::append_range(Ivars, OI->ivars());
2559 } else {
2560 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2561 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2562 Iv= Iv->getNextIvar())
2563 Ivars.push_back(Iv);
2564 }
2565}
2566
2567/// CollectInheritedProtocols - Collect all protocols in the current class
2568/// and those inherited by it.
2569void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
2570 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
2571 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
2572 // We can use protocol_iterator here instead of
2573 // all_referenced_protocol_iterator since we are walking all categories.
2574 for (auto *Proto : OI->all_referenced_protocols()) {
2575 CollectInheritedProtocols(Proto, Protocols);
2576 }
2577
2578 // Categories of this Interface.
2579 for (const auto *Cat : OI->visible_categories())
2580 CollectInheritedProtocols(Cat, Protocols);
2581
2582 if (ObjCInterfaceDecl *SD = OI->getSuperClass())
2583 while (SD) {
2584 CollectInheritedProtocols(SD, Protocols);
2585 SD = SD->getSuperClass();
2586 }
2587 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
2588 for (auto *Proto : OC->protocols()) {
2589 CollectInheritedProtocols(Proto, Protocols);
2590 }
2591 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
2592 // Insert the protocol.
2593 if (!Protocols.insert(
2594 const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
2595 return;
2596
2597 for (auto *Proto : OP->protocols())
2598 CollectInheritedProtocols(Proto, Protocols);
2599 }
2600}
2601
2602static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
2603 const RecordDecl *RD,
2604 bool CheckIfTriviallyCopyable) {
2605 assert(RD->isUnion() && "Must be union type");
2606 CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());
2607
2608 for (const auto *Field : RD->fields()) {
2609 if (!Context.hasUniqueObjectRepresentations(Field->getType(),
2610 CheckIfTriviallyCopyable))
2611 return false;
2612 CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType());
2613 if (FieldSize != UnionSize)
2614 return false;
2615 }
2616 return !RD->field_empty();
2617}
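// Illustrative examples: 'union { int a; char b; }' fails the check, since
// sizeof(b) != sizeof(union) leaves bytes that b's value does not
// determine, while 'union { int a; unsigned b; }' passes.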
2618
2619static int64_t getSubobjectOffset(const FieldDecl *Field,
2620 const ASTContext &Context,
2621 const clang::ASTRecordLayout & /*Layout*/) {
2622 return Context.getFieldOffset(Field);
2623}
2624
2625static int64_t getSubobjectOffset(const CXXRecordDecl *RD,
2626 const ASTContext &Context,
2627 const clang::ASTRecordLayout &Layout) {
2628 return Context.toBits(Layout.getBaseClassOffset(RD));
2629}
2630
2631static std::optional<int64_t>
2632structHasUniqueObjectRepresentations(const ASTContext &Context,
2633 const RecordDecl *RD,
2634 bool CheckIfTriviallyCopyable);
2635
2636static std::optional<int64_t>
2637getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context,
2638 bool CheckIfTriviallyCopyable) {
2639 if (Field->getType()->isRecordType()) {
2640 const RecordDecl *RD = Field->getType()->getAsRecordDecl();
2641 if (!RD->isUnion())
2642 return structHasUniqueObjectRepresentations(Context, RD,
2643 CheckIfTriviallyCopyable);
2644 }
2645
2646 // A _BitInt type may not be unique if it has padding bits
2647 // but if it is a bitfield the padding bits are not used.
2648 bool IsBitIntType = Field->getType()->isBitIntType();
2649 if (!Field->getType()->isReferenceType() && !IsBitIntType &&
2650 !Context.hasUniqueObjectRepresentations(Field->getType(),
2651 CheckIfTriviallyCopyable))
2652 return std::nullopt;
2653
2654 int64_t FieldSizeInBits =
2655 Context.toBits(Context.getTypeSizeInChars(Field->getType()));
2656 if (Field->isBitField()) {
2657 // If we have explicit padding bits, they don't contribute bits
2658 // to the actual object representation, so return 0.
2659 if (Field->isUnnamedBitfield())
2660 return 0;
2661
2662 int64_t BitfieldSize = Field->getBitWidthValue(Context);
2663 if (IsBitIntType) {
2664 if ((unsigned)BitfieldSize >
2665 cast<BitIntType>(Field->getType())->getNumBits())
2666 return std::nullopt;
2667 } else if (BitfieldSize > FieldSizeInBits) {
2668 return std::nullopt;
2669 }
2670 FieldSizeInBits = BitfieldSize;
2671 } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations(
2672 Field->getType(), CheckIfTriviallyCopyable)) {
2673 return std::nullopt;
2674 }
2675 return FieldSizeInBits;
2676}
2677
2678static std::optional<int64_t>
2679getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context,
2680 bool CheckIfTriviallyCopyable) {
2681 return structHasUniqueObjectRepresentations(Context, RD,
2682 CheckIfTriviallyCopyable);
2683}
2684
2685template <typename RangeT>
2686static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations(
2687 const RangeT &Subobjects, int64_t CurOffsetInBits,
2688 const ASTContext &Context, const clang::ASTRecordLayout &Layout,
2689 bool CheckIfTriviallyCopyable) {
2690 for (const auto *Subobject : Subobjects) {
2691 std::optional<int64_t> SizeInBits =
2692 getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable);
2693 if (!SizeInBits)
2694 return std::nullopt;
2695 if (*SizeInBits != 0) {
2696 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout);
2697 if (Offset != CurOffsetInBits)
2698 return std::nullopt;
2699 CurOffsetInBits += *SizeInBits;
2700 }
2701 }
2702 return CurOffsetInBits;
2703}
2704
2705static std::optional<int64_t>
2706structHasUniqueObjectRepresentations(const ASTContext &Context,
2707 const RecordDecl *RD,
2708 bool CheckIfTriviallyCopyable) {
2709 assert(!RD->isUnion() && "Must be struct/class type");
2710 const auto &Layout = Context.getASTRecordLayout(RD);
2711
2712 int64_t CurOffsetInBits = 0;
2713 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) {
2714 if (ClassDecl->isDynamicClass())
2715 return std::nullopt;
2716
2717 SmallVector<CXXRecordDecl*, 4> Bases;
2718 for (const auto &Base : ClassDecl->bases()) {
2719 // Empty types can be inherited from, and non-empty types can potentially
2720 // have tail padding, so just make sure there isn't an error.
2721 Bases.emplace_back(Base.getType()->getAsCXXRecordDecl());
2722 }
2723
2724 llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
2725 return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R);
2726 });
2727
2728 std::optional<int64_t> OffsetAfterBases =
2729 structSubobjectsHaveUniqueObjectRepresentations(
2730 Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable);
2731 if (!OffsetAfterBases)
2732 return std::nullopt;
2733 CurOffsetInBits = *OffsetAfterBases;
2734 }
2735
2736 std::optional<int64_t> OffsetAfterFields =
2737 structSubobjectsHaveUniqueObjectRepresentations(
2738 RD->fields(), CurOffsetInBits, Context, Layout,
2739 CheckIfTriviallyCopyable);
2740 if (!OffsetAfterFields)
2741 return std::nullopt;
2742 CurOffsetInBits = *OffsetAfterFields;
2743
2744 return CurOffsetInBits;
2745}
2746
2747bool ASTContext::hasUniqueObjectRepresentations(
2748 QualType Ty, bool CheckIfTriviallyCopyable) const {
2749 // C++17 [meta.unary.prop]:
2750 // The predicate condition for a template specialization
2751 // has_unique_object_representations<T> shall be
2752 // satisfied if and only if:
2753 // (9.1) - T is trivially copyable, and
2754 // (9.2) - any two objects of type T with the same value have the same
2755 // object representation, where two objects
2756 // of array or non-union class type are considered to have the same value
2757 // if their respective sequences of
2758 // direct subobjects have the same values, and two objects of union type
2759 // are considered to have the same
2760 // value if they have the same active member and the corresponding members
2761 // have the same value.
2762 // The set of scalar types for which this condition holds is
2763 // implementation-defined. [ Note: If a type has padding
2764 // bits, the condition does not hold; otherwise, the condition holds true
2765 // for unsigned integral types. -- end note ]
2766 assert(!Ty.isNull() && "Null QualType sent to unique object rep check");
2767
2768 // Arrays are unique only if their element type is unique.
2769 if (Ty->isArrayType())
2770 return hasUniqueObjectRepresentations(getBaseElementType(Ty),
2771 CheckIfTriviallyCopyable);
2772
2773 // (9.1) - T is trivially copyable...
2774 if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(*this))
2775 return false;
2776
2777 // All integrals and enums are unique.
2778 if (Ty->isIntegralOrEnumerationType()) {
2779 // Except _BitInt types that have padding bits.
2780 if (const auto *BIT = Ty->getAs<BitIntType>())
2781 return getTypeSize(BIT) == BIT->getNumBits();
2782
2783 return true;
2784 }
2785
2786 // All other pointers are unique.
2787 if (Ty->isPointerType())
2788 return true;
2789
2790 if (const auto *MPT = Ty->getAs<MemberPointerType>())
2791 return !ABI->getMemberPointerInfo(MPT).HasPadding;
2792
2793 if (Ty->isRecordType()) {
2794 const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl();
2795
2796 if (Record->isInvalidDecl())
2797 return false;
2798
2799 if (Record->isUnion())
2800 return unionHasUniqueObjectRepresentations(*this, Record,
2801 CheckIfTriviallyCopyable);
2802
2803 std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations(
2804 *this, Record, CheckIfTriviallyCopyable);
2805
2806 return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty));
2807 }
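// Illustrative examples: 'struct { char c; int i; }' is rejected because i
// sits at bit offset 32 while CurOffsetInBits is still 8 (three padding
// bytes), whereas 'struct { int a; int b; }' sums to exactly sizeof and is
// accepted.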
2808
2809 // FIXME: More cases to handle here (list by rsmith):
2810 // vectors (careful about, eg, vector of 3 foo)
2811 // _Complex int and friends
2812 // _Atomic T
2813 // Obj-C block pointers
2814 // Obj-C object pointers
2815 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
2816 // clk_event_t, queue_t, reserve_id_t)
2817 // There are also Obj-C class types and the Obj-C selector type, but I
2818 // think it makes sense for those to return false here.
2819
2820 return false;
2821}
2822
2823unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
2824 unsigned count = 0;
2825 // Count ivars declared in class extension.
2826 for (const auto *Ext : OI->known_extensions())
2827 count += Ext->ivar_size();
2828
2829 // Count ivar defined in this class's implementation. This
2830 // includes synthesized ivars.
2831 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
2832 count += ImplDecl->ivar_size();
2833
2834 return count;
2835}
2836
2837bool ASTContext::isSentinelNullExpr(const Expr *E) {
2838 if (!E)
2839 return false;
2840
2841 // nullptr_t is always treated as null.
2842 if (E->getType()->isNullPtrType()) return true;
2843
2844 if (E->getType()->isAnyPointerType() &&
2845 E->IgnoreParenCasts()->isNullPointerConstant(*this,
2846 Expr::NPC_ValueDependentIsNull))
2847 return true;
2848
2849 // Unfortunately, __null has type 'int'.
2850 if (isa<GNUNullExpr>(E)) return true;
2851
2852 return false;
2853}
2854
2855/// Get the implementation of ObjCInterfaceDecl, or nullptr if none
2856/// exists.
2857ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
2858 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2859 I = ObjCImpls.find(D);
2860 if (I != ObjCImpls.end())
2861 return cast<ObjCImplementationDecl>(I->second);
2862 return nullptr;
2863}
2864
2865/// Get the implementation of ObjCCategoryDecl, or nullptr if none
2866/// exists.
2867ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
2868 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2869 I = ObjCImpls.find(D);
2870 if (I != ObjCImpls.end())
2871 return cast<ObjCCategoryImplDecl>(I->second);
2872 return nullptr;
2873}
2874
2875/// Set the implementation of ObjCInterfaceDecl.
2876void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
2877 ObjCImplementationDecl *ImplD) {
2878 assert(IFaceD && ImplD && "Passed null params");
2879 ObjCImpls[IFaceD] = ImplD;
2880}
2881
2882/// Set the implementation of ObjCCategoryDecl.
2883void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
2884 ObjCCategoryImplDecl *ImplD) {
2885 assert(CatD && ImplD && "Passed null params");
2886 ObjCImpls[CatD] = ImplD;
2887}
2888
2889const ObjCMethodDecl *
2890ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
2891 return ObjCMethodRedecls.lookup(MD);
2892}
2893
2894void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
2895 const ObjCMethodDecl *Redecl) {
2896 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
2897 ObjCMethodRedecls[MD] = Redecl;
2898}
2899
2900const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
2901 const NamedDecl *ND) const {
2902 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
2903 return ID;
2904 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
2905 return CD->getClassInterface();
2906 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
2907 return IMD->getClassInterface();
2908
2909 return nullptr;
2910}
2911
2912/// Get the copy initialization expression of VarDecl, or nullptr if
2913/// none exists.
2914BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const {
2915 assert(VD && "Passed null params");
2916 assert(VD->hasAttr<BlocksAttr>() &&
2917 "getBlockVarCopyInits - not __block var");
2918 auto I = BlockVarCopyInits.find(VD);
2919 if (I != BlockVarCopyInits.end())
2920 return I->second;
2921 return {nullptr, false};
2922}
2923
2924/// Set the copy initialization expression of a block var decl.
2925void ASTContext::setBlockVarCopyInit(const VarDecl *VD, Expr *CopyExpr,
2926 bool CanThrow) {
2927 assert(VD && CopyExpr && "Passed null params");
2928 assert(VD->hasAttr<BlocksAttr>() &&
2929 "setBlockVarCopyInits - not __block var");
2930 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
2931}
2932
2933TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
2934 unsigned DataSize) const {
2935 if (!DataSize)
2936 DataSize = TypeLoc::getFullDataSizeForType(T);
2937 else
2938 assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
2939 "incorrect data size provided to CreateTypeSourceInfo!");
2940
2941 auto *TInfo =
2942 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
2943 new (TInfo) TypeSourceInfo(T, DataSize);
2944 return TInfo;
2945}
2946
2947TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
2948 SourceLocation L) const {
2949 TypeSourceInfo *DI = CreateTypeSourceInfo(T);
2950 DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
2951 return DI;
2952}
2953
2954const ASTRecordLayout &
2955ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
2956 return getObjCLayout(D, nullptr);
2957}
2958
2959const ASTRecordLayout &
2960ASTContext::getASTObjCImplementationLayout(
2961 const ObjCImplementationDecl *D) const {
2962 return getObjCLayout(D->getClassInterface(), D);
2963}
2964
2965static SmallVector<TemplateArgument, 16>
2966getCanonicalTemplateArguments(const ASTContext &C, ArrayRef<TemplateArgument> Args,
2967 bool &AnyNonCanonArgs) {
2968 SmallVector<TemplateArgument, 16> CanonArgs(Args);
2969 for (auto &Arg : CanonArgs) {
2970 TemplateArgument OrigArg = Arg;
2971 Arg = C.getCanonicalTemplateArgument(Arg);
2972 AnyNonCanonArgs |= !Arg.structurallyEquals(OrigArg);
2973 }
2974 return CanonArgs;
2975}
2976
2977//===----------------------------------------------------------------------===//
2978// Type creation/memoization methods
2979//===----------------------------------------------------------------------===//
2980
2981QualType
2982ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
2983 unsigned fastQuals = quals.getFastQualifiers();
2984 quals.removeFastQualifiers();
2985
2986 // Check if we've already instantiated this type.
2987 llvm::FoldingSetNodeID ID;
2988 ExtQuals::Profile(ID, baseType, quals);
2989 void *insertPos = nullptr;
2990 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
2991 assert(eq->getQualifiers() == quals);
2992 return QualType(eq, fastQuals);
2993 }
2994
2995 // If the base type is not canonical, make the appropriate canonical type.
2996 QualType canon;
2997 if (!baseType->isCanonicalUnqualified()) {
2998 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
2999 canonSplit.Quals.addConsistentQualifiers(quals);
3000 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);
3001
3002 // Re-find the insert position.
3003 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
3004 }
3005
3006 auto *eq = new (*this, alignof(ExtQuals)) ExtQuals(baseType, canon, quals);
3007 ExtQualNodes.InsertNode(eq, insertPos);
3008 return QualType(eq, fastQuals);
3009}
3010
3011QualType ASTContext::getAddrSpaceQualType(QualType T,
3012 LangAS AddressSpace) const {
3013 QualType CanT = getCanonicalType(T);
3014 if (CanT.getAddressSpace() == AddressSpace)
3015 return T;
3016
3017 // If we are composing extended qualifiers together, merge together
3018 // into one ExtQuals node.
3019 QualifierCollector Quals;
3020 const Type *TypeNode = Quals.strip(T);
3021
3022 // If this type already has an address space specified, it cannot get
3023 // another one.
3024 assert(!Quals.hasAddressSpace() &&
3025 "Type cannot be in multiple addr spaces!");
3026 Quals.addAddressSpace(AddressSpace);
3027
3028 return getExtQualType(TypeNode, Quals);
3029}
3030
3031QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
3032 // If the type is not qualified with an address space, just return it
3033 // immediately.
3034 if (!T.hasAddressSpace())
3035 return T;
3036
3037 // If we are composing extended qualifiers together, merge together
3038 // into one ExtQuals node.
3039 QualifierCollector Quals;
3040 const Type *TypeNode;
3041
3042 while (T.hasAddressSpace()) {
3043 TypeNode = Quals.strip(T);
3044
3045 // If the type no longer has an address space after stripping qualifiers,
3046 // jump out.
3047 if (!QualType(TypeNode, 0).hasAddressSpace())
3048 break;
3049
3050 // There might be sugar in the way. Strip it and try again.
3051 T = T.getSingleStepDesugaredType(*this);
3052 }
3053
3054 Quals.removeAddressSpace();
3055
3056 // Removal of the address space can mean there are no longer any
3057 // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
3058 // or required.
3059 if (Quals.hasNonFastQualifiers())
3060 return getExtQualType(TypeNode, Quals);
3061 else
3062 return QualType(TypeNode, Quals.getFastQualifiers());
3063}
3064
3065QualType ASTContext::getObjCGCQualType(QualType T,
3066 Qualifiers::GC GCAttr) const {
3067 QualType CanT = getCanonicalType(T);
3068 if (CanT.getObjCGCAttr() == GCAttr)
3069 return T;
3070
3071 if (const auto *ptr = T->getAs<PointerType>()) {
3072 QualType Pointee = ptr->getPointeeType();
3073 if (Pointee->isAnyPointerType()) {
3074 QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
3075 return getPointerType(ResultType);
3076 }
3077 }
3078
3079 // If we are composing extended qualifiers together, merge together
3080 // into one ExtQuals node.
3081 QualifierCollector Quals;
3082 const Type *TypeNode = Quals.strip(T);
3083
3084 // If this type already has an ObjCGC specified, it cannot get
3085 // another one.
3086 assert(!Quals.hasObjCGCAttr() &&
3087 "Type cannot have multiple ObjCGCs!");
3088 Quals.addObjCGCAttr(GCAttr);
3089
3090 return getExtQualType(TypeNode, Quals);
3091}
3092
3093QualType ASTContext::removePtrSizeAddrSpace(QualType T) const {
3094 if (const PointerType *Ptr = T->getAs<PointerType>()) {
3095 QualType Pointee = Ptr->getPointeeType();
3096 if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) {
3097 return getPointerType(removeAddrSpaceQualType(Pointee));
3098 }
3099 }
3100 return T;
3101}
3102
3103const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
3104 FunctionType::ExtInfo Info) {
3105 if (T->getExtInfo() == Info)
3106 return T;
3107
3108 QualType Result;
3109 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
3110 Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
3111 } else {
3112 const auto *FPT = cast<FunctionProtoType>(T);
3113 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
3114 EPI.ExtInfo = Info;
3115 Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
3116 }
3117
3118 return cast<FunctionType>(Result.getTypePtr());
3119}
3120
3121void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
3122 QualType ResultType) {
3123 FD = FD->getMostRecentDecl();
3124 while (true) {
3125 const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
3126 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
3127 FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
3128 if (FunctionDecl *Next = FD->getPreviousDecl())
3129 FD = Next;
3130 else
3131 break;
3132 }
3134 L->DeducedReturnType(FD, ResultType);
3135}
3136
3137/// Get a function type and produce the equivalent function type with the
3138/// specified exception specification. Type sugar that can be present on a
3139/// declaration of a function with an exception specification is permitted
3140/// and preserved. Other type sugar (for instance, typedefs) is not.
3141QualType ASTContext::getFunctionTypeWithExceptionSpec(
3142 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const {
3143 // Might have some parens.
3144 if (const auto *PT = dyn_cast<ParenType>(Orig))
3145 return getParenType(
3146 getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI));
3147
3148 // Might be wrapped in a macro qualified type.
3149 if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig))
3150 return getMacroQualifiedType(
3151 getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI),
3152 MQT->getMacroIdentifier());
3153
3154 // Might have a calling-convention attribute.
3155 if (const auto *AT = dyn_cast<AttributedType>(Orig))
3156 return getAttributedType(
3157 AT->getAttrKind(),
3158 getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI),
3159 getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI));
3160
3161 // Anything else must be a function type. Rebuild it with the new exception
3162 // specification.
3163 const auto *Proto = Orig->castAs<FunctionProtoType>();
3164 return getFunctionType(
3165 Proto->getReturnType(), Proto->getParamTypes(),
3166 Proto->getExtProtoInfo().withExceptionSpec(ESI));
3167}
3168
3169bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
3170 QualType U) const {
3171 return hasSameType(T, U) ||
3172 (getLangOpts().CPlusPlus17 &&
3173 hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None),
3174 getFunctionTypeWithExceptionSpec(U, EST_None)));
3175}
3176
3177QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) {
3178 if (const auto *Proto = T->getAs<FunctionProtoType>()) {
3179 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
3180 SmallVector<QualType, 16> Args(Proto->param_types().size());
3181 for (unsigned i = 0, n = Args.size(); i != n; ++i)
3182 Args[i] = removePtrSizeAddrSpace(Proto->param_types()[i]);
3183 return getFunctionType(RetTy, Args, Proto->getExtProtoInfo());
3184 }
3185
3186 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) {
3187 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
3188 return getFunctionNoProtoType(RetTy, Proto->getExtInfo());
3189 }
3190
3191 return T;
3192}
3193
3194bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) {
3195 return hasSameType(T, U) ||
3196 hasSameType(getFunctionTypeWithoutPtrSizes(T),
3197 getFunctionTypeWithoutPtrSizes(U));
3198}
3199
3200void ASTContext::adjustExceptionSpec(
3201 FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
3202 bool AsWritten) {
3203 // Update the type.
3204 QualType Updated =
3205 getFunctionTypeWithExceptionSpec(FD->getType(), ESI);
3206 FD->setType(Updated);
3207
3208 if (!AsWritten)
3209 return;
3210
3211 // Update the type in the type source information too.
3212 if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
3213 // If the type and the type-as-written differ, we may need to update
3214 // the type-as-written too.
3215 if (TSInfo->getType() != FD->getType())
3216 Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);
3217
3218 // FIXME: When we get proper type location information for exceptions,
3219 // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
3220 // up the TypeSourceInfo.
3221 assert(TypeLoc::getFullDataSizeForType(Updated) ==
3222 TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
3223 "TypeLoc size mismatch from updating exception specification");
3224 TSInfo->overrideType(Updated);
3225 }
3226}
3227
3228/// getComplexType - Return the uniqued reference to the type for a complex
3229/// number with the specified element type.
3230QualType ASTContext::getComplexType(QualType T) const {
3231 // Unique pointers, to guarantee there is only one pointer of a particular
3232 // structure.
3233 llvm::FoldingSetNodeID ID;
3234 ComplexType::Profile(ID, T);
3235
3236 void *InsertPos = nullptr;
3237 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
3238 return QualType(CT, 0);
3239
3240 // If the pointee type isn't canonical, this won't be a canonical type either,
3241 // so fill in the canonical type field.
3242 QualType Canonical;
3243 if (!T.isCanonical()) {
3244 Canonical = getComplexType(getCanonicalType(T));
3245
3246 // Get the new insert position for the node we care about.
3247 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
3248 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3249 }
3250 auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical);
3251 Types.push_back(New);
3252 ComplexTypes.InsertNode(New, InsertPos);
3253 return QualType(New, 0);
3254}
3255
3256/// getPointerType - Return the uniqued reference to the type for a pointer to
3257/// the specified type.
3258QualType ASTContext::getPointerType(QualType T) const {
3259 // Unique pointers, to guarantee there is only one pointer of a particular
3260 // structure.
3261 llvm::FoldingSetNodeID ID;
3262 PointerType::Profile(ID, T);
3263
3264 void *InsertPos = nullptr;
3265 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3266 return QualType(PT, 0);
3267
3268 // If the pointee type isn't canonical, this won't be a canonical type either,
3269 // so fill in the canonical type field.
3270 QualType Canonical;
3271 if (!T.isCanonical()) {
3272 Canonical = getPointerType(getCanonicalType(T));
3273
3274 // Get the new insert position for the node we care about.
3275 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3276 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3277 }
3278 auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical);
3279 Types.push_back(New);
3280 PointerTypes.InsertNode(New, InsertPos);
3281 return QualType(New, 0);
3282}
3283
3284QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
3285 llvm::FoldingSetNodeID ID;
3286 AdjustedType::Profile(ID, Orig, New);
3287 void *InsertPos = nullptr;
3288 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3289 if (AT)
3290 return QualType(AT, 0);
3291
3292 QualType Canonical = getCanonicalType(New);
3293
3294 // Get the new insert position for the node we care about.
3295 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3296 assert(!AT && "Shouldn't be in the map!");
3297
3298 AT = new (*this, alignof(AdjustedType))
3299 AdjustedType(Type::Adjusted, Orig, New, Canonical);
3300 Types.push_back(AT);
3301 AdjustedTypes.InsertNode(AT, InsertPos);
3302 return QualType(AT, 0);
3303}
3304
3305QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const {
3306 llvm::FoldingSetNodeID ID;
3307 AdjustedType::Profile(ID, Orig, Decayed);
3308 void *InsertPos = nullptr;
3309 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3310 if (AT)
3311 return QualType(AT, 0);
3312
3313 QualType Canonical = getCanonicalType(Decayed);
3314
3315 // Get the new insert position for the node we care about.
3316 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3317 assert(!AT && "Shouldn't be in the map!");
3318
3319 AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical);
3320 Types.push_back(AT);
3321 AdjustedTypes.InsertNode(AT, InsertPos);
3322 return QualType(AT, 0);
3323}
3324
3325QualType ASTContext::getDecayedType(QualType T) const {
3326 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
3327
3328 QualType Decayed;
3329
3330 // C99 6.7.5.3p7:
3331 // A declaration of a parameter as "array of type" shall be
3332 // adjusted to "qualified pointer to type", where the type
3333 // qualifiers (if any) are those specified within the [ and ] of
3334 // the array type derivation.
3335 if (T->isArrayType())
3336 Decayed = getArrayDecayedType(T);
3337
3338 // C99 6.7.5.3p8:
3339 // A declaration of a parameter as "function returning type"
3340 // shall be adjusted to "pointer to function returning type", as
3341 // in 6.3.2.1.
3342 if (T->isFunctionType())
3343 Decayed = getPointerType(T);
3344
3345 return getDecayedType(T, Decayed);
3346}
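// E.g. a parameter declared as 'int a[10]' decays to 'int *', and one
// declared with a function type 'void(int)' decays to 'void (*)(int)'; the
// DecayedType node built above keeps the original type around as sugar.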
3347
3348/// getBlockPointerType - Return the uniqued reference to the type for
3349/// a pointer to the specified block.
3350QualType ASTContext::getBlockPointerType(QualType T) const {
3351 assert(T->isFunctionType() && "block of function types only");
3352 // Unique pointers, to guarantee there is only one block of a particular
3353 // structure.
3354 llvm::FoldingSetNodeID ID;
3355 BlockPointerType::Profile(ID, T);
3356
3357 void *InsertPos = nullptr;
3358 if (BlockPointerType *PT =
3359 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3360 return QualType(PT, 0);
3361
3362 // If the block pointee type isn't canonical, this won't be a canonical
3363 // type either so fill in the canonical type field.
3364 QualType Canonical;
3365 if (!T.isCanonical()) {
3366 Canonical = getBlockPointerType(getCanonicalType(T));
3367
3368 // Get the new insert position for the node we care about.
3369 BlockPointerType *NewIP =
3370 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3371 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3372 }
3373 auto *New =
3374 new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical);
3375 Types.push_back(New);
3376 BlockPointerTypes.InsertNode(New, InsertPos);
3377 return QualType(New, 0);
3378}
3379
3380/// getLValueReferenceType - Return the uniqued reference to the type for an
3381/// lvalue reference to the specified type.
3382QualType
3383ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
3384 assert((!T->isPlaceholderType() ||
3385 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
3386 "Unresolved placeholder type");
3387
3388 // Unique pointers, to guarantee there is only one pointer of a particular
3389 // structure.
3390 llvm::FoldingSetNodeID ID;
3391 ReferenceType::Profile(ID, T, SpelledAsLValue);
3392
3393 void *InsertPos = nullptr;
3394 if (LValueReferenceType *RT =
3395 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
3396 return QualType(RT, 0);
3397
3398 const auto *InnerRef = T->getAs<ReferenceType>();
3399
3400 // If the referencee type isn't canonical, this won't be a canonical type
3401 // either, so fill in the canonical type field.
3402 QualType Canonical;
3403 if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
3404 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
3405 Canonical = getLValueReferenceType(getCanonicalType(PointeeType));
3406
3407 // Get the new insert position for the node we care about.
3408 LValueReferenceType *NewIP =
3409 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
3410 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3411 }
3412
3413 auto *New = new (*this, alignof(LValueReferenceType))
3414 LValueReferenceType(T, Canonical, SpelledAsLValue);
3415 Types.push_back(New);
3416 LValueReferenceTypes.InsertNode(New, InsertPos);
3417
3418 return QualType(New, 0);
3419}
3420
3421/// getRValueReferenceType - Return the uniqued reference to the type for an
3422/// rvalue reference to the specified type.
3423QualType ASTContext::getRValueReferenceType(QualType T) const {
3424 assert((!T->isPlaceholderType() ||
3425 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
3426 "Unresolved placeholder type");
3427
3428 // Unique pointers, to guarantee there is only one pointer of a particular
3429 // structure.
3430 llvm::FoldingSetNodeID ID;
3431 ReferenceType::Profile(ID, T, false);
3432
3433 void *InsertPos = nullptr;
3434 if (RValueReferenceType *RT =
3435 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
3436 return QualType(RT, 0);
3437
3438 const auto *InnerRef = T->getAs<ReferenceType>();
3439
3440 // If the referencee type isn't canonical, this won't be a canonical type
3441 // either, so fill in the canonical type field.
3442 QualType Canonical;
3443 if (InnerRef || !T.isCanonical()) {
3444 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
3445 Canonical = getRValueReferenceType(getCanonicalType(PointeeType));
3446
3447 // Get the new insert position for the node we care about.
3448 RValueReferenceType *NewIP =
3449 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
3450 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3451 }
3452
3453 auto *New = new (*this, alignof(RValueReferenceType))
3454 RValueReferenceType(T, Canonical);
3455 Types.push_back(New);
3456 RValueReferenceTypes.InsertNode(New, InsertPos);
3457 return QualType(New, 0);
3458}
3459
3460/// getMemberPointerType - Return the uniqued reference to the type for a
3461/// member pointer to the specified type, in the specified class.
3462QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
3463 // Unique pointers, to guarantee there is only one pointer of a particular
3464 // structure.
3465 llvm::FoldingSetNodeID ID;
3466 MemberPointerType::Profile(ID, T, Cls);
3467
3468 void *InsertPos = nullptr;
3469 if (MemberPointerType *PT =
3470 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3471 return QualType(PT, 0);
3472
3473 // If the pointee or class type isn't canonical, this won't be a canonical
3474 // type either, so fill in the canonical type field.
3475 QualType Canonical;
3476 if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
3477 Canonical = getMemberPointerType(getCanonicalType(T), getCanonicalType(Cls));
3478
3479 // Get the new insert position for the node we care about.
3480 MemberPointerType *NewIP =
3481 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3482 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3483 }
3484 auto *New = new (*this, alignof(MemberPointerType))
3485 MemberPointerType(T, Cls, Canonical);
3486 Types.push_back(New);
3487 MemberPointerTypes.InsertNode(New, InsertPos);
3488 return QualType(New, 0);
3489}
3490
3491/// getConstantArrayType - Return the unique reference to the type for an
3492/// array of the specified element type.
3493QualType ASTContext::getConstantArrayType(QualType EltTy,
3494 const llvm::APInt &ArySizeIn,
3495 const Expr *SizeExpr,
3496 ArraySizeModifier ASM,
3497 unsigned IndexTypeQuals) const {
3498 assert((EltTy->isDependentType() ||
3499 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
3500 "Constant array of VLAs is illegal!");
3501
3502 // We only need the size as part of the type if it's instantiation-dependent.
3503 if (SizeExpr && !SizeExpr->isInstantiationDependent())
3504 SizeExpr = nullptr;
3505
3506 // Convert the array size into a canonical width matching the pointer size for
3507 // the target.
3508 llvm::APInt ArySize(ArySizeIn);
3509 ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());
3510
3511 llvm::FoldingSetNodeID ID;
3512 ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM,
3513 IndexTypeQuals);
3514
3515 void *InsertPos = nullptr;
3516 if (ConstantArrayType *ATP =
3517 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
3518 return QualType(ATP, 0);
3519
3520 // If the element type isn't canonical or has qualifiers, or the array bound
3521 // is instantiation-dependent, this won't be a canonical type either, so fill
3522 // in the canonical type field.
3523 QualType Canon;
3524 // FIXME: Check below should look for qualifiers behind sugar.
3525 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
3526 SplitQualType canonSplit = getCanonicalType(EltTy).split();
3527 Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr,
3528 ASM, IndexTypeQuals);
3529 Canon = getQualifiedType(Canon, canonSplit.Quals);
3530
3531 // Get the new insert position for the node we care about.
3532 ConstantArrayType *NewIP =
3533 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
3534 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3535 }
3536
3537 void *Mem = Allocate(
3538 ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0),
3539 alignof(ConstantArrayType));
3540 auto *New = new (Mem)
3541 ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals);
3542 ConstantArrayTypes.InsertNode(New, InsertPos);
3543 Types.push_back(New);
3544 return QualType(New, 0);
3545}
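// Illustrative usage (assuming an ASTContext &Ctx): 'int[10]' can be built as
//   QualType Arr = Ctx.getConstantArrayType(
//       Ctx.IntTy, llvm::APInt(32, 10), /*SizeExpr=*/nullptr,
//       ArraySizeModifier::Normal, /*IndexTypeQuals=*/0);
// Because the bound is zero-extended or truncated to the target's maximum
// pointer width first, the APInt's original bit-width does not affect
// uniquing.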
3546
3547/// getVariableArrayDecayedType - Turns the given type, which may be
3548/// variably-modified, into the corresponding type with all the known
3549/// sizes replaced with [*].
3550QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
3551 // Vastly most common case.
3552 if (!type->isVariablyModifiedType()) return type;
3553
3554 QualType result;
3555
3556 SplitQualType split = type.getSplitDesugaredType();
3557 const Type *ty = split.Ty;
3558 switch (ty->getTypeClass()) {
3559#define TYPE(Class, Base)
3560#define ABSTRACT_TYPE(Class, Base)
3561#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3562#include "clang/AST/TypeNodes.inc"
3563 llvm_unreachable("didn't desugar past all non-canonical types?");
3564
3565 // These types should never be variably-modified.
3566 case Type::Builtin:
3567 case Type::Complex:
3568 case Type::Vector:
3569 case Type::DependentVector:
3570 case Type::ExtVector:
3571 case Type::DependentSizedExtVector:
3572 case Type::ConstantMatrix:
3573 case Type::DependentSizedMatrix:
3574 case Type::DependentAddressSpace:
3575 case Type::ObjCObject:
3576 case Type::ObjCInterface:
3577 case Type::ObjCObjectPointer:
3578 case Type::Record:
3579 case Type::Enum:
3580 case Type::UnresolvedUsing:
3581 case Type::TypeOfExpr:
3582 case Type::TypeOf:
3583 case Type::Decltype:
3584 case Type::UnaryTransform:
3585 case Type::DependentName:
3586 case Type::InjectedClassName:
3587 case Type::TemplateSpecialization:
3588 case Type::DependentTemplateSpecialization:
3589 case Type::TemplateTypeParm:
3590 case Type::SubstTemplateTypeParmPack:
3591 case Type::Auto:
3592 case Type::DeducedTemplateSpecialization:
3593 case Type::PackExpansion:
3594 case Type::BitInt:
3595 case Type::DependentBitInt:
3596 llvm_unreachable("type should never be variably-modified");
3597
3598 // These types can be variably-modified but should never need to
3599 // further decay.
3600 case Type::FunctionNoProto:
3601 case Type::FunctionProto:
3602 case Type::BlockPointer:
3603 case Type::MemberPointer:
3604 case Type::Pipe:
3605 return type;
3606
3607 // These types can be variably-modified. All these modifications
3608 // preserve structure except as noted by comments.
3609 // TODO: if we ever care about optimizing VLAs, there are no-op
3610 // optimizations available here.
3611 case Type::Pointer:
3612 result = getPointerType(getVariableArrayDecayedType(
3613 cast<PointerType>(ty)->getPointeeType()));
3614 break;
3615
3616 case Type::LValueReference: {
3617 const auto *lv = cast<LValueReferenceType>(ty);
3618 result = getLValueReferenceType(
3619 getVariableArrayDecayedType(lv->getPointeeType()),
3620 lv->isSpelledAsLValue());
3621 break;
3622 }
3623
3624 case Type::RValueReference: {
3625 const auto *lv = cast<RValueReferenceType>(ty);
3626 result = getRValueReferenceType(
3627 getVariableArrayDecayedType(lv->getPointeeType()));
3628 break;
3629 }
3630
3631 case Type::Atomic: {
3632 const auto *at = cast<AtomicType>(ty);
3633 result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
3634 break;
3635 }
3636
3637 case Type::ConstantArray: {
3638 const auto *cat = cast<ConstantArrayType>(ty);
3639 result = getConstantArrayType(
3640 getVariableArrayDecayedType(cat->getElementType()),
3641 cat->getSize(),
3642 cat->getSizeExpr(),
3643 cat->getSizeModifier(),
3644 cat->getIndexTypeCVRQualifiers());
3645 break;
3646 }
3647
3648 case Type::DependentSizedArray: {
3649 const auto *dat = cast<DependentSizedArrayType>(ty);
3650 result = getDependentSizedArrayType(
3651 getVariableArrayDecayedType(dat->getElementType()),
3652 dat->getSizeExpr(),
3653 dat->getSizeModifier(),
3654 dat->getIndexTypeCVRQualifiers(),
3655 dat->getBracketsRange());
3656 break;
3657 }
3658
3659 // Turn incomplete types into [*] types.
3660 case Type::IncompleteArray: {
3661 const auto *iat = cast<IncompleteArrayType>(ty);
3662 result =
3663 getVariableArrayType(getVariableArrayDecayedType(iat->getElementType()),
3664 /*size*/ nullptr, ArraySizeModifier::Normal,
3665 iat->getIndexTypeCVRQualifiers(), SourceRange());
3666 break;
3667 }
3668
3669 // Turn VLA types into [*] types.
3670 case Type::VariableArray: {
3671 const auto *vat = cast<VariableArrayType>(ty);
3672 result = getVariableArrayType(
3673 getVariableArrayDecayedType(vat->getElementType()),
3674 /*size*/ nullptr, ArraySizeModifier::Star,
3675 vat->getIndexTypeCVRQualifiers(), vat->getBracketsRange());
3676 break;
3677 }
3678 }
3679
3680 // Apply the top-level qualifiers from the original.
3681 return getQualifiedType(result, split.Quals);
3682}
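// For example (illustrative): given 'int (*p)[n]' whose pointee is the VLA
// 'int[n]', the decayed form is 'int (*)[*]' -- the pointer structure
// survives, the VLA bound is replaced by a star modifier, and the top-level
// qualifiers split off at entry are re-applied at the end.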
3683
3684/// getVariableArrayType - Returns a non-unique reference to the type for a
3685/// variable array of the specified element type.
3686QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts,
3687 ArraySizeModifier ASM,
3688 unsigned IndexTypeQuals,
3689 SourceRange Brackets) const {
3690 // Since we don't unique expressions, it isn't possible to unique VLA's
3691 // that have an expression provided for their size.
3692 QualType Canon;
3693
3694 // Be sure to pull qualifiers off the element type.
3695 // FIXME: Check below should look for qualifiers behind sugar.
3696 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
3697 SplitQualType canonSplit = getCanonicalType(EltTy).split();
3698 Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
3699 IndexTypeQuals, Brackets);
3700 Canon = getQualifiedType(Canon, canonSplit.Quals);
3701 }
3702
3703 auto *New = new (*this, alignof(VariableArrayType))
3704 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
3705
3706 VariableArrayTypes.push_back(New);
3707 Types.push_back(New);
3708 return QualType(New, 0);
3709}
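// Note the asymmetry with getConstantArrayType above: size expressions are
// not uniqued, so two textually identical VLAs produce distinct
// VariableArrayType nodes; only the element type's qualifiers participate in
// canonicalization.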
3710
3711/// getDependentSizedArrayType - Returns a non-unique reference to
3712/// the type for a dependently-sized array of the specified element
3713/// type.
3714QualType ASTContext::getDependentSizedArrayType(QualType elementType,
3715 Expr *numElements,
3716 ArraySizeModifier ASM,
3717 unsigned elementTypeQuals,
3718 SourceRange brackets) const {
3719 assert((!numElements || numElements->isTypeDependent() ||
3720 numElements->isValueDependent()) &&
3721 "Size must be type- or value-dependent!");
3722
3723 // Dependently-sized array types that do not have a specified number
3724 // of elements will have their sizes deduced from a dependent
3725 // initializer. We do no canonicalization here at all, which is okay
3726 // because they can't be used in most locations.
3727 if (!numElements) {
3728 auto *newType = new (*this, alignof(DependentSizedArrayType))
3729 DependentSizedArrayType(elementType, QualType(), numElements, ASM,
3730 elementTypeQuals, brackets);
3731 Types.push_back(newType);
3732 return QualType(newType, 0);
3733 }
3734
3735 // Otherwise, we actually build a new type every time, but we
3736 // also build a canonical type.
3737
3738 SplitQualType canonElementType = getCanonicalType(elementType).split();
3739
3740 void *insertPos = nullptr;
3741 llvm::FoldingSetNodeID ID;
3742 DependentSizedArrayType::Profile(ID, *this,
3743 QualType(canonElementType.Ty, 0),
3744 ASM, elementTypeQuals, numElements);
3745
3746 // Look for an existing type with these properties.
3747 DependentSizedArrayType *canonTy =
3748 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);
3749
3750 // If we don't have one, build one.
3751 if (!canonTy) {
3752 canonTy = new (*this, alignof(DependentSizedArrayType))
3753 DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(),
3754 numElements, ASM, elementTypeQuals, brackets);
3755 DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
3756 Types.push_back(canonTy);
3757 }
3758
3759 // Apply qualifiers from the element type to the array.
3760 QualType canon = getQualifiedType(QualType(canonTy,0),
3761 canonElementType.Quals);
3762
3763 // If we didn't need extra canonicalization for the element type or the size
3764 // expression, then just use that as our result.
3765 if (QualType(canonElementType.Ty, 0) == elementType &&
3766 canonTy->getSizeExpr() == numElements)
3767 return canon;
3768
3769 // Otherwise, we need to build a type which follows the spelling
3770 // of the element type.
3771 auto *sugaredType = new (*this, alignof(DependentSizedArrayType))
3772 DependentSizedArrayType(elementType, canon, numElements, ASM,
3773 elementTypeQuals, brackets);
3774 Types.push_back(sugaredType);
3775 return QualType(sugaredType, 0);
3776}
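// The net effect is a two-tier scheme: one uniqued, canonical
// DependentSizedArrayType per (canonical element type, size expression) pair,
// plus any number of non-uniqued sugared nodes that remember how the element
// type was actually spelled.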
3777
3778QualType ASTContext::getIncompleteArrayType(QualType elementType,
3779 ArraySizeModifier ASM,
3780 unsigned elementTypeQuals) const {
3781 llvm::FoldingSetNodeID ID;
3782 IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);
3783
3784 void *insertPos = nullptr;
3785 if (IncompleteArrayType *iat =
3786 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
3787 return QualType(iat, 0);
3788
3789 // If the element type isn't canonical, this won't be a canonical type
3790 // either, so fill in the canonical type field. We also have to pull
3791 // qualifiers off the element type.
3792 QualType canon;
3793
3794 // FIXME: Check below should look for qualifiers behind sugar.
3795 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
3796 SplitQualType canonSplit = getCanonicalType(elementType).split();
3797 canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
3798 ASM, elementTypeQuals);
3799 canon = getQualifiedType(canon, canonSplit.Quals);
3800
3801 // Get the new insert position for the node we care about.
3802 IncompleteArrayType *existing =
3803 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
3804 assert(!existing && "Shouldn't be in the map!"); (void) existing;
3805 }
3806
3807 auto *newType = new (*this, alignof(IncompleteArrayType))
3808 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
3809
3810 IncompleteArrayTypes.InsertNode(newType, insertPos);
3811 Types.push_back(newType);
3812 return QualType(newType, 0);
3813}
3814
3815ASTContext::BuiltinVectorTypeInfo
3816ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
3817#define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \
3818 {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
3819 NUMVECTORS};
3820
3821#define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \
3822 {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};
3823
3824 switch (Ty->getKind()) {
3825 default:
3826 llvm_unreachable("Unsupported builtin vector type");
3827 case BuiltinType::SveInt8:
3828 return SVE_INT_ELTTY(8, 16, true, 1);
3829 case BuiltinType::SveUint8:
3830 return SVE_INT_ELTTY(8, 16, false, 1);
3831 case BuiltinType::SveInt8x2:
3832 return SVE_INT_ELTTY(8, 16, true, 2);
3833 case BuiltinType::SveUint8x2:
3834 return SVE_INT_ELTTY(8, 16, false, 2);
3835 case BuiltinType::SveInt8x3:
3836 return SVE_INT_ELTTY(8, 16, true, 3);
3837 case BuiltinType::SveUint8x3:
3838 return SVE_INT_ELTTY(8, 16, false, 3);
3839 case BuiltinType::SveInt8x4:
3840 return SVE_INT_ELTTY(8, 16, true, 4);
3841 case BuiltinType::SveUint8x4:
3842 return SVE_INT_ELTTY(8, 16, false, 4);
3843 case BuiltinType::SveInt16:
3844 return SVE_INT_ELTTY(16, 8, true, 1);
3845 case BuiltinType::SveUint16:
3846 return SVE_INT_ELTTY(16, 8, false, 1);
3847 case BuiltinType::SveInt16x2:
3848 return SVE_INT_ELTTY(16, 8, true, 2);
3849 case BuiltinType::SveUint16x2:
3850 return SVE_INT_ELTTY(16, 8, false, 2);
3851 case BuiltinType::SveInt16x3:
3852 return SVE_INT_ELTTY(16, 8, true, 3);
3853 case BuiltinType::SveUint16x3:
3854 return SVE_INT_ELTTY(16, 8, false, 3);
3855 case BuiltinType::SveInt16x4:
3856 return SVE_INT_ELTTY(16, 8, true, 4);
3857 case BuiltinType::SveUint16x4:
3858 return SVE_INT_ELTTY(16, 8, false, 4);
3859 case BuiltinType::SveInt32:
3860 return SVE_INT_ELTTY(32, 4, true, 1);
3861 case BuiltinType::SveUint32:
3862 return SVE_INT_ELTTY(32, 4, false, 1);
3863 case BuiltinType::SveInt32x2:
3864 return SVE_INT_ELTTY(32, 4, true, 2);
3865 case BuiltinType::SveUint32x2:
3866 return SVE_INT_ELTTY(32, 4, false, 2);
3867 case BuiltinType::SveInt32x3:
3868 return SVE_INT_ELTTY(32, 4, true, 3);
3869 case BuiltinType::SveUint32x3:
3870 return SVE_INT_ELTTY(32, 4, false, 3);
3871 case BuiltinType::SveInt32x4:
3872 return SVE_INT_ELTTY(32, 4, true, 4);
3873 case BuiltinType::SveUint32x4:
3874 return SVE_INT_ELTTY(32, 4, false, 4);
3875 case BuiltinType::SveInt64:
3876 return SVE_INT_ELTTY(64, 2, true, 1);
3877 case BuiltinType::SveUint64:
3878 return SVE_INT_ELTTY(64, 2, false, 1);
3879 case BuiltinType::SveInt64x2:
3880 return SVE_INT_ELTTY(64, 2, true, 2);
3881 case BuiltinType::SveUint64x2:
3882 return SVE_INT_ELTTY(64, 2, false, 2);
3883 case BuiltinType::SveInt64x3:
3884 return SVE_INT_ELTTY(64, 2, true, 3);
3885 case BuiltinType::SveUint64x3:
3886 return SVE_INT_ELTTY(64, 2, false, 3);
3887 case BuiltinType::SveInt64x4:
3888 return SVE_INT_ELTTY(64, 2, true, 4);
3889 case BuiltinType::SveUint64x4:
3890 return SVE_INT_ELTTY(64, 2, false, 4);
3891 case BuiltinType::SveBool:
3892 return SVE_ELTTY(BoolTy, 16, 1);
3893 case BuiltinType::SveBoolx2:
3894 return SVE_ELTTY(BoolTy, 16, 2);
3895 case BuiltinType::SveBoolx4:
3896 return SVE_ELTTY(BoolTy, 16, 4);
3897 case BuiltinType::SveFloat16:
3898 return SVE_ELTTY(HalfTy, 8, 1);
3899 case BuiltinType::SveFloat16x2:
3900 return SVE_ELTTY(HalfTy, 8, 2);
3901 case BuiltinType::SveFloat16x3:
3902 return SVE_ELTTY(HalfTy, 8, 3);
3903 case BuiltinType::SveFloat16x4:
3904 return SVE_ELTTY(HalfTy, 8, 4);
3905 case BuiltinType::SveFloat32:
3906 return SVE_ELTTY(FloatTy, 4, 1);
3907 case BuiltinType::SveFloat32x2:
3908 return SVE_ELTTY(FloatTy, 4, 2);
3909 case BuiltinType::SveFloat32x3:
3910 return SVE_ELTTY(FloatTy, 4, 3);
3911 case BuiltinType::SveFloat32x4:
3912 return SVE_ELTTY(FloatTy, 4, 4);
3913 case BuiltinType::SveFloat64:
3914 return SVE_ELTTY(DoubleTy, 2, 1);
3915 case BuiltinType::SveFloat64x2:
3916 return SVE_ELTTY(DoubleTy, 2, 2);
3917 case BuiltinType::SveFloat64x3:
3918 return SVE_ELTTY(DoubleTy, 2, 3);
3919 case BuiltinType::SveFloat64x4:
3920 return SVE_ELTTY(DoubleTy, 2, 4);
3921 case BuiltinType::SveBFloat16:
3922 return SVE_ELTTY(BFloat16Ty, 8, 1);
3923 case BuiltinType::SveBFloat16x2:
3924 return SVE_ELTTY(BFloat16Ty, 8, 2);
3925 case BuiltinType::SveBFloat16x3:
3926 return SVE_ELTTY(BFloat16Ty, 8, 3);
3927 case BuiltinType::SveBFloat16x4:
3928 return SVE_ELTTY(BFloat16Ty, 8, 4);
3929#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \
3930 IsSigned) \
3931 case BuiltinType::Id: \
3932 return {getIntTypeForBitwidth(ElBits, IsSigned), \
3933 llvm::ElementCount::getScalable(NumEls), NF};
3934#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
3935 case BuiltinType::Id: \
3936 return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \
3937 llvm::ElementCount::getScalable(NumEls), NF};
3938#define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
3939 case BuiltinType::Id: \
3940 return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
3941#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
3942 case BuiltinType::Id: \
3943 return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
3944#include "clang/Basic/RISCVVTypes.def"
3945 }
3946}
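// Illustrative reading of the tables above: each SVE entry describes one
// 128-bit granule, so 'svint32_t' yields {int32, scalable x 4, NumVectors=1},
// while a tuple type such as 'svint32x3_t' keeps the per-vector element count
// and reports NumVectors=3. Predicates use one bool per byte of the granule,
// hence the fixed element count of 16.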
3947
3948/// getExternrefType - Return a WebAssembly externref type, which represents an
3949/// opaque reference to a host value.
3950QualType ASTContext::getWebAssemblyExternrefType() const {
3951 if (Target->getTriple().isWasm() && Target->hasFeature("reference-types")) {
3952#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
3953 if (BuiltinType::Id == BuiltinType::WasmExternRef) \
3954 return SingletonId;
3955#include "clang/Basic/WebAssemblyReferenceTypes.def"
3956 }
3957 llvm_unreachable(
3958 "shouldn't try to generate type externref outside WebAssembly target");
3959}
3960
3961/// getScalableVectorType - Return the unique reference to a scalable vector
3962/// type of the specified element type and size. VectorType must be a built-in
3963/// type.
3964QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts,
3965 unsigned NumFields) const {
3966 if (Target->hasAArch64SVETypes()) {
3967 uint64_t EltTySize = getTypeSize(EltTy);
3968#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
3969 IsSigned, IsFP, IsBF) \
3970 if (!EltTy->isBooleanType() && \
3971 ((EltTy->hasIntegerRepresentation() && \
3972 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
3973 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \
3974 IsFP && !IsBF) || \
3975 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \
3976 IsBF && !IsFP)) && \
3977 EltTySize == ElBits && NumElts == NumEls) { \
3978 return SingletonId; \
3979 }
3980#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
3981 if (EltTy->isBooleanType() && NumElts == NumEls) \
3982 return SingletonId;
3983#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingleTonId)
3984#include "clang/Basic/AArch64SVEACLETypes.def"
3985 } else if (Target->hasRISCVVTypes()) {
3986 uint64_t EltTySize = getTypeSize(EltTy);
3987#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \
3988 IsFP, IsBF) \
3989 if (!EltTy->isBooleanType() && \
3990 ((EltTy->hasIntegerRepresentation() && \
3991 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
3992 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \
3993 IsFP && !IsBF) || \
3994 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \
3995 IsBF && !IsFP)) && \
3996 EltTySize == ElBits && NumElts == NumEls && NumFields == NF) \
3997 return SingletonId;
3998#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
3999 if (EltTy->isBooleanType() && NumElts == NumEls) \
4000 return SingletonId;
4001#include "clang/Basic/RISCVVTypes.def"
4002 }
4003 return QualType();
4004}
4005
4006/// getVectorType - Return the unique reference to a vector type of
4007/// the specified element type and size. VectorType must be a built-in type.
4008QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
4009 VectorKind VecKind) const {
4010 assert(vecType->isBuiltinType() ||
4011 (vecType->isBitIntType() &&
4012 // Only support _BitInt elements with byte-sized power of 2 NumBits.
4013 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) &&
4014 vecType->castAs<BitIntType>()->getNumBits() >= 8));
4015
4016 // Check if we've already instantiated a vector of this type.
4017 llvm::FoldingSetNodeID ID;
4018 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
4019
4020 void *InsertPos = nullptr;
4021 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
4022 return QualType(VTP, 0);
4023
4024 // If the element type isn't canonical, this won't be a canonical type either,
4025 // so fill in the canonical type field.
4026 QualType Canonical;
4027 if (!vecType.isCanonical()) {
4028 Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);
4029
4030 // Get the new insert position for the node we care about.
4031 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4032 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4033 }
4034 auto *New = new (*this, alignof(VectorType))
4035 VectorType(vecType, NumElts, Canonical, VecKind);
4036 VectorTypes.InsertNode(New, InsertPos);
4037 Types.push_back(New);
4038 return QualType(New, 0);
4039}
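// Illustrative usage (assuming an ASTContext &Ctx): the GCC-style type
// 'int __attribute__((vector_size(16)))' corresponds to
//   QualType V = Ctx.getVectorType(Ctx.IntTy, /*NumElts=*/4,
//                                  VectorKind::Generic);
// where NumElts counts elements, not bytes.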
4040
4041QualType ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
4042 SourceLocation AttrLoc,
4043 VectorKind VecKind) const {
4044 llvm::FoldingSetNodeID ID;
4045 DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr,
4046 VecKind);
4047 void *InsertPos = nullptr;
4048 DependentVectorType *Canon =
4049 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4050 DependentVectorType *New;
4051
4052 if (Canon) {
4053 New = new (*this, alignof(DependentVectorType)) DependentVectorType(
4054 VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
4055 } else {
4056 QualType CanonVecTy = getCanonicalType(VecType);
4057 if (CanonVecTy == VecType) {
4058 New = new (*this, alignof(DependentVectorType))
4059 DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind);
4060
4061 DependentVectorType *CanonCheck =
4062 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4063 assert(!CanonCheck &&
4064 "Dependent-sized vector_size canonical type broken");
4065 (void)CanonCheck;
4066 DependentVectorTypes.InsertNode(New, InsertPos);
4067 } else {
4068 QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr,
4069 SourceLocation(), VecKind);
4070 New = new (*this, alignof(DependentVectorType))
4071 DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
4072 }
4073 }
4074
4075 Types.push_back(New);
4076 return QualType(New, 0);
4077}
4078
4079/// getExtVectorType - Return the unique reference to an extended vector type of
4080/// the specified element type and size. VectorType must be a built-in type.
4081QualType ASTContext::getExtVectorType(QualType vecType,
4082 unsigned NumElts) const {
4083 assert(vecType->isBuiltinType() || vecType->isDependentType() ||
4084 (vecType->isBitIntType() &&
4085 // Only support _BitInt elements with byte-sized power of 2 NumBits.
4086 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) &&
4087 vecType->castAs<BitIntType>()->getNumBits() >= 8));
4088
4089 // Check if we've already instantiated a vector of this type.
4090 llvm::FoldingSetNodeID ID;
4091 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
4092 VectorKind::Generic);
4093 void *InsertPos = nullptr;
4094 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
4095 return QualType(VTP, 0);
4096
4097 // If the element type isn't canonical, this won't be a canonical type either,
4098 // so fill in the canonical type field.
4099 QualType Canonical;
4100 if (!vecType.isCanonical()) {
4101 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);
4102
4103 // Get the new insert position for the node we care about.
4104 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4105 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4106 }
4107 auto *New = new (*this, alignof(ExtVectorType))
4108 ExtVectorType(vecType, NumElts, Canonical);
4109 VectorTypes.InsertNode(New, InsertPos);
4110 Types.push_back(New);
4111 return QualType(New, 0);
4112}
4113
4114QualType
4115ASTContext::getDependentSizedExtVectorType(QualType vecType,
4116 Expr *SizeExpr,
4117 SourceLocation AttrLoc) const {
4118 llvm::FoldingSetNodeID ID;
4119 DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType),
4120 SizeExpr);
4121
4122 void *InsertPos = nullptr;
4123 DependentSizedExtVectorType *Canon
4124 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4125 DependentSizedExtVectorType *New;
4126 if (Canon) {
4127 // We already have a canonical version of this array type; use it as
4128 // the canonical type for a newly-built type.
4129 New = new (*this, alignof(DependentSizedExtVectorType))
4130 DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr,
4131 AttrLoc);
4132 } else {
4133 QualType CanonVecTy = getCanonicalType(vecType);
4134 if (CanonVecTy == vecType) {
4135 New = new (*this, alignof(DependentSizedExtVectorType))
4136 DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc);
4137
4138 DependentSizedExtVectorType *CanonCheck
4139 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4140 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
4141 (void)CanonCheck;
4142 DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
4143 } else {
4144 QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
4145 SourceLocation());
4146 New = new (*this, alignof(DependentSizedExtVectorType))
4147 DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc);
4148 }
4149 }
4150
4151 Types.push_back(New);
4152 return QualType(New, 0);
4153}
4154
4155QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows,
4156 unsigned NumColumns) const {
4157 llvm::FoldingSetNodeID ID;
4158 ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns,
4159 Type::ConstantMatrix);
4160
4161 assert(MatrixType::isValidElementType(ElementTy) &&
4162 "need a valid element type");
4163 assert(ConstantMatrixType::isDimensionValid(NumRows) &&
4164 ConstantMatrixType::isDimensionValid(NumColumns) &&
4165 "need valid matrix dimensions");
4166 void *InsertPos = nullptr;
4167 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
4168 return QualType(MTP, 0);
4169
4170 QualType Canonical;
4171 if (!ElementTy.isCanonical()) {
4172 Canonical =
4173 getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns);
4174
4175 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4176 assert(!NewIP && "Matrix type shouldn't already exist in the map");
4177 (void)NewIP;
4178 }
4179
4180 auto *New = new (*this, alignof(ConstantMatrixType))
4181 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
4182 MatrixTypes.InsertNode(New, InsertPos);
4183 Types.push_back(New);
4184 return QualType(New, 0);
4185}
4186
4187QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy,
4188 Expr *RowExpr,
4189 Expr *ColumnExpr,
4190 SourceLocation AttrLoc) const {
4191 QualType CanonElementTy = getCanonicalType(ElementTy);
4192 llvm::FoldingSetNodeID ID;
4193 DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr,
4194 ColumnExpr);
4195
4196 void *InsertPos = nullptr;
4197 DependentSizedMatrixType *Canon =
4198 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4199
4200 if (!Canon) {
4201 Canon = new (*this, alignof(DependentSizedMatrixType))
4202 DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr,
4203 ColumnExpr, AttrLoc);
4204#ifndef NDEBUG
4205 DependentSizedMatrixType *CanonCheck =
4206 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4207 assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
4208#endif
4209 DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
4210 Types.push_back(Canon);
4211 }
4212
4213 // Already have a canonical version of the matrix type
4214 //
4215 // If it exactly matches the requested type, use it directly.
4216 if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
4217 Canon->getColumnExpr() == ColumnExpr)
4218 return QualType(Canon, 0);
4219
4220 // Use Canon as the canonical type for newly-built type.
4221 DependentSizedMatrixType *New = new (*this, alignof(DependentSizedMatrixType))
4222 DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr,
4223 ColumnExpr, AttrLoc);
4224 Types.push_back(New);
4225 return QualType(New, 0);
4226}
4227
4228QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
4229 Expr *AddrSpaceExpr,
4230 SourceLocation AttrLoc) const {
4231 assert(AddrSpaceExpr->isInstantiationDependent());
4232
4233 QualType canonPointeeType = getCanonicalType(PointeeType);
4234
4235 void *insertPos = nullptr;
4236 llvm::FoldingSetNodeID ID;
4237 DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
4238 AddrSpaceExpr);
4239
4240 DependentAddressSpaceType *canonTy =
4241 DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);
4242
4243 if (!canonTy) {
4244 canonTy = new (*this, alignof(DependentAddressSpaceType))
4245 DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr,
4246 AttrLoc);
4247 DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
4248 Types.push_back(canonTy);
4249 }
4250
4251 if (canonPointeeType == PointeeType &&
4252 canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
4253 return QualType(canonTy, 0);
4254
4255 auto *sugaredType = new (*this, alignof(DependentAddressSpaceType))
4256 DependentAddressSpaceType(PointeeType, QualType(canonTy, 0),
4257 AddrSpaceExpr, AttrLoc);
4258 Types.push_back(sugaredType);
4259 return QualType(sugaredType, 0);
4260}
4261
4262/// Determine whether \p T is canonical as the result type of a function.
4263static bool isCanonicalResultType(QualType T) {
4264 return T.isCanonical() &&
4265 (T.getObjCLifetime() == Qualifiers::OCL_None ||
4266 T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
4267}
4268
4269/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
4270QualType
4271ASTContext::getFunctionNoProtoType(QualType ResultTy,
4272 const FunctionType::ExtInfo &Info) const {
4273 // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
4274 // functionality creates a function without a prototype regardless of
4275 // language mode (so it makes them even in C++). Once the rewriter has been
4276 // fixed, this assertion can be enabled again.
4277 //assert(!LangOpts.requiresStrictPrototypes() &&
4278 // "strict prototypes are disabled");
4279
4280 // Unique functions, to guarantee there is only one function of a particular
4281 // structure.
4282 llvm::FoldingSetNodeID ID;
4283 FunctionNoProtoType::Profile(ID, ResultTy, Info);
4284
4285 void *InsertPos = nullptr;
4286 if (FunctionNoProtoType *FT =
4287 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
4288 return QualType(FT, 0);
4289
4290 QualType Canonical;
4291 if (!isCanonicalResultType(ResultTy)) {
4292 Canonical =
4293 getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info);
4294
4295 // Get the new insert position for the node we care about.
4296 FunctionNoProtoType *NewIP =
4297 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
4298 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4299 }
4300
4301 auto *New = new (*this, alignof(FunctionNoProtoType))
4302 FunctionNoProtoType(ResultTy, Canonical, Info);
4303 Types.push_back(New);
4304 FunctionNoProtoTypes.InsertNode(New, InsertPos);
4305 return QualType(New, 0);
4306}
4307
4308CanQualType
4309ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
4310 CanQualType CanResultType = getCanonicalType(ResultType);
4311
4312 // Canonical result types do not have ARC lifetime qualifiers.
4313 if (CanResultType.getQualifiers().hasObjCLifetime()) {
4314 Qualifiers Qs = CanResultType.getQualifiers();
4315 Qs.removeObjCLifetime();
4316 return CanQualType::CreateUnsafe(
4317 getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
4318 }
4319
4320 return CanResultType;
4321}
4322
4323static bool isCanonicalExceptionSpecification(
4324 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
4325 if (ESI.Type == EST_None)
4326 return true;
4327 if (!NoexceptInType)
4328 return false;
4329
4330 // C++17 onwards: exception specification is part of the type, as a simple
4331 // boolean "can this function type throw".
4332 if (ESI.Type == EST_BasicNoexcept)
4333 return true;
4334
4335 // A noexcept(expr) specification is (possibly) canonical if expr is
4336 // value-dependent.
4337 if (ESI.Type == EST_DependentNoexcept)
4338 return true;
4339
4340 // A dynamic exception specification is canonical if it only contains pack
4341 // expansions (so we can't tell whether it's non-throwing) and all its
4342 // contained types are canonical.
4343 if (ESI.Type == EST_Dynamic) {
4344 bool AnyPackExpansions = false;
4345 for (QualType ET : ESI.Exceptions) {
4346 if (!ET.isCanonical())
4347 return false;
4348 if (ET->getAs<PackExpansionType>())
4349 AnyPackExpansions = true;
4350 }
4351 return AnyPackExpansions;
4352 }
4353
4354 return false;
4355}
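// For example, under C++17 semantics: 'void() noexcept' (EST_BasicNoexcept)
// is canonical; 'void() noexcept(false)' is not, and canonicalizes to plain
// 'void()'; and 'void() throw(int)' is not, because a throw clause without
// pack expansions pins down a definite "can throw" answer.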
4356
4357QualType ASTContext::getFunctionTypeInternal(
4358 QualType ResultTy, ArrayRef<QualType> ArgArray,
4359 const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
4360 size_t NumArgs = ArgArray.size();
4361
4362 // Unique functions, to guarantee there is only one function of a particular
4363 // structure.
4364 llvm::FoldingSetNodeID ID;
4365 FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
4366 *this, true);
4367
4368 QualType Canonical;
4369 bool Unique = false;
4370
4371 void *InsertPos = nullptr;
4372 if (FunctionProtoType *FPT =
4373 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
4374 QualType Existing = QualType(FPT, 0);
4375
4376 // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
4377 // it so long as our exception specification doesn't contain a dependent
4378 // noexcept expression, or we're just looking for a canonical type.
4379 // Otherwise, we're going to need to create a type
4380 // sugar node to hold the concrete expression.
4381 if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
4382 EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
4383 return Existing;
4384
4385 // We need a new type sugar node for this one, to hold the new noexcept
4386 // expression. We do no canonicalization here, but that's OK since we don't
4387 // expect to see the same noexcept expression much more than once.
4388 Canonical = getCanonicalType(Existing);
4389 Unique = true;
4390 }
4391
4392 bool NoexceptInType = getLangOpts().CPlusPlus17;
4393 bool IsCanonicalExceptionSpec =
4394 isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);
4395
4396 // Determine whether the type being created is already canonical or not.
4397 bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
4398 isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
4399 for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
4400 if (!ArgArray[i].isCanonicalAsParam())
4401 isCanonical = false;
4402
4403 if (OnlyWantCanonical)
4404 assert(isCanonical &&
4405 "given non-canonical parameters constructing canonical type");
4406
4407 // If this type isn't canonical, get the canonical version of it if we don't
4408 // already have it. The exception spec is only partially part of the
4409 // canonical type, and only in C++17 onwards.
4410 if (!isCanonical && Canonical.isNull()) {
4411 SmallVector<QualType, 16> CanonicalArgs;
4412 CanonicalArgs.reserve(NumArgs);
4413 for (unsigned i = 0; i != NumArgs; ++i)
4414 CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
4415
4416 llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
4417 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
4418 CanonicalEPI.HasTrailingReturn = false;
4419
4420 if (IsCanonicalExceptionSpec) {
4421 // Exception spec is already OK.
4422 } else if (NoexceptInType) {
4423 switch (EPI.ExceptionSpec.Type) {
4424 case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
4425 // We don't know yet. It shouldn't matter what we pick here; no-one
4426 // should ever look at this.
4427 [[fallthrough]];
4428 case EST_None: case EST_MSAny: case EST_NoexceptFalse:
4429 CanonicalEPI.ExceptionSpec.Type = EST_None;
4430 break;
4431
4432 // A dynamic exception specification is almost always "not noexcept",
4433 // with the exception that a pack expansion might expand to no types.
4434 case EST_Dynamic: {
4435 bool AnyPacks = false;
4436 for (QualType ET : EPI.ExceptionSpec.Exceptions) {
4437 if (ET->getAs<PackExpansionType>())
4438 AnyPacks = true;
4439 ExceptionTypeStorage.push_back(getCanonicalType(ET));
4440 }
4441 if (!AnyPacks)
4442 CanonicalEPI.ExceptionSpec.Type = EST_None;
4443 else {
4444 CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
4445 CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
4446 }
4447 break;
4448 }
4449
4450 case EST_DynamicNone:
4451 case EST_BasicNoexcept:
4452 case EST_NoexceptTrue:
4453 case EST_NoThrow:
4454 CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
4455 break;
4456
4457 case EST_DependentNoexcept:
4458 llvm_unreachable("dependent noexcept is already canonical");
4459 }
4460 } else {
4461 CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
4462 }
4463
4464 // Adjust the canonical function result type.
4465 CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
4466 Canonical =
4467 getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);
4468
4469 // Get the new insert position for the node we care about.
4470 FunctionProtoType *NewIP =
4471 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
4472 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4473 }
4474
4475 // Compute the needed size to hold this FunctionProtoType and the
4476 // various trailing objects.
4477 auto ESH = FunctionProtoType::getExceptionSpecSize(
4478 EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
4479 size_t Size = FunctionProtoType::totalSizeToAlloc<
4480 QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields,
4481 FunctionType::ExceptionType, Expr *, FunctionDecl *,
4482 FunctionProtoType::ExtParameterInfo, Qualifiers>(
4483 NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(),
4484 ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
4485 EPI.ExtParameterInfos ? NumArgs : 0,
4486 EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0);
4487
4488 auto *FTP = (FunctionProtoType *)Allocate(Size, alignof(FunctionProtoType));
4489 FunctionProtoType::ExtProtoInfo newEPI = EPI;
4490 new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
4491 Types.push_back(FTP);
4492 if (!Unique)
4493 FunctionProtoTypes.InsertNode(FTP, InsertPos);
4494 return QualType(FTP, 0);
4495}
4496
4497QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
4498 llvm::FoldingSetNodeID ID;
4499 PipeType::Profile(ID, T, ReadOnly);
4500
4501 void *InsertPos = nullptr;
4502 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
4503 return QualType(PT, 0);
4504
4505 // If the pipe element type isn't canonical, this won't be a canonical type
4506 // either, so fill in the canonical type field.
4507 QualType Canonical;
4508 if (!T.isCanonical()) {
4509 Canonical = getPipeType(getCanonicalType(T), ReadOnly);
4510
4511 // Get the new insert position for the node we care about.
4512 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
4513 assert(!NewIP && "Shouldn't be in the map!");
4514 (void)NewIP;
4515 }
4516 auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly);
4517 Types.push_back(New);
4518 PipeTypes.InsertNode(New, InsertPos);
4519 return QualType(New, 0);
4520}
4521
4522QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const {
4523 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
4524 return LangOpts.OpenCL ? getAddrSpaceQualType(Ty, LangAS::opencl_constant)
4525 : Ty;
4526}
4527
4528QualType ASTContext::getReadPipeType(QualType T) const {
4529 return getPipeType(T, true);
4530}
4531
4532QualType ASTContext::getWritePipeType(QualType T) const {
4533 return getPipeType(T, false);
4534}
4535
4536QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const {
4537 llvm::FoldingSetNodeID ID;
4538 BitIntType::Profile(ID, IsUnsigned, NumBits);
4539
4540 void *InsertPos = nullptr;
4541 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
4542 return QualType(EIT, 0);
4543
4544 auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits);
4545 BitIntTypes.InsertNode(New, InsertPos);
4546 Types.push_back(New);
4547 return QualType(New, 0);
4548}
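// Illustrative usage (assuming an ASTContext &Ctx): '_BitInt(37)' is
//   QualType B = Ctx.getBitIntType(/*IsUnsigned=*/false, /*NumBits=*/37);
// Note that no canonical field is computed here: a BitIntType is fully
// determined by its signedness and width, so the node is its own canonical
// type.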
4549
4550QualType ASTContext::getDependentBitIntType(bool IsUnsigned,
4551 Expr *NumBitsExpr) const {
4552 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
4553 llvm::FoldingSetNodeID ID;
4554 DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr);
4555
4556 void *InsertPos = nullptr;
4557 if (DependentBitIntType *Existing =
4558 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
4559 return QualType(Existing, 0);
4560
4561 auto *New = new (*this, alignof(DependentBitIntType))
4562 DependentBitIntType(IsUnsigned, NumBitsExpr);
4563 DependentBitIntTypes.InsertNode(New, InsertPos);
4564
4565 Types.push_back(New);
4566 return QualType(New, 0);
4567}
4568
4569#ifndef NDEBUG
4570static bool NeedsInjectedClassNameType(const RecordDecl *D) {
4571 if (!isa<CXXRecordDecl>(D)) return false;
4572 const auto *RD = cast<CXXRecordDecl>(D);
4573 if (isa<ClassTemplatePartialSpecializationDecl>(RD))
4574 return true;
4575 if (RD->getDescribedClassTemplate() &&
4576 !isa<ClassTemplateSpecializationDecl>(RD))
4577 return true;
4578 return false;
4579}
4580#endif
4581
4582/// getInjectedClassNameType - Return the unique reference to the
4583/// injected class name type for the specified templated declaration.
4584QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
4585 QualType TST) const {
4586 assert(NeedsInjectedClassNameType(Decl));
4587 if (Decl->TypeForDecl) {
4588 assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
4589 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
4590 assert(PrevDecl->TypeForDecl && "previous declaration has no type");
4591 Decl->TypeForDecl = PrevDecl->TypeForDecl;
4592 assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
4593 } else {
4594 Type *newType = new (*this, alignof(InjectedClassNameType))
4595 InjectedClassNameType(Decl, TST);
4596 Decl->TypeForDecl = newType;
4597 Types.push_back(newType);
4598 }
4599 return QualType(Decl->TypeForDecl, 0);
4600}
4601
4602/// getTypeDeclType - Return the unique reference to the type for the
4603/// specified type declaration.
4604QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
4605 assert(Decl && "Passed null for Decl param");
4606 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
4607
4608 if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
4609 return getTypedefType(Typedef);
4610
4611 assert(!isa<TemplateTypeParmDecl>(Decl) &&
4612 "Template type parameter types are always available.");
4613
4614 if (const auto *Record = dyn_cast<RecordDecl>(Decl)) {
4615 assert(Record->isFirstDecl() && "struct/union has previous declaration");
4616 assert(!NeedsInjectedClassNameType(Record));
4617 return getRecordType(Record);
4618 } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) {
4619 assert(Enum->isFirstDecl() && "enum has previous declaration");
4620 return getEnumType(Enum);
4621 } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
4622 return getUnresolvedUsingType(Using);
4623 } else
4624 llvm_unreachable("TypeDecl without a type?");
4625
4626 return QualType(Decl->TypeForDecl, 0);
4627}
4628
4629/// getTypedefType - Return the unique reference to the type for the
4630/// specified typedef name decl.
4631QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl,
4632 QualType Underlying) const {
4633 if (!Decl->TypeForDecl) {
4634 if (Underlying.isNull())
4635 Underlying = Decl->getUnderlyingType();
4636 auto *NewType = new (*this, alignof(TypedefType)) TypedefType(
4637 Type::Typedef, Decl, QualType(), getCanonicalType(Underlying));
4638 Decl->TypeForDecl = NewType;
4639 Types.push_back(NewType);
4640 return QualType(NewType, 0);
4641 }
4642 if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying)
4643 return QualType(Decl->TypeForDecl, 0);
4644 assert(hasSameType(Decl->getUnderlyingType(), Underlying));
4645
4646 llvm::FoldingSetNodeID ID;
4647 TypedefType::Profile(ID, Decl, Underlying);
4648
4649 void *InsertPos = nullptr;
4650 if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) {
4651 assert(!T->typeMatchesDecl() &&
4652 "non-divergent case should be handled with TypeDecl");
4653 return QualType(T, 0);
4654 }
4655
4656 void *Mem = Allocate(TypedefType::totalSizeToAlloc<QualType>(true),
4657 alignof(TypedefType));
4658 auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying,
4659 getCanonicalType(Underlying));
4660 TypedefTypes.InsertNode(NewType, InsertPos);
4661 Types.push_back(NewType);
4662 return QualType(NewType, 0);
4663}
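// The fast path above caches the common case -- a TypedefType whose
// underlying type matches the declaration -- directly on the decl, while the
// slower FoldingSet path handles "divergent" typedefs whose underlying type
// differs from the declaration's (the typeMatchesDecl() == false case).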
4664
4665QualType ASTContext::getUsingType(const UsingShadowDecl *Found,
4666 QualType Underlying) const {
4667 llvm::FoldingSetNodeID ID;
4668 UsingType::Profile(ID, Found, Underlying);
4669
4670 void *InsertPos = nullptr;
4671 if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
4672 return QualType(T, 0);
4673
4674 const Type *TypeForDecl =
4675 cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl();
4676
4677 assert(!Underlying.hasLocalQualifiers());
4678 QualType Canon = Underlying->getCanonicalTypeInternal();
4679 assert(TypeForDecl->getCanonicalTypeInternal() == Canon);
4680
4681 if (Underlying.getTypePtr() == TypeForDecl)
4682 Underlying = QualType();
4683 void *Mem =
4684 Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()),
4685 alignof(UsingType));
4686 UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon);
4687 Types.push_back(NewType);
4688 UsingTypes.InsertNode(NewType, InsertPos);
4689 return QualType(NewType, 0);
4690}
4691
4692QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
4693 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
4694
4695 if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
4696 if (PrevDecl->TypeForDecl)
4697 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
4698
4699 auto *newType = new (*this, alignof(RecordType)) RecordType(Decl);
4700 Decl->TypeForDecl = newType;
4701 Types.push_back(newType);
4702 return QualType(newType, 0);
4703}
4704
4705QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
4706 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
4707
4708 if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
4709 if (PrevDecl->TypeForDecl)
4710 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
4711
4712 auto *newType = new (*this, alignof(EnumType)) EnumType(Decl);
4713 Decl->TypeForDecl = newType;
4714 Types.push_back(newType);
4715 return QualType(newType, 0);
4716}
4717
4718QualType ASTContext::getUnresolvedUsingType(
4719 const UnresolvedUsingTypenameDecl *Decl) const {
4720 if (Decl->TypeForDecl)
4721 return QualType(Decl->TypeForDecl, 0);
4722
4723 if (const UnresolvedUsingTypenameDecl *CanonicalDecl =
4724 Decl->getCanonicalDecl())
4725 if (CanonicalDecl->TypeForDecl)
4726 return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0);
4727
4728 Type *newType =
4729 new (*this, alignof(UnresolvedUsingType)) UnresolvedUsingType(Decl);
4730 Decl->TypeForDecl = newType;
4731 Types.push_back(newType);
4732 return QualType(newType, 0);
4733}
4734
4735QualType ASTContext::getAttributedType(attr::Kind attrKind,
4736 QualType modifiedType,
4737 QualType equivalentType) const {
4738 llvm::FoldingSetNodeID id;
4739 AttributedType::Profile(id, attrKind, modifiedType, equivalentType);
4740
4741 void *insertPos = nullptr;
4742 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
4743 if (type) return QualType(type, 0);
4744
4745 QualType canon = getCanonicalType(equivalentType);
4746 type = new (*this, alignof(AttributedType))
4747 AttributedType(canon, attrKind, modifiedType, equivalentType);
4748
4749 Types.push_back(type);
4750 AttributedTypes.InsertNode(type, insertPos);
4751
4752 return QualType(type, 0);
4753}
4754
4755QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
4756 QualType Wrapped) {
4757 llvm::FoldingSetNodeID ID;
4758 BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr);
4759
4760 void *InsertPos = nullptr;
4761 BTFTagAttributedType *Ty =
4762 BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
4763 if (Ty)
4764 return QualType(Ty, 0);
4765
4766 QualType Canon = getCanonicalType(Wrapped);
4767 Ty = new (*this, alignof(BTFTagAttributedType))
4768 BTFTagAttributedType(Canon, Wrapped, BTFAttr);
4769
4770 Types.push_back(Ty);
4771 BTFTagAttributedTypes.InsertNode(Ty, InsertPos);
4772
4773 return QualType(Ty, 0);
4774}
4775
4776/// Retrieve a substitution-result type.
4777QualType ASTContext::getSubstTemplateTypeParmType(
4778 QualType Replacement, Decl *AssociatedDecl, unsigned Index,
4779 std::optional<unsigned> PackIndex) const {
4780 llvm::FoldingSetNodeID ID;
4781 SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index,
4782 PackIndex);
4783 void *InsertPos = nullptr;
4784 SubstTemplateTypeParmType *SubstParm =
4785 SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4786
4787 if (!SubstParm) {
4788 void *Mem = Allocate(SubstTemplateTypeParmType::totalSizeToAlloc<QualType>(
4789 !Replacement.isCanonical()),
4790 alignof(SubstTemplateTypeParmType));
4791 SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl,
4792 Index, PackIndex);
4793 Types.push_back(SubstParm);
4794 SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
4795 }
4796
4797 return QualType(SubstParm, 0);
4798}
4799
4800 /// Retrieve a substitution-result type for a template parameter pack.
4801QualType ASTContext::getSubstTemplateTypeParmPackType(
4802 Decl *AssociatedDecl,
4803 unsigned Index, bool Final,
4804 const TemplateArgument &ArgPack) {
4805#ifndef NDEBUG
4806 for (const auto &P : ArgPack.pack_elements())
4807 assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
4808#endif
4809
4810 llvm::FoldingSetNodeID ID;
4811 SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final,
4812 ArgPack);
4813 void *InsertPos = nullptr;
4814 if (SubstTemplateTypeParmPackType *SubstParm =
4815 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
4816 return QualType(SubstParm, 0);
4817
4818 QualType Canon;
4819 {
4820 TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack);
4821 if (!AssociatedDecl->isCanonicalDecl() ||
4822 !CanonArgPack.structurallyEquals(ArgPack)) {
4823 Canon = getSubstTemplateTypeParmPackType(
4824 AssociatedDecl->getCanonicalDecl(), Index, Final, CanonArgPack);
4825 [[maybe_unused]] const auto *Nothing =
4826 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
4827 assert(!Nothing);
4828 }
4829 }
4830
4831 auto *SubstParm = new (*this, alignof(SubstTemplateTypeParmPackType))
4832 SubstTemplateTypeParmPackType(Canon, AssociatedDecl, Index, Final,
4833 ArgPack);
4834 Types.push_back(SubstParm);
4835 SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
4836 return QualType(SubstParm, 0);
4837}
4838
4839/// Retrieve the template type parameter type for a template
4840/// parameter or parameter pack with the given depth, index, and (optionally)
4841/// name.
4842QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
4843 bool ParameterPack,
4844 TemplateTypeParmDecl *TTPDecl) const {
4845 llvm::FoldingSetNodeID ID;
4846 TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
4847 void *InsertPos = nullptr;
4848 TemplateTypeParmType *TypeParm
4849 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4850
4851 if (TypeParm)
4852 return QualType(TypeParm, 0);
4853
4854 if (TTPDecl) {
4855 QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
4856 TypeParm = new (*this, alignof(TemplateTypeParmType))
4857 TemplateTypeParmType(TTPDecl, Canon);
4858
4859 TemplateTypeParmType *TypeCheck
4860 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4861 assert(!TypeCheck && "Template type parameter canonical type broken");
4862 (void)TypeCheck;
4863 } else
4864 TypeParm = new (*this, alignof(TemplateTypeParmType))
4865 TemplateTypeParmType(Depth, Index, ParameterPack);
4866
4867 Types.push_back(TypeParm);
4868 TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
4869
4870 return QualType(TypeParm, 0);
4871}
4872
4873TypeSourceInfo *
4874ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
4875 SourceLocation NameLoc,
4876 const TemplateArgumentListInfo &Args,
4877 QualType Underlying) const {
4878 assert(!Name.getAsDependentTemplateName() &&
4879 "No dependent template names here!");
4880 QualType TST =
4881 getTemplateSpecializationType(Name, Args.arguments(), Underlying);
4882
4883 TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
4884 TemplateSpecializationTypeLoc TL =
4885 DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
4886 TL.setTemplateKeywordLoc(SourceLocation());
4887 TL.setTemplateNameLoc(NameLoc);
4888 TL.setLAngleLoc(Args.getLAngleLoc());
4889 TL.setRAngleLoc(Args.getRAngleLoc());
4890 for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
4891 TL.setArgLocInfo(i, Args[i].getLocInfo());
4892 return DI;
4893}
4894
4895QualType
4896ASTContext::getTemplateSpecializationType(TemplateName Template,
4897 ArrayRef<TemplateArgumentLoc> Args,
4898 QualType Underlying) const {
4899 assert(!Template.getAsDependentTemplateName() &&
4900 "No dependent template names here!");
4901
4902 SmallVector<TemplateArgument, 4> ArgVec;
4903 ArgVec.reserve(Args.size());
4904 for (const TemplateArgumentLoc &Arg : Args)
4905 ArgVec.push_back(Arg.getArgument());
4906
4907 return getTemplateSpecializationType(Template, ArgVec, Underlying);
4908}
4909
4910#ifndef NDEBUG
4911static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
4912 for (const TemplateArgument &Arg : Args)
4913 if (Arg.isPackExpansion())
4914 return true;
4915
4916 return false;
4917}
4918#endif
4919
4920QualType
4921ASTContext::getTemplateSpecializationType(TemplateName Template,
4922 ArrayRef<TemplateArgument> Args,
4923 QualType Underlying) const {
4924 assert(!Template.getAsDependentTemplateName() &&
4925 "No dependent template names here!");
4926 // Look through qualified template names.
4927 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4928 Template = QTN->getUnderlyingTemplate();
4929
4930 const auto *TD = Template.getAsTemplateDecl();
4931 bool IsTypeAlias = TD && TD->isTypeAlias();
4932 QualType CanonType;
4933 if (!Underlying.isNull())
4934 CanonType = getCanonicalType(Underlying);
4935 else {
4936 // We can get here with an alias template when the specialization contains
4937 // a pack expansion that does not match up with a parameter pack.
4938 assert((!IsTypeAlias || hasAnyPackExpansions(Args)) &&
4939 "Caller must compute aliased type");
4940 IsTypeAlias = false;
4941 CanonType = getCanonicalTemplateSpecializationType(Template, Args);
4942 }
4943
4944 // Allocate the (non-canonical) template specialization type, but don't
4945 // try to unique it: these types typically have location information that
4946 // we don't unique and don't want to lose.
4947 void *Mem = Allocate(sizeof(TemplateSpecializationType) +
4948 sizeof(TemplateArgument) * Args.size() +
4949 (IsTypeAlias ? sizeof(QualType) : 0),
4950 alignof(TemplateSpecializationType));
4951 auto *Spec
4952 = new (Mem) TemplateSpecializationType(Template, Args, CanonType,
4953 IsTypeAlias ? Underlying : QualType());
4954
4955 Types.push_back(Spec);
4956 return QualType(Spec, 0);
4957}
4958
4959QualType ASTContext::getCanonicalTemplateSpecializationType(
4960 TemplateName Template, ArrayRef<TemplateArgument> Args) const {
4961 assert(!Template.getAsDependentTemplateName() &&
4962 "No dependent template names here!");
4963
4964 // Look through qualified template names.
4965 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4966 Template = TemplateName(QTN->getUnderlyingTemplate());
4967
4968 // Build the canonical template specialization type.
4969 TemplateName CanonTemplate = getCanonicalTemplateName(Template);
4970 bool AnyNonCanonArgs = false;
4971 auto CanonArgs =
4972 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs);
4973
4974 // Determine whether this canonical template specialization type already
4975 // exists.
4976 llvm::FoldingSetNodeID ID;
4977 TemplateSpecializationType::Profile(ID, CanonTemplate,
4978 CanonArgs, *this);
4979
4980 void *InsertPos = nullptr;
4981 TemplateSpecializationType *Spec
4982 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
4983
4984 if (!Spec) {
4985 // Allocate a new canonical template specialization type.
4986 void *Mem = Allocate((sizeof(TemplateSpecializationType) +
4987 sizeof(TemplateArgument) * CanonArgs.size()),
4988 alignof(TemplateSpecializationType));
4989 Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
4990 CanonArgs,
4991 QualType(), QualType());
4992 Types.push_back(Spec);
4993 TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
4994 }
4995
4996 assert(Spec->isDependentType() &&
4997 "Non-dependent template-id type must have a canonical type");
4998 return QualType(Spec, 0);
4999}
5000
5001QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
5002 NestedNameSpecifier *NNS,
5003 QualType NamedType,
5004 TagDecl *OwnedTagDecl) const {
5005 llvm::FoldingSetNodeID ID;
5006 ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);
5007
5008 void *InsertPos = nullptr;
5009 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
5010 if (T)
5011 return QualType(T, 0);
5012
5013 QualType Canon = NamedType;
5014 if (!Canon.isCanonical()) {
5015 Canon = getCanonicalType(NamedType);
5016 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
5017 assert(!CheckT && "Elaborated canonical type broken");
5018 (void)CheckT;
5019 }
5020
5021 void *Mem =
5022 Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl),
5023 alignof(ElaboratedType));
5024 T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);
5025
5026 Types.push_back(T);
5027 ElaboratedTypes.InsertNode(T, InsertPos);
5028 return QualType(T, 0);
5029}
5030
5031QualType
5032ASTContext::getParenType(QualType InnerType) const {
5033 llvm::FoldingSetNodeID ID;
5034 ParenType::Profile(ID, InnerType);
5035
5036 void *InsertPos = nullptr;
5037 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
5038 if (T)
5039 return QualType(T, 0);
5040
5041 QualType Canon = InnerType;
5042 if (!Canon.isCanonical()) {
5043 Canon = getCanonicalType(InnerType);
5044 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
5045 assert(!CheckT && "Paren canonical type broken");
5046 (void)CheckT;
5047 }
5048
5049 T = new (*this, alignof(ParenType)) ParenType(InnerType, Canon);
5050 Types.push_back(T);
5051 ParenTypes.InsertNode(T, InsertPos);
5052 return QualType(T, 0);
5053}
5054
5055QualType
5056ASTContext::getMacroQualifiedType(QualType UnderlyingTy,
5057 const IdentifierInfo *MacroII) const {
5058 QualType Canon = UnderlyingTy;
5059 if (!Canon.isCanonical())
5060 Canon = getCanonicalType(UnderlyingTy);
5061
5062 auto *newType = new (*this, alignof(MacroQualifiedType))
5063 MacroQualifiedType(UnderlyingTy, Canon, MacroII);
5064 Types.push_back(newType);
5065 return QualType(newType, 0);
5066}
5067
5068QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
5069 NestedNameSpecifier *NNS,
5070 const IdentifierInfo *Name,
5071 QualType Canon) const {
5072 if (Canon.isNull()) {
5073 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
5074 if (CanonNNS != NNS)
5075 Canon = getDependentNameType(Keyword, CanonNNS, Name);
5076 }
5077
5078 llvm::FoldingSetNodeID ID;
5079 DependentNameType::Profile(ID, Keyword, NNS, Name);
5080
5081 void *InsertPos = nullptr;
5082 DependentNameType *T
5083 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
5084 if (T)
5085 return QualType(T, 0);
5086
5087 T = new (*this, alignof(DependentNameType))
5088 DependentNameType(Keyword, NNS, Name, Canon);
5089 Types.push_back(T);
5090 DependentNameTypes.InsertNode(T, InsertPos);
5091 return QualType(T, 0);
5092}
5093
5094QualType ASTContext::getDependentTemplateSpecializationType(
5095 ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
5096 const IdentifierInfo *Name, ArrayRef<TemplateArgumentLoc> Args) const {
5097 // TODO: avoid this copy
5098 SmallVector<TemplateArgument, 16> ArgCopy;
5099 for (unsigned I = 0, E = Args.size(); I != E; ++I)
5100 ArgCopy.push_back(Args[I].getArgument());
5101 return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy);
5102}
5103
5104QualType
5105ASTContext::getDependentTemplateSpecializationType(
5106 ElaboratedTypeKeyword Keyword,
5107 NestedNameSpecifier *NNS,
5108 const IdentifierInfo *Name,
5109 ArrayRef<TemplateArgument> Args) const {
5110 assert((!NNS || NNS->isDependent()) &&
5111 "nested-name-specifier must be dependent");
5112
5113 llvm::FoldingSetNodeID ID;
5114 DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
5115 Name, Args);
5116
5117 void *InsertPos = nullptr;
5118 DependentTemplateSpecializationType *T
5119 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
5120 if (T)
5121 return QualType(T, 0);
5122
5123 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
5124
5125 ElaboratedTypeKeyword CanonKeyword = Keyword;
5126 if (Keyword == ElaboratedTypeKeyword::None)
5127 CanonKeyword = ElaboratedTypeKeyword::Typename;
5128
5129 bool AnyNonCanonArgs = false;
5130 auto CanonArgs =
5131 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs);
5132
5133 QualType Canon;
5134 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
5135 Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
5136 Name,
5137 CanonArgs);
5138
5139 // Find the insert position again.
5140 [[maybe_unused]] auto *Nothing =
5141 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
5142 assert(!Nothing && "canonical type broken");
5143 }
5144
5145 void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
5146 sizeof(TemplateArgument) * Args.size()),
5148 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
5149 Name, Args, Canon);
5150 Types.push_back(T);
5151 DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
5152 return QualType(T, 0);
5153}
5154
5155TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
5156 TemplateArgument Arg;
5157 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
5158 QualType ArgType = getTypeDeclType(TTP);
5159 if (TTP->isParameterPack())
5160 ArgType = getPackExpansionType(ArgType, std::nullopt);
5161
5162 Arg = TemplateArgument(ArgType);
5163 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
5164 QualType T =
5165 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this);
5166 // For class NTTPs, ensure we include the 'const&