clang 20.0.0git
ASTContext.cpp
Go to the documentation of this file.
1//===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the ASTContext interface.
10//
11//===----------------------------------------------------------------------===//
12
14#include "ByteCode/Context.h"
15#include "CXXABI.h"
16#include "clang/AST/APValue.h"
20#include "clang/AST/Attr.h"
22#include "clang/AST/CharUnits.h"
23#include "clang/AST/Comment.h"
24#include "clang/AST/Decl.h"
25#include "clang/AST/DeclBase.h"
26#include "clang/AST/DeclCXX.h"
28#include "clang/AST/DeclObjC.h"
33#include "clang/AST/Expr.h"
34#include "clang/AST/ExprCXX.h"
37#include "clang/AST/Mangle.h"
43#include "clang/AST/Stmt.h"
47#include "clang/AST/Type.h"
48#include "clang/AST/TypeLoc.h"
56#include "clang/Basic/LLVM.h"
58#include "clang/Basic/Linkage.h"
59#include "clang/Basic/Module.h"
69#include "llvm/ADT/APFixedPoint.h"
70#include "llvm/ADT/APInt.h"
71#include "llvm/ADT/APSInt.h"
72#include "llvm/ADT/ArrayRef.h"
73#include "llvm/ADT/DenseMap.h"
74#include "llvm/ADT/DenseSet.h"
75#include "llvm/ADT/FoldingSet.h"
76#include "llvm/ADT/PointerUnion.h"
77#include "llvm/ADT/STLExtras.h"
78#include "llvm/ADT/SmallPtrSet.h"
79#include "llvm/ADT/SmallVector.h"
80#include "llvm/ADT/StringExtras.h"
81#include "llvm/ADT/StringRef.h"
82#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
83#include "llvm/Support/Capacity.h"
84#include "llvm/Support/Casting.h"
85#include "llvm/Support/Compiler.h"
86#include "llvm/Support/ErrorHandling.h"
87#include "llvm/Support/MD5.h"
88#include "llvm/Support/MathExtras.h"
89#include "llvm/Support/SipHash.h"
90#include "llvm/Support/raw_ostream.h"
91#include "llvm/TargetParser/AArch64TargetParser.h"
92#include "llvm/TargetParser/Triple.h"
93#include <algorithm>
94#include <cassert>
95#include <cstddef>
96#include <cstdint>
97#include <cstdlib>
98#include <map>
99#include <memory>
100#include <optional>
101#include <string>
102#include <tuple>
103#include <utility>
104
105using namespace clang;
106
117
118/// \returns The locations that are relevant when searching for Doc comments
119/// related to \p D.
// NOTE(review): Doxygen text-dump artifact -- the extractor dropped the
// function signature (presumably `getDeclLocsForCommentSearch(const Decl *D,
// SourceMenager &SourceMgr)` judging by the call at dump line 296 -- confirm)
// and the declaration of the `Locations` result vector used below.
// Code lines are kept verbatim; only comments were added.
//
// Behavior visible here: declarations that can never carry documentation
// (implicit decls, implicit template instantiations, embedded tag decls,
// parameters, template parameters) yield an empty location list; otherwise
// one or two candidate locations are produced (expansion site first, then
// spelling location, for macro-generated declarations).
122 assert(D);
123
124 // User can not attach documentation to implicit declarations.
125 if (D->isImplicit())
126 return {};
127
128 // User can not attach documentation to implicit instantiations.
129 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
130 if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
131 return {};
132 }
133
134 if (const auto *VD = dyn_cast<VarDecl>(D)) {
135 if (VD->isStaticDataMember() &&
136 VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
137 return {};
138 }
139
140 if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
141 if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
142 return {};
143 }
144
145 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
146 TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
147 if (TSK == TSK_ImplicitInstantiation ||
148 TSK == TSK_Undeclared)
149 return {};
150 }
151
152 if (const auto *ED = dyn_cast<EnumDecl>(D)) {
153 if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
154 return {};
155 }
156 if (const auto *TD = dyn_cast<TagDecl>(D)) {
157 // When tag declaration (but not definition!) is part of the
158 // decl-specifier-seq of some other declaration, it doesn't get comment
159 if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
160 return {};
161 }
162 // TODO: handle comments for function parameters properly.
163 if (isa<ParmVarDecl>(D))
164 return {};
165
166 // TODO: we could look up template parameter documentation in the template
167 // documentation.
168 if (isa<TemplateTypeParmDecl>(D) ||
169 isa<NonTypeTemplateParmDecl>(D) ||
170 isa<TemplateTemplateParmDecl>(D))
171 return {};
172
174 // Find declaration location.
175 // For Objective-C declarations we generally don't expect to have multiple
176 // declarators, thus use declaration starting location as the "declaration
177 // location".
178 // For all other declarations multiple declarators are used quite frequently,
179 // so we use the location of the identifier as the "declaration location".
180 SourceLocation BaseLocation;
181 if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
182 isa<ObjCPropertyDecl>(D) || isa<RedeclarableTemplateDecl>(D) ||
183 isa<ClassTemplateSpecializationDecl>(D) ||
184 // Allow association with Y across {} in `typedef struct X {} Y`.
185 isa<TypedefDecl>(D))
186 BaseLocation = D->getBeginLoc();
187 else
188 BaseLocation = D->getLocation();
189
// NOTE(review): `Locations` is appended to below but its declaration (dump
// line ~121, likely `SmallVector<SourceLocation, 2> Locations;`) was lost in
// extraction -- TODO confirm against upstream.
190 if (!D->getLocation().isMacroID()) {
191 Locations.emplace_back(BaseLocation);
192 } else {
193 const auto *DeclCtx = D->getDeclContext();
194
195 // When encountering definitions generated from a macro (that are not
196 // contained by another declaration in the macro) we need to try and find
197 // the comment at the location of the expansion but if there is no comment
198 // there we should retry to see if there is a comment inside the macro as
199 // well. To this end we return first BaseLocation to first look at the
200 // expansion site, the second value is the spelling location of the
201 // beginning of the declaration defined inside the macro.
202 if (!(DeclCtx &&
203 Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) {
204 Locations.emplace_back(SourceMgr.getExpansionLoc(BaseLocation));
205 }
206
207 // We use Decl::getBeginLoc() and not just BaseLocation here to ensure that
208 // we don't refer to the macro argument location at the expansion site (this
209 // can happen if the name's spelling is provided via macro argument), and
210 // always to the declaration itself.
211 Locations.emplace_back(SourceMgr.getSpellingLoc(D->getBeginLoc()));
212 }
213
214 return Locations;
215}
216
// NOTE(review): Doxygen text-dump artifact -- the line carrying the function
// name was dropped; this is `ASTContext::getRawCommentForDeclNoCacheImpl`
// judging by the call at dump line 321 -- TODO confirm.
//
// Finds the raw doc comment for \p D inside one file's comment map:
// first tries a trailing comment on the same line, then the nearest
// preceding comment with nothing but whitespace between it and the decl.
// Returns nullptr when no suitable comment exists.
218 const Decl *D, const SourceLocation RepresentativeLocForDecl,
219 const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
220 // If the declaration doesn't map directly to a location in a file, we
221 // can't find the comment.
222 if (RepresentativeLocForDecl.isInvalid() ||
223 !RepresentativeLocForDecl.isFileID())
224 return nullptr;
225
226 // If there are no comments anywhere, we won't find anything.
227 if (CommentsInTheFile.empty())
228 return nullptr;
229
230 // Decompose the location for the declaration and find the beginning of the
231 // file buffer.
232 const std::pair<FileID, unsigned> DeclLocDecomp =
233 SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);
234
235 // Slow path.
// lower_bound: first comment at or after the decl's file offset.
236 auto OffsetCommentBehindDecl =
237 CommentsInTheFile.lower_bound(DeclLocDecomp.second);
238
239 // First check whether we have a trailing comment.
240 if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
241 RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
// Trailing comments are only attached to a small set of decl kinds
// (fields, enumerators, variables, ObjC methods/properties).
242 if ((CommentBehindDecl->isDocumentation() ||
243 LangOpts.CommentOpts.ParseAllComments) &&
244 CommentBehindDecl->isTrailingComment() &&
245 (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
246 isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {
247
248 // Check that Doxygen trailing comment comes after the declaration, starts
249 // on the same line and in the same file as the declaration.
250 if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
251 Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
252 OffsetCommentBehindDecl->first)) {
253 return CommentBehindDecl;
254 }
255 }
256 }
257
258 // The comment just after the declaration was not a trailing comment.
259 // Let's look at the previous comment.
260 if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
261 return nullptr;
262
263 auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
264 RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;
265
266 // Check that we actually have a non-member Doxygen comment.
267 if (!(CommentBeforeDecl->isDocumentation() ||
268 LangOpts.CommentOpts.ParseAllComments) ||
269 CommentBeforeDecl->isTrailingComment())
270 return nullptr;
271
272 // Decompose the end of the comment.
273 const unsigned CommentEndOffset =
274 Comments.getCommentEndOffset(CommentBeforeDecl);
275
276 // Get the corresponding buffer.
277 bool Invalid = false;
278 const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
279 &Invalid).data();
280 if (Invalid)
281 return nullptr;
282
283 // Extract text between the comment and declaration.
284 StringRef Text(Buffer + CommentEndOffset,
285 DeclLocDecomp.second - CommentEndOffset);
286
287 // There should be no other declarations or preprocessor directives between
288 // comment and declaration.
// Any of ; { } # @ in between means something else intervened, so the
// comment does not belong to this declaration.
289 if (Text.find_last_of(";{}#@") != StringRef::npos)
290 return nullptr;
291
292 return CommentBeforeDecl;
293}
294
// NOTE(review): Doxygen text-dump artifact -- the function signature (likely
// `RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const`
// -- TODO confirm) and dump lines 304-305 were dropped. The dangling
// `CommentsLoaded = true; }` below (line 306-307) presumably belongs to a
// lost `if (ExternalSource && !CommentsLoaded) { ... }` block, and the
// `continue` statements only make sense inside the surviving `for` loop, so
// the brace structure shown here is NOT faithful to the real code. Code is
// kept verbatim; only comments were added.
//
// Intent visible from the surviving lines: try each candidate location for
// D and return the first raw comment found in that location's file.
296 const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);
297
298 for (const auto DeclLoc : DeclLocs) {
299 // If the declaration doesn't map directly to a location in a file, we
300 // can't find the comment.
301 if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
302 continue;
303
306 CommentsLoaded = true;
307 }
308
309 if (Comments.empty())
310 continue;
311
312 const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
313 if (!File.isValid())
314 continue;
315
316 const auto CommentsInThisFile = Comments.getCommentsInFile(File);
317 if (!CommentsInThisFile || CommentsInThisFile->empty())
318 continue;
319
320 if (RawComment *Comment =
321 getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile))
322 return Comment;
323 }
324
325 return nullptr;
326}
327
// NOTE(review): the signature line was dropped by the extractor; this is
// presumably `void ASTContext::addComment(const RawComment &RC)` -- confirm.
// Registers one raw comment with the per-file comment map, unless it came
// from a system header and those are not being retained.
329 assert(LangOpts.RetainCommentsFromSystemHeaders ||
330 !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
331 Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
332}
333
334/// If we have a 'templated' declaration for a template, adjust 'D' to
335/// refer to the actual template.
336/// If we have an implicit instantiation, adjust 'D' to refer to template.
// NOTE(review): Doxygen text-dump artifact -- dump lines 353 (the call
// producing `MemberDecl`, presumably FD->getInstantiatedFromMemberFunction()
// -- confirm), 378-379 (the PointerUnion initializer) and 383 (the
// ClassTemplatePartialSpecializationDecl arm of the ?:) were dropped.
// Code is kept verbatim; only comments were added.
337static const Decl &adjustDeclToTemplate(const Decl &D) {
338 if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
339 // Is this function declaration part of a function template?
340 if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
341 return *FTD;
342
343 // Nothing to do if function is not an implicit instantiation.
344 if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
345 return D;
346
347 // Function is an implicit instantiation of a function template?
348 if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
349 return *FTD;
350
351 // Function is instantiated from a member definition of a class template?
352 if (const FunctionDecl *MemberDecl =
354 return *MemberDecl;
355
356 return D;
357 }
358 if (const auto *VD = dyn_cast<VarDecl>(&D)) {
359 // Static data member is instantiated from a member definition of a class
360 // template?
361 if (VD->isStaticDataMember())
362 if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
363 return *MemberDecl;
364
365 return D;
366 }
367 if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
368 // Is this class declaration part of a class template?
369 if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
370 return *CTD;
371
372 // Class is an implicit instantiation of a class template or partial
373 // specialization?
374 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
375 if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
376 return D;
377 llvm::PointerUnion<ClassTemplateDecl *,
380 return PU.is<ClassTemplateDecl *>()
381 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
382 : *static_cast<const Decl *>(
384 }
385
386 // Class is instantiated from a member definition of a class template?
387 if (const MemberSpecializationInfo *Info =
388 CRD->getMemberSpecializationInfo())
389 return *Info->getInstantiatedFrom();
390
391 return D;
392 }
393 if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
394 // Enum is instantiated from a member definition of a class template?
395 if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
396 return *MemberDecl;
397
398 return D;
399 }
400 // FIXME: Adjust alias templates?
401 return D;
402}
403
// NOTE(review): Doxygen text-dump artifact -- the line carrying the function
// name was dropped; this is presumably
// `const RawComment *ASTContext::getRawCommentForAnyRedecl(...)` (it matches
// the caching maps used below) -- TODO confirm. Dump lines 413-414 (likely
// the adjustDeclToTemplate() adjustment of D) were also dropped.
405 const Decl *D,
406 const Decl **OriginalDecl) const {
407 if (!D) {
408 if (OriginalDecl)
// NOTE(review): this assigns to the *parameter* rather than through it
// (`OriginalDecl = nullptr` vs `*OriginalDecl = nullptr` as done at dump
// line 468 below). Either a real bug or a '*' lost in extraction -- verify
// against upstream before relying on this out-parameter being cleared here.
409 OriginalDecl = nullptr;
410 return nullptr;
411 }
412
414
415 // Any comment directly attached to D?
416 {
417 auto DeclComment = DeclRawComments.find(D);
418 if (DeclComment != DeclRawComments.end()) {
419 if (OriginalDecl)
420 *OriginalDecl = D;
421 return DeclComment->second;
422 }
423 }
424
425 // Any comment attached to any redeclaration of D?
426 const Decl *CanonicalD = D->getCanonicalDecl();
427 if (!CanonicalD)
428 return nullptr;
429
430 {
431 auto RedeclComment = RedeclChainComments.find(CanonicalD);
432 if (RedeclComment != RedeclChainComments.end()) {
433 if (OriginalDecl)
434 *OriginalDecl = RedeclComment->second;
435 auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
436 assert(CommentAtRedecl != DeclRawComments.end() &&
437 "This decl is supposed to have comment attached.");
438 return CommentAtRedecl->second;
439 }
440 }
441
442 // Any redeclarations of D that we haven't checked for comments yet?
443 // We can't use DenseMap::iterator directly since it'd get invalid.
444 auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
445 return CommentlessRedeclChains.lookup(CanonicalD);
446 }();
447
// Walk the redecl chain, resuming after the last redecl we already checked.
448 for (const auto Redecl : D->redecls()) {
449 assert(Redecl);
450 // Skip all redeclarations that have been checked previously.
451 if (LastCheckedRedecl) {
452 if (LastCheckedRedecl == Redecl) {
453 LastCheckedRedecl = nullptr;
454 }
455 continue;
456 }
457 const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
458 if (RedeclComment) {
459 cacheRawCommentForDecl(*Redecl, *RedeclComment);
460 if (OriginalDecl)
461 *OriginalDecl = Redecl;
462 return RedeclComment;
463 }
// Remember how far we got so the next query can resume here.
464 CommentlessRedeclChains[CanonicalD] = Redecl;
465 }
466
467 if (OriginalDecl)
468 *OriginalDecl = nullptr;
469 return nullptr;
470}
471
// NOTE(review): the signature's first line was dropped; this is presumably
// `void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,` (it is
// called with those arguments at dump line 459) -- confirm.
// Records the decl->comment association and marks the redecl chain as
// having a comment (so CommentlessRedeclChains no longer applies).
473 const RawComment &Comment) const {
474 assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
475 DeclRawComments.try_emplace(&OriginalD, &Comment);
476 const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
477 RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
478 CommentlessRedeclChains.erase(CanonicalDecl);
479}
480
// Collects ObjC methods that redeclare \p ObjCMethod in known class
// extensions of the interface being implemented.
// NOTE(review): the second parameter line (dump line 482, presumably
// `SmallVectorImpl<const NamedDecl *> &Redeclared` given the push_back
// below -- confirm) was dropped by the extractor.
481static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
483 const DeclContext *DC = ObjCMethod->getDeclContext();
484 if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
485 const ObjCInterfaceDecl *ID = IMD->getClassInterface();
486 if (!ID)
487 return;
488 // Add redeclared method here.
489 for (const auto *Ext : ID->known_extensions()) {
490 if (ObjCMethodDecl *RedeclaredMethod =
491 Ext->getMethod(ObjCMethod->getSelector(),
492 ObjCMethod->isInstanceMethod()))
493 Redeclared.push_back(RedeclaredMethod);
494 }
495 }
496}
497
// NOTE(review): Doxygen text-dump artifact -- the first signature line
// (presumably `void ASTContext::attachCommentsToJustParsedDecls(
// ArrayRef<Decl *> Decls,` given the loops below -- confirm) and dump lines
// 508-509 (producing `Loc`), 538 and 553 were dropped. Code kept verbatim.
//
// Purpose visible from the code: after parsing a group of decls, try to
// attach any not-yet-attached comments in the same file to them, caching
// and eagerly parsing each match.
499 const Preprocessor *PP) {
500 if (Comments.empty() || Decls.empty())
501 return;
502
503 FileID File;
504 for (const Decl *D : Decls) {
505 if (D->isInvalidDecl())
506 continue;
507
510 if (Loc.isValid()) {
511 // See if there are any new comments that are not attached to a decl.
512 // The location doesn't have to be precise - we care only about the file.
513 File = SourceMgr.getDecomposedLoc(Loc).first;
514 break;
515 }
516 }
517
518 if (File.isInvalid())
519 return;
520
521 auto CommentsInThisFile = Comments.getCommentsInFile(File);
// If the last comment in the file is already attached, every earlier one
// is too -- nothing left to do.
522 if (!CommentsInThisFile || CommentsInThisFile->empty() ||
523 CommentsInThisFile->rbegin()->second->isAttached())
524 return;
525
526 // There is at least one comment not attached to a decl.
527 // Maybe it should be attached to one of Decls?
528 //
529 // Note that this way we pick up not only comments that precede the
530 // declaration, but also comments that *follow* the declaration -- thanks to
531 // the lookahead in the lexer: we've consumed the semicolon and looked
532 // ahead through comments.
533 for (const Decl *D : Decls) {
534 assert(D);
535 if (D->isInvalidDecl())
536 continue;
537
539
540 if (DeclRawComments.count(D) > 0)
541 continue;
542
543 const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);
544
545 for (const auto DeclLoc : DeclLocs) {
546 if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
547 continue;
548
549 if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl(
550 D, DeclLoc, *CommentsInThisFile)) {
551 cacheRawCommentForDecl(*D, *DocComment);
552 comments::FullComment *FC = DocComment->parse(*this, PP, D);
554 break;
555 }
556 }
557 }
558}
559
// NOTE(review): the first signature line (presumably
// `comments::FullComment *ASTContext::cloneFullComment(comments::FullComment
// *FC,` given the call sites below -- confirm) and the start of dump line
// 569-570 (the `auto *CFC =` initializer head) were dropped.
// Clones a parsed comment so it can be re-attached to a different decl \p D
// while keeping the original comment's decl for reference resolution.
561 const Decl *D) const {
562 auto *ThisDeclInfo = new (*this) comments::DeclInfo;
563 ThisDeclInfo->CommentDecl = D;
564 ThisDeclInfo->IsFilled = false;
565 ThisDeclInfo->fill();
566 ThisDeclInfo->CommentDecl = FC->getDecl();
567 if (!ThisDeclInfo->TemplateParameters)
568 ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
570 new (*this) comments::FullComment(FC->getBlocks(),
571 ThisDeclInfo);
572 return CFC;
573}
574
// NOTE(review): signature dropped by the extractor (dump lines 575-576);
// from the body this is a one-liner that parses the decl's raw comment with
// no preprocessor -- likely `getLocalCommentForDeclUncached`; `RC` must be
// produced on the lost lines. Verify against upstream.
577 return RC ? RC->parse(*this, nullptr, D) : nullptr;
578}
579
// NOTE(review): Doxygen text-dump artifact -- the function-name line
// (presumably `comments::FullComment *ASTContext::getCommentForDecl(` per
// the recursive calls below -- confirm) and many interior lines were
// dropped: dump lines 585 (likely the adjustDeclToTemplate step), 594,
// 605 (the `Overridden` vector declaration), 624, 630, 636, 649 (the
// declaration of `NonVirtualBase`), 653 and 667 (the recursive
// getCommentForDecl calls for base classes). Code kept verbatim.
//
// Visible strategy: return a cached parsed comment if present; otherwise
// find a raw comment on any redeclaration; if none, inherit documentation
// from related decls (property for accessors, overridden methods, the
// underlying tag for typedefs, ObjC superclasses/categories, C++ bases),
// cloning the inherited comment onto D.
581 const Decl *D,
582 const Preprocessor *PP) const {
583 if (!D || D->isInvalidDecl())
584 return nullptr;
586
587 const Decl *Canonical = D->getCanonicalDecl();
588 llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
589 ParsedComments.find(Canonical);
590
591 if (Pos != ParsedComments.end()) {
592 if (Canonical != D) {
593 comments::FullComment *FC = Pos->second;
595 return CFC;
596 }
597 return Pos->second;
598 }
599
600 const Decl *OriginalDecl = nullptr;
601
602 const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
603 if (!RC) {
604 if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
606 const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
607 if (OMD && OMD->isPropertyAccessor())
608 if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
609 if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
610 return cloneFullComment(FC, D);
611 if (OMD)
612 addRedeclaredMethods(OMD, Overridden);
613 getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
614 for (unsigned i = 0, e = Overridden.size(); i < e; i++)
615 if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
616 return cloneFullComment(FC, D);
617 }
618 else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
619 // Attach any tag type's documentation to its typedef if latter
620 // does not have one of its own.
621 QualType QT = TD->getUnderlyingType();
622 if (const auto *TT = QT->getAs<TagType>())
623 if (const Decl *TD = TT->getDecl())
625 return cloneFullComment(FC, D);
626 }
627 else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
628 while (IC->getSuperClass()) {
629 IC = IC->getSuperClass();
631 return cloneFullComment(FC, D);
632 }
633 }
634 else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
635 if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
637 return cloneFullComment(FC, D);
638 }
639 else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
640 if (!(RD = RD->getDefinition()))
641 return nullptr;
642 // Check non-virtual bases.
643 for (const auto &I : RD->bases()) {
644 if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
645 continue;
646 QualType Ty = I.getType();
647 if (Ty.isNull())
648 continue;
650 if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
651 continue;
652
654 return cloneFullComment(FC, D);
655 }
656 }
657 // Check virtual bases.
658 for (const auto &I : RD->vbases()) {
659 if (I.getAccessSpecifier() != AS_public)
660 continue;
661 QualType Ty = I.getType();
662 if (Ty.isNull())
663 continue;
664 if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
665 if (!(VirtualBase= VirtualBase->getDefinition()))
666 continue;
668 return cloneFullComment(FC, D);
669 }
670 }
671 }
672 return nullptr;
673 }
674
675 // If the RawComment was attached to other redeclaration of this Decl, we
676 // should parse the comment in context of that other Decl. This is important
677 // because comments can contain references to parameter names which can be
678 // different across redeclarations.
679 if (D != OriginalDecl && OriginalDecl)
680 return getCommentForDecl(OriginalDecl, PP);
681
682 comments::FullComment *FC = RC->parse(*this, PP, D);
683 ParsedComments[Canonical] = FC;
684 return FC;
685}
686
// Folding-set profile for a canonical template template parameter: hashes
// depth, position, pack-ness, and a structural encoding of the parameter
// list (0 = type param, 1 = non-type param, 2 = template template param).
// NOTE(review): dump lines 690 (the `TemplateTemplateParmDecl *Parm`
// parameter), 695 and 697 (fetching `Params` from Parm and the loop header
// over it) were dropped by the extractor. Code kept verbatim.
687void
688ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
689 const ASTContext &C,
691 ID.AddInteger(Parm->getDepth());
692 ID.AddInteger(Parm->getPosition());
693 ID.AddBoolean(Parm->isParameterPack());
694
696 ID.AddInteger(Params->size());
698 PEnd = Params->end();
699 P != PEnd; ++P) {
700 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
701 ID.AddInteger(0);
702 ID.AddBoolean(TTP->isParameterPack());
703 if (TTP->isExpandedParameterPack()) {
704 ID.AddBoolean(true);
705 ID.AddInteger(TTP->getNumExpansionParameters());
706 } else
707 ID.AddBoolean(false);
708 continue;
709 }
710
711 if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
712 ID.AddInteger(1);
713 ID.AddBoolean(NTTP->isParameterPack());
// Constraints are stripped so that constrained and unconstrained forms
// of otherwise-identical parameters hash alike (C++20 [temp.over.link]).
714 ID.AddPointer(C.getUnconstrainedType(C.getCanonicalType(NTTP->getType()))
715 .getAsOpaquePtr());
716 if (NTTP->isExpandedParameterPack()) {
717 ID.AddBoolean(true);
718 ID.AddInteger(NTTP->getNumExpansionTypes());
719 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
720 QualType T = NTTP->getExpansionType(I);
721 ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
722 }
723 } else
724 ID.AddBoolean(false);
725 continue;
726 }
727
// Template template parameters recurse into this same profile.
728 auto *TTP = cast<TemplateTemplateParmDecl>(*P);
729 ID.AddInteger(2);
730 Profile(ID, C, TTP);
731 }
732}
733
// NOTE(review): Doxygen text-dump artifact -- the return-type line (dump
// line 734, presumably `TemplateTemplateParmDecl *`) and many interior
// lines were dropped: 747 (fetching `Params`), 750 (loop header), 756-757
// and 760 (the TemplateTypeParmDecl::Create head), 765-767 (computing `T`
// and `TInfo`), 770, 777-779 and 787-789 (the NonTypeTemplateParmDecl::
// Create heads), and 802-805 (the TemplateTemplateParmDecl::Create head
// building `CanonTTP`). Code kept verbatim; only comments added.
//
// Visible intent: canonicalize a template template parameter by profiling
// it into a folding set and, on a miss, rebuilding its parameter list with
// canonical types / no names / no constraints, then caching the result.
735ASTContext::getCanonicalTemplateTemplateParmDecl(
736 TemplateTemplateParmDecl *TTP) const {
737 // Check if we already have a canonical template template parameter.
738 llvm::FoldingSetNodeID ID;
739 CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
740 void *InsertPos = nullptr;
741 CanonicalTemplateTemplateParm *Canonical
742 = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
743 if (Canonical)
744 return Canonical->getParam();
745
746 // Build a canonical template parameter list.
748 SmallVector<NamedDecl *, 4> CanonParams;
749 CanonParams.reserve(Params->size());
751 PEnd = Params->end();
752 P != PEnd; ++P) {
753 // Note that, per C++20 [temp.over.link]/6, when determining whether
754 // template-parameters are equivalent, constraints are ignored.
755 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
758 TTP->getDepth(), TTP->getIndex(), nullptr, false,
759 TTP->isParameterPack(), /*HasTypeConstraint=*/false,
761 ? std::optional<unsigned>(TTP->getNumExpansionParameters())
762 : std::nullopt);
763 CanonParams.push_back(NewTTP);
764 } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
768 if (NTTP->isExpandedParameterPack()) {
769 SmallVector<QualType, 2> ExpandedTypes;
771 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
772 ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
773 ExpandedTInfos.push_back(
774 getTrivialTypeSourceInfo(ExpandedTypes.back()));
775 }
776
780 NTTP->getDepth(),
781 NTTP->getPosition(), nullptr,
782 T,
783 TInfo,
784 ExpandedTypes,
785 ExpandedTInfos);
786 } else {
790 NTTP->getDepth(),
791 NTTP->getPosition(), nullptr,
792 T,
793 NTTP->isParameterPack(),
794 TInfo);
795 }
796 CanonParams.push_back(Param);
797 } else
// Nested template template parameters are canonicalized recursively.
798 CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
799 cast<TemplateTemplateParmDecl>(*P)));
800 }
801
804 TTP->getPosition(), TTP->isParameterPack(), nullptr, /*Typename=*/false,
806 CanonParams, SourceLocation(),
807 /*RequiresClause=*/nullptr));
808
809 // Get the new insert position for the node we care about.
// Re-lookup: building canonical types above may have grown the folding set
// and invalidated the original InsertPos.
810 Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
811 assert(!Canonical && "Shouldn't be in the map!");
812 (void)Canonical;
813
814 // Create the canonical template template parameter entry.
815 Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
816 CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
817 return CanonTTP;
818}
819
// NOTE(review): signature dropped by the extractor (dump line 820); this is
// `TargetCXXABI::Kind ASTContext::getCXXABIKind() const` judging by its use
// in createCXXABI below -- confirm. A -fc++-abi= override in LangOpts wins
// over the target's default ABI kind.
821 auto Kind = getTargetInfo().getCXXABI().getKind();
822 return getLangOpts().CXXABI.value_or(Kind);
823}
824
825CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
826 if (!LangOpts.CPlusPlus) return nullptr;
827
828 switch (getCXXABIKind()) {
829 case TargetCXXABI::AppleARM64:
830 case TargetCXXABI::Fuchsia:
831 case TargetCXXABI::GenericARM: // Same as Itanium at this level
832 case TargetCXXABI::iOS:
833 case TargetCXXABI::WatchOS:
834 case TargetCXXABI::GenericAArch64:
835 case TargetCXXABI::GenericMIPS:
836 case TargetCXXABI::GenericItanium:
837 case TargetCXXABI::WebAssembly:
838 case TargetCXXABI::XL:
839 return CreateItaniumCXXABI(*this);
840 case TargetCXXABI::Microsoft:
841 return CreateMicrosoftCXXABI(*this);
842 }
843 llvm_unreachable("Invalid CXXABI type!");
844}
845
// NOTE(review): signature dropped by the extractor (dump line 846); this is
// presumably `interp::Context &ASTContext::getInterpContext()` given the
// body -- confirm. Lazily constructs the bytecode-interpreter context on
// first use.
847 if (!InterpContext) {
848 InterpContext.reset(new interp::Context(*this));
849 }
850 return *InterpContext.get();
851}
852
// NOTE(review): signature dropped by the extractor (dump line 853); this is
// presumably `ParentMapContext &ASTContext::getParentMapContext()` given
// the body -- confirm. Lazily constructs the parent-map context.
854 if (!ParentMapCtx)
855 ParentMapCtx.reset(new ParentMapContext(*this));
856 return *ParentMapCtx.get();
857}
858
// NOTE(review): Doxygen text-dump artifact -- the first signature line
// (likely a static helper taking `const TargetInfo &TI,` -- confirm) and
// ALL THREE case labels of the switch (dump lines 862, 864, 866, presumably
// LangOptions::ASMM_Target / ASMM_On / ASMM_Off) were dropped, leaving bare
// return statements. Do not restructure without recovering the labels from
// upstream. Code kept verbatim.
860 const LangOptions &LangOpts) {
861 switch (LangOpts.getAddressSpaceMapMangling()) {
863 return TI.useAddressSpaceMapMangling();
865 return true;
867 return false;
868 }
869 llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
870}
871
// NOTE(review): ASTContext constructor. The first signature line (dump line
// 872, presumably `ASTContext::ASTContext(LangOptions &LOpts,
// SourceManager &SM,` given the member initializers -- confirm) and dump
// line 895 (a statement in the body, possibly addTranslationUnitDecl())
// were dropped by the extractor. Code kept verbatim.
873 IdentifierTable &idents, SelectorTable &sels,
874 Builtin::Context &builtins, TranslationUnitKind TUKind)
875 : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
876 DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()),
877 DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()),
878 DependentSizedMatrixTypes(this_()),
879 FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
880 DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()),
881 TemplateSpecializationTypes(this_()),
882 DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
883 DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
884 ArrayParameterTypes(this_()), CanonTemplateTemplateParms(this_()),
885 SourceMgr(SM), LangOpts(LOpts),
886 NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
887 XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
888 LangOpts.XRayNeverInstrumentFiles,
889 LangOpts.XRayAttrListFiles, SM)),
890 ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
891 PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
892 BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
893 Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
894 CompCategories(this_()), LastSDM(nullptr, 0) {
896}
897
// NOTE(review): destructor body; the `ASTContext::~ASTContext() {` line
// (dump line 898) was dropped by the extractor. Code kept verbatim.
// Tears down side tables that own non-trivially-destructible payloads:
// DeclContext maps, registered deallocation callbacks, record layouts
// (which contain DenseMaps), attribute vectors, and per-module initializer
// lists.
899 // Release the DenseMaps associated with DeclContext objects.
900 // FIXME: Is this the ideal solution?
901 ReleaseDeclContextMaps();
902
903 // Call all of the deallocation functions on all of their targets.
904 for (auto &Pair : Deallocations)
905 (Pair.first)(Pair.second);
906 Deallocations.clear();
907
908 // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
909 // because they can contain DenseMaps.
910 for (llvm::DenseMap<const ObjCContainerDecl*,
911 const ASTRecordLayout*>::iterator
912 I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
913 // Increment in loop to prevent using deallocated memory.
914 if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
915 R->Destroy(*this);
916 ObjCLayouts.clear();
917
918 for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
919 I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
920 // Increment in loop to prevent using deallocated memory.
921 if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
922 R->Destroy(*this);
923 }
924 ASTRecordLayouts.clear();
925
926 for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
927 AEnd = DeclAttrs.end();
928 A != AEnd; ++A)
929 A->second->~AttrVec();
930 DeclAttrs.clear();
931
932 for (const auto &Value : ModuleInitializers)
933 Value.second->~PerModuleInitializers();
934 ModuleInitializers.clear();
935}
936
938
// Replaces the set of top-level decls that AST traversals should visit.
// NOTE(review): dump line 941 (a statement after the assignment, possibly
// clearing/notifying the parent-map context -- confirm upstream) was
// dropped by the extractor.
939void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
940 TraversalScope = TopLevelDecls;
942}
943
944void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
945 Deallocations.push_back({Callback, Data});
946}
947
// NOTE(review): the rest of the signature (dump line 949, presumably
// `ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource>
// Source) {` -- confirm) was dropped by the extractor.
// Installs the external AST source (e.g. a PCH/module reader).
948void
950 ExternalSource = std::move(Source);
951}
952
// NOTE(review): `ASTContext::PrintStats()` body; the signature line (dump
// line 953) and several of the "Declared" counter operands (dump lines 986,
// 989, 991, 993, 996, 998, 1000, 1003, 1008) were dropped by the extractor,
// leaving some stream chains visibly incomplete. Code kept verbatim.
// Dumps type-node population, per-kind byte usage, implicit special-member
// counters, external-source stats, and allocator stats to stderr.
954 llvm::errs() << "\n*** AST Context Stats:\n";
955 llvm::errs() << " " << Types.size() << " types total.\n";
956
// Build one counter slot per concrete type class via TypeNodes.inc.
957 unsigned counts[] = {
958#define TYPE(Name, Parent) 0,
959#define ABSTRACT_TYPE(Name, Parent)
960#include "clang/AST/TypeNodes.inc"
961 0 // Extra
962 };
963
964 for (unsigned i = 0, e = Types.size(); i != e; ++i) {
965 Type *T = Types[i];
966 counts[(unsigned)T->getTypeClass()]++;
967 }
968
969 unsigned Idx = 0;
970 unsigned TotalBytes = 0;
971#define TYPE(Name, Parent) \
972 if (counts[Idx]) \
973 llvm::errs() << " " << counts[Idx] << " " << #Name \
974 << " types, " << sizeof(Name##Type) << " each " \
975 << "(" << counts[Idx] * sizeof(Name##Type) \
976 << " bytes)\n"; \
977 TotalBytes += counts[Idx] * sizeof(Name##Type); \
978 ++Idx;
979#define ABSTRACT_TYPE(Name, Parent)
980#include "clang/AST/TypeNodes.inc"
981
982 llvm::errs() << "Total bytes = " << TotalBytes << "\n";
983
984 // Implicit special member functions.
985 llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
987 << " implicit default constructors created\n";
988 llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
990 << " implicit copy constructors created\n";
992 llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
994 << " implicit move constructors created\n";
995 llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
997 << " implicit copy assignment operators created\n";
999 llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
1001 << " implicit move assignment operators created\n";
1002 llvm::errs() << NumImplicitDestructorsDeclared << "/"
1004 << " implicit destructors created\n";
1005
1006 if (ExternalSource) {
1007 llvm::errs() << "\n";
1009 }
1010
1011 BumpAlloc.PrintStats();
1012}
1013
// NOTE(review): the first signature line (dump line 1014, presumably
// `void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,`
// given the body -- confirm) and dump line 1018 (the body of the inner
// `if`, presumably a listener notification) were dropped by the extractor.
// CAUTION: as dumped, the `if` at line 1017 has lost its body, so it would
// appear to guard the push_back at line 1020 -- that nesting is an
// extraction artifact, not real control flow. Code kept verbatim.
1015 bool NotifyListeners) {
1016 if (NotifyListeners)
1017 if (auto *Listener = getASTMutationListener())
1019
1020 MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
1021}
1022
// NOTE(review): signature dropped by the extractor (dump line 1023);
// presumably `void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl
// *ND)` -- confirm. Removes duplicate Module entries for ND's canonical
// decl by nulling repeats and then erasing the nulls.
1024 auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
1025 if (It == MergedDefModules.end())
1026 return;
1027
1028 auto &Merged = It->second;
1029 llvm::DenseSet<Module*> Found;
1030 for (Module *&M : Merged)
1031 if (!Found.insert(M).second)
1032 M = nullptr;
1033 llvm::erase(Merged, nullptr);
1035
// NOTE(review): signature dropped by the extractor (dump lines 1036-1037);
// presumably `ASTContext::getModulesWithMergedDefinition(const Decl *Def)`
// returning an optional/ArrayRef of Module* -- confirm.
// Looks up the modules into which Def's canonical decl was merged.
1038 auto MergedIt =
1039 MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
1040 if (MergedIt == MergedDefModules.end())
1041 return std::nullopt;
1042 return MergedIt->second;
1043}
1044
1045void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
1046 if (LazyInitializers.empty())
1047 return;
1048
1049 auto *Source = Ctx.getExternalSource();
1050 assert(Source && "lazy initializers but no external source");
1051
1052 auto LazyInits = std::move(LazyInitializers);
1053 LazyInitializers.clear();
1054
1055 for (auto ID : LazyInits)
1056 Initializers.push_back(Source->GetExternalDecl(ID));
1057
1058 assert(LazyInitializers.empty() &&
1059 "GetExternalDecl for lazy module initializer added more inits");
1060}
1061
// NOTE(review): signature dropped by the extractor (dump line 1062);
// presumably `void ASTContext::addModuleInitializer(Module *M, Decl *D)`
// given the body -- confirm. Code kept verbatim.
1063 // One special case: if we add a module initializer that imports another
1064 // module, and that module's only initializer is an ImportDecl, simplify.
1065 if (const auto *ID = dyn_cast<ImportDecl>(D)) {
1066 auto It = ModuleInitializers.find(ID->getImportedModule());
1067
1068 // Maybe the ImportDecl does nothing at all. (Common case.)
1069 if (It == ModuleInitializers.end())
1070 return;
1071
1072 // Maybe the ImportDecl only imports another ImportDecl.
1073 auto &Imported = *It->second;
1074 if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
1075 Imported.resolve(*this);
1076 auto *OnlyDecl = Imported.Initializers.front();
1077 if (isa<ImportDecl>(OnlyDecl))
// Collapse the chain: record the imported module's sole ImportDecl
// directly instead of the intermediate one.
1078 D = OnlyDecl;
1079 }
1080 }
1081
// Lazily create the per-module initializer list on first use.
1082 auto *&Inits = ModuleInitializers[M];
1083 if (!Inits)
1084 Inits = new (*this) PerModuleInitializers;
1085 Inits->Initializers.push_back(D);
1086}
1087
1090 auto *&Inits = ModuleInitializers[M];
1091 if (!Inits)
1092 Inits = new (*this) PerModuleInitializers;
1093 Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
1094 IDs.begin(), IDs.end());
1095}
1096
1098 auto It = ModuleInitializers.find(M);
1099 if (It == ModuleInitializers.end())
1100 return std::nullopt;
1101
1102 auto *Inits = It->second;
1103 Inits->resolve(*this);
1104 return Inits->Initializers;
1105}
1106
1108 assert(M->isNamedModule());
1109 assert(!CurrentCXXNamedModule &&
1110 "We should set named module for ASTContext for only once");
1111 CurrentCXXNamedModule = M;
1112}
1113
1114bool ASTContext::isInSameModule(const Module *M1, const Module *M2) {
1115 if (!M1 != !M2)
1116 return false;
1117
1118 /// Get the representative module for M. The representative module is the
1119 /// first module unit for a specific primary module name. So that the module
1120 /// units have the same representative module belongs to the same module.
1121 ///
1122 /// The process is helpful to reduce the expensive string operations.
1123 auto GetRepresentativeModule = [this](const Module *M) {
1124 auto Iter = SameModuleLookupSet.find(M);
1125 if (Iter != SameModuleLookupSet.end())
1126 return Iter->second;
1127
1128 const Module *RepresentativeModule =
1129 PrimaryModuleNameMap.try_emplace(M->getPrimaryModuleInterfaceName(), M)
1130 .first->second;
1131 SameModuleLookupSet[M] = RepresentativeModule;
1132 return RepresentativeModule;
1133 };
1134
1135 assert(M1 && "Shouldn't call `isInSameModule` if both M1 and M2 are none.");
1136 return GetRepresentativeModule(M1) == GetRepresentativeModule(M2);
1137}
1138
1140 if (!ExternCContext)
1141 ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());
1142
1143 return ExternCContext;
1144}
1145
1148 const IdentifierInfo *II) const {
1149 auto *BuiltinTemplate =
1151 BuiltinTemplate->setImplicit();
1152 getTranslationUnitDecl()->addDecl(BuiltinTemplate);
1153
1154 return BuiltinTemplate;
1155}
1156
1159 if (!MakeIntegerSeqDecl)
1162 return MakeIntegerSeqDecl;
1163}
1164
1167 if (!TypePackElementDecl)
1170 return TypePackElementDecl;
1171}
1172
1174 RecordDecl::TagKind TK) const {
1176 RecordDecl *NewDecl;
1177 if (getLangOpts().CPlusPlus)
1178 NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
1179 Loc, &Idents.get(Name));
1180 else
1181 NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
1182 &Idents.get(Name));
1183 NewDecl->setImplicit();
1184 NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
1185 const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
1186 return NewDecl;
1187}
1188
1190 StringRef Name) const {
1193 const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
1194 SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
1195 NewDecl->setImplicit();
1196 return NewDecl;
1197}
1198
1200 if (!Int128Decl)
1201 Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
1202 return Int128Decl;
1203}
1204
1206 if (!UInt128Decl)
1207 UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
1208 return UInt128Decl;
1209}
1210
1211void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
1212 auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K);
1214 Types.push_back(Ty);
1215}
1216
1218 const TargetInfo *AuxTarget) {
1219 assert((!this->Target || this->Target == &Target) &&
1220 "Incorrect target reinitialization");
1221 assert(VoidTy.isNull() && "Context reinitialized?");
1222
1223 this->Target = &Target;
1224 this->AuxTarget = AuxTarget;
1225
1226 ABI.reset(createCXXABI(Target));
1227 AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);
1228
1229 // C99 6.2.5p19.
1230 InitBuiltinType(VoidTy, BuiltinType::Void);
1231
1232 // C99 6.2.5p2.
1233 InitBuiltinType(BoolTy, BuiltinType::Bool);
1234 // C99 6.2.5p3.
1235 if (LangOpts.CharIsSigned)
1236 InitBuiltinType(CharTy, BuiltinType::Char_S);
1237 else
1238 InitBuiltinType(CharTy, BuiltinType::Char_U);
1239 // C99 6.2.5p4.
1240 InitBuiltinType(SignedCharTy, BuiltinType::SChar);
1241 InitBuiltinType(ShortTy, BuiltinType::Short);
1242 InitBuiltinType(IntTy, BuiltinType::Int);
1243 InitBuiltinType(LongTy, BuiltinType::Long);
1244 InitBuiltinType(LongLongTy, BuiltinType::LongLong);
1245
1246 // C99 6.2.5p6.
1247 InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
1248 InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
1249 InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
1250 InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
1251 InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);
1252
1253 // C99 6.2.5p10.
1254 InitBuiltinType(FloatTy, BuiltinType::Float);
1255 InitBuiltinType(DoubleTy, BuiltinType::Double);
1256 InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);
1257
1258 // GNU extension, __float128 for IEEE quadruple precision
1259 InitBuiltinType(Float128Ty, BuiltinType::Float128);
1260
1261 // __ibm128 for IBM extended precision
1262 InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128);
1263
1264 // C11 extension ISO/IEC TS 18661-3
1265 InitBuiltinType(Float16Ty, BuiltinType::Float16);
1266
1267 // ISO/IEC JTC1 SC22 WG14 N1169 Extension
1268 InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
1269 InitBuiltinType(AccumTy, BuiltinType::Accum);
1270 InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
1271 InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
1272 InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
1273 InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
1274 InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
1275 InitBuiltinType(FractTy, BuiltinType::Fract);
1276 InitBuiltinType(LongFractTy, BuiltinType::LongFract);
1277 InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
1278 InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
1279 InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
1280 InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
1281 InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
1282 InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
1283 InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
1284 InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
1285 InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
1286 InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
1287 InitBuiltinType(SatFractTy, BuiltinType::SatFract);
1288 InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
1289 InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
1290 InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
1291 InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);
1292
1293 // GNU extension, 128-bit integers.
1294 InitBuiltinType(Int128Ty, BuiltinType::Int128);
1295 InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
1296
1297 // C++ 3.9.1p5
1298 if (TargetInfo::isTypeSigned(Target.getWCharType()))
1299 InitBuiltinType(WCharTy, BuiltinType::WChar_S);
1300 else // -fshort-wchar makes wchar_t be unsigned.
1301 InitBuiltinType(WCharTy, BuiltinType::WChar_U);
1302 if (LangOpts.CPlusPlus && LangOpts.WChar)
1304 else {
1305 // C99 (or C++ using -fno-wchar).
1306 WideCharTy = getFromTargetType(Target.getWCharType());
1307 }
1308
1309 WIntTy = getFromTargetType(Target.getWIntType());
1310
1311 // C++20 (proposed)
1312 InitBuiltinType(Char8Ty, BuiltinType::Char8);
1313
1314 if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1315 InitBuiltinType(Char16Ty, BuiltinType::Char16);
1316 else // C99
1317 Char16Ty = getFromTargetType(Target.getChar16Type());
1318
1319 if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1320 InitBuiltinType(Char32Ty, BuiltinType::Char32);
1321 else // C99
1322 Char32Ty = getFromTargetType(Target.getChar32Type());
1323
1324 // Placeholder type for type-dependent expressions whose type is
1325 // completely unknown. No code should ever check a type against
1326 // DependentTy and users should never see it; however, it is here to
1327 // help diagnose failures to properly check for type-dependent
1328 // expressions.
1329 InitBuiltinType(DependentTy, BuiltinType::Dependent);
1330
1331 // Placeholder type for functions.
1332 InitBuiltinType(OverloadTy, BuiltinType::Overload);
1333
1334 // Placeholder type for bound members.
1335 InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);
1336
1337 // Placeholder type for unresolved templates.
1338 InitBuiltinType(UnresolvedTemplateTy, BuiltinType::UnresolvedTemplate);
1339
1340 // Placeholder type for pseudo-objects.
1341 InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);
1342
1343 // "any" type; useful for debugger-like clients.
1344 InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);
1345
1346 // Placeholder type for unbridged ARC casts.
1347 InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);
1348
1349 // Placeholder type for builtin functions.
1350 InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
1351
1352 // Placeholder type for OMP array sections.
1353 if (LangOpts.OpenMP) {
1354 InitBuiltinType(ArraySectionTy, BuiltinType::ArraySection);
1355 InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
1356 InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
1357 }
1358 // Placeholder type for OpenACC array sections, if we are ALSO in OMP mode,
1359 // don't bother, as we're just using the same type as OMP.
1360 if (LangOpts.OpenACC && !LangOpts.OpenMP) {
1361 InitBuiltinType(ArraySectionTy, BuiltinType::ArraySection);
1362 }
1363 if (LangOpts.MatrixTypes)
1364 InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);
1365
1366 // Builtin types for 'id', 'Class', and 'SEL'.
1367 InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
1368 InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
1369 InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);
1370
1371 if (LangOpts.OpenCL) {
1372#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
1373 InitBuiltinType(SingletonId, BuiltinType::Id);
1374#include "clang/Basic/OpenCLImageTypes.def"
1375
1376 InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
1377 InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
1378 InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
1379 InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
1380 InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);
1381
1382#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
1383 InitBuiltinType(Id##Ty, BuiltinType::Id);
1384#include "clang/Basic/OpenCLExtensionTypes.def"
1385 }
1386
1387 if (LangOpts.HLSL) {
1388#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) \
1389 InitBuiltinType(SingletonId, BuiltinType::Id);
1390#include "clang/Basic/HLSLIntangibleTypes.def"
1391 }
1392
1393 if (Target.hasAArch64SVETypes() ||
1394 (AuxTarget && AuxTarget->hasAArch64SVETypes())) {
1395#define SVE_TYPE(Name, Id, SingletonId) \
1396 InitBuiltinType(SingletonId, BuiltinType::Id);
1397#include "clang/Basic/AArch64SVEACLETypes.def"
1398 }
1399
1400 if (Target.getTriple().isPPC64()) {
1401#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
1402 InitBuiltinType(Id##Ty, BuiltinType::Id);
1403#include "clang/Basic/PPCTypes.def"
1404#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
1405 InitBuiltinType(Id##Ty, BuiltinType::Id);
1406#include "clang/Basic/PPCTypes.def"
1407 }
1408
1409 if (Target.hasRISCVVTypes()) {
1410#define RVV_TYPE(Name, Id, SingletonId) \
1411 InitBuiltinType(SingletonId, BuiltinType::Id);
1412#include "clang/Basic/RISCVVTypes.def"
1413 }
1414
1415 if (Target.getTriple().isWasm() && Target.hasFeature("reference-types")) {
1416#define WASM_TYPE(Name, Id, SingletonId) \
1417 InitBuiltinType(SingletonId, BuiltinType::Id);
1418#include "clang/Basic/WebAssemblyReferenceTypes.def"
1419 }
1420
1421 if (Target.getTriple().isAMDGPU() ||
1422 (AuxTarget && AuxTarget->getTriple().isAMDGPU())) {
1423#define AMDGPU_TYPE(Name, Id, SingletonId) \
1424 InitBuiltinType(SingletonId, BuiltinType::Id);
1425#include "clang/Basic/AMDGPUTypes.def"
1426 }
1427
1428 // Builtin type for __objc_yes and __objc_no
1429 ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
1431
1432 ObjCConstantStringType = QualType();
1433
1434 ObjCSuperType = QualType();
1435
1436 // void * type
1437 if (LangOpts.OpenCLGenericAddressSpace) {
1438 auto Q = VoidTy.getQualifiers();
1442 } else {
1444 }
1445
1446 // nullptr type (C++0x 2.14.7)
1447 InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
1448
1449 // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
1450 InitBuiltinType(HalfTy, BuiltinType::Half);
1451
1452 InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);
1453
1454 // Builtin type used to help define __builtin_va_list.
1455 VaListTagDecl = nullptr;
1456
1457 // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
1458 if (LangOpts.MicrosoftExt || LangOpts.Borland) {
1461 }
1462}
1463
1465 return SourceMgr.getDiagnostics();
1466}
1467
1469 AttrVec *&Result = DeclAttrs[D];
1470 if (!Result) {
1471 void *Mem = Allocate(sizeof(AttrVec));
1472 Result = new (Mem) AttrVec;
1473 }
1474
1475 return *Result;
1476}
1477
1478/// Erase the attributes corresponding to the given declaration.
1480 llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
1481 if (Pos != DeclAttrs.end()) {
1482 Pos->second->~AttrVec();
1483 DeclAttrs.erase(Pos);
1484 }
1485}
1486
1487// FIXME: Remove ?
1490 assert(Var->isStaticDataMember() && "Not a static data member");
1492 .dyn_cast<MemberSpecializationInfo *>();
1493}
1494
1497 llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
1498 TemplateOrInstantiation.find(Var);
1499 if (Pos == TemplateOrInstantiation.end())
1500 return {};
1501
1502 return Pos->second;
1503}
1504
1505void
1508 SourceLocation PointOfInstantiation) {
1509 assert(Inst->isStaticDataMember() && "Not a static data member");
1510 assert(Tmpl->isStaticDataMember() && "Not a static data member");
1512 Tmpl, TSK, PointOfInstantiation));
1513}
1514
1515void
1518 assert(!TemplateOrInstantiation[Inst] &&
1519 "Already noted what the variable was instantiated from");
1520 TemplateOrInstantiation[Inst] = TSI;
1521}
1522
1523NamedDecl *
1525 return InstantiatedFromUsingDecl.lookup(UUD);
1526}
1527
1528void
1530 assert((isa<UsingDecl>(Pattern) ||
1531 isa<UnresolvedUsingValueDecl>(Pattern) ||
1532 isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
1533 "pattern decl is not a using decl");
1534 assert((isa<UsingDecl>(Inst) ||
1535 isa<UnresolvedUsingValueDecl>(Inst) ||
1536 isa<UnresolvedUsingTypenameDecl>(Inst)) &&
1537 "instantiation did not produce a using decl");
1538 assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
1539 InstantiatedFromUsingDecl[Inst] = Pattern;
1540}
1541
1544 return InstantiatedFromUsingEnumDecl.lookup(UUD);
1545}
1546
1548 UsingEnumDecl *Pattern) {
1549 assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
1550 InstantiatedFromUsingEnumDecl[Inst] = Pattern;
1551}
1552
1555 return InstantiatedFromUsingShadowDecl.lookup(Inst);
1556}
1557
1558void
1560 UsingShadowDecl *Pattern) {
1561 assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
1562 InstantiatedFromUsingShadowDecl[Inst] = Pattern;
1563}
1564
1566 return InstantiatedFromUnnamedFieldDecl.lookup(Field);
1567}
1568
1570 FieldDecl *Tmpl) {
1571 assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
1572 assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
1573 assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1574 "Already noted what unnamed field was instantiated from");
1575
1576 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1577}
1578
1581 return overridden_methods(Method).begin();
1582}
1583
1586 return overridden_methods(Method).end();
1587}
1588
1589unsigned
1591 auto Range = overridden_methods(Method);
1592 return Range.end() - Range.begin();
1593}
1594
1597 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
1598 OverriddenMethods.find(Method->getCanonicalDecl());
1599 if (Pos == OverriddenMethods.end())
1600 return overridden_method_range(nullptr, nullptr);
1601 return overridden_method_range(Pos->second.begin(), Pos->second.end());
1602}
1603
1605 const CXXMethodDecl *Overridden) {
1606 assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
1607 OverriddenMethods[Method].push_back(Overridden);
1608}
1609
1611 const NamedDecl *D,
1612 SmallVectorImpl<const NamedDecl *> &Overridden) const {
1613 assert(D);
1614
1615 if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
1616 Overridden.append(overridden_methods_begin(CXXMethod),
1617 overridden_methods_end(CXXMethod));
1618 return;
1619 }
1620
1621 const auto *Method = dyn_cast<ObjCMethodDecl>(D);
1622 if (!Method)
1623 return;
1624
1626 Method->getOverriddenMethods(OverDecls);
1627 Overridden.append(OverDecls.begin(), OverDecls.end());
1628}
1629
1631 assert(!Import->getNextLocalImport() &&
1632 "Import declaration already in the chain");
1633 assert(!Import->isFromASTFile() && "Non-local import declaration");
1634 if (!FirstLocalImport) {
1635 FirstLocalImport = Import;
1636 LastLocalImport = Import;
1637 return;
1638 }
1639
1640 LastLocalImport->setNextLocalImport(Import);
1641 LastLocalImport = Import;
1642}
1643
1644//===----------------------------------------------------------------------===//
1645// Type Sizing and Analysis
1646//===----------------------------------------------------------------------===//
1647
1648/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1649/// scalar floating point type.
1650const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1651 switch (T->castAs<BuiltinType>()->getKind()) {
1652 default:
1653 llvm_unreachable("Not a floating point type!");
1654 case BuiltinType::BFloat16:
1655 return Target->getBFloat16Format();
1656 case BuiltinType::Float16:
1657 return Target->getHalfFormat();
1658 case BuiltinType::Half:
1659 return Target->getHalfFormat();
1660 case BuiltinType::Float: return Target->getFloatFormat();
1661 case BuiltinType::Double: return Target->getDoubleFormat();
1662 case BuiltinType::Ibm128:
1663 return Target->getIbm128Format();
1664 case BuiltinType::LongDouble:
1665 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1666 return AuxTarget->getLongDoubleFormat();
1667 return Target->getLongDoubleFormat();
1668 case BuiltinType::Float128:
1669 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1670 return AuxTarget->getFloat128Format();
1671 return Target->getFloat128Format();
1672 }
1673}
1674
/// Return the alignment, in character units, to assume for declaration \p D.
/// When \p ForAlignof is true, the result answers an alignof-style query
/// (references report the referenced type's alignment, and large-array /
/// global-variable minimums are not applied); otherwise it is the alignment
/// used when actually laying out the declaration.
CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  // Start from the minimum possible alignment: one character.
  unsigned Align = Target->getCharWidth();

  const unsigned AlignFromAttr = D->getMaxAlignment();
  if (AlignFromAttr)
    Align = AlignFromAttr;

  // __attribute__((aligned)) can increase or decrease alignment
  // *except* on a struct or struct member, where it only increases
  // alignment unless 'packed' is also specified.
  //
  // It is an error for alignas to decrease alignment, so we can
  // ignore that possibility; Sema should diagnose it.
  bool UseAlignAttrOnly;
  if (const FieldDecl *FD = dyn_cast<FieldDecl>(D))
    UseAlignAttrOnly =
        FD->hasAttr<PackedAttr>() || FD->getParent()->hasAttr<PackedAttr>();
  else
    UseAlignAttrOnly = AlignFromAttr != 0;
  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
    QualType T = VD->getType();
    // For a reference, alignof queries answer for the referenced type; for
    // layout purposes, treat it as the corresponding pointer type.
    if (const auto *RT = T->getAs<ReferenceType>()) {
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          if (isa<VariableArrayType>(arrayType))
            Align = std::max(Align, Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(arrayType) &&
                   MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
            Align = std::max(Align, Target->getLargeArrayAlign());
        }
      }
      Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
      // The 'unaligned' qualifier overrides everything: drop back to char.
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
    }

    // Ensure minimum alignment for global variables.
    if (const auto *VD = dyn_cast<VarDecl>(D))
      if (VD->hasGlobalStorage() && !ForAlignof) {
        uint64_t TypeSize =
            !BaseT->isIncompleteType() ? getTypeSize(T.getTypePtr()) : 0;
        Align = std::max(Align, getMinGlobalAlignOfVar(TypeSize, VD));
      }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        // Constrain the type's alignment by the field's actual alignment.
        Align = std::min(Align, FieldAlign);
      }
    }
  }

  // Some targets have hard limitation on the maximum requestable alignment in
  // aligned attribute for static variables.
  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
  const auto *VD = dyn_cast<VarDecl>(D);
  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
    Align = std::min(Align, MaxAlignedAttr);

  return toCharUnitsFromBits(Align);
}
1773
1775 return toCharUnitsFromBits(Target->getExnObjectAlignment());
1776}
1777
1778// getTypeInfoDataSizeInChars - Return the size of a type, in
1779// chars. If the type is a record, its data size is returned. This is
1780// the size of the memcpy that's performed when assigning this type
1781// using a trivial copy/move assignment operator.
1784
1785 // In C++, objects can sometimes be allocated into the tail padding
1786 // of a base-class subobject. We decide whether that's possible
1787 // during class layout, so here we can just trust the layout results.
1788 if (getLangOpts().CPlusPlus) {
1789 if (const auto *RT = T->getAs<RecordType>();
1790 RT && !RT->getDecl()->isInvalidDecl()) {
1791 const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
1792 Info.Width = layout.getDataSize();
1793 }
1794 }
1795
1796 return Info;
1797}
1798
1799/// getConstantArrayInfoInChars - Performing the computation in CharUnits
1800/// instead of in bits prevents overflowing the uint64_t for some large arrays.
1803 const ConstantArrayType *CAT) {
1804 TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType());
1805 uint64_t Size = CAT->getZExtSize();
1806 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=
1807 (uint64_t)(-1)/Size) &&
1808 "Overflow in array type char size evaluation");
1809 uint64_t Width = EltInfo.Width.getQuantity() * Size;
1810 unsigned Align = EltInfo.Align.getQuantity();
1811 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
1813 Width = llvm::alignTo(Width, Align);
1816 EltInfo.AlignRequirement);
1817}
1818
1820 if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
1821 return getConstantArrayInfoInChars(*this, CAT);
1822 TypeInfo Info = getTypeInfo(T);
1825}
1826
1828 return getTypeInfoInChars(T.getTypePtr());
1829}
1830
1832 // HLSL doesn't promote all small integer types to int, it
1833 // just uses the rank-based promotion rules for all types.
1834 if (getLangOpts().HLSL)
1835 return false;
1836
1837 if (const auto *BT = T->getAs<BuiltinType>())
1838 switch (BT->getKind()) {
1839 case BuiltinType::Bool:
1840 case BuiltinType::Char_S:
1841 case BuiltinType::Char_U:
1842 case BuiltinType::SChar:
1843 case BuiltinType::UChar:
1844 case BuiltinType::Short:
1845 case BuiltinType::UShort:
1846 case BuiltinType::WChar_S:
1847 case BuiltinType::WChar_U:
1848 case BuiltinType::Char8:
1849 case BuiltinType::Char16:
1850 case BuiltinType::Char32:
1851 return true;
1852 default:
1853 return false;
1854 }
1855
1856 // Enumerated types are promotable to their compatible integer types
1857 // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
1858 if (const auto *ET = T->getAs<EnumType>()) {
1859 if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() ||
1860 ET->getDecl()->isScoped())
1861 return false;
1862
1863 return true;
1864 }
1865
1866 return false;
1867}
1868
1871}
1872
1874 return isAlignmentRequired(T.getTypePtr());
1875}
1876
1878 bool NeedsPreferredAlignment) const {
1879 // An alignment on a typedef overrides anything else.
1880 if (const auto *TT = T->getAs<TypedefType>())
1881 if (unsigned Align = TT->getDecl()->getMaxAlignment())
1882 return Align;
1883
1884 // If we have an (array of) complete type, we're done.
1886 if (!T->isIncompleteType())
1887 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T);
1888
1889 // If we had an array type, its element type might be a typedef
1890 // type with an alignment attribute.
1891 if (const auto *TT = T->getAs<TypedefType>())
1892 if (unsigned Align = TT->getDecl()->getMaxAlignment())
1893 return Align;
1894
1895 // Otherwise, see if the declaration of the type had an attribute.
1896 if (const auto *TT = T->getAs<TagType>())
1897 return TT->getDecl()->getMaxAlignment();
1898
1899 return 0;
1900}
1901
1903 TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
1904 if (I != MemoizedTypeInfo.end())
1905 return I->second;
1906
1907 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
1908 TypeInfo TI = getTypeInfoImpl(T);
1909 MemoizedTypeInfo[T] = TI;
1910 return TI;
1911}
1912
1913/// getTypeInfoImpl - Return the size of the specified type, in bits. This
1914/// method does not work on incomplete types.
1915///
1916/// FIXME: Pointers into different addr spaces could have different sizes and
1917/// alignment requirements: getPointerInfo should take an AddrSpace, this
1918/// should take a QualType, &c.
1919TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
1920 uint64_t Width = 0;
1921 unsigned Align = 8;
1924 switch (T->getTypeClass()) {
1925#define TYPE(Class, Base)
1926#define ABSTRACT_TYPE(Class, Base)
1927#define NON_CANONICAL_TYPE(Class, Base)
1928#define DEPENDENT_TYPE(Class, Base) case Type::Class:
1929#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
1930 case Type::Class: \
1931 assert(!T->isDependentType() && "should not see dependent types here"); \
1932 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
1933#include "clang/AST/TypeNodes.inc"
1934 llvm_unreachable("Should not see dependent types");
1935
1936 case Type::FunctionNoProto:
1937 case Type::FunctionProto:
1938 // GCC extension: alignof(function) = 32 bits
1939 Width = 0;
1940 Align = 32;
1941 break;
1942
1943 case Type::IncompleteArray:
1944 case Type::VariableArray:
1945 case Type::ConstantArray:
1946 case Type::ArrayParameter: {
1947 // Model non-constant sized arrays as size zero, but track the alignment.
1948 uint64_t Size = 0;
1949 if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
1950 Size = CAT->getZExtSize();
1951
1952 TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType());
1953 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
1954 "Overflow in array type bit size evaluation");
1955 Width = EltInfo.Width * Size;
1956 Align = EltInfo.Align;
1957 AlignRequirement = EltInfo.AlignRequirement;
1958 if (!getTargetInfo().getCXXABI().isMicrosoft() ||
1959 getTargetInfo().getPointerWidth(LangAS::Default) == 64)
1960 Width = llvm::alignTo(Width, Align);
1961 break;
1962 }
1963
1964 case Type::ExtVector:
1965 case Type::Vector: {
1966 const auto *VT = cast<VectorType>(T);
1967 TypeInfo EltInfo = getTypeInfo(VT->getElementType());
1968 Width = VT->isExtVectorBoolType() ? VT->getNumElements()
1969 : EltInfo.Width * VT->getNumElements();
1970 // Enforce at least byte size and alignment.
1971 Width = std::max<unsigned>(8, Width);
1972 Align = std::max<unsigned>(8, Width);
1973
1974 // If the alignment is not a power of 2, round up to the next power of 2.
1975 // This happens for non-power-of-2 length vectors.
1976 if (Align & (Align-1)) {
1977 Align = llvm::bit_ceil(Align);
1978 Width = llvm::alignTo(Width, Align);
1979 }
1980 // Adjust the alignment based on the target max.
1981 uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
1982 if (TargetVectorAlign && TargetVectorAlign < Align)
1983 Align = TargetVectorAlign;
1984 if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
1985 // Adjust the alignment for fixed-length SVE vectors. This is important
1986 // for non-power-of-2 vector lengths.
1987 Align = 128;
1988 else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
1989 // Adjust the alignment for fixed-length SVE predicates.
1990 Align = 16;
1991 else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
1992 VT->getVectorKind() == VectorKind::RVVFixedLengthMask ||
1993 VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1 ||
1994 VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2 ||
1995 VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4)
1996 // Adjust the alignment for fixed-length RVV vectors.
1997 Align = std::min<unsigned>(64, Width);
1998 break;
1999 }
2000
2001 case Type::ConstantMatrix: {
2002 const auto *MT = cast<ConstantMatrixType>(T);
2003 TypeInfo ElementInfo = getTypeInfo(MT->getElementType());
2004 // The internal layout of a matrix value is implementation defined.
2005 // Initially be ABI compatible with arrays with respect to alignment and
2006 // size.
2007 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
2008 Align = ElementInfo.Align;
2009 break;
2010 }
2011
2012 case Type::Builtin:
2013 switch (cast<BuiltinType>(T)->getKind()) {
2014 default: llvm_unreachable("Unknown builtin type!");
2015 case BuiltinType::Void:
2016 // GCC extension: alignof(void) = 8 bits.
2017 Width = 0;
2018 Align = 8;
2019 break;
2020 case BuiltinType::Bool:
2021 Width = Target->getBoolWidth();
2022 Align = Target->getBoolAlign();
2023 break;
2024 case BuiltinType::Char_S:
2025 case BuiltinType::Char_U:
2026 case BuiltinType::UChar:
2027 case BuiltinType::SChar:
2028 case BuiltinType::Char8:
2029 Width = Target->getCharWidth();
2030 Align = Target->getCharAlign();
2031 break;
2032 case BuiltinType::WChar_S:
2033 case BuiltinType::WChar_U:
2034 Width = Target->getWCharWidth();
2035 Align = Target->getWCharAlign();
2036 break;
2037 case BuiltinType::Char16:
2038 Width = Target->getChar16Width();
2039 Align = Target->getChar16Align();
2040 break;
2041 case BuiltinType::Char32:
2042 Width = Target->getChar32Width();
2043 Align = Target->getChar32Align();
2044 break;
2045 case BuiltinType::UShort:
2046 case BuiltinType::Short:
2047 Width = Target->getShortWidth();
2048 Align = Target->getShortAlign();
2049 break;
2050 case BuiltinType::UInt:
2051 case BuiltinType::Int:
2052 Width = Target->getIntWidth();
2053 Align = Target->getIntAlign();
2054 break;
2055 case BuiltinType::ULong:
2056 case BuiltinType::Long:
2057 Width = Target->getLongWidth();
2058 Align = Target->getLongAlign();
2059 break;
2060 case BuiltinType::ULongLong:
2061 case BuiltinType::LongLong:
2062 Width = Target->getLongLongWidth();
2063 Align = Target->getLongLongAlign();
2064 break;
2065 case BuiltinType::Int128:
2066 case BuiltinType::UInt128:
2067 Width = 128;
2068 Align = Target->getInt128Align();
2069 break;
2070 case BuiltinType::ShortAccum:
2071 case BuiltinType::UShortAccum:
2072 case BuiltinType::SatShortAccum:
2073 case BuiltinType::SatUShortAccum:
2074 Width = Target->getShortAccumWidth();
2075 Align = Target->getShortAccumAlign();
2076 break;
2077 case BuiltinType::Accum:
2078 case BuiltinType::UAccum:
2079 case BuiltinType::SatAccum:
2080 case BuiltinType::SatUAccum:
2081 Width = Target->getAccumWidth();
2082 Align = Target->getAccumAlign();
2083 break;
2084 case BuiltinType::LongAccum:
2085 case BuiltinType::ULongAccum:
2086 case BuiltinType::SatLongAccum:
2087 case BuiltinType::SatULongAccum:
2088 Width = Target->getLongAccumWidth();
2089 Align = Target->getLongAccumAlign();
2090 break;
2091 case BuiltinType::ShortFract:
2092 case BuiltinType::UShortFract:
2093 case BuiltinType::SatShortFract:
2094 case BuiltinType::SatUShortFract:
2095 Width = Target->getShortFractWidth();
2096 Align = Target->getShortFractAlign();
2097 break;
2098 case BuiltinType::Fract:
2099 case BuiltinType::UFract:
2100 case BuiltinType::SatFract:
2101 case BuiltinType::SatUFract:
2102 Width = Target->getFractWidth();
2103 Align = Target->getFractAlign();
2104 break;
2105 case BuiltinType::LongFract:
2106 case BuiltinType::ULongFract:
2107 case BuiltinType::SatLongFract:
2108 case BuiltinType::SatULongFract:
2109 Width = Target->getLongFractWidth();
2110 Align = Target->getLongFractAlign();
2111 break;
2112 case BuiltinType::BFloat16:
2113 if (Target->hasBFloat16Type()) {
2114 Width = Target->getBFloat16Width();
2115 Align = Target->getBFloat16Align();
2116 } else if ((getLangOpts().SYCLIsDevice ||
2117 (getLangOpts().OpenMP &&
2118 getLangOpts().OpenMPIsTargetDevice)) &&
2119 AuxTarget->hasBFloat16Type()) {
2120 Width = AuxTarget->getBFloat16Width();
2121 Align = AuxTarget->getBFloat16Align();
2122 }
2123 break;
2124 case BuiltinType::Float16:
2125 case BuiltinType::Half:
2126 if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
2127 !getLangOpts().OpenMPIsTargetDevice) {
2128 Width = Target->getHalfWidth();
2129 Align = Target->getHalfAlign();
2130 } else {
2131 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2132 "Expected OpenMP device compilation.");
2133 Width = AuxTarget->getHalfWidth();
2134 Align = AuxTarget->getHalfAlign();
2135 }
2136 break;
2137 case BuiltinType::Float:
2138 Width = Target->getFloatWidth();
2139 Align = Target->getFloatAlign();
2140 break;
2141 case BuiltinType::Double:
2142 Width = Target->getDoubleWidth();
2143 Align = Target->getDoubleAlign();
2144 break;
2145 case BuiltinType::Ibm128:
2146 Width = Target->getIbm128Width();
2147 Align = Target->getIbm128Align();
2148 break;
2149 case BuiltinType::LongDouble:
2150 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2151 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
2152 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
2153 Width = AuxTarget->getLongDoubleWidth();
2154 Align = AuxTarget->getLongDoubleAlign();
2155 } else {
2156 Width = Target->getLongDoubleWidth();
2157 Align = Target->getLongDoubleAlign();
2158 }
2159 break;
2160 case BuiltinType::Float128:
2161 if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
2162 !getLangOpts().OpenMPIsTargetDevice) {
2163 Width = Target->getFloat128Width();
2164 Align = Target->getFloat128Align();
2165 } else {
2166 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2167 "Expected OpenMP device compilation.");
2168 Width = AuxTarget->getFloat128Width();
2169 Align = AuxTarget->getFloat128Align();
2170 }
2171 break;
2172 case BuiltinType::NullPtr:
2173 // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*)
2174 Width = Target->getPointerWidth(LangAS::Default);
2175 Align = Target->getPointerAlign(LangAS::Default);
2176 break;
2177 case BuiltinType::ObjCId:
2178 case BuiltinType::ObjCClass:
2179 case BuiltinType::ObjCSel:
2180 Width = Target->getPointerWidth(LangAS::Default);
2181 Align = Target->getPointerAlign(LangAS::Default);
2182 break;
2183 case BuiltinType::OCLSampler:
2184 case BuiltinType::OCLEvent:
2185 case BuiltinType::OCLClkEvent:
2186 case BuiltinType::OCLQueue:
2187 case BuiltinType::OCLReserveID:
2188#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2189 case BuiltinType::Id:
2190#include "clang/Basic/OpenCLImageTypes.def"
2191#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
2192 case BuiltinType::Id:
2193#include "clang/Basic/OpenCLExtensionTypes.def"
2194 AS = Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T));
2195 Width = Target->getPointerWidth(AS);
2196 Align = Target->getPointerAlign(AS);
2197 break;
2198 // The SVE types are effectively target-specific. The length of an
2199 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple
2200 // of 128 bits. There is one predicate bit for each vector byte, so the
2201 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits.
2202 //
2203 // Because the length is only known at runtime, we use a dummy value
2204 // of 0 for the static length. The alignment values are those defined
2205 // by the Procedure Call Standard for the Arm Architecture.
2206#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
2207 IsSigned, IsFP, IsBF) \
2208 case BuiltinType::Id: \
2209 Width = 0; \
2210 Align = 128; \
2211 break;
2212#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
2213 case BuiltinType::Id: \
2214 Width = 0; \
2215 Align = 16; \
2216 break;
2217#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \
2218 case BuiltinType::Id: \
2219 Width = 0; \
2220 Align = 16; \
2221 break;
2222#include "clang/Basic/AArch64SVEACLETypes.def"
2223#define PPC_VECTOR_TYPE(Name, Id, Size) \
2224 case BuiltinType::Id: \
2225 Width = Size; \
2226 Align = Size; \
2227 break;
2228#include "clang/Basic/PPCTypes.def"
2229#define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \
2230 IsFP, IsBF) \
2231 case BuiltinType::Id: \
2232 Width = 0; \
2233 Align = ElBits; \
2234 break;
2235#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \
2236 case BuiltinType::Id: \
2237 Width = 0; \
2238 Align = 8; \
2239 break;
2240#include "clang/Basic/RISCVVTypes.def"
2241#define WASM_TYPE(Name, Id, SingletonId) \
2242 case BuiltinType::Id: \
2243 Width = 0; \
2244 Align = 8; \
2245 break;
2246#include "clang/Basic/WebAssemblyReferenceTypes.def"
2247#define AMDGPU_OPAQUE_PTR_TYPE(NAME, MANGLEDNAME, AS, WIDTH, ALIGN, ID, \
2248 SINGLETONID) \
2249 case BuiltinType::ID: \
2250 Width = WIDTH; \
2251 Align = ALIGN; \
2252 break;
2253#include "clang/Basic/AMDGPUTypes.def"
2254#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
2255#include "clang/Basic/HLSLIntangibleTypes.def"
2256 Width = 0;
2257 Align = 8;
2258 break;
2259 }
2260 break;
2261 case Type::ObjCObjectPointer:
2262 Width = Target->getPointerWidth(LangAS::Default);
2263 Align = Target->getPointerAlign(LangAS::Default);
2264 break;
2265 case Type::BlockPointer:
2266 AS = cast<BlockPointerType>(T)->getPointeeType().getAddressSpace();
2267 Width = Target->getPointerWidth(AS);
2268 Align = Target->getPointerAlign(AS);
2269 break;
2270 case Type::LValueReference:
2271 case Type::RValueReference:
2272 // alignof and sizeof should never enter this code path here, so we go
2273 // the pointer route.
2274 AS = cast<ReferenceType>(T)->getPointeeType().getAddressSpace();
2275 Width = Target->getPointerWidth(AS);
2276 Align = Target->getPointerAlign(AS);
2277 break;
2278 case Type::Pointer:
2279 AS = cast<PointerType>(T)->getPointeeType().getAddressSpace();
2280 Width = Target->getPointerWidth(AS);
2281 Align = Target->getPointerAlign(AS);
2282 break;
2283 case Type::MemberPointer: {
2284 const auto *MPT = cast<MemberPointerType>(T);
2285 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
2286 Width = MPI.Width;
2287 Align = MPI.Align;
2288 break;
2289 }
2290 case Type::Complex: {
2291 // Complex types have the same alignment as their elements, but twice the
2292 // size.
2293 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
2294 Width = EltInfo.Width * 2;
2295 Align = EltInfo.Align;
2296 break;
2297 }
2298 case Type::ObjCObject:
2299 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
2300 case Type::Adjusted:
2301 case Type::Decayed:
2302 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
2303 case Type::ObjCInterface: {
2304 const auto *ObjCI = cast<ObjCInterfaceType>(T);
2305 if (ObjCI->getDecl()->isInvalidDecl()) {
2306 Width = 8;
2307 Align = 8;
2308 break;
2309 }
2310 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2311 Width = toBits(Layout.getSize());
2312 Align = toBits(Layout.getAlignment());
2313 break;
2314 }
2315 case Type::BitInt: {
2316 const auto *EIT = cast<BitIntType>(T);
2317 Align = Target->getBitIntAlign(EIT->getNumBits());
2318 Width = Target->getBitIntWidth(EIT->getNumBits());
2319 break;
2320 }
2321 case Type::Record:
2322 case Type::Enum: {
2323 const auto *TT = cast<TagType>(T);
2324
2325 if (TT->getDecl()->isInvalidDecl()) {
2326 Width = 8;
2327 Align = 8;
2328 break;
2329 }
2330
2331 if (const auto *ET = dyn_cast<EnumType>(TT)) {
2332 const EnumDecl *ED = ET->getDecl();
2333 TypeInfo Info =
2335 if (unsigned AttrAlign = ED->getMaxAlignment()) {
2336 Info.Align = AttrAlign;
2338 }
2339 return Info;
2340 }
2341
2342 const auto *RT = cast<RecordType>(TT);
2343 const RecordDecl *RD = RT->getDecl();
2344 const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2345 Width = toBits(Layout.getSize());
2346 Align = toBits(Layout.getAlignment());
2347 AlignRequirement = RD->hasAttr<AlignedAttr>()
2350 break;
2351 }
2352
2353 case Type::SubstTemplateTypeParm:
2354 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
2355 getReplacementType().getTypePtr());
2356
2357 case Type::Auto:
2358 case Type::DeducedTemplateSpecialization: {
2359 const auto *A = cast<DeducedType>(T);
2360 assert(!A->getDeducedType().isNull() &&
2361 "cannot request the size of an undeduced or dependent auto type");
2362 return getTypeInfo(A->getDeducedType().getTypePtr());
2363 }
2364
2365 case Type::Paren:
2366 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
2367
2368 case Type::MacroQualified:
2369 return getTypeInfo(
2370 cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr());
2371
2372 case Type::ObjCTypeParam:
2373 return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());
2374
2375 case Type::Using:
2376 return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr());
2377
2378 case Type::Typedef: {
2379 const auto *TT = cast<TypedefType>(T);
2380 TypeInfo Info = getTypeInfo(TT->desugar().getTypePtr());
2381 // If the typedef has an aligned attribute on it, it overrides any computed
2382 // alignment we have. This violates the GCC documentation (which says that
2383 // attribute(aligned) can only round up) but matches its implementation.
2384 if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
2385 Align = AttrAlign;
2386 AlignRequirement = AlignRequirementKind::RequiredByTypedef;
2387 } else {
2388 Align = Info.Align;
2389 AlignRequirement = Info.AlignRequirement;
2390 }
2391 Width = Info.Width;
2392 break;
2393 }
2394
2395 case Type::Elaborated:
2396 return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
2397
2398 case Type::Attributed:
2399 return getTypeInfo(
2400 cast<AttributedType>(T)->getEquivalentType().getTypePtr());
2401
2402 case Type::CountAttributed:
2403 return getTypeInfo(cast<CountAttributedType>(T)->desugar().getTypePtr());
2404
2405 case Type::BTFTagAttributed:
2406 return getTypeInfo(
2407 cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());
2408
2409 case Type::Atomic: {
2410 // Start with the base type information.
2411 TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
2412 Width = Info.Width;
2413 Align = Info.Align;
2414
2415 if (!Width) {
2416 // An otherwise zero-sized type should still generate an
2417 // atomic operation.
2418 Width = Target->getCharWidth();
2419 assert(Align);
2420 } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2421 // If the size of the type doesn't exceed the platform's max
2422 // atomic promotion width, make the size and alignment more
2423 // favorable to atomic operations:
2424
2425 // Round the size up to a power of 2.
2426 Width = llvm::bit_ceil(Width);
2427
2428 // Set the alignment equal to the size.
2429 Align = static_cast<unsigned>(Width);
2430 }
2431 }
2432 break;
2433
2434 case Type::Pipe:
2435 Width = Target->getPointerWidth(LangAS::opencl_global);
2436 Align = Target->getPointerAlign(LangAS::opencl_global);
2437 break;
2438 }
2439
2440 assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2441 return TypeInfo(Width, Align, AlignRequirement);
2442}
2443
2445 UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
2446 if (I != MemoizedUnadjustedAlign.end())
2447 return I->second;
2448
2449 unsigned UnadjustedAlign;
2450 if (const auto *RT = T->getAs<RecordType>()) {
2451 const RecordDecl *RD = RT->getDecl();
2452 const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2453 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2454 } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
2455 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2456 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2457 } else {
2458 UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
2459 }
2460
2461 MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2462 return UnadjustedAlign;
2463}
2464
2466 unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
2467 getTargetInfo().getTriple(), Target->getTargetOpts().FeatureMap);
2468 return SimdAlign;
2469}
2470
2471/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2473 return CharUnits::fromQuantity(BitSize / getCharWidth());
2474}
2475
2476/// toBits - Convert a size in characters to a size in characters.
2477int64_t ASTContext::toBits(CharUnits CharSize) const {
2478 return CharSize.getQuantity() * getCharWidth();
2479}
2480
2481/// getTypeSizeInChars - Return the size of the specified type, in characters.
2482/// This method does not work on incomplete types.
2484 return getTypeInfoInChars(T).Width;
2485}
2487 return getTypeInfoInChars(T).Width;
2488}
2489
2490/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2491/// characters. This method does not work on incomplete types.
2494}
2497}
2498
2499/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
2500/// type, in characters, before alignment adjustments. This method does
2501/// not work on incomplete types.
2504}
2507}
2508
2509/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
2510/// type for the current target in bits. This can be different than the ABI
2511/// alignment in cases where it is beneficial for performance or backwards
2512/// compatibility preserving to overalign a data type. (Note: despite the name,
2513/// the preferred alignment is ABI-impacting, and not an optimization.)
2515 TypeInfo TI = getTypeInfo(T);
2516 unsigned ABIAlign = TI.Align;
2517
2519
2520 // The preferred alignment of member pointers is that of a pointer.
2521 if (T->isMemberPointerType())
2522 return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
2523
2524 if (!Target->allowsLargerPreferedTypeAlignment())
2525 return ABIAlign;
2526
2527 if (const auto *RT = T->getAs<RecordType>()) {
2528 const RecordDecl *RD = RT->getDecl();
2529
2530 // When used as part of a typedef, or together with a 'packed' attribute,
2531 // the 'aligned' attribute can be used to decrease alignment. Note that the
2532 // 'packed' case is already taken into consideration when computing the
2533 // alignment, we only need to handle the typedef case here.
2535 RD->isInvalidDecl())
2536 return ABIAlign;
2537
2538 unsigned PreferredAlign = static_cast<unsigned>(
2539 toBits(getASTRecordLayout(RD).PreferredAlignment));
2540 assert(PreferredAlign >= ABIAlign &&
2541 "PreferredAlign should be at least as large as ABIAlign.");
2542 return PreferredAlign;
2543 }
2544
2545 // Double (and, for targets supporting AIX `power` alignment, long double) and
2546 // long long should be naturally aligned (despite requiring less alignment) if
2547 // possible.
2548 if (const auto *CT = T->getAs<ComplexType>())
2549 T = CT->getElementType().getTypePtr();
2550 if (const auto *ET = T->getAs<EnumType>())
2551 T = ET->getDecl()->getIntegerType().getTypePtr();
2552 if (T->isSpecificBuiltinType(BuiltinType::Double) ||
2553 T->isSpecificBuiltinType(BuiltinType::LongLong) ||
2554 T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2555 (T->isSpecificBuiltinType(BuiltinType::LongDouble) &&
2556 Target->defaultsToAIXPowerAlignment()))
2557 // Don't increase the alignment if an alignment attribute was specified on a
2558 // typedef declaration.
2559 if (!TI.isAlignRequired())
2560 return std::max(ABIAlign, (unsigned)getTypeSize(T));
2561
2562 return ABIAlign;
2563}
2564
2565/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2566/// for __attribute__((aligned)) on this target, to be used if no alignment
2567/// value is specified.
2570}
2571
2572/// getAlignOfGlobalVar - Return the alignment in bits that should be given
2573/// to a global variable of the specified type.
2575 uint64_t TypeSize = getTypeSize(T.getTypePtr());
2576 return std::max(getPreferredTypeAlign(T),
2577 getMinGlobalAlignOfVar(TypeSize, VD));
2578}
2579
2580/// getAlignOfGlobalVarInChars - Return the alignment in characters that
2581/// should be given to a global variable of the specified type.
2583 const VarDecl *VD) const {
2585}
2586
2588 const VarDecl *VD) const {
2589 // Make the default handling as that of a non-weak definition in the
2590 // current translation unit.
2591 bool HasNonWeakDef = !VD || (VD->hasDefinition() && !VD->isWeak());
2592 return getTargetInfo().getMinGlobalAlign(Size, HasNonWeakDef);
2593}
2594
2596 CharUnits Offset = CharUnits::Zero();
2597 const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
2598 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2599 Offset += Layout->getBaseClassOffset(Base);
2600 Layout = &getASTRecordLayout(Base);
2601 }
2602 return Offset;
2603}
2604
2606 const ValueDecl *MPD = MP.getMemberPointerDecl();
2609 bool DerivedMember = MP.isMemberPointerToDerivedMember();
2610 const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
2611 for (unsigned I = 0, N = Path.size(); I != N; ++I) {
2612 const CXXRecordDecl *Base = RD;
2613 const CXXRecordDecl *Derived = Path[I];
2614 if (DerivedMember)
2615 std::swap(Base, Derived);
2617 RD = Path[I];
2618 }
2619 if (DerivedMember)
2621 return ThisAdjustment;
2622}
2623
2624/// DeepCollectObjCIvars -
2625/// This routine first collects all declared, but not synthesized, ivars in
2626/// super class and then collects all ivars, including those synthesized for
2627/// current class. This routine is used for implementation of current class
2628/// when all ivars, declared and synthesized are known.
2630 bool leafClass,
2632 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2633 DeepCollectObjCIvars(SuperClass, false, Ivars);
2634 if (!leafClass) {
2635 llvm::append_range(Ivars, OI->ivars());
2636 } else {
2637 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2638 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2639 Iv= Iv->getNextIvar())
2640 Ivars.push_back(Iv);
2641 }
2642}
2643
2644/// CollectInheritedProtocols - Collect all protocols in current class and
2645/// those inherited by it.
2648 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
2649 // We can use protocol_iterator here instead of
2650 // all_referenced_protocol_iterator since we are walking all categories.
2651 for (auto *Proto : OI->all_referenced_protocols()) {
2652 CollectInheritedProtocols(Proto, Protocols);
2653 }
2654
2655 // Categories of this Interface.
2656 for (const auto *Cat : OI->visible_categories())
2657 CollectInheritedProtocols(Cat, Protocols);
2658
2659 if (ObjCInterfaceDecl *SD = OI->getSuperClass())
2660 while (SD) {
2661 CollectInheritedProtocols(SD, Protocols);
2662 SD = SD->getSuperClass();
2663 }
2664 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
2665 for (auto *Proto : OC->protocols()) {
2666 CollectInheritedProtocols(Proto, Protocols);
2667 }
2668 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
2669 // Insert the protocol.
2670 if (!Protocols.insert(
2671 const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
2672 return;
2673
2674 for (auto *Proto : OP->protocols())
2675 CollectInheritedProtocols(Proto, Protocols);
2676 }
2677}
2678
2680 const RecordDecl *RD,
2681 bool CheckIfTriviallyCopyable) {
2682 assert(RD->isUnion() && "Must be union type");
2683 CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());
2684
2685 for (const auto *Field : RD->fields()) {
2686 if (!Context.hasUniqueObjectRepresentations(Field->getType(),
2687 CheckIfTriviallyCopyable))
2688 return false;
2689 CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType());
2690 if (FieldSize != UnionSize)
2691 return false;
2692 }
2693 return !RD->field_empty();
2694}
2695
2696static int64_t getSubobjectOffset(const FieldDecl *Field,
2697 const ASTContext &Context,
2698 const clang::ASTRecordLayout & /*Layout*/) {
2699 return Context.getFieldOffset(Field);
2700}
2701
2702static int64_t getSubobjectOffset(const CXXRecordDecl *RD,
2703 const ASTContext &Context,
2704 const clang::ASTRecordLayout &Layout) {
2705 return Context.toBits(Layout.getBaseClassOffset(RD));
2706}
2707
2708static std::optional<int64_t>
2710 const RecordDecl *RD,
2711 bool CheckIfTriviallyCopyable);
2712
2713static std::optional<int64_t>
2714getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context,
2715 bool CheckIfTriviallyCopyable) {
2716 if (Field->getType()->isRecordType()) {
2717 const RecordDecl *RD = Field->getType()->getAsRecordDecl();
2718 if (!RD->isUnion())
2719 return structHasUniqueObjectRepresentations(Context, RD,
2720 CheckIfTriviallyCopyable);
2721 }
2722
2723 // A _BitInt type may not be unique if it has padding bits
2724 // but if it is a bitfield the padding bits are not used.
2725 bool IsBitIntType = Field->getType()->isBitIntType();
2726 if (!Field->getType()->isReferenceType() && !IsBitIntType &&
2727 !Context.hasUniqueObjectRepresentations(Field->getType(),
2728 CheckIfTriviallyCopyable))
2729 return std::nullopt;
2730
2731 int64_t FieldSizeInBits =
2732 Context.toBits(Context.getTypeSizeInChars(Field->getType()));
2733 if (Field->isBitField()) {
2734 // If we have explicit padding bits, they don't contribute bits
2735 // to the actual object representation, so return 0.
2736 if (Field->isUnnamedBitField())
2737 return 0;
2738
2739 int64_t BitfieldSize = Field->getBitWidthValue(Context);
2740 if (IsBitIntType) {
2741 if ((unsigned)BitfieldSize >
2742 cast<BitIntType>(Field->getType())->getNumBits())
2743 return std::nullopt;
2744 } else if (BitfieldSize > FieldSizeInBits) {
2745 return std::nullopt;
2746 }
2747 FieldSizeInBits = BitfieldSize;
2748 } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations(
2749 Field->getType(), CheckIfTriviallyCopyable)) {
2750 return std::nullopt;
2751 }
2752 return FieldSizeInBits;
2753}
2754
2755static std::optional<int64_t>
2757 bool CheckIfTriviallyCopyable) {
2758 return structHasUniqueObjectRepresentations(Context, RD,
2759 CheckIfTriviallyCopyable);
2760}
2761
2762template <typename RangeT>
2764 const RangeT &Subobjects, int64_t CurOffsetInBits,
2765 const ASTContext &Context, const clang::ASTRecordLayout &Layout,
2766 bool CheckIfTriviallyCopyable) {
2767 for (const auto *Subobject : Subobjects) {
2768 std::optional<int64_t> SizeInBits =
2769 getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable);
2770 if (!SizeInBits)
2771 return std::nullopt;
2772 if (*SizeInBits != 0) {
2773 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout);
2774 if (Offset != CurOffsetInBits)
2775 return std::nullopt;
2776 CurOffsetInBits += *SizeInBits;
2777 }
2778 }
2779 return CurOffsetInBits;
2780}
2781
2782static std::optional<int64_t>
2784 const RecordDecl *RD,
2785 bool CheckIfTriviallyCopyable) {
2786 assert(!RD->isUnion() && "Must be struct/class type");
2787 const auto &Layout = Context.getASTRecordLayout(RD);
2788
2789 int64_t CurOffsetInBits = 0;
2790 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) {
2791 if (ClassDecl->isDynamicClass())
2792 return std::nullopt;
2793
2795 for (const auto &Base : ClassDecl->bases()) {
2796 // Empty types can be inherited from, and non-empty types can potentially
2797 // have tail padding, so just make sure there isn't an error.
2798 Bases.emplace_back(Base.getType()->getAsCXXRecordDecl());
2799 }
2800
2801 llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
2802 return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R);
2803 });
2804
2805 std::optional<int64_t> OffsetAfterBases =
2807 Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable);
2808 if (!OffsetAfterBases)
2809 return std::nullopt;
2810 CurOffsetInBits = *OffsetAfterBases;
2811 }
2812
2813 std::optional<int64_t> OffsetAfterFields =
2815 RD->fields(), CurOffsetInBits, Context, Layout,
2816 CheckIfTriviallyCopyable);
2817 if (!OffsetAfterFields)
2818 return std::nullopt;
2819 CurOffsetInBits = *OffsetAfterFields;
2820
2821 return CurOffsetInBits;
2822}
2823
2825 QualType Ty, bool CheckIfTriviallyCopyable) const {
2826 // C++17 [meta.unary.prop]:
2827 // The predicate condition for a template specialization
2828 // has_unique_object_representations<T> shall be satisfied if and only if:
2829 // (9.1) - T is trivially copyable, and
2830 // (9.2) - any two objects of type T with the same value have the same
2831 // object representation, where:
2832 // - two objects of array or non-union class type are considered to have
2833 // the same value if their respective sequences of direct subobjects
2834 // have the same values, and
2835 // - two objects of union type are considered to have the same value if
2836 // they have the same active member and the corresponding members have
2837 // the same value.
2838 // The set of scalar types for which this condition holds is
2839 // implementation-defined. [ Note: If a type has padding bits, the condition
2840 // does not hold; otherwise, the condition holds true for unsigned integral
2841 // types. -- end note ]
2842 assert(!Ty.isNull() && "Null QualType sent to unique object rep check");
2843
2844 // Arrays are unique only if their element type is unique.
2845 if (Ty->isArrayType())
2847 CheckIfTriviallyCopyable);
2848
2849 assert((Ty->isVoidType() || !Ty->isIncompleteType()) &&
2850 "hasUniqueObjectRepresentations should not be called with an "
2851 "incomplete type");
2852
2853 // (9.1) - T is trivially copyable...
2854 if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(*this))
2855 return false;
2856
2857 // All integrals and enums are unique.
2858 if (Ty->isIntegralOrEnumerationType()) {
2859 // Except _BitInt types that have padding bits.
2860 if (const auto *BIT = Ty->getAs<BitIntType>())
2861 return getTypeSize(BIT) == BIT->getNumBits();
2862
2863 return true;
2864 }
2865
2866 // All other pointers are unique.
2867 if (Ty->isPointerType())
2868 return true;
2869
2870 if (const auto *MPT = Ty->getAs<MemberPointerType>())
2871 return !ABI->getMemberPointerInfo(MPT).HasPadding;
2872
2873 if (Ty->isRecordType()) {
2874 const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl();
2875
2876 if (Record->isInvalidDecl())
2877 return false;
2878
2879 if (Record->isUnion())
2881 CheckIfTriviallyCopyable);
2882
2883 std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations(
2884 *this, Record, CheckIfTriviallyCopyable);
2885
2886 return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty));
2887 }
2888
2889 // FIXME: More cases to handle here (list by rsmith):
2890 // vectors (careful about, eg, vector of 3 foo)
2891 // _Complex int and friends
2892 // _Atomic T
2893 // Obj-C block pointers
2894 // Obj-C object pointers
2895 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
2896 // clk_event_t, queue_t, reserve_id_t)
2897 // There're also Obj-C class types and the Obj-C selector type, but I think it
2898 // makes sense for those to return false here.
2899
2900 return false;
2901}
2902
2904 unsigned count = 0;
2905 // Count ivars declared in class extension.
2906 for (const auto *Ext : OI->known_extensions())
2907 count += Ext->ivar_size();
2908
2909 // Count ivar defined in this class's implementation. This
2910 // includes synthesized ivars.
2911 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
2912 count += ImplDecl->ivar_size();
2913
2914 return count;
2915}
2916
2918 if (!E)
2919 return false;
2920
2921 // nullptr_t is always treated as null.
2922 if (E->getType()->isNullPtrType()) return true;
2923
2924 if (E->getType()->isAnyPointerType() &&
2927 return true;
2928
2929 // Unfortunately, __null has type 'int'.
2930 if (isa<GNUNullExpr>(E)) return true;
2931
2932 return false;
2933}
2934
2935/// Get the implementation of ObjCInterfaceDecl, or nullptr if none
2936/// exists.
2938 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2939 I = ObjCImpls.find(D);
2940 if (I != ObjCImpls.end())
2941 return cast<ObjCImplementationDecl>(I->second);
2942 return nullptr;
2943}
2944
2945/// Get the implementation of ObjCCategoryDecl, or nullptr if none
2946/// exists.
2948 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2949 I = ObjCImpls.find(D);
2950 if (I != ObjCImpls.end())
2951 return cast<ObjCCategoryImplDecl>(I->second);
2952 return nullptr;
2953}
2954
2955/// Set the implementation of ObjCInterfaceDecl.
2957 ObjCImplementationDecl *ImplD) {
2958 assert(IFaceD && ImplD && "Passed null params");
2959 ObjCImpls[IFaceD] = ImplD;
2960}
2961
2962/// Set the implementation of ObjCCategoryDecl.
2964 ObjCCategoryImplDecl *ImplD) {
2965 assert(CatD && ImplD && "Passed null params");
2966 ObjCImpls[CatD] = ImplD;
2967}
2968
/// Return the redeclaration recorded for MD via setObjCMethodRedeclaration,
/// or null if none has been recorded (DenseMap::lookup default-constructs).
2969const ObjCMethodDecl *
2971 return ObjCMethodRedecls.lookup(MD);
2972}
2973
// Records Redecl as MD's redeclaration; the assert enforces that each method
// gets at most one recorded redeclaration.
2975 const ObjCMethodDecl *Redecl) {
2976 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
2977 ObjCMethodRedecls[MD] = Redecl;
2978}
2979
// Finds the Objective-C interface that "contains" ND by inspecting ND's
// immediate declaration context: the interface itself, the class interface of
// an enclosing category, or the class interface of an enclosing
// implementation. Returns null for any other context.
2981 const NamedDecl *ND) const {
2982 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
2983 return ID;
2984 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
2985 return CD->getClassInterface();
2986 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
2987 return IMD->getClassInterface();
2988
2989 return nullptr;
2990}
2991
2992/// Get the copy initialization expression of VarDecl, or nullptr if
2993/// none exists.
// The stored value pairs the copy expression with a "can throw" flag (set by
// setBlockVarCopyInit); a missing entry yields {nullptr, false}. Only valid
// for __block variables, as the asserts enforce.
2995 assert(VD && "Passed null params");
2996 assert(VD->hasAttr<BlocksAttr>() &&
2997 "getBlockVarCopyInits - not __block var");
2998 auto I = BlockVarCopyInits.find(VD);
2999 if (I != BlockVarCopyInits.end())
3000 return I->second;
3001 return {nullptr, false};
3002}
3003
3004/// Set the copy initialization expression of a block var decl.
// CanThrow records whether evaluating CopyExpr can throw; it is packed into
// the map entry alongside the expression.
3006 bool CanThrow) {
3007 assert(VD && CopyExpr && "Passed null params");
3008 assert(VD->hasAttr<BlocksAttr>() &&
3009 "setBlockVarCopyInits - not __block var");
3010 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
3011}
3012
// Allocates a TypeSourceInfo (header plus trailing TypeLoc data) from the
// context's bump allocator, 8-byte aligned. A DataSize of 0 means "compute
// the size from T" — NOTE(review): that computation sits on a line elided
// from this excerpt; presumably TypeLoc::getFullDataSizeForType(T), matching
// the assert on the non-zero path.
3014 unsigned DataSize) const {
3015 if (!DataSize)
3017 else
3018 assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
3019 "incorrect data size provided to CreateTypeSourceInfo!");
3020
3021 auto *TInfo =
3022 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
3023 new (TInfo) TypeSourceInfo(T, DataSize);
3024 return TInfo;
3025}
3026
// Builds a TypeSourceInfo whose entire TypeLoc is initialized to the single
// location L. The const_cast is needed because TypeLoc::initialize takes a
// mutable ASTContext while this method is const.
3028 SourceLocation L) const {
3030 DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
3031 return DI;
3032}
3033
/// Interface-only layout: delegates to getObjCLayout with no implementation.
3034const ASTRecordLayout &
3036 return getObjCLayout(D, nullptr);
3037}
3038
/// Layout including the @implementation: delegates to getObjCLayout with both
/// the class interface and the implementation.
3039const ASTRecordLayout &
3041 const ObjCImplementationDecl *D) const {
3042 return getObjCLayout(D->getClassInterface(), D);
3043}
3044
// Canonicalizes a copy of Args element by element. AnyNonCanonArgs is only
// ever OR-ed in (never cleared), so callers can accumulate the flag across
// multiple calls.
3047 bool &AnyNonCanonArgs) {
3048 SmallVector<TemplateArgument, 16> CanonArgs(Args);
3049 for (auto &Arg : CanonArgs) {
3050 TemplateArgument OrigArg = Arg;
3051 Arg = C.getCanonicalTemplateArgument(Arg);
3052 AnyNonCanonArgs |= !Arg.structurallyEquals(OrigArg);
3053 }
3054 return CanonArgs;
3055}
3056
3057//===----------------------------------------------------------------------===//
3058// Type creation/memoization methods
3059//===----------------------------------------------------------------------===//
3060
// Returns the uniqued ExtQuals-wrapped type for baseType + quals. The fast
// qualifiers (const/volatile/restrict) are carried in the QualType's pointer
// bits rather than in the ExtQuals node, so they are split off before the
// folding-set lookup and re-applied to the returned QualType.
3062ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
3063 unsigned fastQuals = quals.getFastQualifiers();
3064 quals.removeFastQualifiers();
3065
3066 // Check if we've already instantiated this type.
3067 llvm::FoldingSetNodeID ID;
3068 ExtQuals::Profile(ID, baseType, quals);
3069 void *insertPos = nullptr;
3070 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
3071 assert(eq->getQualifiers() == quals);
3072 return QualType(eq, fastQuals);
3073 }
3074
3075 // If the base type is not canonical, make the appropriate canonical type.
3076 QualType canon;
3077 if (!baseType->isCanonicalUnqualified()) {
3078 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
3079 canonSplit.Quals.addConsistentQualifiers(quals);
3080 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);
3081
3082 // Re-find the insert position: the recursive call above may have inserted
// nodes into the folding set, invalidating the previous insertPos.
3083 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
3084 }
3085
3086 auto *eq = new (*this, alignof(ExtQuals)) ExtQuals(baseType, canon, quals);
3087 ExtQualNodes.InsertNode(eq, insertPos);
3088 return QualType(eq, fastQuals);
3089}
3090
// Returns T qualified with AddressSpace. A no-op when T's canonical type is
// already in that address space; otherwise the qualifier is added on top of
// T's existing (non-address-space) qualifiers via an ExtQuals node.
3092 LangAS AddressSpace) const {
3093 QualType CanT = getCanonicalType(T);
3094 if (CanT.getAddressSpace() == AddressSpace)
3095 return T;
3096
3097 // If we are composing extended qualifiers together, merge together
3098 // into one ExtQuals node.
3099 QualifierCollector Quals;
3100 const Type *TypeNode = Quals.strip(T);
3101
3102 // If this type already has an address space specified, it cannot get
3103 // another one.
3104 assert(!Quals.hasAddressSpace() &&
3105 "Type cannot be in multiple addr spaces!");
3106 Quals.addAddressSpace(AddressSpace);
3107
3108 return getExtQualType(TypeNode, Quals);
3109}
3110
// Strips any address-space qualifier from T. Arrays delegate to
// getUnqualifiedArrayType so the qualifier is removed from the element type
// and the array rebuilt; other types repeatedly strip qualifiers and one
// layer of sugar until the address space is gone.
3112 // If the type is not qualified with an address space, just return it
3113 // immediately.
3114 if (!T.hasAddressSpace())
3115 return T;
3116
3117 QualifierCollector Quals;
3118 const Type *TypeNode;
3119 // For arrays, strip the qualifier off the element type, then reconstruct the
3120 // array type
3121 if (T.getTypePtr()->isArrayType()) {
3122 T = getUnqualifiedArrayType(T, Quals);
3123 TypeNode = T.getTypePtr();
3124 } else {
3125 // If we are composing extended qualifiers together, merge together
3126 // into one ExtQuals node.
3127 while (T.hasAddressSpace()) {
3128 TypeNode = Quals.strip(T);
3129
3130 // If the type no longer has an address space after stripping qualifiers,
3131 // jump out.
3132 if (!QualType(TypeNode, 0).hasAddressSpace())
3133 break;
3134
3135 // There might be sugar in the way. Strip it and try again.
3136 T = T.getSingleStepDesugaredType(*this);
3137 }
3138 }
3139
3140 Quals.removeAddressSpace();
3141
3142 // Removal of the address space can mean there are no longer any
3143 // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
3144 // or required.
3145 if (Quals.hasNonFastQualifiers())
3146 return getExtQualType(TypeNode, Quals);
3147 else
3148 return QualType(TypeNode, Quals.getFastQualifiers());
3149}
3150
// The discriminator is a stable SipHash of RD's mangled vtable symbol name,
// so it is identical across translation units and compiler invocations.
// Only meaningful for polymorphic classes (enforced by the assert).
3151uint16_t
3153 assert(RD->isPolymorphic() &&
3154 "Attempted to get vtable pointer discriminator on a monomorphic type");
3155 std::unique_ptr<MangleContext> MC(createMangleContext());
3156 SmallString<256> Str;
3157 llvm::raw_svector_ostream Out(Str);
3158 MC->mangleCXXVTable(RD, Out);
3159 return llvm::getPointerAuthStableSipHash(Str);
3160}
3161
3162/// Encode a function type for use in the discriminator of a function pointer
3163/// type. We can't use the itanium scheme for this since C has quite permissive
3164/// rules for type compatibility that we need to be compatible with.
3165///
3166/// Formally, this function associates every function pointer type T with an
3167/// encoded string E(T). Let the equivalence relation T1 ~ T2 be defined as
3168/// E(T1) == E(T2). E(T) is part of the ABI of values of type T. C type
3169/// compatibility requires equivalent treatment under the ABI, so
3170/// CCompatible(T1, T2) must imply E(T1) == E(T2), that is, CCompatible must be
3171/// a subset of ~. Crucially, however, it must be a proper subset because
3172/// CCompatible is not an equivalence relation: for example, int[] is compatible
3173/// with both int[1] and int[2], but the latter are not compatible with each
3174/// other. Therefore this encoding function must be careful to only distinguish
3175/// types if there is no third type with which they are both required to be
3176/// compatible.
3178 raw_ostream &OS, QualType QT) {
3179 // FIXME: Consider address space qualifiers.
3180 const Type *T = QT.getCanonicalType().getTypePtr();
3181
3182 // FIXME: Consider using the C++ type mangling when we encounter a construct
3183 // that is incompatible with C.
3184
// Dispatch on the canonical type class. Cases either emit a complete code
// and return, or emit a prefix and recurse into a component type (several of
// those recursive-call lines are elided from this excerpt).
3185 switch (T->getTypeClass()) {
3186 case Type::Atomic:
3188 Ctx, OS, cast<AtomicType>(T)->getValueType());
3189
3190 case Type::LValueReference:
3191 OS << "R";
3193 cast<ReferenceType>(T)->getPointeeType());
3194 return;
3195 case Type::RValueReference:
3196 OS << "O";
3198 cast<ReferenceType>(T)->getPointeeType());
3199 return;
3200
3201 case Type::Pointer:
3202 // C11 6.7.6.1p2:
3203 // For two pointer types to be compatible, both shall be identically
3204 // qualified and both shall be pointers to compatible types.
3205 // FIXME: we should also consider pointee types.
3206 OS << "P";
3207 return;
3208
3209 case Type::ObjCObjectPointer:
3210 case Type::BlockPointer:
3211 OS << "P";
3212 return;
3213
3214 case Type::Complex:
3215 OS << "C";
3217 Ctx, OS, cast<ComplexType>(T)->getElementType());
3218
3219 case Type::VariableArray:
3220 case Type::ConstantArray:
3221 case Type::IncompleteArray:
3222 case Type::ArrayParameter:
3223 // C11 6.7.6.2p6:
3224 // For two array types to be compatible, both shall have compatible
3225 // element types, and if both size specifiers are present, and are integer
3226 // constant expressions, then both size specifiers shall have the same
3227 // constant value [...]
3228 //
3229 // So since ElemType[N] has to be compatible ElemType[], we can't encode the
3230 // width of the array.
3231 OS << "A";
3233 Ctx, OS, cast<ArrayType>(T)->getElementType());
3234
3235 case Type::ObjCInterface:
3236 case Type::ObjCObject:
3237 OS << "<objc_object>";
3238 return;
3239
3240 case Type::Enum: {
3241 // C11 6.7.2.2p4:
3242 // Each enumerated type shall be compatible with char, a signed integer
3243 // type, or an unsigned integer type.
3244 //
3245 // So we have to treat enum types as integers.
3246 QualType UnderlyingType = cast<EnumType>(T)->getDecl()->getIntegerType();
3248 Ctx, OS, UnderlyingType.isNull() ? Ctx.IntTy : UnderlyingType);
3249 }
3250
3251 case Type::FunctionNoProto:
3252 case Type::FunctionProto: {
3253 // C11 6.7.6.3p15:
3254 // For two function types to be compatible, both shall specify compatible
3255 // return types. Moreover, the parameter type lists, if both are present,
3256 // shall agree in the number of parameters and in the use of the ellipsis
3257 // terminator; corresponding parameters shall have compatible types.
3258 //
3259 // That paragraph goes on to describe how unprototyped functions are to be
3260 // handled, which we ignore here. Unprototyped function pointers are hashed
3261 // as though they were prototyped nullary functions since that's probably
3262 // what the user meant. This behavior is non-conforming.
3263 // FIXME: If we add a "custom discriminator" function type attribute we
3264 // should encode functions as their discriminators.
3265 OS << "F";
3266 const auto *FuncType = cast<FunctionType>(T);
3267 encodeTypeForFunctionPointerAuth(Ctx, OS, FuncType->getReturnType());
3268 if (const auto *FPT = dyn_cast<FunctionProtoType>(FuncType)) {
3269 for (QualType Param : FPT->param_types()) {
3270 Param = Ctx.getSignatureParameterType(Param);
3271 encodeTypeForFunctionPointerAuth(Ctx, OS, Param);
3272 }
3273 if (FPT->isVariadic())
3274 OS << "z";
3275 }
3276 OS << "E";
3277 return;
3278 }
3279
3280 case Type::MemberPointer: {
3281 OS << "M";
3282 const auto *MPT = T->castAs<MemberPointerType>();
3283 encodeTypeForFunctionPointerAuth(Ctx, OS, QualType(MPT->getClass(), 0));
3284 encodeTypeForFunctionPointerAuth(Ctx, OS, MPT->getPointeeType());
3285 return;
3286 }
3287 case Type::ExtVector:
3288 case Type::Vector:
3289 OS << "Dv" << Ctx.getTypeSizeInChars(T).getQuantity();
3290 break;
3291
3292 // Don't bother discriminating based on these types.
3293 case Type::Pipe:
3294 case Type::BitInt:
3295 case Type::ConstantMatrix:
3296 OS << "?";
3297 return;
3298
3299 case Type::Builtin: {
3300 const auto *BTy = T->castAs<BuiltinType>();
3301 switch (BTy->getKind()) {
// Signed and unsigned integer builtins both encode as "i": signedness is
// not discriminated by this scheme.
3302#define SIGNED_TYPE(Id, SingletonId) \
3303 case BuiltinType::Id: \
3304 OS << "i"; \
3305 return;
3306#define UNSIGNED_TYPE(Id, SingletonId) \
3307 case BuiltinType::Id: \
3308 OS << "i"; \
3309 return;
3310#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
3311#define BUILTIN_TYPE(Id, SingletonId)
3312#include "clang/AST/BuiltinTypes.def"
3313 llvm_unreachable("placeholder types should not appear here.");
3314
3315 case BuiltinType::Half:
3316 OS << "Dh";
3317 return;
3318 case BuiltinType::Float:
3319 OS << "f";
3320 return;
3321 case BuiltinType::Double:
3322 OS << "d";
3323 return;
3324 case BuiltinType::LongDouble:
3325 OS << "e";
3326 return;
3327 case BuiltinType::Float16:
3328 OS << "DF16_";
3329 return;
3330 case BuiltinType::Float128:
3331 OS << "g";
3332 return;
3333
3334 case BuiltinType::Void:
3335 OS << "v";
3336 return;
3337
3338 case BuiltinType::ObjCId:
3339 case BuiltinType::ObjCClass:
3340 case BuiltinType::ObjCSel:
3341 case BuiltinType::NullPtr:
3342 OS << "P";
3343 return;
3344
3345 // Don't bother discriminating based on OpenCL types.
3346 case BuiltinType::OCLSampler:
3347 case BuiltinType::OCLEvent:
3348 case BuiltinType::OCLClkEvent:
3349 case BuiltinType::OCLQueue:
3350 case BuiltinType::OCLReserveID:
3351 case BuiltinType::BFloat16:
3352 case BuiltinType::VectorQuad:
3353 case BuiltinType::VectorPair:
3354 OS << "?";
3355 return;
3356
3357 // Don't bother discriminating based on these seldom-used types.
3358 case BuiltinType::Ibm128:
3359 return;
3360#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3361 case BuiltinType::Id: \
3362 return;
3363#include "clang/Basic/OpenCLImageTypes.def"
3364#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3365 case BuiltinType::Id: \
3366 return;
3367#include "clang/Basic/OpenCLExtensionTypes.def"
3368#define SVE_TYPE(Name, Id, SingletonId) \
3369 case BuiltinType::Id: \
3370 return;
3371#include "clang/Basic/AArch64SVEACLETypes.def"
3372#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) \
3373 case BuiltinType::Id: \
3374 return;
3375#include "clang/Basic/HLSLIntangibleTypes.def"
3376 case BuiltinType::Dependent:
3377 llvm_unreachable("should never get here");
3378 case BuiltinType::AMDGPUBufferRsrc:
3379 case BuiltinType::WasmExternRef:
3380#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3381#include "clang/Basic/RISCVVTypes.def"
3382 llvm_unreachable("not yet implemented");
3383 }
3384 llvm_unreachable("should never get here");
3385 }
3386 case Type::Record: {
3387 const RecordDecl *RD = T->castAs<RecordType>()->getDecl();
3388 const IdentifierInfo *II = RD->getIdentifier();
3389
3390 // In C++, an immediate typedef of an anonymous struct or union
3391 // is considered to name it for ODR purposes, but C's specification
3392 // of type compatibility does not have a similar rule. Using the typedef
3393 // name in function type discriminators anyway, as we do here,
3394 // therefore technically violates the C standard: two function pointer
3395 // types defined in terms of two typedef'd anonymous structs with
3396 // different names are formally still compatible, but we are assigning
3397 // them different discriminators and therefore incompatible ABIs.
3398 //
3399 // This is a relatively minor violation that significantly improves
3400 // discrimination in some cases and has not caused problems in
3401 // practice. Regardless, it is now part of the ABI in places where
3402 // function type discrimination is used, and it can no longer be
3403 // changed except on new platforms.
3404
3405 if (!II)
3406 if (const TypedefNameDecl *Typedef = RD->getTypedefNameForAnonDecl())
3407 II = Typedef->getDeclName().getAsIdentifierInfo();
3408
3409 if (!II) {
3410 OS << "<anonymous_record>";
3411 return;
3412 }
3413 OS << II->getLength() << II->getName();
3414 return;
3415 }
3416 case Type::DeducedTemplateSpecialization:
3417 case Type::Auto:
3418#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3419#define DEPENDENT_TYPE(Class, Base) case Type::Class:
3420#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3421#define ABSTRACT_TYPE(Class, Base)
3422#define TYPE(Class, Base)
3423#include "clang/AST/TypeNodes.inc"
3424 llvm_unreachable("unexpected non-canonical or dependent type!");
3425 return;
3426 }
3427}
3428
// Computes a stable pointer-auth discriminator for T by hashing a string
// encoding of it: function types use the C-compatible encoding, all other
// types the mangled canonical type name of the unqualified type.
// NOTE(review): the pointer-unwrapping condition and the function-type
// encoding call sit on lines elided from this excerpt — confirm against the
// full source.
3430 assert(!T->isDependentType() &&
3431 "cannot compute type discriminator of a dependent type");
3432
3433 SmallString<256> Str;
3434 llvm::raw_svector_ostream Out(Str);
3435
3437 T = T->getPointeeType();
3438
3439 if (T->isFunctionType()) {
3441 } else {
3442 T = T.getUnqualifiedType();
3443 std::unique_ptr<MangleContext> MC(createMangleContext());
3444 MC->mangleCanonicalTypeName(T, Out);
3445 }
3446
3447 return llvm::getPointerAuthStableSipHash(Str);
3448}
3449
// Returns T qualified with the given ObjC GC attribute. Pointers-to-pointers
// recurse so the attribute lands on the innermost pointer type; otherwise the
// attribute is added to T's qualifier set via an ExtQuals node.
3451 Qualifiers::GC GCAttr) const {
3452 QualType CanT = getCanonicalType(T);
3453 if (CanT.getObjCGCAttr() == GCAttr)
3454 return T;
3455
3456 if (const auto *ptr = T->getAs<PointerType>()) {
3457 QualType Pointee = ptr->getPointeeType();
3458 if (Pointee->isAnyPointerType()) {
3459 QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
3460 return getPointerType(ResultType);
3461 }
3462 }
3463
3464 // If we are composing extended qualifiers together, merge together
3465 // into one ExtQuals node.
3466 QualifierCollector Quals;
3467 const Type *TypeNode = Quals.strip(T);
3468
3469 // If this type already has an ObjCGC specified, it cannot get
3470 // another one.
3471 assert(!Quals.hasObjCGCAttr() &&
3472 "Type cannot have multiple ObjCGCs!");
3473 Quals.addObjCGCAttr(GCAttr);
3474
3475 return getExtQualType(TypeNode, Quals);
3476}
3477
// Strips a pointer-size address-space qualifier from the pointee of a pointer
// type, rebuilding the pointer. Non-pointer types, and pointees in other
// address spaces, pass through unchanged.
3479 if (const PointerType *Ptr = T->getAs<PointerType>()) {
3480 QualType Pointee = Ptr->getPointeeType();
3481 if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) {
3482 return getPointerType(removeAddrSpaceQualType(Pointee));
3483 }
3484 }
3485 return T;
3486}
3487
// Returns the uniqued CountAttributedType wrapping WrappedTy (a pointer or
// array type) with a __counted_by/__sized_by-style count expression. Standard
// folding-set memoization; the node tail-allocates its DependentDecls.
3489 QualType WrappedTy, Expr *CountExpr, bool CountInBytes, bool OrNull,
3490 ArrayRef<TypeCoupledDeclRefInfo> DependentDecls) const {
3491 assert(WrappedTy->isPointerType() || WrappedTy->isArrayType());
3492
3493 llvm::FoldingSetNodeID ID;
3494 CountAttributedType::Profile(ID, WrappedTy, CountExpr, CountInBytes, OrNull);
3495
3496 void *InsertPos = nullptr;
3497 CountAttributedType *CATy =
3498 CountAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
3499 if (CATy)
3500 return QualType(CATy, 0);
3501
3502 QualType CanonTy = getCanonicalType(WrappedTy);
3503 size_t Size = CountAttributedType::totalSizeToAlloc<TypeCoupledDeclRefInfo>(
3504 DependentDecls.size());
3506 new (CATy) CountAttributedType(WrappedTy, CanonTy, CountExpr, CountInBytes,
3507 OrNull, DependentDecls);
3508 Types.push_back(CATy);
3509 CountAttributedTypes.InsertNode(CATy, InsertPos);
3510
3511 return QualType(CATy, 0);
3512}
3513
// Rebuilds T with the given ExtInfo, preserving its proto/no-proto shape,
// return type, and (for prototyped functions) parameter types and the rest of
// the ExtProtoInfo. Returns T itself when the ExtInfo already matches.
3515 FunctionType::ExtInfo Info) {
3516 if (T->getExtInfo() == Info)
3517 return T;
3518
3520 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
3521 Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
3522 } else {
3523 const auto *FPT = cast<FunctionProtoType>(T);
3524 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
3525 EPI.ExtInfo = Info;
3526 Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
3527 }
3528
3529 return cast<FunctionType>(Result.getTypePtr());
3530}
3531
// Rewrites the deduced return type on every declaration of FD, walking the
// redeclaration chain from the most recent declaration backwards. The
// listener notification at the end (its guard line is elided from this
// excerpt) lets AST consumers observe the deduction.
3533 QualType ResultType) {
3534 FD = FD->getMostRecentDecl();
3535 while (true) {
3536 const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
3537 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
3538 FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
3539 if (FunctionDecl *Next = FD->getPreviousDecl())
3540 FD = Next;
3541 else
3542 break;
3543 }
3545 L->DeducedReturnType(FD, ResultType);
3546}
3547
3548/// Get a function type and produce the equivalent function type with the
3549/// specified exception specification. Type sugar that can be present on a
3550/// declaration of a function with an exception specification is permitted
3551/// and preserved. Other type sugar (for instance, typedefs) is not.
// Recurses through the three sugar forms that can wrap a declared function
// type (parens, macro qualifiers, attributes), rebuilding each layer around
// the recursively-adjusted inner type.
3553 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const {
3554 // Might have some parens.
3555 if (const auto *PT = dyn_cast<ParenType>(Orig))
3556 return getParenType(
3557 getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI));
3558
3559 // Might be wrapped in a macro qualified type.
3560 if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig))
3561 return getMacroQualifiedType(
3562 getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI),
3563 MQT->getMacroIdentifier());
3564
3565 // Might have a calling-convention attribute.
3566 if (const auto *AT = dyn_cast<AttributedType>(Orig))
3567 return getAttributedType(
3568 AT->getAttrKind(),
3569 getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI),
3570 getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI));
3571
3572 // Anything else must be a function type. Rebuild it with the new exception
3573 // specification.
3574 const auto *Proto = Orig->castAs<FunctionProtoType>();
3575 return getFunctionType(
3576 Proto->getReturnType(), Proto->getParamTypes(),
3577 Proto->getExtProtoInfo().withExceptionSpec(ESI));
3578}
3579
// True when T and U are the same function type, or (C++17 onward, where the
// exception specification is part of the function type) when they differ
// only by exception specification. NOTE(review): the stripped-type
// comparison sits on lines elided from this excerpt.
3581 QualType U) const {
3582 return hasSameType(T, U) ||
3583 (getLangOpts().CPlusPlus17 &&
3586}
3587
// Function-type overload: rebuilds T with pointer-size address spaces
// stripped from its return type and every parameter type. Non-function types
// pass through unchanged.
3589 if (const auto *Proto = T->getAs<FunctionProtoType>()) {
3590 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
3591 SmallVector<QualType, 16> Args(Proto->param_types().size());
3592 for (unsigned i = 0, n = Args.size(); i != n; ++i)
3593 Args[i] = removePtrSizeAddrSpace(Proto->param_types()[i]);
3594 return getFunctionType(RetTy, Args, Proto->getExtProtoInfo());
3595 }
3596
3597 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) {
3598 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
3599 return getFunctionNoProtoType(RetTy, Proto->getExtInfo());
3600 }
3601
3602 return T;
3603}
3604
// True when T and U match exactly, or match after stripping pointer-size
// address spaces from both. NOTE(review): the stripped comparison sits on
// lines elided from this excerpt.
3606 return hasSameType(T, U) ||
3609}
3610
// Installs ESI as FD's exception specification. The semantic type is always
// updated; when AsWritten is set, the type-as-written in the TypeSourceInfo
// is patched as well.
3613 bool AsWritten) {
3614 // Update the type.
3615 QualType Updated =
3617 FD->setType(Updated);
3618
3619 if (!AsWritten)
3620 return;
3621
3622 // Update the type in the type source information too.
3623 if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
3624 // If the type and the type-as-written differ, we may need to update
3625 // the type-as-written too.
3626 if (TSInfo->getType() != FD->getType())
3627 Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);
3628
3629 // FIXME: When we get proper type location information for exceptions,
3630 // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
3631 // up the TypeSourceInfo.
3632 assert(TypeLoc::getFullDataSizeForType(Updated) ==
3633 TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
3634 "TypeLoc size mismatch from updating exception specification");
3635 TSInfo->overrideType(Updated);
3636 }
3637}
3638
3639/// getComplexType - Return the uniqued reference to the type for a complex
3640/// number with the specified element type.
// Standard uniquing pattern: folding-set lookup, then (for non-canonical
// element types) recursive construction of the canonical node before
// inserting the new sugar node.
3642 // Unique pointers, to guarantee there is only one pointer of a particular
3643 // structure.
3644 llvm::FoldingSetNodeID ID;
3646
3647 void *InsertPos = nullptr;
3648 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
3649 return QualType(CT, 0);
3650
3651 // If the pointee type isn't canonical, this won't be a canonical type either,
3652 // so fill in the canonical type field.
3653 QualType Canonical;
3654 if (!T.isCanonical()) {
3655 Canonical = getComplexType(getCanonicalType(T));
3656
3657 // Get the new insert position for the node we care about.
3658 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
3659 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3660 }
3661 auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical);
3662 Types.push_back(New);
3663 ComplexTypes.InsertNode(New, InsertPos);
3664 return QualType(New, 0);
3665}
3666
3667/// getPointerType - Return the uniqued reference to the type for a pointer to
3668/// the specified type.
// Standard uniquing pattern: folding-set lookup, then (for non-canonical
// pointee types) recursive construction of the canonical node — which may
// invalidate InsertPos, hence the re-find — before inserting the new node.
3670 // Unique pointers, to guarantee there is only one pointer of a particular
3671 // structure.
3672 llvm::FoldingSetNodeID ID;
3674
3675 void *InsertPos = nullptr;
3676 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3677 return QualType(PT, 0);
3678
3679 // If the pointee type isn't canonical, this won't be a canonical type either,
3680 // so fill in the canonical type field.
3681 QualType Canonical;
3682 if (!T.isCanonical()) {
3683 Canonical = getPointerType(getCanonicalType(T));
3684
3685 // Get the new insert position for the node we care about.
3686 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3687 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3688 }
3689 auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical);
3690 Types.push_back(New);
3691 PointerTypes.InsertNode(New, InsertPos);
3692 return QualType(New, 0);
3693}
3694
// Returns the uniqued AdjustedType recording that Orig was adjusted to New
// (e.g. by parameter adjustment). The node's canonical type is New's
// canonical type; Orig is kept purely as sugar.
3696 llvm::FoldingSetNodeID ID;
3697 AdjustedType::Profile(ID, Orig, New);
3698 void *InsertPos = nullptr;
3699 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3700 if (AT)
3701 return QualType(AT, 0);
3702
3703 QualType Canonical = getCanonicalType(New);
3704
3705 // Get the new insert position for the node we care about.
3706 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3707 assert(!AT && "Shouldn't be in the map!");
3708
3709 AT = new (*this, alignof(AdjustedType))
3710 AdjustedType(Type::Adjusted, Orig, New, Canonical);
3711 Types.push_back(AT);
3712 AdjustedTypes.InsertNode(AT, InsertPos);
3713 return QualType(AT, 0);
3714}
3715
// Returns the uniqued DecayedType recording that Orig decayed to Decayed.
// DecayedType shares the AdjustedTypes folding set (it profiles the same way
// as AdjustedType), so lookups for the same (Orig, Decayed) pair hit here.
3717 llvm::FoldingSetNodeID ID;
3718 AdjustedType::Profile(ID, Orig, Decayed);
3719 void *InsertPos = nullptr;
3720 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3721 if (AT)
3722 return QualType(AT, 0);
3723
3724 QualType Canonical = getCanonicalType(Decayed);
3725
3726 // Get the new insert position for the node we care about.
3727 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3728 assert(!AT && "Shouldn't be in the map!");
3729
3730 AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical);
3731 Types.push_back(AT);
3732 AdjustedTypes.InsertNode(AT, InsertPos);
3733 return QualType(AT, 0);
3734}
3735
// Convenience overload: computes the decayed form of T itself. Arrays decay
// to (possibly qualified) element pointers per C99 6.7.5.3p7; function types
// decay to function pointers per C99 6.7.5.3p8.
3737 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay")
;
3739 QualType Decayed;
3740
3741 // C99 6.7.5.3p7:
3742 // A declaration of a parameter as "array of type" shall be
3743 // adjusted to "qualified pointer to type", where the type
3744 // qualifiers (if any) are those specified within the [ and ] of
3745 // the array type derivation.
3746 if (T->isArrayType())
3747 Decayed = getArrayDecayedType(T);
3748
3749 // C99 6.7.5.3p8:
3750 // A declaration of a parameter as "function returning type"
3751 // shall be adjusted to "pointer to function returning type", as
3752 // in 6.3.2.1.
3753 if (T->isFunctionType())
3754 Decayed = getPointerType(T);
3755
3756 return getDecayedType(T, Decayed);
3757}
3758
// Returns the uniqued ArrayParameterType for a constant array Ty (used for
// parameters of array type that must keep their array-ness, e.g. in C++
// templates/HLSL). Idempotent: an ArrayParameterType is returned unchanged.
3760 if (Ty->isArrayParameterType())
3761 return Ty;
3762 assert(Ty->isConstantArrayType() && "Ty must be an array type.");
3763 const auto *ATy = cast<ConstantArrayType>(Ty);
3764 llvm::FoldingSetNodeID ID;
3765 ATy->Profile(ID, *this, ATy->getElementType(), ATy->getZExtSize(),
3766 ATy->getSizeExpr(), ATy->getSizeModifier(),
3767 ATy->getIndexTypeQualifiers().getAsOpaqueValue());
3768 void *InsertPos = nullptr;
3769 ArrayParameterType *AT =
3770 ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
3771 if (AT)
3772 return QualType(AT, 0);
3773
3774 QualType Canonical;
3775 if (!Ty.isCanonical()) {
3776 Canonical = getArrayParameterType(getCanonicalType(Ty));
3777
3778 // Get the new insert position for the node we care about.
3779 AT = ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
3780 assert(!AT && "Shouldn't be in the map!");
3781 }
3782
3783 AT = new (*this, alignof(ArrayParameterType))
3784 ArrayParameterType(ATy, Canonical);
3785 Types.push_back(AT);
3786 ArrayParameterTypes.InsertNode(AT, InsertPos);
3787 return QualType(AT, 0);
3788}
3789
3790/// getBlockPointerType - Return the uniqued reference to the type for
3791/// a pointer to the specified block.
// Same uniquing pattern as getPointerType, restricted to function pointees
// (blocks always point at function types, per the assert).
3793 assert(T->isFunctionType() && "block of function types only");
3794 // Unique pointers, to guarantee there is only one block of a particular
3795 // structure.
3796 llvm::FoldingSetNodeID ID;
3798
3799 void *InsertPos = nullptr;
3800 if (BlockPointerType *PT =
3801 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3802 return QualType(PT, 0);
3803
3804 // If the block pointee type isn't canonical, this won't be a canonical
3805 // type either so fill in the canonical type field.
3806 QualType Canonical;
3807 if (!T.isCanonical()) {
3809
3810 // Get the new insert position for the node we care about.
3811 BlockPointerType *NewIP =
3812 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3813 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3814 }
3815 auto *New =
3816 new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical);
3817 Types.push_back(New);
3818 BlockPointerTypes.InsertNode(New, InsertPos);
3819 return QualType(New, 0);
3820}
3821
3822/// getLValueReferenceType - Return the uniqued reference to the type for an
3823/// lvalue reference to the specified type.
// SpelledAsLValue distinguishes a written "T&" from an lvalue reference
// produced by reference collapsing; the canonical form is always the
// spelled-as-lvalue reference to the innermost (non-reference) pointee.
3825ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
3826 assert((!T->isPlaceholderType() ||
3827 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
3828 "Unresolved placeholder type");
3829
3830 // Unique pointers, to guarantee there is only one pointer of a particular
3831 // structure.
3832 llvm::FoldingSetNodeID ID;
3833 ReferenceType::Profile(ID, T, SpelledAsLValue);
3834
3835 void *InsertPos = nullptr;
3836 if (LValueReferenceType *RT =
3837 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
3838 return QualType(RT, 0);
3839
3840 const auto *InnerRef = T->getAs<ReferenceType>();
3841
3842 // If the referencee type isn't canonical, this won't be a canonical type
3843 // either, so fill in the canonical type field.
3844 QualType Canonical;
3845 if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
3846 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
3847 Canonical = getLValueReferenceType(getCanonicalType(PointeeType));
3848
3849 // Get the new insert position for the node we care about.
3850 LValueReferenceType *NewIP =
3851 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
3852 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3853 }
3854
3855 auto *New = new (*this, alignof(LValueReferenceType))
3856 LValueReferenceType(T, Canonical, SpelledAsLValue);
3857 Types.push_back(New);
3858 LValueReferenceTypes.InsertNode(New, InsertPos);
3859
3860 return QualType(New, 0);
3861}
3862
3863/// getRValueReferenceType - Return the uniqued reference to the type for an
3864/// rvalue reference to the specified type.
// Mirrors getLValueReferenceType; an inner reference type triggers reference
// collapsing, so the canonical form references the innermost pointee.
3866 assert((!T->isPlaceholderType() ||
3867 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
3868 "Unresolved placeholder type");
3869
3870 // Unique pointers, to guarantee there is only one pointer of a particular
3871 // structure.
3872 llvm::FoldingSetNodeID ID;
3873 ReferenceType::Profile(ID, T, false);
3874
3875 void *InsertPos = nullptr;
3876 if (RValueReferenceType *RT =
3877 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
3878 return QualType(RT, 0);
3879
3880 const auto *InnerRef = T->getAs<ReferenceType>();
3881
3882 // If the referencee type isn't canonical, this won't be a canonical type
3883 // either, so fill in the canonical type field.
3884 QualType Canonical;
3885 if (InnerRef || !T.isCanonical()) {
3886 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
3887 Canonical = getRValueReferenceType(getCanonicalType(PointeeType));
3888
3889 // Get the new insert position for the node we care about.
3890 RValueReferenceType *NewIP =
3891 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
3892 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3893 }
3894
3895 auto *New = new (*this, alignof(RValueReferenceType))
3896 RValueReferenceType(T, Canonical);
3897 Types.push_back(New);
3898 RValueReferenceTypes.InsertNode(New, InsertPos);
3899 return QualType(New, 0);
3900}
3901
3902/// getMemberPointerType - Return the uniqued reference to the type for a
3903/// member pointer to the specified type, in the specified class.
// Uniqued on the (pointee, class) pair; the node is canonical only when both
// the pointee type and the class type are canonical.
3905 // Unique pointers, to guarantee there is only one pointer of a particular
3906 // structure.
3907 llvm::FoldingSetNodeID ID;
3908 MemberPointerType::Profile(ID, T, Cls);
3909
3910 void *InsertPos = nullptr;
3911 if (MemberPointerType *PT =
3912 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3913 return QualType(PT, 0);
3914
3915 // If the pointee or class type isn't canonical, this won't be a canonical
3916 // type either, so fill in the canonical type field.
3917 QualType Canonical;
3918 if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
3920
3921 // Get the new insert position for the node we care about.
3922 MemberPointerType *NewIP =
3923 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3924 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3925 }
3926 auto *New = new (*this, alignof(MemberPointerType))
3927 MemberPointerType(T, Cls, Canonical);
3928 Types.push_back(New);
3929 MemberPointerTypes.InsertNode(New, InsertPos);
3930 return QualType(New, 0);
3931}
3932
3933/// getConstantArrayType - Return the unique reference to the type for an
3934/// array of the specified element type.
3936 const llvm::APInt &ArySizeIn,
3937 const Expr *SizeExpr,
3939 unsigned IndexTypeQuals) const {
3940 assert((EltTy->isDependentType() ||
3941 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
3942 "Constant array of VLAs is illegal!");
3943
3944 // We only need the size as part of the type if it's instantiation-dependent.
3945 if (SizeExpr && !SizeExpr->isInstantiationDependent())
3946 SizeExpr = nullptr;
3947
3948 // Convert the array size into a canonical width matching the pointer size for
3949 // the target.
3950 llvm::APInt ArySize(ArySizeIn);
3951 ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());
3952
3953 llvm::FoldingSetNodeID ID;
3954 ConstantArrayType::Profile(ID, *this, EltTy, ArySize.getZExtValue(), SizeExpr,
3955 ASM, IndexTypeQuals);
3956
3957 void *InsertPos = nullptr;
3958 if (ConstantArrayType *ATP =
3959 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
3960 return QualType(ATP, 0);
3961
3962 // If the element type isn't canonical or has qualifiers, or the array bound
3963 // is instantiation-dependent, this won't be a canonical type either, so fill
3964 // in the canonical type field.
3965 QualType Canon;
3966 // FIXME: Check below should look for qualifiers behind sugar.
3967 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
3968 SplitQualType canonSplit = getCanonicalType(EltTy).split();
3969 Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr,
3970 ASM, IndexTypeQuals);
3971 Canon = getQualifiedType(Canon, canonSplit.Quals);
3972
3973 // Get the new insert position for the node we care about.
3974 ConstantArrayType *NewIP =
3975 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
3976 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3977 }
3978
3979 auto *New = ConstantArrayType::Create(*this, EltTy, Canon, ArySize, SizeExpr,
3980 ASM, IndexTypeQuals);
3981 ConstantArrayTypes.InsertNode(New, InsertPos);
3982 Types.push_back(New);
3983 return QualType(New, 0);
3984}
3985
3986/// getVariableArrayDecayedType - Turns the given type, which may be
3987/// variably-modified, into the corresponding type with all the known
3988/// sizes replaced with [*].
3990 // Vastly most common case.
3991 if (!type->isVariablyModifiedType()) return type;
3992
3993 QualType result;
3994
3995 SplitQualType split = type.getSplitDesugaredType();
3996 const Type *ty = split.Ty;
3997 switch (ty->getTypeClass()) {
3998#define TYPE(Class, Base)
3999#define ABSTRACT_TYPE(Class, Base)
4000#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
4001#include "clang/AST/TypeNodes.inc"
4002 llvm_unreachable("didn't desugar past all non-canonical types?");
4003
4004 // These types should never be variably-modified.
4005 case Type::Builtin:
4006 case Type::Complex:
4007 case Type::Vector:
4008 case Type::DependentVector:
4009 case Type::ExtVector:
4010 case Type::DependentSizedExtVector:
4011 case Type::ConstantMatrix:
4012 case Type::DependentSizedMatrix:
4013 case Type::DependentAddressSpace:
4014 case Type::ObjCObject:
4015 case Type::ObjCInterface:
4016 case Type::ObjCObjectPointer:
4017 case Type::Record:
4018 case Type::Enum:
4019 case Type::UnresolvedUsing:
4020 case Type::TypeOfExpr:
4021 case Type::TypeOf:
4022 case Type::Decltype:
4023 case Type::UnaryTransform:
4024 case Type::DependentName:
4025 case Type::InjectedClassName:
4026 case Type::TemplateSpecialization:
4027 case Type::DependentTemplateSpecialization:
4028 case Type::TemplateTypeParm:
4029 case Type::SubstTemplateTypeParmPack:
4030 case Type::Auto:
4031 case Type::DeducedTemplateSpecialization:
4032 case Type::PackExpansion:
4033 case Type::PackIndexing:
4034 case Type::BitInt:
4035 case Type::DependentBitInt:
4036 case Type::ArrayParameter:
4037 llvm_unreachable("type should never be variably-modified");
4038
4039 // These types can be variably-modified but should never need to
4040 // further decay.
4041 case Type::FunctionNoProto:
4042 case Type::FunctionProto:
4043 case Type::BlockPointer:
4044 case Type::MemberPointer:
4045 case Type::Pipe:
4046 return type;
4047
4048 // These types can be variably-modified. All these modifications
4049 // preserve structure except as noted by comments.
4050 // TODO: if we ever care about optimizing VLAs, there are no-op
4051 // optimizations available here.
4052 case Type::Pointer:
4054 cast<PointerType>(ty)->getPointeeType()));
4055 break;
4056
4057 case Type::LValueReference: {
4058 const auto *lv = cast<LValueReferenceType>(ty);
4059 result = getLValueReferenceType(
4060 getVariableArrayDecayedType(lv->getPointeeType()),
4061 lv->isSpelledAsLValue());
4062 break;
4063 }
4064
4065 case Type::RValueReference: {
4066 const auto *lv = cast<RValueReferenceType>(ty);
4067 result = getRValueReferenceType(
4068 getVariableArrayDecayedType(lv->getPointeeType()));
4069 break;
4070 }
4071
4072 case Type::Atomic: {
4073 const auto *at = cast<AtomicType>(ty);
4074 result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
4075 break;
4076 }
4077
4078 case Type::ConstantArray: {
4079 const auto *cat = cast<ConstantArrayType>(ty);
4080 result = getConstantArrayType(
4081 getVariableArrayDecayedType(cat->getElementType()),
4082 cat->getSize(),
4083 cat->getSizeExpr(),
4084 cat->getSizeModifier(),
4085 cat->getIndexTypeCVRQualifiers());
4086 break;
4087 }
4088
4089 case Type::DependentSizedArray: {
4090 const auto *dat = cast<DependentSizedArrayType>(ty);
4092 getVariableArrayDecayedType(dat->getElementType()),
4093 dat->getSizeExpr(),
4094 dat->getSizeModifier(),
4095 dat->getIndexTypeCVRQualifiers(),
4096 dat->getBracketsRange());
4097 break;
4098 }
4099
4100 // Turn incomplete types into [*] types.
4101 case Type::IncompleteArray: {
4102 const auto *iat = cast<IncompleteArrayType>(ty);
4103 result =
4105 /*size*/ nullptr, ArraySizeModifier::Normal,
4106 iat->getIndexTypeCVRQualifiers(), SourceRange());
4107 break;
4108 }
4109
4110 // Turn VLA types into [*] types.
4111 case Type::VariableArray: {
4112 const auto *vat = cast<VariableArrayType>(ty);
4113 result = getVariableArrayType(
4114 getVariableArrayDecayedType(vat->getElementType()),
4115 /*size*/ nullptr, ArraySizeModifier::Star,
4116 vat->getIndexTypeCVRQualifiers(), vat->getBracketsRange());
4117 break;
4118 }
4119 }
4120
4121 // Apply the top-level qualifiers from the original.
4122 return getQualifiedType(result, split.Quals);
4123}
4124
4125/// getVariableArrayType - Returns a non-unique reference to the type for a
4126/// variable array of the specified element type.
4129 unsigned IndexTypeQuals,
4130 SourceRange Brackets) const {
4131 // Since we don't unique expressions, it isn't possible to unique VLA's
4132 // that have an expression provided for their size.
4133 QualType Canon;
4134
4135 // Be sure to pull qualifiers off the element type.
4136 // FIXME: Check below should look for qualifiers behind sugar.
4137 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
4138 SplitQualType canonSplit = getCanonicalType(EltTy).split();
4139 Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
4140 IndexTypeQuals, Brackets);
4141 Canon = getQualifiedType(Canon, canonSplit.Quals);
4142 }
4143
4144 auto *New = new (*this, alignof(VariableArrayType))
4145 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
4146
4147 VariableArrayTypes.push_back(New);
4148 Types.push_back(New);
4149 return QualType(New, 0);
4150}
4151
4152/// getDependentSizedArrayType - Returns a non-unique reference to
4153/// the type for a dependently-sized array of the specified element
4154/// type.
4156 Expr *numElements,
4158 unsigned elementTypeQuals,
4159 SourceRange brackets) const {
4160 assert((!numElements || numElements->isTypeDependent() ||
4161 numElements->isValueDependent()) &&
4162 "Size must be type- or value-dependent!");
4163
4164 SplitQualType canonElementType = getCanonicalType(elementType).split();
4165
4166 void *insertPos = nullptr;
4167 llvm::FoldingSetNodeID ID;
4169 ID, *this, numElements ? QualType(canonElementType.Ty, 0) : elementType,
4170 ASM, elementTypeQuals, numElements);
4171
4172 // Look for an existing type with these properties.
4173 DependentSizedArrayType *canonTy =
4174 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);
4175
4176 // Dependently-sized array types that do not have a specified number
4177 // of elements will have their sizes deduced from a dependent
4178 // initializer.
4179 if (!numElements) {
4180 if (canonTy)
4181 return QualType(canonTy, 0);
4182
4183 auto *newType = new (*this, alignof(DependentSizedArrayType))
4184 DependentSizedArrayType(elementType, QualType(), numElements, ASM,
4185 elementTypeQuals, brackets);
4186 DependentSizedArrayTypes.InsertNode(newType, insertPos);
4187 Types.push_back(newType);
4188 return QualType(newType, 0);
4189 }
4190
4191 // If we don't have one, build one.
4192 if (!canonTy) {
4193 canonTy = new (*this, alignof(DependentSizedArrayType))
4194 DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(),
4195 numElements, ASM, elementTypeQuals, brackets);
4196 DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
4197 Types.push_back(canonTy);
4198 }
4199
4200 // Apply qualifiers from the element type to the array.
4201 QualType canon = getQualifiedType(QualType(canonTy,0),
4202 canonElementType.Quals);
4203
4204 // If we didn't need extra canonicalization for the element type or the size
4205 // expression, then just use that as our result.
4206 if (QualType(canonElementType.Ty, 0) == elementType &&
4207 canonTy->getSizeExpr() == numElements)
4208 return canon;
4209
4210 // Otherwise, we need to build a type which follows the spelling
4211 // of the element type.
4212 auto *sugaredType = new (*this, alignof(DependentSizedArrayType))
4213 DependentSizedArrayType(elementType, canon, numElements, ASM,
4214 elementTypeQuals, brackets);
4215 Types.push_back(sugaredType);
4216 return QualType(sugaredType, 0);
4217}
4218
4221 unsigned elementTypeQuals) const {
4222 llvm::FoldingSetNodeID ID;
4223 IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);
4224
4225 void *insertPos = nullptr;
4226 if (IncompleteArrayType *iat =
4227 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
4228 return QualType(iat, 0);
4229
4230 // If the element type isn't canonical, this won't be a canonical type
4231 // either, so fill in the canonical type field. We also have to pull
4232 // qualifiers off the element type.
4233 QualType canon;
4234
4235 // FIXME: Check below should look for qualifiers behind sugar.
4236 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
4237 SplitQualType canonSplit = getCanonicalType(elementType).split();
4238 canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
4239 ASM, elementTypeQuals);
4240 canon = getQualifiedType(canon, canonSplit.Quals);
4241
4242 // Get the new insert position for the node we care about.
4243 IncompleteArrayType *existing =
4244 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
4245 assert(!existing && "Shouldn't be in the map!"); (void) existing;
4246 }
4247
4248 auto *newType = new (*this, alignof(IncompleteArrayType))
4249 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
4250
4251 IncompleteArrayTypes.InsertNode(newType, insertPos);
4252 Types.push_back(newType);
4253 return QualType(newType, 0);
4254}
4255
4258#define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \
4259 {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
4260 NUMVECTORS};
4261
4262#define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \
4263 {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};
4264
4265 switch (Ty->getKind()) {
4266 default:
4267 llvm_unreachable("Unsupported builtin vector type");
4268 case BuiltinType::SveInt8:
4269 return SVE_INT_ELTTY(8, 16, true, 1);
4270 case BuiltinType::SveUint8:
4271 return SVE_INT_ELTTY(8, 16, false, 1);
4272 case BuiltinType::SveInt8x2:
4273 return SVE_INT_ELTTY(8, 16, true, 2);
4274 case BuiltinType::SveUint8x2:
4275 return SVE_INT_ELTTY(8, 16, false, 2);
4276 case BuiltinType::SveInt8x3:
4277 return SVE_INT_ELTTY(8, 16, true, 3);
4278 case BuiltinType::SveUint8x3:
4279 return SVE_INT_ELTTY(8, 16, false, 3);
4280 case BuiltinType::SveInt8x4:
4281 return SVE_INT_ELTTY(8, 16, true, 4);
4282 case BuiltinType::SveUint8x4:
4283 return SVE_INT_ELTTY(8, 16, false, 4);
4284 case BuiltinType::SveInt16:
4285 return SVE_INT_ELTTY(16, 8, true, 1);
4286 case BuiltinType::SveUint16:
4287 return SVE_INT_ELTTY(16, 8, false, 1);
4288 case BuiltinType::SveInt16x2:
4289 return SVE_INT_ELTTY(16, 8, true, 2);
4290 case BuiltinType::SveUint16x2:
4291 return SVE_INT_ELTTY(16, 8, false, 2);
4292 case BuiltinType::SveInt16x3:
4293 return SVE_INT_ELTTY(16, 8, true, 3);
4294 case BuiltinType::SveUint16x3:
4295 return SVE_INT_ELTTY(16, 8, false, 3);
4296 case BuiltinType::SveInt16x4:
4297 return SVE_INT_ELTTY(16, 8, true, 4);
4298 case BuiltinType::SveUint16x4:
4299 return SVE_INT_ELTTY(16, 8, false, 4);
4300 case BuiltinType::SveInt32:
4301 return SVE_INT_ELTTY(32, 4, true, 1);
4302 case BuiltinType::SveUint32:
4303 return SVE_INT_ELTTY(32, 4, false, 1);
4304 case BuiltinType::SveInt32x2:
4305 return SVE_INT_ELTTY(32, 4, true, 2);
4306 case BuiltinType::SveUint32x2:
4307 return SVE_INT_ELTTY(32, 4, false, 2);
4308 case BuiltinType::SveInt32x3:
4309 return SVE_INT_ELTTY(32, 4, true, 3);
4310 case BuiltinType::SveUint32x3:
4311 return SVE_INT_ELTTY(32, 4, false, 3);
4312 case BuiltinType::SveInt32x4:
4313 return SVE_INT_ELTTY(32, 4, true, 4);
4314 case BuiltinType::SveUint32x4:
4315 return SVE_INT_ELTTY(32, 4, false, 4);
4316 case BuiltinType::SveInt64:
4317 return SVE_INT_ELTTY(64, 2, true, 1);
4318 case BuiltinType::SveUint64:
4319 return SVE_INT_ELTTY(64, 2, false, 1);
4320 case BuiltinType::SveInt64x2:
4321 return SVE_INT_ELTTY(64, 2, true, 2);
4322 case BuiltinType::SveUint64x2:
4323 return SVE_INT_ELTTY(64, 2, false, 2);
4324 case BuiltinType::SveInt64x3:
4325 return SVE_INT_ELTTY(64, 2, true, 3);
4326 case BuiltinType::SveUint64x3:
4327 return SVE_INT_ELTTY(64, 2, false, 3);
4328 case BuiltinType::SveInt64x4:
4329 return SVE_INT_ELTTY(64, 2, true, 4);
4330 case BuiltinType::SveUint64x4:
4331 return SVE_INT_ELTTY(64, 2, false, 4);
4332 case BuiltinType::SveBool:
4333 return SVE_ELTTY(BoolTy, 16, 1);
4334 case BuiltinType::SveBoolx2:
4335 return SVE_ELTTY(BoolTy, 16, 2);
4336 case BuiltinType::SveBoolx4:
4337 return SVE_ELTTY(BoolTy, 16, 4);
4338 case BuiltinType::SveFloat16:
4339 return SVE_ELTTY(HalfTy, 8, 1);
4340 case BuiltinType::SveFloat16x2:
4341 return SVE_ELTTY(HalfTy, 8, 2);
4342 case BuiltinType::SveFloat16x3:
4343 return SVE_ELTTY(HalfTy, 8, 3);
4344 case BuiltinType::SveFloat16x4:
4345 return SVE_ELTTY(HalfTy, 8, 4);
4346 case BuiltinType::SveFloat32:
4347 return SVE_ELTTY(FloatTy, 4, 1);
4348 case BuiltinType::SveFloat32x2:
4349 return SVE_ELTTY(FloatTy, 4, 2);
4350 case BuiltinType::SveFloat32x3:
4351 return SVE_ELTTY(FloatTy, 4, 3);
4352 case BuiltinType::SveFloat32x4:
4353 return SVE_ELTTY(FloatTy, 4, 4);
4354 case BuiltinType::SveFloat64:
4355 return SVE_ELTTY(DoubleTy, 2, 1);
4356 case BuiltinType::SveFloat64x2:
4357 return SVE_ELTTY(DoubleTy, 2, 2);
4358 case BuiltinType::SveFloat64x3:
4359 return SVE_ELTTY(DoubleTy, 2, 3);
4360 case BuiltinType::SveFloat64x4:
4361 return SVE_ELTTY(DoubleTy, 2, 4);
4362 case BuiltinType::SveBFloat16:
4363 return SVE_ELTTY(BFloat16Ty, 8, 1);
4364 case BuiltinType::SveBFloat16x2:
4365 return SVE_ELTTY(BFloat16Ty, 8, 2);
4366 case BuiltinType::SveBFloat16x3:
4367 return SVE_ELTTY(BFloat16Ty, 8, 3);
4368 case BuiltinType::SveBFloat16x4:
4369 return SVE_ELTTY(BFloat16Ty, 8, 4);
4370#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \
4371 IsSigned) \
4372 case BuiltinType::Id: \
4373 return {getIntTypeForBitwidth(ElBits, IsSigned), \
4374 llvm::ElementCount::getScalable(NumEls), NF};
4375#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
4376 case BuiltinType::Id: \
4377 return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \
4378 llvm::ElementCount::getScalable(NumEls), NF};
4379#define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
4380 case BuiltinType::Id: \
4381 return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
4382#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
4383 case BuiltinType::Id: \
4384 return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
4385#include "clang/Basic/RISCVVTypes.def"
4386 }
4387}
4388
4389/// getExternrefType - Return a WebAssembly externref type, which represents an
4390/// opaque reference to a host value.
4392 if (Target->getTriple().isWasm() && Target->hasFeature("reference-types")) {
4393#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
4394 if (BuiltinType::Id == BuiltinType::WasmExternRef) \
4395 return SingletonId;
4396#include "clang/Basic/WebAssemblyReferenceTypes.def"
4397 }
4398 llvm_unreachable(
4399 "shouldn't try to generate type externref outside WebAssembly target");
4400}
4401
4402/// getScalableVectorType - Return the unique reference to a scalable vector
4403/// type of the specified element type and size. VectorType must be a built-in
4404/// type.
4406 unsigned NumFields) const {
4407 if (Target->hasAArch64SVETypes()) {
4408 uint64_t EltTySize = getTypeSize(EltTy);
4409#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
4410 IsSigned, IsFP, IsBF) \
4411 if (!EltTy->isBooleanType() && \
4412 ((EltTy->hasIntegerRepresentation() && \
4413 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
4414 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \
4415 IsFP && !IsBF) || \
4416 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \
4417 IsBF && !IsFP)) && \
4418 EltTySize == ElBits && NumElts == NumEls) { \
4419 return SingletonId; \
4420 }
4421#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
4422 if (EltTy->isBooleanType() && NumElts == NumEls) \
4423 return SingletonId;
4424#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingleTonId)
4425#include "clang/Basic/AArch64SVEACLETypes.def"
4426 } else if (Target->hasRISCVVTypes()) {
4427 uint64_t EltTySize = getTypeSize(EltTy);
4428#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \
4429 IsFP, IsBF) \
4430 if (!EltTy->isBooleanType() && \
4431 ((EltTy->hasIntegerRepresentation() && \
4432 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
4433 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \
4434 IsFP && !IsBF) || \
4435 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \
4436 IsBF && !IsFP)) && \
4437 EltTySize == ElBits && NumElts == NumEls && NumFields == NF) \
4438 return SingletonId;
4439#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
4440 if (EltTy->isBooleanType() && NumElts == NumEls) \
4441 return SingletonId;
4442#include "clang/Basic/RISCVVTypes.def"
4443 }
4444 return QualType();
4445}
4446
4447/// getVectorType - Return the unique reference to a vector type of
4448/// the specified element type and size. VectorType must be a built-in type.
4450 VectorKind VecKind) const {
4451 assert(vecType->isBuiltinType() ||
4452 (vecType->isBitIntType() &&
4453 // Only support _BitInt elements with byte-sized power of 2 NumBits.
4454 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) &&
4455 vecType->castAs<BitIntType>()->getNumBits() >= 8));
4456
4457 // Check if we've already instantiated a vector of this type.
4458 llvm::FoldingSetNodeID ID;
4459 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
4460
4461 void *InsertPos = nullptr;
4462 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
4463 return QualType(VTP, 0);
4464
4465 // If the element type isn't canonical, this won't be a canonical type either,
4466 // so fill in the canonical type field.
4467 QualType Canonical;
4468 if (!vecType.isCanonical()) {
4469 Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);
4470
4471 // Get the new insert position for the node we care about.
4472 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4473 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4474 }
4475 auto *New = new (*this, alignof(VectorType))
4476 VectorType(vecType, NumElts, Canonical, VecKind);
4477 VectorTypes.InsertNode(New, InsertPos);
4478 Types.push_back(New);
4479 return QualType(New, 0);
4480}
4481
4483 SourceLocation AttrLoc,
4484 VectorKind VecKind) const {
4485 llvm::FoldingSetNodeID ID;
4486 DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr,
4487 VecKind);
4488 void *InsertPos = nullptr;
4489 DependentVectorType *Canon =
4490 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4492
4493 if (Canon) {
4494 New = new (*this, alignof(DependentVectorType)) DependentVectorType(
4495 VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
4496 } else {
4497 QualType CanonVecTy = getCanonicalType(VecType);
4498 if (CanonVecTy == VecType) {
4499 New = new (*this, alignof(DependentVectorType))
4500 DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind);
4501
4502 DependentVectorType *CanonCheck =
4503 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4504 assert(!CanonCheck &&
4505 "Dependent-sized vector_size canonical type broken");
4506 (void)CanonCheck;
4507 DependentVectorTypes.InsertNode(New, InsertPos);
4508 } else {
4509 QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr,
4510 SourceLocation(), VecKind);
4511 New = new (*this, alignof(DependentVectorType))
4512 DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
4513 }
4514 }
4515
4516 Types.push_back(New);
4517 return QualType(New, 0);
4518}
4519
4520/// getExtVectorType - Return the unique reference to an extended vector type of
4521/// the specified element type and size. VectorType must be a built-in type.
4523 unsigned NumElts) const {
4524 assert(vecType->isBuiltinType() || vecType->isDependentType() ||
4525 (vecType->isBitIntType() &&
4526 // Only support _BitInt elements with byte-sized power of 2 NumBits.
4527 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) &&
4528 vecType->castAs<BitIntType>()->getNumBits() >= 8));
4529
4530 // Check if we've already instantiated a vector of this type.
4531 llvm::FoldingSetNodeID ID;
4532 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
4534 void *InsertPos = nullptr;
4535 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
4536 return QualType(VTP, 0);
4537
4538 // If the element type isn't canonical, this won't be a canonical type either,
4539 // so fill in the canonical type field.
4540 QualType Canonical;
4541 if (!vecType.isCanonical()) {
4542 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);
4543
4544 // Get the new insert position for the node we care about.
4545 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4546 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4547 }
4548 auto *New = new (*this, alignof(ExtVectorType))
4549 ExtVectorType(vecType, NumElts, Canonical);
4550 VectorTypes.InsertNode(New, InsertPos);
4551 Types.push_back(New);
4552 return QualType(New, 0);
4553}
4554
4557 Expr *SizeExpr,
4558 SourceLocation AttrLoc) const {
4559 llvm::FoldingSetNodeID ID;
4561 SizeExpr);
4562
4563 void *InsertPos = nullptr;
4565 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4567 if (Canon) {
4568 // We already have a canonical version of this array type; use it as
4569 // the canonical type for a newly-built type.
4570 New = new (*this, alignof(DependentSizedExtVectorType))
4571 DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr,
4572 AttrLoc);
4573 } else {
4574 QualType CanonVecTy = getCanonicalType(vecType);
4575 if (CanonVecTy == vecType) {
4576 New = new (*this, alignof(DependentSizedExtVectorType))
4577 DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc);
4578
4579 DependentSizedExtVectorType *CanonCheck
4580 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4581 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
4582 (void)CanonCheck;
4583 DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
4584 } else {
4585 QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
4586 SourceLocation());
4587 New = new (*this, alignof(DependentSizedExtVectorType))
4588 DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc);
4589 }
4590 }
4591
4592 Types.push_back(New);
4593 return QualType(New, 0);
4594}
4595
4597 unsigned NumColumns) const {
4598 llvm::FoldingSetNodeID ID;
4599 ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns,
4600 Type::ConstantMatrix);
4601
4602 assert(MatrixType::isValidElementType(ElementTy) &&
4603 "need a valid element type");
4604 assert(ConstantMatrixType::isDimensionValid(NumRows) &&
4606 "need valid matrix dimensions");
4607 void *InsertPos = nullptr;
4608 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
4609 return QualType(MTP, 0);
4610
4611 QualType Canonical;
4612 if (!ElementTy.isCanonical()) {
4613 Canonical =
4614 getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns);
4615
4616 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4617 assert(!NewIP && "Matrix type shouldn't already exist in the map");
4618 (void)NewIP;
4619 }
4620
4621 auto *New = new (*this, alignof(ConstantMatrixType))
4622 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
4623 MatrixTypes.InsertNode(New, InsertPos);
4624 Types.push_back(New);
4625 return QualType(New, 0);
4626}
4627
4629 Expr *RowExpr,
4630 Expr *ColumnExpr,
4631 SourceLocation AttrLoc) const {
4632 QualType CanonElementTy = getCanonicalType(ElementTy);
4633 llvm::FoldingSetNodeID ID;
4634 DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr,
4635 ColumnExpr);
4636
4637 void *InsertPos = nullptr;
4639 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4640
4641 if (!Canon) {
4642 Canon = new (*this, alignof(DependentSizedMatrixType))
4643 DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr,
4644 ColumnExpr, AttrLoc);
4645#ifndef NDEBUG
4646 DependentSizedMatrixType *CanonCheck =
4647 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4648 assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
4649#endif
4650 DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
4651 Types.push_back(Canon);
4652 }
4653
4654 // Already have a canonical version of the matrix type
4655 //
4656 // If it exactly matches the requested type, use it directly.
4657 if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
4658 Canon->getRowExpr() == ColumnExpr)
4659 return QualType(Canon, 0);
4660
4661 // Use Canon as the canonical type for newly-built type.
4662 DependentSizedMatrixType *New = new (*this, alignof(DependentSizedMatrixType))
4663 DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr,
4664 ColumnExpr, AttrLoc);
4665 Types.push_back(New);
4666 return QualType(New, 0);
4667}
4668
4670 Expr *AddrSpaceExpr,
4671 SourceLocation AttrLoc) const {
4672 assert(AddrSpaceExpr->isInstantiationDependent());
4673
4674 QualType canonPointeeType = getCanonicalType(PointeeType);
4675
4676 void *insertPos = nullptr;
4677 llvm::FoldingSetNodeID ID;
4678 DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
4679 AddrSpaceExpr);
4680
4681 DependentAddressSpaceType *canonTy =
4682 DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);
4683
4684 if (!canonTy) {
4685 canonTy = new (*this, alignof(DependentAddressSpaceType))
4686 DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr,
4687 AttrLoc);
4688 DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
4689 Types.push_back(canonTy);
4690 }
4691
4692 if (canonPointeeType == PointeeType &&
4693 canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
4694 return QualType(canonTy, 0);
4695
4696 auto *sugaredType = new (*this, alignof(DependentAddressSpaceType))
4697 DependentAddressSpaceType(PointeeType, QualType(canonTy, 0),
4698 AddrSpaceExpr, AttrLoc);
4699 Types.push_back(sugaredType);
4700 return QualType(sugaredType, 0);
4701}
4702
4703/// Determine whether \p T is canonical as the result type of a function.
4705 return T.isCanonical() &&
4706 (T.getObjCLifetime() == Qualifiers::OCL_None ||
4707 T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
4708}
4709
4710/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
4713 const FunctionType::ExtInfo &Info) const {
4714 // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
4715 // functionality creates a function without a prototype regardless of
4716 // language mode (so it makes them even in C++). Once the rewriter has been
4717 // fixed, this assertion can be enabled again.
4718 //assert(!LangOpts.requiresStrictPrototypes() &&
4719 // "strict prototypes are disabled");
4720
4721 // Unique functions, to guarantee there is only one function of a particular
4722 // structure.
4723 llvm::FoldingSetNodeID ID;
4724 FunctionNoProtoType::Profile(ID, ResultTy, Info);
4725
4726 void *InsertPos = nullptr;
4727 if (FunctionNoProtoType *FT =
4728 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
4729 return QualType(FT, 0);
4730
4731 QualType Canonical;
4732 if (!isCanonicalResultType(ResultTy)) {
4733 Canonical =
4735
4736 // Get the new insert position for the node we care about.
4737 FunctionNoProtoType *NewIP =
4738 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
4739 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4740 }
4741
4742 auto *New = new (*this, alignof(FunctionNoProtoType))
4743 FunctionNoProtoType(ResultTy, Canonical, Info);
4744 Types.push_back(New);
4745 FunctionNoProtoTypes.InsertNode(New, InsertPos);
4746 return QualType(New, 0);
4747}
4748
4751 CanQualType CanResultType = getCanonicalType(ResultType);
4752
4753 // Canonical result types do not have ARC lifetime qualifiers.
4754 if (CanResultType.getQualifiers().hasObjCLifetime()) {
4755 Qualifiers Qs = CanResultType.getQualifiers();
4756 Qs.removeObjCLifetime();
4758 getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
4759 }
4760
4761 return CanResultType;
4762}
4763
// NOTE(review): the first line of this helper's signature (src 4764,
// presumably "static bool isCanonicalExceptionSpecification(") was dropped
// by extraction; only the parameter list remains below.
// Decides whether an exception specification may appear on a canonical
// function type: EST_None always qualifies; everything else only when the
// exception spec is part of the type (NoexceptInType, i.e. C++17 onwards).
4765 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
4766 if (ESI.Type == EST_None)
4767 return true;
4768 if (!NoexceptInType)
4769 return false;
4770
4771 // C++17 onwards: exception specification is part of the type, as a simple
4772 // boolean "can this function type throw".
4773 if (ESI.Type == EST_BasicNoexcept)
4774 return true;
4775
4776 // A noexcept(expr) specification is (possibly) canonical if expr is
4777 // value-dependent.
4778 if (ESI.Type == EST_DependentNoexcept)
4779 return true;
4780
4781 // A dynamic exception specification is canonical if it only contains pack
4782 // expansions (so we can't tell whether it's non-throwing) and all its
4783 // contained types are canonical.
4784 if (ESI.Type == EST_Dynamic) {
4785 bool AnyPackExpansions = false;
4786 for (QualType ET : ESI.Exceptions) {
// Any non-canonical exception type disqualifies the whole spec.
4787 if (!ET.isCanonical())
4788 return false;
4789 if (ET->getAs<PackExpansionType>())
4790 AnyPackExpansions = true;
4791 }
// Canonical only if at least one pack expansion kept the spec undecidable.
4792 return AnyPackExpansions;
4793 }
4794
// All remaining spec kinds are non-canonical.
4795 return false;
4796}
4797
// getFunctionTypeInternal - Return a uniqued FunctionProtoType for the given
// result type, parameter types, and extended prototype info (EPI). When
// OnlyWantCanonical is true, the caller asserts the inputs are already
// canonical. Types are uniqued through the FunctionProtoTypes folding set.
// NOTE(review): several upstream source lines were dropped by the extraction
// of this chunk (4835, 4865, 4898, 4902, 4921-4925, 4929, 4933); each gap is
// marked inline below. Verify this function against upstream ASTContext.cpp
// before editing its logic.
4798QualType ASTContext::getFunctionTypeInternal(
4799 QualType ResultTy, ArrayRef<QualType> ArgArray,
4800 const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
4801 size_t NumArgs = ArgArray.size();
4802
4803 // Unique functions, to guarantee there is only one function of a particular
4804 // structure.
4805 llvm::FoldingSetNodeID ID;
4806 FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
4807 *this, true);
4808
4809 QualType Canonical;
4810 bool Unique = false;
4811
4812 void *InsertPos = nullptr;
4813 if (FunctionProtoType *FPT =
4814 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
4815 QualType Existing = QualType(FPT, 0);
4816
4817 // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
4818 // it so long as our exception specification doesn't contain a dependent
4819 // noexcept expression, or we're just looking for a canonical type.
4820 // Otherwise, we're going to need to create a type
4821 // sugar node to hold the concrete expression.
4822 if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
4823 EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
4824 return Existing;
4825
4826 // We need a new type sugar node for this one, to hold the new noexcept
4827 // expression. We do no canonicalization here, but that's OK since we don't
4828 // expect to see the same noexcept expression much more than once.
4829 Canonical = getCanonicalType(Existing);
4830 Unique = true;
4831 }
4832
4833 bool NoexceptInType = getLangOpts().CPlusPlus17;
4834 bool IsCanonicalExceptionSpec =
// NOTE(review): src line 4835 missing -- presumably the call
// "isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);"
// completing the initializer above. Verify against upstream.
4836
4837 // Determine whether the type being created is already canonical or not.
4838 bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
4839 isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
// Every parameter must also be canonical (as a parameter) for the whole
// function type to be canonical.
4840 for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
4841 if (!ArgArray[i].isCanonicalAsParam())
4842 isCanonical = false;
4843
4844 if (OnlyWantCanonical)
4845 assert(isCanonical &&
4846 "given non-canonical parameters constructing canonical type");
4847
4848 // If this type isn't canonical, get the canonical version of it if we don't
4849 // already have it. The exception spec is only partially part of the
4850 // canonical type, and only in C++17 onwards.
4851 if (!isCanonical && Canonical.isNull()) {
4852 SmallVector<QualType, 16> CanonicalArgs;
4853 CanonicalArgs.reserve(NumArgs);
4854 for (unsigned i = 0; i != NumArgs; ++i)
4855 CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
4856
4857 llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
4858 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
4859 CanonicalEPI.HasTrailingReturn = false;
4860
4861 if (IsCanonicalExceptionSpec) {
4862 // Exception spec is already OK.
4863 } else if (NoexceptInType) {
4864 switch (EPI.ExceptionSpec.Type) {
// NOTE(review): src line 4865 missing -- presumably the case labels for the
// not-yet-known spec kinds (e.g. EST_Unparsed/EST_Uninstantiated/
// EST_Unevaluated) that the comment below refers to. Verify upstream.
4866 // We don't know yet. It shouldn't matter what we pick here; no-one
4867 // should ever look at this.
4868 [[fallthrough]];
4869 case EST_None: case EST_MSAny: case EST_NoexceptFalse:
4870 CanonicalEPI.ExceptionSpec.Type = EST_None;
4871 break;
4872
4873 // A dynamic exception specification is almost always "not noexcept",
4874 // with the exception that a pack expansion might expand to no types.
4875 case EST_Dynamic: {
4876 bool AnyPacks = false;
4877 for (QualType ET : EPI.ExceptionSpec.Exceptions) {
4878 if (ET->getAs<PackExpansionType>())
4879 AnyPacks = true;
4880 ExceptionTypeStorage.push_back(getCanonicalType(ET));
4881 }
4882 if (!AnyPacks)
4883 CanonicalEPI.ExceptionSpec.Type = EST_None;
4884 else {
4885 CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
4886 CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
4887 }
4888 break;
4889 }
4890
4891 case EST_DynamicNone:
4892 case EST_BasicNoexcept:
4893 case EST_NoexceptTrue:
4894 case EST_NoThrow:
4895 CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
4896 break;
4897
// NOTE(review): src line 4898 missing -- presumably
// "case EST_DependentNoexcept:", given the unreachable message below.
4899 llvm_unreachable("dependent noexcept is already canonical");
4900 }
4901 } else {
// NOTE(review): src line 4902 missing -- the pre-C++17 else-branch body
// (clearing/normalizing CanonicalEPI.ExceptionSpec) was lost. Verify.
4903 }
4904
4905 // Adjust the canonical function result type.
4906 CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
4907 Canonical =
4908 getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);
4909
4910 // Get the new insert position for the node we care about.
4911 FunctionProtoType *NewIP =
4912 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
4913 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4914 }
4915
4916 // Compute the needed size to hold this FunctionProtoType and the
4917 // various trailing objects.
4918 auto ESH = FunctionProtoType::getExceptionSpecSize(
4919 EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
4920 size_t Size = FunctionProtoType::totalSizeToAlloc<
// NOTE(review): src lines 4921-4925 and 4929 missing -- the template argument
// list of totalSizeToAlloc and part of its call arguments were lost; the
// surviving arguments below are a partial list. Verify against upstream.
4926 EPI.requiresFunctionProtoTypeArmAttributes(), ESH.NumExceptionType,
4927 ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
4928 EPI.ExtParameterInfos ? NumArgs : 0,
4930 EPI.FunctionEffects.conditions().size());
4931
4932 auto *FTP = (FunctionProtoType *)Allocate(Size, alignof(FunctionProtoType));
// NOTE(review): src line 4933 missing -- presumably the declaration of
// "newEPI" (a copy of EPI) used in the placement-new below; as extracted,
// newEPI is undeclared. Verify against upstream.
4934 new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
4935 Types.push_back(FTP);
// A "Unique" sugar node (dependent-noexcept case) is deliberately NOT
// registered in the folding set, so later lookups won't return it.
4936 if (!Unique)
4937 FunctionProtoTypes.InsertNode(FTP, InsertPos);
4938 if (!EPI.FunctionEffects.empty())
4939 AnyFunctionEffects = true;
4940 return QualType(FTP, 0);
4941}
4942
4943QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
4944 llvm::FoldingSetNodeID ID;
4945 PipeType::Profile(ID, T, ReadOnly);
4946
4947 void *InsertPos = nullptr;
4948 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
4949 return QualType(PT, 0);
4950
4951 // If the pipe element type isn't canonical, this won't be a canonical type
4952 // either, so fill in the canonical type field.
4953 QualType Canonical;
4954 if (!T.isCanonical()) {
4955 Canonical = getPipeType(getCanonicalType(T), ReadOnly);
4956
4957 // Get the new insert position for the node we care about.
4958 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
4959 assert(!NewIP && "Shouldn't be in the map!");
4960 (void)NewIP;
4961 }
4962 auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly);
4963 Types.push_back(New);
4964 PipeTypes.InsertNode(New, InsertPos);
4965 return QualType(New, 0);
4966}
4967
// NOTE(review): the function signature (src line 4968) was dropped by
// extraction; only the body remains. Judging by the body, this presumably
// adjusts a string-literal element type, placing it in the OpenCL constant
// address space when OpenCL is enabled -- verify against upstream.
4969 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
4970 return LangOpts.OpenCL ? getAddrSpaceQualType(Ty, LangAS::opencl_constant)
4971 : Ty;
4972}
4973
// NOTE(review): signature line (src 4974) missing from extraction --
// presumably "QualType ASTContext::getReadPipeType(QualType T) const {".
// Forwards to getPipeType with ReadOnly = true.
4975 return getPipeType(T, true);
4976}
4977
// NOTE(review): signature line (src 4978) missing from extraction --
// presumably "QualType ASTContext::getWritePipeType(QualType T) const {".
// Forwards to getPipeType with ReadOnly = false.
4979 return getPipeType(T, false);
4980}
4981
4982QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const {
4983 llvm::FoldingSetNodeID ID;
4984 BitIntType::Profile(ID, IsUnsigned, NumBits);
4985
4986 void *InsertPos = nullptr;
4987 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
4988 return QualType(EIT, 0);
4989
4990 auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits);
4991 BitIntTypes.InsertNode(New, InsertPos);
4992 Types.push_back(New);
4993 return QualType(New, 0);
4994}
4995
// getDependentBitIntType - Return a uniqued _BitInt type whose bit width is
// given by a dependent expression. NOTE(review): the first line of the
// signature (src 4996, presumably "QualType ASTContext::getDependentBitIntType(
// bool IsUnsigned,") was dropped by extraction; only the trailing parameter
// line remains.
4997 Expr *NumBitsExpr) const {
// Only meaningful for instantiation-dependent widths; concrete widths go
// through getBitIntType.
4998 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
4999 llvm::FoldingSetNodeID ID;
5000 DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr);
5001
5002 void *InsertPos = nullptr;
5003 if (DependentBitIntType *Existing =
5004 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
5005 return QualType(Existing, 0);
5006
// Not cached yet: allocate a new node and register it.
5007 auto *New = new (*this, alignof(DependentBitIntType))
5008 DependentBitIntType(IsUnsigned, NumBitsExpr);
5009 DependentBitIntTypes.InsertNode(New, InsertPos);
5010
5011 Types.push_back(New);
5012 return QualType(New, 0);
5013}
5014
5015#ifndef NDEBUG
// Debug-only predicate. NOTE(review): the signature line (src 5016) was
// dropped by extraction -- presumably
// "static bool NeedsInjectedClassNameType(const RecordDecl *D) {"; verify
// against upstream. Returns true only for CXXRecordDecls that are class
// template partial specializations, or templated class patterns that are not
// themselves specializations.
5017 if (!isa<CXXRecordDecl>(D)) return false;
5018 const auto *RD = cast<CXXRecordDecl>(D);
5019 if (isa<ClassTemplatePartialSpecializationDecl>(RD))
5020 return true;
// The pattern of a class template (but not a specialization of one).
5021 if (RD->getDescribedClassTemplate() &&
5022 !isa<ClassTemplateSpecializationDecl>(RD))
5023 return true;
5024 return false;
5025}
5026#endif
5027
5028/// getInjectedClassNameType - Return the unique reference to the
5029/// injected class name type for the specified templated declaration.
// NOTE(review): src lines 5030 (the signature start, presumably
// "QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,") and
// 5032 (presumably a debug assert such as
// "assert(NeedsInjectedClassNameType(Decl));") were dropped by extraction.
5031 QualType TST) const {
// Three cases: the decl already has a type; a previous declaration has one
// we can share; or we must create a fresh InjectedClassNameType node.
5033 if (Decl->TypeForDecl) {
5034 assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
5035 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
5036 assert(PrevDecl->TypeForDecl && "previous declaration has no type");
// Redeclarations share the same injected-class-name type node.
5037 Decl->TypeForDecl = PrevDecl->TypeForDecl;
5038 assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
5039 } else {
5040 Type *newType = new (*this, alignof(InjectedClassNameType))
// NOTE(review): src line 5041 missing -- presumably the constructor call
// "InjectedClassNameType(Decl, TST);" completing the placement-new above.
5042 Decl->TypeForDecl = newType;
5043 Types.push_back(newType);
5044 }
5045 return QualType(Decl->TypeForDecl, 0);
5046}
5047
5048/// getTypeDeclType - Return the unique reference to the type for the
5049/// specified type declaration.