1//===--- SemaExprCXX.cpp - Semantic Analysis for Expressions --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// Implements semantic analysis for C++ expressions.
11///
12//===----------------------------------------------------------------------===//
13
14#include "TreeTransform.h"
15#include "TypeLocBuilder.h"
17#include "clang/AST/ASTLambda.h"
19#include "clang/AST/CharUnits.h"
20#include "clang/AST/DeclObjC.h"
21#include "clang/AST/ExprCXX.h"
23#include "clang/AST/ExprObjC.h"
25#include "clang/AST/Type.h"
26#include "clang/AST/TypeLoc.h"
34#include "clang/Sema/DeclSpec.h"
37#include "clang/Sema/Lookup.h"
39#include "clang/Sema/Scope.h"
41#include "clang/Sema/SemaCUDA.h"
44#include "clang/Sema/SemaObjC.h"
45#include "clang/Sema/SemaPPC.h"
46#include "clang/Sema/Template.h"
48#include "llvm/ADT/APInt.h"
49#include "llvm/ADT/STLExtras.h"
50#include "llvm/ADT/STLForwardCompat.h"
51#include "llvm/ADT/StringExtras.h"
52#include "llvm/Support/ErrorHandling.h"
53#include "llvm/Support/TypeSize.h"
54#include <optional>
55using namespace clang;
56using namespace sema;
57
59 SourceLocation NameLoc,
60 const IdentifierInfo &Name) {
62
63 // Convert the nested-name-specifier into a type.
65 switch (NNS->getKind()) {
68 Type = QualType(NNS->getAsType(), 0);
69 break;
70
72 // Strip off the last layer of the nested-name-specifier and build a
73 // typename type for it.
74 assert(NNS->getAsIdentifier() == &Name && "not a constructor name");
77 break;
78
83 llvm_unreachable("Nested name specifier is not a type for inheriting ctor");
84 }
85
86 // This reference to the type is located entirely at the location of the
87 // final identifier in the qualified-id.
90}
91
93 SourceLocation NameLoc, Scope *S,
94 CXXScopeSpec &SS, bool EnteringContext) {
95 CXXRecordDecl *CurClass = getCurrentClass(S, &SS);
96 assert(CurClass && &II == CurClass->getIdentifier() &&
97 "not a constructor name");
98
99 // When naming a constructor as a member of a dependent context (e.g., in a
100 // friend declaration or an inherited constructor declaration), form an
101 // unresolved "typename" type.
102 if (CurClass->isDependentContext() && !EnteringContext && SS.getScopeRep()) {
104 SS.getScopeRep(), &II);
105 return ParsedType::make(T);
106 }
107
108 if (SS.isNotEmpty() && RequireCompleteDeclContext(SS, CurClass))
109 return ParsedType();
110
111 // Find the injected-class-name declaration. Note that we make no attempt to
112 // diagnose cases where the injected-class-name is shadowed: the only
113 // declaration that can validly shadow the injected-class-name is a
114 // non-static data member, and if the class contains both a non-static data
115 // member and a constructor then it is ill-formed (we check that in
116 // CheckCompletedCXXClass).
117 CXXRecordDecl *InjectedClassName = nullptr;
118 for (NamedDecl *ND : CurClass->lookup(&II)) {
119 auto *RD = dyn_cast<CXXRecordDecl>(ND);
120 if (RD && RD->isInjectedClassName()) {
121 InjectedClassName = RD;
122 break;
123 }
124 }
125 if (!InjectedClassName) {
126 if (!CurClass->isInvalidDecl()) {
127 // FIXME: RequireCompleteDeclContext doesn't check dependent contexts
128 // properly. Work around it here for now.
130 diag::err_incomplete_nested_name_spec) << CurClass << SS.getRange();
131 }
132 return ParsedType();
133 }
134
135 QualType T = Context.getTypeDeclType(InjectedClassName);
136 DiagnoseUseOfDecl(InjectedClassName, NameLoc);
137 MarkAnyDeclReferenced(NameLoc, InjectedClassName, /*OdrUse=*/false);
138
139 return ParsedType::make(T);
140}
141
143 SourceLocation NameLoc, Scope *S,
144 CXXScopeSpec &SS, ParsedType ObjectTypePtr,
145 bool EnteringContext) {
146 // Determine where to perform name lookup.
147
148 // FIXME: This area of the standard is very messy, and the current
149 // wording is rather unclear about which scopes we search for the
150 // destructor name; see core issues 399 and 555. Issue 399 in
151 // particular shows where the current description of destructor name
152 // lookup is completely out of line with existing practice, e.g.,
153 // this appears to be ill-formed:
154 //
155 // namespace N {
156 // template <typename T> struct S {
157 // ~S();
158 // };
159 // }
160 //
161 // void f(N::S<int>* s) {
162 // s->N::S<int>::~S();
163 // }
164 //
165 // See also PR6358 and PR6359.
166 //
167 // For now, we accept all the cases in which the name given could plausibly
168 // be interpreted as a correct destructor name, issuing off-by-default
169 // extension diagnostics on the cases that don't strictly conform to the
170 // C++20 rules. This basically means we always consider looking in the
171 // nested-name-specifier prefix, the complete nested-name-specifier, and
172 // the scope, and accept if we find the expected type in any of the three
173 // places.
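  // Illustrative sketch of the forms considered (the names 'N' and 'S' are
  // hypothetical):
  //
  //   namespace N { struct S { ~S(); }; }
  //   void g(N::S *p) {
  //     p->N::S::~S(); // conforming: '~S' is found in the scope of 'N::S'
  //     p->N::~S();    // extension: '~S' is also searched for inside 'N'
  //   }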
174
175 if (SS.isInvalid())
176 return nullptr;
177
178 // Whether we've failed with a diagnostic already.
179 bool Failed = false;
180
183
184 // If we have an object type, it's because we are in a
185 // pseudo-destructor-expression or a member access expression, and
186 // we know what type we're looking for.
187 QualType SearchType =
188 ObjectTypePtr ? GetTypeFromParser(ObjectTypePtr) : QualType();
189
190 auto CheckLookupResult = [&](LookupResult &Found) -> ParsedType {
191 auto IsAcceptableResult = [&](NamedDecl *D) -> bool {
192 auto *Type = dyn_cast<TypeDecl>(D->getUnderlyingDecl());
193 if (!Type)
194 return false;
195
196 if (SearchType.isNull() || SearchType->isDependentType())
197 return true;
198
200 return Context.hasSameUnqualifiedType(T, SearchType);
201 };
202
203 unsigned NumAcceptableResults = 0;
204 for (NamedDecl *D : Found) {
205 if (IsAcceptableResult(D))
206 ++NumAcceptableResults;
207
208 // Don't list a class twice in the lookup failure diagnostic if it's
209 // found by both its injected-class-name and by the name in the enclosing
210 // scope.
211 if (auto *RD = dyn_cast<CXXRecordDecl>(D))
212 if (RD->isInjectedClassName())
213 D = cast<NamedDecl>(RD->getParent());
214
215 if (FoundDeclSet.insert(D).second)
216 FoundDecls.push_back(D);
217 }
218
219 // As an extension, attempt to "fix" an ambiguity by erasing all non-type
220 // results, and all non-matching results if we have a search type. It's not
221 // clear what the right behavior is if destructor lookup hits an ambiguity,
222 // but other compilers do generally accept at least some kinds of
223 // ambiguity.
224 if (Found.isAmbiguous() && NumAcceptableResults == 1) {
225 Diag(NameLoc, diag::ext_dtor_name_ambiguous);
226 LookupResult::Filter F = Found.makeFilter();
227 while (F.hasNext()) {
228 NamedDecl *D = F.next();
229 if (auto *TD = dyn_cast<TypeDecl>(D->getUnderlyingDecl()))
230 Diag(D->getLocation(), diag::note_destructor_type_here)
232 else
233 Diag(D->getLocation(), diag::note_destructor_nontype_here);
234
235 if (!IsAcceptableResult(D))
236 F.erase();
237 }
238 F.done();
239 }
240
241 if (Found.isAmbiguous())
242 Failed = true;
243
244 if (TypeDecl *Type = Found.getAsSingle<TypeDecl>()) {
245 if (IsAcceptableResult(Type)) {
247 MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
248 return CreateParsedType(
251 }
252 }
253
254 return nullptr;
255 };
256
257 bool IsDependent = false;
258
259 auto LookupInObjectType = [&]() -> ParsedType {
260 if (Failed || SearchType.isNull())
261 return nullptr;
262
263 IsDependent |= SearchType->isDependentType();
264
265 LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
266 DeclContext *LookupCtx = computeDeclContext(SearchType);
267 if (!LookupCtx)
268 return nullptr;
269 LookupQualifiedName(Found, LookupCtx);
270 return CheckLookupResult(Found);
271 };
272
273 auto LookupInNestedNameSpec = [&](CXXScopeSpec &LookupSS) -> ParsedType {
274 if (Failed)
275 return nullptr;
276
277 IsDependent |= isDependentScopeSpecifier(LookupSS);
278 DeclContext *LookupCtx = computeDeclContext(LookupSS, EnteringContext);
279 if (!LookupCtx)
280 return nullptr;
281
282 LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
283 if (RequireCompleteDeclContext(LookupSS, LookupCtx)) {
284 Failed = true;
285 return nullptr;
286 }
287 LookupQualifiedName(Found, LookupCtx);
288 return CheckLookupResult(Found);
289 };
290
291 auto LookupInScope = [&]() -> ParsedType {
292 if (Failed || !S)
293 return nullptr;
294
295 LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
296 LookupName(Found, S);
297 return CheckLookupResult(Found);
298 };
299
300 // C++2a [basic.lookup.qual]p6:
301 // In a qualified-id of the form
302 //
303 // nested-name-specifier[opt] type-name :: ~ type-name
304 //
305 // the second type-name is looked up in the same scope as the first.
306 //
307 // We interpret this as meaning that if you do a dual-scope lookup for the
308 // first name, you also do a dual-scope lookup for the second name, per
309 // C++ [basic.lookup.classref]p4:
310 //
311 // If the id-expression in a class member access is a qualified-id of the
312 // form
313 //
314 // class-name-or-namespace-name :: ...
315 //
316 // the class-name-or-namespace-name following the . or -> is first looked
317 // up in the class of the object expression and the name, if found, is used.
318 // Otherwise, it is looked up in the context of the entire
319 // postfix-expression.
320 //
321 // This looks in the same scopes as for an unqualified destructor name:
322 //
323 // C++ [basic.lookup.classref]p3:
324 // If the unqualified-id is ~ type-name, the type-name is looked up
325 // in the context of the entire postfix-expression. If the type T
326 // of the object expression is of a class type C, the type-name is
327 // also looked up in the scope of class C. At least one of the
328 // lookups shall find a name that refers to cv T.
329 //
330 // FIXME: The intent is unclear here. Should type-name::~type-name look in
331 // the scope anyway if it finds a non-matching name declared in the class?
332 // If both lookups succeed and find a dependent result, which result should
333 // we retain? (Same question for p->~type-name().)
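  // Illustrative example of the dual-scope lookup described above ('T' is a
  // hypothetical class):
  //
  //   struct T { };
  //   void f(T *p) {
  //     p->T::~T(); // the second 'T' is found both in the scope of class 'T'
  //                 // (as the injected-class-name) and in the enclosing scope
  //   }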
334
335 if (NestedNameSpecifier *Prefix =
336 SS.isSet() ? SS.getScopeRep()->getPrefix() : nullptr) {
337 // This is
338 //
339 // nested-name-specifier type-name :: ~ type-name
340 //
341 // Look for the second type-name in the nested-name-specifier.
342 CXXScopeSpec PrefixSS;
343 PrefixSS.Adopt(NestedNameSpecifierLoc(Prefix, SS.location_data()));
344 if (ParsedType T = LookupInNestedNameSpec(PrefixSS))
345 return T;
346 } else {
347 // This is one of
348 //
349 // type-name :: ~ type-name
350 // ~ type-name
351 //
352 // Look in the scope and (if any) the object type.
353 if (ParsedType T = LookupInScope())
354 return T;
355 if (ParsedType T = LookupInObjectType())
356 return T;
357 }
358
359 if (Failed)
360 return nullptr;
361
362 if (IsDependent) {
363 // We didn't find our type, but that's OK: it's dependent anyway.
364
365 // FIXME: What if we have no nested-name-specifier?
366 QualType T =
368 SS.getWithLocInContext(Context), II, NameLoc);
369 return ParsedType::make(T);
370 }
371
372 // The remaining cases are all non-standard extensions imitating the behavior
373 // of various other compilers.
374 unsigned NumNonExtensionDecls = FoundDecls.size();
375
376 if (SS.isSet()) {
377 // For compatibility with older broken C++ rules and existing code,
378 //
379 // nested-name-specifier :: ~ type-name
380 //
381 // also looks for type-name within the nested-name-specifier.
382 if (ParsedType T = LookupInNestedNameSpec(SS)) {
383 Diag(SS.getEndLoc(), diag::ext_dtor_named_in_wrong_scope)
384 << SS.getRange()
386 ("::" + II.getName()).str());
387 return T;
388 }
389
390 // For compatibility with other compilers and older versions of Clang,
391 //
392 // nested-name-specifier type-name :: ~ type-name
393 //
394 // also looks for type-name in the scope. Unfortunately, we can't
395 // reasonably apply this fallback for dependent nested-name-specifiers.
396 if (SS.isValid() && SS.getScopeRep()->getPrefix()) {
397 if (ParsedType T = LookupInScope()) {
398 Diag(SS.getEndLoc(), diag::ext_qualified_dtor_named_in_lexical_scope)
400 Diag(FoundDecls.back()->getLocation(), diag::note_destructor_type_here)
402 return T;
403 }
404 }
405 }
406
407 // We didn't find anything matching; tell the user what we did find (if
408 // anything).
409
410 // Don't tell the user about declarations we shouldn't have found.
411 FoundDecls.resize(NumNonExtensionDecls);
412
413 // List types before non-types.
414 std::stable_sort(FoundDecls.begin(), FoundDecls.end(),
415 [](NamedDecl *A, NamedDecl *B) {
416 return isa<TypeDecl>(A->getUnderlyingDecl()) >
417 isa<TypeDecl>(B->getUnderlyingDecl());
418 });
419
420 // Suggest a fixit to properly name the destroyed type.
421 auto MakeFixItHint = [&]{
422 const CXXRecordDecl *Destroyed = nullptr;
423 // FIXME: If we have a scope specifier, suggest its last component?
424 if (!SearchType.isNull())
425 Destroyed = SearchType->getAsCXXRecordDecl();
426 else if (S)
427 Destroyed = dyn_cast_or_null<CXXRecordDecl>(S->getEntity());
428 if (Destroyed)
430 Destroyed->getNameAsString());
431 return FixItHint();
432 };
433
434 if (FoundDecls.empty()) {
435 // FIXME: Attempt typo-correction?
436 Diag(NameLoc, diag::err_undeclared_destructor_name)
437 << &II << MakeFixItHint();
438 } else if (!SearchType.isNull() && FoundDecls.size() == 1) {
439 if (auto *TD = dyn_cast<TypeDecl>(FoundDecls[0]->getUnderlyingDecl())) {
440 assert(!SearchType.isNull() &&
441 "should only reject a type result if we have a search type");
443 Diag(NameLoc, diag::err_destructor_expr_type_mismatch)
444 << T << SearchType << MakeFixItHint();
445 } else {
446 Diag(NameLoc, diag::err_destructor_expr_nontype)
447 << &II << MakeFixItHint();
448 }
449 } else {
450 Diag(NameLoc, SearchType.isNull() ? diag::err_destructor_name_nontype
451 : diag::err_destructor_expr_mismatch)
452 << &II << SearchType << MakeFixItHint();
453 }
454
455 for (NamedDecl *FoundD : FoundDecls) {
456 if (auto *TD = dyn_cast<TypeDecl>(FoundD->getUnderlyingDecl()))
457 Diag(FoundD->getLocation(), diag::note_destructor_type_here)
459 else
460 Diag(FoundD->getLocation(), diag::note_destructor_nontype_here)
461 << FoundD;
462 }
463
464 return nullptr;
465}
466
468 ParsedType ObjectType) {
470 return nullptr;
471
473 Diag(DS.getTypeSpecTypeLoc(), diag::err_decltype_auto_invalid);
474 return nullptr;
475 }
476
478 "unexpected type in getDestructorType");
480
481 // If we know the type of the object, check that the correct destructor
482 // type was named now; we can give better diagnostics this way.
483 QualType SearchType = GetTypeFromParser(ObjectType);
484 if (!SearchType.isNull() && !SearchType->isDependentType() &&
485 !Context.hasSameUnqualifiedType(T, SearchType)) {
486 Diag(DS.getTypeSpecTypeLoc(), diag::err_destructor_expr_type_mismatch)
487 << T << SearchType;
488 return nullptr;
489 }
490
491 return ParsedType::make(T);
492}
493
495 const UnqualifiedId &Name, bool IsUDSuffix) {
496 assert(Name.getKind() == UnqualifiedIdKind::IK_LiteralOperatorId);
497 if (!IsUDSuffix) {
498 // [over.literal] p8
499 //
500 // double operator""_Bq(long double); // OK: not a reserved identifier
501 // double operator"" _Bq(long double); // ill-formed, no diagnostic required
502 const IdentifierInfo *II = Name.Identifier;
504 SourceLocation Loc = Name.getEndLoc();
506 if (auto Hint = FixItHint::CreateReplacement(
507 Name.getSourceRange(),
508 (StringRef("operator\"\"") + II->getName()).str());
509 isReservedInAllContexts(Status)) {
510 Diag(Loc, diag::warn_reserved_extern_symbol)
511 << II << static_cast<int>(Status) << Hint;
512 } else {
513 Diag(Loc, diag::warn_deprecated_literal_operator_id) << II << Hint;
514 }
515 }
516 }
517
518 if (!SS.isValid())
519 return false;
520
521 switch (SS.getScopeRep()->getKind()) {
525 // Per C++11 [over.literal]p2, literal operators can only be declared at
526 // namespace scope. Therefore, this unqualified-id cannot name anything.
527 // Reject it early, because we have no AST representation for this in the
528 // case where the scope is dependent.
529 Diag(Name.getBeginLoc(), diag::err_literal_operator_id_outside_namespace)
530 << SS.getScopeRep();
531 return true;
532
537 return false;
538 }
539
540 llvm_unreachable("unknown nested name specifier kind");
541}
542
544 SourceLocation TypeidLoc,
545 TypeSourceInfo *Operand,
546 SourceLocation RParenLoc) {
547 // C++ [expr.typeid]p4:
548 // The top-level cv-qualifiers of the lvalue expression or the type-id
549 // that is the operand of typeid are always ignored.
550 // If the type of the type-id is a class type or a reference to a class
551 // type, the class shall be completely-defined.
552 Qualifiers Quals;
553 QualType T
554 = Context.getUnqualifiedArrayType(Operand->getType().getNonReferenceType(),
555 Quals);
556 if (T->getAs<RecordType>() &&
557 RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid))
558 return ExprError();
559
561 return ExprError(Diag(TypeidLoc, diag::err_variably_modified_typeid) << T);
562
563 if (CheckQualifiedFunctionForTypeId(T, TypeidLoc))
564 return ExprError();
565
566 return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), Operand,
567 SourceRange(TypeidLoc, RParenLoc));
568}
569
571 SourceLocation TypeidLoc,
572 Expr *E,
573 SourceLocation RParenLoc) {
574 bool WasEvaluated = false;
575 if (E && !E->isTypeDependent()) {
576 if (E->hasPlaceholderType()) {
578 if (result.isInvalid()) return ExprError();
579 E = result.get();
580 }
581
582 QualType T = E->getType();
583 if (const RecordType *RecordT = T->getAs<RecordType>()) {
584 CXXRecordDecl *RecordD = cast<CXXRecordDecl>(RecordT->getDecl());
585 // C++ [expr.typeid]p3:
586 // [...] If the type of the expression is a class type, the class
587 // shall be completely-defined.
588 if (RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid))
589 return ExprError();
590
591 // C++ [expr.typeid]p3:
592 // When typeid is applied to an expression other than a glvalue of a
593 // polymorphic class type [...] [the] expression is an unevaluated
594 // operand. [...]
595 if (RecordD->isPolymorphic() && E->isGLValue()) {
596 if (isUnevaluatedContext()) {
597 // The operand was processed in unevaluated context, switch the
598 // context and recheck the subexpression.
600 if (Result.isInvalid())
601 return ExprError();
602 E = Result.get();
603 }
604
605 // We require a vtable to query the type at run time.
606 MarkVTableUsed(TypeidLoc, RecordD);
607 WasEvaluated = true;
608 }
609 }
610
612 if (Result.isInvalid())
613 return ExprError();
614 E = Result.get();
615
616 // C++ [expr.typeid]p4:
617 // [...] If the type of the type-id is a reference to a possibly
618 // cv-qualified type, the result of the typeid expression refers to a
619 // std::type_info object representing the cv-unqualified referenced
620 // type.
621 Qualifiers Quals;
622 QualType UnqualT = Context.getUnqualifiedArrayType(T, Quals);
623 if (!Context.hasSameType(T, UnqualT)) {
624 T = UnqualT;
625 E = ImpCastExprToType(E, UnqualT, CK_NoOp, E->getValueKind()).get();
626 }
627 }
628
630 return ExprError(Diag(TypeidLoc, diag::err_variably_modified_typeid)
631 << E->getType());
632 else if (!inTemplateInstantiation() &&
633 E->HasSideEffects(Context, WasEvaluated)) {
634 // The expression operand for typeid is in an unevaluated expression
635 // context, so side effects could result in unintended consequences.
636 Diag(E->getExprLoc(), WasEvaluated
637 ? diag::warn_side_effects_typeid
638 : diag::warn_side_effects_unevaluated_context);
639 }
640
641 return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), E,
642 SourceRange(TypeidLoc, RParenLoc));
643}
644
645/// ActOnCXXTypeid - Parse typeid( type-id ) or typeid (expression);
648 bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
649 // typeid is not supported in OpenCL.
650 if (getLangOpts().OpenCLCPlusPlus) {
651 return ExprError(Diag(OpLoc, diag::err_openclcxx_not_supported)
652 << "typeid");
653 }
654
655 // Find the std::type_info type.
656 if (!getStdNamespace())
657 return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));
658
659 if (!CXXTypeInfoDecl) {
660 IdentifierInfo *TypeInfoII = &PP.getIdentifierTable().get("type_info");
661 LookupResult R(*this, TypeInfoII, SourceLocation(), LookupTagName);
664 // Microsoft's typeinfo doesn't have type_info in std but in the global
665 // namespace if _HAS_EXCEPTIONS is defined to 0. See PR13153.
666 if (!CXXTypeInfoDecl && LangOpts.MSVCCompat) {
669 }
670 if (!CXXTypeInfoDecl)
671 return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));
672 }
673
674 if (!getLangOpts().RTTI) {
675 return ExprError(Diag(OpLoc, diag::err_no_typeid_with_fno_rtti));
676 }
677
679
680 if (isType) {
681 // The operand is a type; handle it as such.
682 TypeSourceInfo *TInfo = nullptr;
684 &TInfo);
685 if (T.isNull())
686 return ExprError();
687
688 if (!TInfo)
689 TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc);
690
691 return BuildCXXTypeId(TypeInfoType, OpLoc, TInfo, RParenLoc);
692 }
693
694 // The operand is an expression.
696 BuildCXXTypeId(TypeInfoType, OpLoc, (Expr *)TyOrExpr, RParenLoc);
697
698 if (!getLangOpts().RTTIData && !Result.isInvalid())
699 if (auto *CTE = dyn_cast<CXXTypeidExpr>(Result.get()))
700 if (CTE->isPotentiallyEvaluated() && !CTE->isMostDerived(Context))
701 Diag(OpLoc, diag::warn_no_typeid_with_rtti_disabled)
702 << (getDiagnostics().getDiagnosticOptions().getFormat() ==
704 return Result;
705}
706
707/// Grabs __declspec(uuid()) off a type; collects nothing if the type cannot
708/// be resolved to a declaration carrying a GUID.
709static void
712 // Optionally remove one level of pointer, reference or array indirection.
713 const Type *Ty = QT.getTypePtr();
714 if (QT->isPointerOrReferenceType())
715 Ty = QT->getPointeeType().getTypePtr();
716 else if (QT->isArrayType())
717 Ty = Ty->getBaseElementTypeUnsafe();
718
719 const auto *TD = Ty->getAsTagDecl();
720 if (!TD)
721 return;
722
723 if (const auto *Uuid = TD->getMostRecentDecl()->getAttr<UuidAttr>()) {
724 UuidAttrs.insert(Uuid);
725 return;
726 }
727
728 // __uuidof can grab UUIDs from template arguments.
729 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(TD)) {
730 const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
731 for (const TemplateArgument &TA : TAL.asArray()) {
732 const UuidAttr *UuidForTA = nullptr;
733 if (TA.getKind() == TemplateArgument::Type)
734 getUuidAttrOfType(SemaRef, TA.getAsType(), UuidAttrs);
735 else if (TA.getKind() == TemplateArgument::Declaration)
736 getUuidAttrOfType(SemaRef, TA.getAsDecl()->getType(), UuidAttrs);
737
738 if (UuidForTA)
739 UuidAttrs.insert(UuidForTA);
740 }
741 }
742}
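// Illustrative usage of the MS extension handled above (the interface name and
// GUID are hypothetical):
//
//   struct __declspec(uuid("12345678-1234-1234-1234-123456789abc")) IFoo;
//   __uuidof(IFoo)   // resolves to the GUID named in the uuid attribute
//   __uuidof(IFoo *) // one level of pointer indirection is stripped first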
743
745 SourceLocation TypeidLoc,
746 TypeSourceInfo *Operand,
747 SourceLocation RParenLoc) {
748 MSGuidDecl *Guid = nullptr;
749 if (!Operand->getType()->isDependentType()) {
751 getUuidAttrOfType(*this, Operand->getType(), UuidAttrs);
752 if (UuidAttrs.empty())
753 return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid));
754 if (UuidAttrs.size() > 1)
755 return ExprError(Diag(TypeidLoc, diag::err_uuidof_with_multiple_guids));
756 Guid = UuidAttrs.back()->getGuidDecl();
757 }
758
759 return new (Context)
760 CXXUuidofExpr(Type, Operand, Guid, SourceRange(TypeidLoc, RParenLoc));
761}
762
764 Expr *E, SourceLocation RParenLoc) {
765 MSGuidDecl *Guid = nullptr;
766 if (!E->getType()->isDependentType()) {
768 // A null pointer results in {00000000-0000-0000-0000-000000000000}.
770 } else {
772 getUuidAttrOfType(*this, E->getType(), UuidAttrs);
773 if (UuidAttrs.empty())
774 return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid));
775 if (UuidAttrs.size() > 1)
776 return ExprError(Diag(TypeidLoc, diag::err_uuidof_with_multiple_guids));
777 Guid = UuidAttrs.back()->getGuidDecl();
778 }
779 }
780
781 return new (Context)
782 CXXUuidofExpr(Type, E, Guid, SourceRange(TypeidLoc, RParenLoc));
783}
784
785/// ActOnCXXUuidof - Parse __uuidof( type-id ) or __uuidof (expression);
788 bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
789 QualType GuidType = Context.getMSGuidType();
790 GuidType.addConst();
791
792 if (isType) {
793 // The operand is a type; handle it as such.
794 TypeSourceInfo *TInfo = nullptr;
796 &TInfo);
797 if (T.isNull())
798 return ExprError();
799
800 if (!TInfo)
801 TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc);
802
803 return BuildCXXUuidof(GuidType, OpLoc, TInfo, RParenLoc);
804 }
805
806 // The operand is an expression.
807 return BuildCXXUuidof(GuidType, OpLoc, (Expr*)TyOrExpr, RParenLoc);
808}
809
812 assert((Kind == tok::kw_true || Kind == tok::kw_false) &&
813 "Unknown C++ Boolean value!");
814 return new (Context)
815 CXXBoolLiteralExpr(Kind == tok::kw_true, Context.BoolTy, OpLoc);
816}
817
821}
822
825 bool IsThrownVarInScope = false;
826 if (Ex) {
827 // C++0x [class.copymove]p31:
828 // When certain criteria are met, an implementation is allowed to omit the
829 // copy/move construction of a class object [...]
830 //
831 // - in a throw-expression, when the operand is the name of a
832 // non-volatile automatic object (other than a function or catch-
833 // clause parameter) whose scope does not extend beyond the end of the
834 // innermost enclosing try-block (if there is one), the copy/move
835 // operation from the operand to the exception object (15.1) can be
836 // omitted by constructing the automatic object directly into the
837 // exception object
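    // Illustrative sketch ('Widget' is a hypothetical type):
    //
    //   void f() {
    //     Widget w;
    //     throw w; // 'w' may be constructed directly into the exception
    //              // object, omitting the copy/move
    //   }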
838 if (const auto *DRE = dyn_cast<DeclRefExpr>(Ex->IgnoreParens()))
839 if (const auto *Var = dyn_cast<VarDecl>(DRE->getDecl());
840 Var && Var->hasLocalStorage() &&
841 !Var->getType().isVolatileQualified()) {
842 for (; S; S = S->getParent()) {
843 if (S->isDeclScope(Var)) {
844 IsThrownVarInScope = true;
845 break;
846 }
847
848 // FIXME: Many of the scope checks here seem incorrect.
849 if (S->getFlags() &
852 break;
853 }
854 }
855 }
856
857 return BuildCXXThrow(OpLoc, Ex, IsThrownVarInScope);
858}
859
861 bool IsThrownVarInScope) {
862 const llvm::Triple &T = Context.getTargetInfo().getTriple();
863 const bool IsOpenMPGPUTarget =
864 getLangOpts().OpenMPIsTargetDevice && (T.isNVPTX() || T.isAMDGCN());
865 // Don't report an error if 'throw' is used in system headers or in an OpenMP
866 // target region compiled for a GPU architecture.
867 if (!IsOpenMPGPUTarget && !getLangOpts().CXXExceptions &&
868 !getSourceManager().isInSystemHeader(OpLoc) && !getLangOpts().CUDA) {
869 // Delay error emission for the OpenMP device code.
870 targetDiag(OpLoc, diag::err_exceptions_disabled) << "throw";
871 }
872
873 // In OpenMP target regions, we replace 'throw' with a trap on GPU targets.
874 if (IsOpenMPGPUTarget)
875 targetDiag(OpLoc, diag::warn_throw_not_valid_on_target) << T.str();
876
877 // Exceptions aren't allowed in CUDA device code.
878 if (getLangOpts().CUDA)
879 CUDA().DiagIfDeviceCode(OpLoc, diag::err_cuda_device_exceptions)
880 << "throw" << llvm::to_underlying(CUDA().CurrentTarget());
881
882 if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope())
883 Diag(OpLoc, diag::err_omp_simd_region_cannot_use_stmt) << "throw";
884
885 // Exceptions that escape a compute construct are ill-formed.
886 if (getLangOpts().OpenACC && getCurScope() &&
887 getCurScope()->isInOpenACCComputeConstructScope(Scope::TryScope))
888 Diag(OpLoc, diag::err_acc_branch_in_out_compute_construct)
889 << /*throw*/ 2 << /*out of*/ 0;
890
891 if (Ex && !Ex->isTypeDependent()) {
892 // Initialize the exception result. This implicitly weeds out
893 // abstract types or types with inaccessible copy constructors.
894
895 // C++0x [class.copymove]p31:
896 // When certain criteria are met, an implementation is allowed to omit the
897 // copy/move construction of a class object [...]
898 //
899 // - in a throw-expression, when the operand is the name of a
900 // non-volatile automatic object (other than a function or
901 // catch-clause
902 // parameter) whose scope does not extend beyond the end of the
903 // innermost enclosing try-block (if there is one), the copy/move
904 // operation from the operand to the exception object (15.1) can be
905 // omitted by constructing the automatic object directly into the
906 // exception object
907 NamedReturnInfo NRInfo =
908 IsThrownVarInScope ? getNamedReturnInfo(Ex) : NamedReturnInfo();
909
910 QualType ExceptionObjectTy = Context.getExceptionObjectType(Ex->getType());
911 if (CheckCXXThrowOperand(OpLoc, ExceptionObjectTy, Ex))
912 return ExprError();
913
914 InitializedEntity Entity =
915 InitializedEntity::InitializeException(OpLoc, ExceptionObjectTy);
916 ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRInfo, Ex);
917 if (Res.isInvalid())
918 return ExprError();
919 Ex = Res.get();
920 }
921
922 // PPC MMA non-pointer types are not allowed as throw expr types.
923 if (Ex && Context.getTargetInfo().getTriple().isPPC64())
924 PPC().CheckPPCMMAType(Ex->getType(), Ex->getBeginLoc());
925
926 return new (Context)
927 CXXThrowExpr(Ex, Context.VoidTy, OpLoc, IsThrownVarInScope);
928}
929
930static void
932 llvm::DenseMap<CXXRecordDecl *, unsigned> &SubobjectsSeen,
933 llvm::SmallPtrSetImpl<CXXRecordDecl *> &VBases,
934 llvm::SetVector<CXXRecordDecl *> &PublicSubobjectsSeen,
935 bool ParentIsPublic) {
936 for (const CXXBaseSpecifier &BS : RD->bases()) {
937 CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
938 bool NewSubobject;
939 // Virtual bases constitute the same subobject. Non-virtual bases are
940 // always distinct subobjects.
941 if (BS.isVirtual())
942 NewSubobject = VBases.insert(BaseDecl).second;
943 else
944 NewSubobject = true;
945
946 if (NewSubobject)
947 ++SubobjectsSeen[BaseDecl];
948
949 // Only add subobjects which have public access throughout the entire chain.
950 bool PublicPath = ParentIsPublic && BS.getAccessSpecifier() == AS_public;
951 if (PublicPath)
952 PublicSubobjectsSeen.insert(BaseDecl);
953
954 // Recurse on to each base subobject.
955 collectPublicBases(BaseDecl, SubobjectsSeen, VBases, PublicSubobjectsSeen,
956 PublicPath);
957 }
958}
959
962 llvm::DenseMap<CXXRecordDecl *, unsigned> SubobjectsSeen;
963 llvm::SmallSet<CXXRecordDecl *, 2> VBases;
964 llvm::SetVector<CXXRecordDecl *> PublicSubobjectsSeen;
965 SubobjectsSeen[RD] = 1;
966 PublicSubobjectsSeen.insert(RD);
967 collectPublicBases(RD, SubobjectsSeen, VBases, PublicSubobjectsSeen,
968 /*ParentIsPublic=*/true);
969
970 for (CXXRecordDecl *PublicSubobject : PublicSubobjectsSeen) {
971 // Skip ambiguous objects.
972 if (SubobjectsSeen[PublicSubobject] > 1)
973 continue;
974
975 Objects.push_back(PublicSubobject);
976 }
977}
978
980 QualType ExceptionObjectTy, Expr *E) {
981 // If the type of the exception would be an incomplete type or a pointer
982 // to an incomplete type other than (cv) void the program is ill-formed.
983 QualType Ty = ExceptionObjectTy;
984 bool isPointer = false;
985 if (const PointerType* Ptr = Ty->getAs<PointerType>()) {
986 Ty = Ptr->getPointeeType();
987 isPointer = true;
988 }
989
990 // Cannot throw WebAssembly reference type.
992 Diag(ThrowLoc, diag::err_wasm_reftype_tc) << 0 << E->getSourceRange();
993 return true;
994 }
995
996 // Cannot throw WebAssembly table.
997 if (isPointer && Ty.isWebAssemblyReferenceType()) {
998 Diag(ThrowLoc, diag::err_wasm_table_art) << 2 << E->getSourceRange();
999 return true;
1000 }
1001
1002 if (!isPointer || !Ty->isVoidType()) {
1003 if (RequireCompleteType(ThrowLoc, Ty,
1004 isPointer ? diag::err_throw_incomplete_ptr
1005 : diag::err_throw_incomplete,
1006 E->getSourceRange()))
1007 return true;
1008
1009 if (!isPointer && Ty->isSizelessType()) {
1010 Diag(ThrowLoc, diag::err_throw_sizeless) << Ty << E->getSourceRange();
1011 return true;
1012 }
1013
1014 if (RequireNonAbstractType(ThrowLoc, ExceptionObjectTy,
1015 diag::err_throw_abstract_type, E))
1016 return true;
1017 }
1018
1019 // If the exception has class type, we need additional handling.
1021 if (!RD)
1022 return false;
1023
1024 // If we are throwing a polymorphic class type or pointer thereof,
1025 // exception handling will make use of the vtable.
1026 MarkVTableUsed(ThrowLoc, RD);
1027
1028 // If a pointer is thrown, the referenced object will not be destroyed.
1029 if (isPointer)
1030 return false;
1031
1032 // If the class has a destructor, we must be able to call it.
1033 if (!RD->hasIrrelevantDestructor()) {
1037 PDiag(diag::err_access_dtor_exception) << Ty);
1039 return true;
1040 }
1041 }
1042
1043 // The MSVC ABI creates a list of all types which can catch the exception
1044 // object. This list also references the appropriate copy constructor to call
1045 // if the object is caught by value and has a non-trivial copy constructor.
1047 // We are only interested in the public, unambiguous bases contained within
1048 // the exception object. Bases which are ambiguous or otherwise
1049 // inaccessible are not catchable types.
1050 llvm::SmallVector<CXXRecordDecl *, 2> UnambiguousPublicSubobjects;
1051 getUnambiguousPublicSubobjects(RD, UnambiguousPublicSubobjects);
1052
1053 for (CXXRecordDecl *Subobject : UnambiguousPublicSubobjects) {
1054 // Attempt to lookup the copy constructor. Various pieces of machinery
1055 // will spring into action, like template instantiation, which means this
1056 // cannot be a simple walk of the class's decls. Instead, we must perform
1057 // lookup and overload resolution.
1058 CXXConstructorDecl *CD = LookupCopyingConstructor(Subobject, 0);
1059 if (!CD || CD->isDeleted())
1060 continue;
1061
1062 // Mark the constructor referenced as it is used by this throw expression.
1064
1065 // Skip this copy constructor if it is trivial; we don't need to record it
1066 // in the catchable type data.
1067 if (CD->isTrivial())
1068 continue;
1069
1070 // The copy constructor is non-trivial, create a mapping from this class
1071 // type to this constructor.
1072 // N.B. The selection of copy constructor is not sensitive to this
1073 // particular throw-site. Lookup will be performed at the catch-site to
1074 // ensure that the copy constructor is, in fact, accessible (via
1075 // friendship or any other means).
1077
1078 // We don't keep the instantiated default argument expressions around so
1079 // we must rebuild them here.
1080 for (unsigned I = 1, E = CD->getNumParams(); I != E; ++I) {
1081 if (CheckCXXDefaultArgExpr(ThrowLoc, CD, CD->getParamDecl(I)))
1082 return true;
1083 }
1084 }
1085 }
1086
1087 // Under the Itanium C++ ABI, memory for the exception object is allocated by
1088 // the runtime with no ability for the compiler to request additional
1089 // alignment. Warn if the exception type requires alignment beyond the minimum
1090 // guaranteed by the target C++ runtime.
1092 CharUnits TypeAlign = Context.getTypeAlignInChars(Ty);
1093 CharUnits ExnObjAlign = Context.getExnObjectAlignment();
1094 if (ExnObjAlign < TypeAlign) {
1095 Diag(ThrowLoc, diag::warn_throw_underaligned_obj);
1096 Diag(ThrowLoc, diag::note_throw_underaligned_obj)
1097 << Ty << (unsigned)TypeAlign.getQuantity()
1098 << (unsigned)ExnObjAlign.getQuantity();
1099 }
1100 }
1101 if (!isPointer && getLangOpts().AssumeNothrowExceptionDtor) {
1102 if (CXXDestructorDecl *Dtor = RD->getDestructor()) {
1103 auto Ty = Dtor->getType();
1104 if (auto *FT = Ty.getTypePtr()->getAs<FunctionProtoType>()) {
1105 if (!isUnresolvedExceptionSpec(FT->getExceptionSpecType()) &&
1106 !FT->isNothrow())
1107 Diag(ThrowLoc, diag::err_throw_object_throwing_dtor) << RD;
1108 }
1109 }
1110 }
1111
1112 return false;
1113}
1114
1116 ArrayRef<FunctionScopeInfo *> FunctionScopes, QualType ThisTy,
1117 DeclContext *CurSemaContext, ASTContext &ASTCtx) {
1118
1119 QualType ClassType = ThisTy->getPointeeType();
1120 LambdaScopeInfo *CurLSI = nullptr;
1121 DeclContext *CurDC = CurSemaContext;
1122
1123 // Iterate through the stack of lambdas starting from the innermost lambda to
1124 // the outermost lambda, checking if '*this' is ever captured by copy - since
1125 // that could change the cv-qualifiers of the '*this' object.
1126 // The object referred to by '*this' starts out with the cv-qualifiers of its
1127 // member function. We then start with the innermost lambda and iterate
1128 // outward checking to see if any lambda performs a by-copy capture of '*this'
1129 // - and if so, any nested lambda must respect the 'constness' of that
1130 // capturing lambda's call operator.
1131 //
1132
1133 // Since the FunctionScopeInfo stack is representative of the lexical
1134 // nesting of the lambda expressions during initial parsing (and is the best
1135 // place for querying information about captures of lambdas that are
1136 // partially processed) and perhaps during instantiation of function templates
1137 // that contain lambda expressions that need to be transformed BUT not
1138 // necessarily during instantiation of a nested generic lambda's function call
1139 // operator (which might even be instantiated at the end of the TU) - at which
1140 // time the DeclContext tree is mature enough to query capture information
1141 // reliably - we use a two pronged approach to walk through all the lexically
1142 // enclosing lambda expressions:
1143 //
1144 // 1) Climb down the FunctionScopeInfo stack as long as each item represents
1145 // a Lambda (i.e. LambdaScopeInfo) AND each LSI's 'closure-type' is lexically
1146 // enclosed by the call-operator of the LSI below it on the stack (while
1147 // tracking the enclosing DC for step 2 if needed). Note the topmost LSI on
1148 // the stack represents the innermost lambda.
1149 //
1150 // 2) If we run out of enclosing LSI's, check if the enclosing DeclContext
1151 // represents a lambda's call operator. If it does, we must be instantiating
1152 // a generic lambda's call operator (represented by the Current LSI, and
1153 // should be the only scenario where an inconsistency between the LSI and the
1154 // DeclContext should occur), so climb out the DeclContexts if they
1155 // represent lambdas, while querying the corresponding closure types
1156 // regarding capture information.
1157
1158 // 1) Climb down the function scope info stack.
1159 for (int I = FunctionScopes.size();
1160 I-- && isa<LambdaScopeInfo>(FunctionScopes[I]) &&
1161 (!CurLSI || !CurLSI->Lambda || CurLSI->Lambda->getDeclContext() ==
1162 cast<LambdaScopeInfo>(FunctionScopes[I])->CallOperator);
1163 CurDC = getLambdaAwareParentOfDeclContext(CurDC)) {
1164 CurLSI = cast<LambdaScopeInfo>(FunctionScopes[I]);
1165
1166 if (!CurLSI->isCXXThisCaptured())
1167 continue;
1168
1169 auto C = CurLSI->getCXXThisCapture();
1170
1171 if (C.isCopyCapture()) {
1172 if (CurLSI->lambdaCaptureShouldBeConst())
1173 ClassType.addConst();
1174 return ASTCtx.getPointerType(ClassType);
1175 }
1176 }
1177
1178 // 2) We've run out of ScopeInfos; check (1) whether CurDC is a lambda (which
1179 // can happen during instantiation of its nested generic lambda call
1180 // operator), and (2) whether we're in a lambda scope (lambda body).
1181 if (CurLSI && isLambdaCallOperator(CurDC)) {
1183 "While computing 'this' capture-type for a generic lambda, when we "
1184 "run out of enclosing LSI's, yet the enclosing DC is a "
1185 "lambda-call-operator we must be (i.e. Current LSI) in a generic "
1186 "lambda call oeprator");
1187 assert(CurDC == getLambdaAwareParentOfDeclContext(CurLSI->CallOperator));
1188
1189 auto IsThisCaptured =
1190 [](CXXRecordDecl *Closure, bool &IsByCopy, bool &IsConst) {
1191 IsConst = false;
1192 IsByCopy = false;
1193 for (auto &&C : Closure->captures()) {
1194 if (C.capturesThis()) {
1195 if (C.getCaptureKind() == LCK_StarThis)
1196 IsByCopy = true;
1197 if (Closure->getLambdaCallOperator()->isConst())
1198 IsConst = true;
1199 return true;
1200 }
1201 }
1202 return false;
1203 };
1204
1205 bool IsByCopyCapture = false;
1206 bool IsConstCapture = false;
1207 CXXRecordDecl *Closure = cast<CXXRecordDecl>(CurDC->getParent());
1208 while (Closure &&
1209 IsThisCaptured(Closure, IsByCopyCapture, IsConstCapture)) {
1210 if (IsByCopyCapture) {
1211 if (IsConstCapture)
1212 ClassType.addConst();
1213 return ASTCtx.getPointerType(ClassType);
1214 }
1215 Closure = isLambdaCallOperator(Closure->getParent())
1216 ? cast<CXXRecordDecl>(Closure->getParent()->getParent())
1217 : nullptr;
1218 }
1219 }
1220 return ThisTy;
1221}
1222
1226
1227 if (CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(DC)) {
1228 if (method && method->isImplicitObjectMemberFunction())
1229 ThisTy = method->getThisType().getNonReferenceType();
1230 }
1231
1233 inTemplateInstantiation() && isa<CXXRecordDecl>(DC)) {
1234
1235 // This is a lambda call operator that is being instantiated as a default
1236 // initializer. DC must point to the enclosing class type, so we can recover
1237 // the 'this' type from it.
1238 QualType ClassTy = Context.getTypeDeclType(cast<CXXRecordDecl>(DC));
1239 // There are no cv-qualifiers for 'this' within default initializers,
1240 // per [expr.prim.general]p4.
1241 ThisTy = Context.getPointerType(ClassTy);
1242 }
1243
1244 // If we are within a lambda's call operator, the cv-qualifiers of 'this'
1245 // might need to be adjusted if the lambda or any of its enclosing lambda's
1246 // captures '*this' by copy.
1247 if (!ThisTy.isNull() && isLambdaCallOperator(CurContext))
1250 return ThisTy;
1251}
1252
1254 Decl *ContextDecl,
1255 Qualifiers CXXThisTypeQuals,
1256 bool Enabled)
1257 : S(S), OldCXXThisTypeOverride(S.CXXThisTypeOverride), Enabled(false)
1258{
1259 if (!Enabled || !ContextDecl)
1260 return;
1261
1262 CXXRecordDecl *Record = nullptr;
1263 if (ClassTemplateDecl *Template = dyn_cast<ClassTemplateDecl>(ContextDecl))
1264 Record = Template->getTemplatedDecl();
1265 else
1266 Record = cast<CXXRecordDecl>(ContextDecl);
1267
1269 T = S.getASTContext().getQualifiedType(T, CXXThisTypeQuals);
1270
1272 S.Context.getLangOpts().HLSL ? T : S.Context.getPointerType(T);
1273
1274 this->Enabled = true;
1275}
1276
1277
1279 if (Enabled) {
1280 S.CXXThisTypeOverride = OldCXXThisTypeOverride;
1281 }
1282}
1283
1285 SourceLocation DiagLoc = LSI->IntroducerRange.getEnd();
1286 assert(!LSI->isCXXThisCaptured());
1287 // [=, this] {}; // until C++20: Error: this when = is the default
1289 !Sema.getLangOpts().CPlusPlus20)
1290 return;
1291 Sema.Diag(DiagLoc, diag::note_lambda_this_capture_fixit)
1293 DiagLoc, LSI->NumExplicitCaptures > 0 ? ", this" : "this");
1294}
1295
1297 bool BuildAndDiagnose, const unsigned *const FunctionScopeIndexToStopAt,
1298 const bool ByCopy) {
1299 // We don't need to capture this in an unevaluated context.
1300 if (isUnevaluatedContext() && !Explicit)
1301 return true;
1302
1303 assert((!ByCopy || Explicit) && "cannot implicitly capture *this by value");
1304
1305 const int MaxFunctionScopesIndex = FunctionScopeIndexToStopAt
1306 ? *FunctionScopeIndexToStopAt
1307 : FunctionScopes.size() - 1;
1308
1309 // Check that we can capture the *enclosing object* (referred to by '*this')
1310 // by the capturing-entity/closure (lambda/block/etc) at
1311 // MaxFunctionScopesIndex-deep on the FunctionScopes stack.
1312
1313 // Note: The *enclosing object* can only be captured by-value by a
1314 // closure that is a lambda, using the explicit notation:
1315 // [*this] { ... }.
1316 // Every other capture of the *enclosing object* results in its by-reference
1317 // capture.
1318
1319 // For a closure 'L' (at MaxFunctionScopesIndex in the FunctionScopes
1320 // stack), we can capture the *enclosing object* only if:
1321 // - 'L' has an explicit byref or byval capture of the *enclosing object*
1322 // - or, 'L' has an implicit capture.
1323 // AND
1324 // -- there is no enclosing closure
1325 // -- or, there is some enclosing closure 'E' that has already captured the
1326 // *enclosing object*, and every intervening closure (if any) between 'E'
1327 // and 'L' can implicitly capture the *enclosing object*.
1328 // -- or, every enclosing closure can implicitly capture the
1329 // *enclosing object*
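  // Illustrative sketch ('S' is a hypothetical class):
  //
  //   struct S {
  //     void f() {
  //       [this] { [=] { (void)this; }(); }(); // OK: the outer lambda captures
  //                                            // explicitly and the inner one
  //                                            // can capture implicitly
  //       [] { (void)this; }();                // ill-formed: no capture-default,
  //                                            // so 'this' cannot be captured
  //     }
  //   };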
1330
1331
1332 unsigned NumCapturingClosures = 0;
1333 for (int idx = MaxFunctionScopesIndex; idx >= 0; idx--) {
1334 if (CapturingScopeInfo *CSI =
1335 dyn_cast<CapturingScopeInfo>(FunctionScopes[idx])) {
1336 if (CSI->CXXThisCaptureIndex != 0) {
1337 // 'this' is already being captured; there isn't anything more to do.
1338 CSI->Captures[CSI->CXXThisCaptureIndex - 1].markUsed(BuildAndDiagnose);
1339 break;
1340 }
1341 LambdaScopeInfo *LSI = dyn_cast<LambdaScopeInfo>(CSI);
1343 // This context can't implicitly capture 'this'; fail out.
1344 if (BuildAndDiagnose) {
1346 Diag(Loc, diag::err_this_capture)
1347 << (Explicit && idx == MaxFunctionScopesIndex);
1348 if (!Explicit)
1349 buildLambdaThisCaptureFixit(*this, LSI);
1350 }
1351 return true;
1352 }
1353 if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByref ||
1354 CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByval ||
1355 CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_Block ||
1356 CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_CapturedRegion ||
1357 (Explicit && idx == MaxFunctionScopesIndex)) {
1358 // Regarding (Explicit && idx == MaxFunctionScopesIndex): only the first
1359 // iteration through can be an explicit capture, all enclosing closures,
1360 // if any, must perform implicit captures.
1361
1362 // This closure can capture 'this'; continue looking upwards.
1363 NumCapturingClosures++;
1364 continue;
1365 }
1366 // This context can't implicitly capture 'this'; fail out.
1367 if (BuildAndDiagnose) {
1369 Diag(Loc, diag::err_this_capture)
1370 << (Explicit && idx == MaxFunctionScopesIndex);
1371 }
1372 if (!Explicit)
1373 buildLambdaThisCaptureFixit(*this, LSI);
1374 return true;
1375 }
1376 break;
1377 }
1378 if (!BuildAndDiagnose) return false;
1379
1380 // If we got here, then the closure at MaxFunctionScopesIndex on the
1381 // FunctionScopes stack can capture the *enclosing object*, so capture it
1382 // (including implicit by-reference captures in any enclosing closures).
1383
1384 // In the loop below, respect the ByCopy flag only for the closure requesting
1385 // the capture (i.e. first iteration through the loop below). Ignore it for
1386 // all enclosing closures up to NumCapturingClosures (since they must be
1387 // implicitly capturing the *enclosing object* by reference (see loop
1388 // above)).
1389 assert((!ByCopy ||
1390 isa<LambdaScopeInfo>(FunctionScopes[MaxFunctionScopesIndex])) &&
1391 "Only a lambda can capture the enclosing object (referred to by "
1392 "*this) by copy");
1393 QualType ThisTy = getCurrentThisType();
1394 for (int idx = MaxFunctionScopesIndex; NumCapturingClosures;
1395 --idx, --NumCapturingClosures) {
1396 CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FunctionScopes[idx]);
1397
1398 // The type of the corresponding data member (not a 'this' pointer if 'by
1399 // copy').
1400 QualType CaptureType = ByCopy ? ThisTy->getPointeeType() : ThisTy;
1401
1402 bool isNested = NumCapturingClosures > 1;
1403 CSI->addThisCapture(isNested, Loc, CaptureType, ByCopy);
1404 }
1405 return false;
1406}
1407
1409 // C++20 [expr.prim.this]p1:
1410 // The keyword this names a pointer to the object for which an
1411 // implicit object member function is invoked or a non-static
1412 // data member's initializer is evaluated.
1413 QualType ThisTy = getCurrentThisType();
1414
1415 if (CheckCXXThisType(Loc, ThisTy))
1416 return ExprError();
1417
1418 return BuildCXXThisExpr(Loc, ThisTy, /*IsImplicit=*/false);
1419}
1420
1422 if (!Type.isNull())
1423 return false;
1424
1425 // C++20 [expr.prim.this]p3:
1426 // If a declaration declares a member function or member function template
1427 // of a class X, the expression this is a prvalue of type
1428 // "pointer to cv-qualifier-seq X" wherever X is the current class between
1429 // the optional cv-qualifier-seq and the end of the function-definition,
1430 // member-declarator, or declarator. It shall not appear within the
1431 // declaration of either a static member function or an explicit object
1432 // member function of the current class (although its type and value
1433 // category are defined within such member functions as they are within
1434 // an implicit object member function).
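  // Illustrative sketch ('X' is a hypothetical class):
  //
  //   struct X {
  //     static void f() { (void)this; }     // error: static member function
  //     void g(this X self) { (void)this; } // error: explicit object member
  //                                         // function
  //     void h() { (void)this; }            // OK: 'this' has type 'X *'
  //   };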
1436 const auto *Method = dyn_cast<CXXMethodDecl>(DC);
1437 if (Method && Method->isExplicitObjectMemberFunction()) {
1438 Diag(Loc, diag::err_invalid_this_use) << 1;
1440 Diag(Loc, diag::err_invalid_this_use) << 1;
1441 } else {
1442 Diag(Loc, diag::err_invalid_this_use) << 0;
1443 }
1444 return true;
1445}
1446
1448 bool IsImplicit) {
1449 auto *This = CXXThisExpr::Create(Context, Loc, Type, IsImplicit);
1450 MarkThisReferenced(This);
1451 return This;
1452}
1453
1455 CheckCXXThisCapture(This->getExprLoc());
1456 if (This->isTypeDependent())
1457 return;
1458
1459 // Check if 'this' is captured by value in a lambda with a dependent explicit
1460 // object parameter, and mark it as type-dependent as well if so.
1461 auto IsDependent = [&]() {
1462 for (auto *Scope : llvm::reverse(FunctionScopes)) {
1463 auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope);
1464 if (!LSI)
1465 continue;
1466
1467 if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext) &&
1468 LSI->AfterParameterList)
1469 return false;
1470
1471 // If this lambda captures 'this' by value, then 'this' is dependent iff
1472 // this lambda has a dependent explicit object parameter. If we can't
1473 // determine whether it does (e.g. because the CXXMethodDecl's type is
1474 // null), assume it doesn't.
1475 if (LSI->isCXXThisCaptured()) {
1476 if (!LSI->getCXXThisCapture().isCopyCapture())
1477 continue;
1478
1479 const auto *MD = LSI->CallOperator;
1480 if (MD->getType().isNull())
1481 return false;
1482
1483 const auto *Ty = MD->getType()->getAs<FunctionProtoType>();
1484 return Ty && MD->isExplicitObjectMemberFunction() &&
1485 Ty->getParamType(0)->isDependentType();
1486 }
1487 }
1488 return false;
1489 }();
1490
1491 This->setCapturedByCopyInLambdaWithExplicitObjectParameter(IsDependent);
1492}
1493
1495 // If we're outside the body of a member function, then we'll have a specified
1496 // type for 'this'.
1498 return false;
1499
1500 // Determine whether we're looking into a class that's currently being
1501 // defined.
1502 CXXRecordDecl *Class = BaseType->getAsCXXRecordDecl();
1503 return Class && Class->isBeingDefined();
1504}
1505
1508 SourceLocation LParenOrBraceLoc,
1509 MultiExprArg exprs,
1510 SourceLocation RParenOrBraceLoc,
1511 bool ListInitialization) {
1512 if (!TypeRep)
1513 return ExprError();
1514
1515 TypeSourceInfo *TInfo;
1516 QualType Ty = GetTypeFromParser(TypeRep, &TInfo);
1517 if (!TInfo)
1519
1520 auto Result = BuildCXXTypeConstructExpr(TInfo, LParenOrBraceLoc, exprs,
1521 RParenOrBraceLoc, ListInitialization);
1522 // Avoid creating a non-type-dependent expression that contains typos.
1523 // Non-type-dependent expressions are liable to be discarded without
1524 // checking for embedded typos.
1525 if (!Result.isInvalid() && Result.get()->isInstantiationDependent() &&
1526 !Result.get()->isTypeDependent())
1528 else if (Result.isInvalid())
1530 RParenOrBraceLoc, exprs, Ty);
1531 return Result;
1532}
1533
1536 SourceLocation LParenOrBraceLoc,
1537 MultiExprArg Exprs,
1538 SourceLocation RParenOrBraceLoc,
1539 bool ListInitialization) {
1540 QualType Ty = TInfo->getType();
1541 SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc();
1542
1543 assert((!ListInitialization || Exprs.size() == 1) &&
1544 "List initialization must have exactly one expression.");
1545 SourceRange FullRange = SourceRange(TyBeginLoc, RParenOrBraceLoc);
1546
1547 InitializedEntity Entity =
1549 InitializationKind Kind =
1550 Exprs.size()
1551 ? ListInitialization
1553 TyBeginLoc, LParenOrBraceLoc, RParenOrBraceLoc)
1554 : InitializationKind::CreateDirect(TyBeginLoc, LParenOrBraceLoc,
1555 RParenOrBraceLoc)
1556 : InitializationKind::CreateValue(TyBeginLoc, LParenOrBraceLoc,
1557 RParenOrBraceLoc);
1558
1559 // C++17 [expr.type.conv]p1:
1560 // If the type is a placeholder for a deduced class type, [...perform class
1561 // template argument deduction...]
1562 // C++23:
1563 // Otherwise, if the type contains a placeholder type, it is replaced by the
1564 // type determined by placeholder type deduction.
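  // Illustrative examples of the two placeholder cases handled below:
  //
  //   auto p = std::pair(1, 2.0); // class template argument deduction yields
  //                               // 'std::pair<int, double>'
  //   auto n = auto(42);          // C++23: the placeholder is replaced by the
  //                               // deduced type 'int'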
1565 DeducedType *Deduced = Ty->getContainedDeducedType();
1566 if (Deduced && !Deduced->isDeduced() &&
1567 isa<DeducedTemplateSpecializationType>(Deduced)) {
1569 Kind, Exprs);
1570 if (Ty.isNull())
1571 return ExprError();
1572 Entity = InitializedEntity::InitializeTemporary(TInfo, Ty);
1573 } else if (Deduced && !Deduced->isDeduced()) {
1574 MultiExprArg Inits = Exprs;
1575 if (ListInitialization) {
1576 auto *ILE = cast<InitListExpr>(Exprs[0]);
1577 Inits = MultiExprArg(ILE->getInits(), ILE->getNumInits());
1578 }
1579
1580 if (Inits.empty())
1581 return ExprError(Diag(TyBeginLoc, diag::err_auto_expr_init_no_expression)
1582 << Ty << FullRange);
1583 if (Inits.size() > 1) {
1584 Expr *FirstBad = Inits[1];
1585 return ExprError(Diag(FirstBad->getBeginLoc(),
1586 diag::err_auto_expr_init_multiple_expressions)
1587 << Ty << FullRange);
1588 }
1589 if (getLangOpts().CPlusPlus23) {
1590 if (Ty->getAs<AutoType>())
1591 Diag(TyBeginLoc, diag::warn_cxx20_compat_auto_expr) << FullRange;
1592 }
1593 Expr *Deduce = Inits[0];
1594 if (isa<InitListExpr>(Deduce))
1595 return ExprError(
1596 Diag(Deduce->getBeginLoc(), diag::err_auto_expr_init_paren_braces)
1597 << ListInitialization << Ty << FullRange);
1599 TemplateDeductionInfo Info(Deduce->getExprLoc());
1601 DeduceAutoType(TInfo->getTypeLoc(), Deduce, DeducedType, Info);
1604 return ExprError(Diag(TyBeginLoc, diag::err_auto_expr_deduction_failure)
1605 << Ty << Deduce->getType() << FullRange
1606 << Deduce->getSourceRange());
1607 if (DeducedType.isNull()) {
1609 return ExprError();
1610 }
1611
1612 Ty = DeducedType;
1613 Entity = InitializedEntity::InitializeTemporary(TInfo, Ty);
1614 }
1615
1618 Context, Ty.getNonReferenceType(), TInfo, LParenOrBraceLoc, Exprs,
1619 RParenOrBraceLoc, ListInitialization);
1620
1621 // C++ [expr.type.conv]p1:
1622 // If the expression list is a parenthesized single expression, the type
1623 // conversion expression is equivalent (in definedness, and if defined in
1624 // meaning) to the corresponding cast expression.
1625 if (Exprs.size() == 1 && !ListInitialization &&
1626 !isa<InitListExpr>(Exprs[0])) {
1627 Expr *Arg = Exprs[0];
1628 return BuildCXXFunctionalCastExpr(TInfo, Ty, LParenOrBraceLoc, Arg,
1629 RParenOrBraceLoc);
1630 }
1631
1632 // For an expression of the form T(), T shall not be an array type.
1633 QualType ElemTy = Ty;
1634 if (Ty->isArrayType()) {
1635 if (!ListInitialization)
1636 return ExprError(Diag(TyBeginLoc, diag::err_value_init_for_array_type)
1637 << FullRange);
1638 ElemTy = Context.getBaseElementType(Ty);
1639 }
1640
1641 // Only construct objects with object types.
1642 // The standard doesn't explicitly forbid function types here, but that's an
1643 // obvious oversight, as there's no way to dynamically construct a function
1644 // in general.
1645 if (Ty->isFunctionType())
1646 return ExprError(Diag(TyBeginLoc, diag::err_init_for_function_type)
1647 << Ty << FullRange);
1648
1649 // C++17 [expr.type.conv]p2, per DR2351:
1650 // If the type is cv void and the initializer is () or {}, the expression is
1651 // a prvalue of the specified type that performs no initialization.
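  // Illustrative examples:
  //
  //   void();  // prvalue of type 'void'; performs no initialization
  //   void{};  // likewise, via an empty braced-init-list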
1652 if (Ty->isVoidType()) {
1653 if (Exprs.empty())
1654 return new (Context) CXXScalarValueInitExpr(
1655 Ty.getUnqualifiedType(), TInfo, Kind.getRange().getEnd());
1656 if (ListInitialization &&
1657 cast<InitListExpr>(Exprs[0])->getNumInits() == 0) {
1659 Context, Ty.getUnqualifiedType(), VK_PRValue, TInfo, CK_ToVoid,
1660 Exprs[0], /*Path=*/nullptr, CurFPFeatureOverrides(),
1661 Exprs[0]->getBeginLoc(), Exprs[0]->getEndLoc());
1662 }
1663 } else if (RequireCompleteType(TyBeginLoc, ElemTy,
1664 diag::err_invalid_incomplete_type_use,
1665 FullRange))
1666 return ExprError();
1667
1668 // Otherwise, the expression is a prvalue of the specified type whose
1669 // result object is direct-initialized (11.6) with the initializer.
1670 InitializationSequence InitSeq(*this, Entity, Kind, Exprs);
1671 ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Exprs);
1672
1673 if (Result.isInvalid())
1674 return Result;
1675
1676 Expr *Inner = Result.get();
1677 if (CXXBindTemporaryExpr *BTE = dyn_cast_or_null<CXXBindTemporaryExpr>(Inner))
1678 Inner = BTE->getSubExpr();
1679 if (auto *CE = dyn_cast<ConstantExpr>(Inner);
1680 CE && CE->isImmediateInvocation())
1681 Inner = CE->getSubExpr();
1682 if (!isa<CXXTemporaryObjectExpr>(Inner) &&
1683 !isa<CXXScalarValueInitExpr>(Inner)) {
1684 // If we created a CXXTemporaryObjectExpr, that node also represents the
1685 // functional cast. Otherwise, create an explicit cast to represent
1686 // the syntactic form of a functional-style cast that was used here.
1687 //
1688 // FIXME: Creating a CXXFunctionalCastExpr around a CXXConstructExpr
1689 // would give a more consistent AST representation than using a
1690 // CXXTemporaryObjectExpr. It's also weird that the functional cast
1691 // is sometimes handled by initialization and sometimes not.
1692 QualType ResultType = Result.get()->getType();
1693 SourceRange Locs = ListInitialization
1694 ? SourceRange()
1695 : SourceRange(LParenOrBraceLoc, RParenOrBraceLoc);
1696 Result = CXXFunctionalCastExpr::Create(
1697 Context, ResultType, Expr::getValueKindForType(Ty), TInfo, CK_NoOp,
1698 Result.get(), /*Path=*/nullptr, CurFPFeatureOverrides(),
1699 Locs.getBegin(), Locs.getEnd());
1700 }
1701
1702 return Result;
1703}
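// Editorial note (not part of the original source): a brief sketch of the
// functional-cast forms classified above; the names are illustrative only.
//
//   struct Point { int x, y; };
//   int   a = int(3.14);     // single parenthesized arg: plain functional cast
//   Point p = Point{1, 2};   // braces: list-initialization of a temporary
//   int   b = int();         // empty parens: value-initialization
//   auto  c = auto(a);       // C++23: type deduced from the initializer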
1704
1705 bool Sema::isUsualDeallocationFunction(const CXXMethodDecl *Method) {
1706 // [CUDA] Ignore this function, if we can't call it.
1707 const FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
1708 if (getLangOpts().CUDA) {
1709 auto CallPreference = CUDA().IdentifyPreference(Caller, Method);
1710 // If it's not callable at all, it's not the right function.
1711 if (CallPreference < SemaCUDA::CFP_WrongSide)
1712 return false;
1713 if (CallPreference == SemaCUDA::CFP_WrongSide) {
1714 // Maybe. We have to check if there are better alternatives.
1715 DeclContext::lookup_result R =
1716 Method->getDeclContext()->lookup(Method->getDeclName());
1717 for (const auto *D : R) {
1718 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
1719 if (CUDA().IdentifyPreference(Caller, FD) > SemaCUDA::CFP_WrongSide)
1720 return false;
1721 }
1722 }
1723 // We've found no better variants.
1724 }
1725 }
1726
1727 SmallVector<const FunctionDecl*, 4> PreventedBy;
1728 bool Result = Method->isUsualDeallocationFunction(PreventedBy);
1729
1730 if (Result || !getLangOpts().CUDA || PreventedBy.empty())
1731 return Result;
1732
1733 // In case of CUDA, return true if none of the 1-argument deallocator
1734 // functions are actually callable.
1735 return llvm::none_of(PreventedBy, [&](const FunctionDecl *FD) {
1736 assert(FD->getNumParams() == 1 &&
1737 "Only single-operand functions should be in PreventedBy");
1738 return CUDA().IdentifyPreference(Caller, FD) >= SemaCUDA::CFP_HostDevice;
1739 });
1740}
1741
1742/// Determine whether the given function is a non-placement
1743/// deallocation function.
1744 static bool isNonPlacementDeallocationFunction(Sema &S, FunctionDecl *FD) {
1745 if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FD))
1746 return S.isUsualDeallocationFunction(Method);
1747
1748 if (FD->getOverloadedOperator() != OO_Delete &&
1749 FD->getOverloadedOperator() != OO_Array_Delete)
1750 return false;
1751
1752 unsigned UsualParams = 1;
1753
1754 if (S.getLangOpts().SizedDeallocation && UsualParams < FD->getNumParams() &&
1755 S.Context.hasSameUnqualifiedType(
1756 FD->getParamDecl(UsualParams)->getType(),
1757 S.Context.getSizeType()))
1758 ++UsualParams;
1759
1760 if (S.getLangOpts().AlignedAllocation && UsualParams < FD->getNumParams() &&
1761 S.Context.hasSameUnqualifiedType(
1762 FD->getParamDecl(UsualParams)->getType(),
1763 S.Context.getTypeDeclType(S.getStdAlignValT())))
1764 ++UsualParams;
1765
1766 return UsualParams == FD->getNumParams();
1767}
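// Editorial note (not part of the original source): namespace-scope signatures
// that the parameter counting above treats as non-placement ("usual")
// deallocation functions, assuming <new> and <cstddef> are included.
//
//   void operator delete(void *) noexcept;
//   void operator delete(void *, std::size_t) noexcept;              // sized
//   void operator delete(void *, std::align_val_t) noexcept;         // aligned
//   void operator delete(void *, std::size_t, std::align_val_t) noexcept;
//
// Any additional parameter beyond these makes the function a placement
// deallocation function instead.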
1768
1769namespace {
1770 struct UsualDeallocFnInfo {
1771 UsualDeallocFnInfo() : Found(), FD(nullptr) {}
1772 UsualDeallocFnInfo(Sema &S, DeclAccessPair Found)
1773 : Found(Found), FD(dyn_cast<FunctionDecl>(Found->getUnderlyingDecl())),
1774 Destroying(false), HasSizeT(false), HasAlignValT(false),
1775 CUDAPref(SemaCUDA::CFP_Native) {
1776 // A function template declaration is never a usual deallocation function.
1777 if (!FD)
1778 return;
1779 unsigned NumBaseParams = 1;
1780 if (FD->isDestroyingOperatorDelete()) {
1781 Destroying = true;
1782 ++NumBaseParams;
1783 }
1784
1785 if (NumBaseParams < FD->getNumParams() &&
1786 S.Context.hasSameUnqualifiedType(
1787 FD->getParamDecl(NumBaseParams)->getType(),
1788 S.Context.getSizeType())) {
1789 ++NumBaseParams;
1790 HasSizeT = true;
1791 }
1792
1793 if (NumBaseParams < FD->getNumParams() &&
1794 FD->getParamDecl(NumBaseParams)->getType()->isAlignValT()) {
1795 ++NumBaseParams;
1796 HasAlignValT = true;
1797 }
1798
1799 // In CUDA, determine how much we'd like / dislike to call this.
1800 if (S.getLangOpts().CUDA)
1801 CUDAPref = S.CUDA().IdentifyPreference(
1802 S.getCurFunctionDecl(/*AllowLambda=*/true), FD);
1803 }
1804
1805 explicit operator bool() const { return FD; }
1806
1807 bool isBetterThan(const UsualDeallocFnInfo &Other, bool WantSize,
1808 bool WantAlign) const {
1809 // C++ P0722:
1810 // A destroying operator delete is preferred over a non-destroying
1811 // operator delete.
1812 if (Destroying != Other.Destroying)
1813 return Destroying;
1814
1815 // C++17 [expr.delete]p10:
1816 // If the type has new-extended alignment, a function with a parameter
1817 // of type std::align_val_t is preferred; otherwise a function without
1818 // such a parameter is preferred
1819 if (HasAlignValT != Other.HasAlignValT)
1820 return HasAlignValT == WantAlign;
1821
1822 if (HasSizeT != Other.HasSizeT)
1823 return HasSizeT == WantSize;
1824
1825 // Use CUDA call preference as a tiebreaker.
1826 return CUDAPref > Other.CUDAPref;
1827 }
1828
1829 DeclAccessPair Found;
1830 FunctionDecl *FD;
1831 bool Destroying, HasSizeT, HasAlignValT;
1832 SemaCUDA::CUDAFunctionPreference CUDAPref;
1833 };
1834}
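// Editorial note (not part of the original source): 'Widget' is a hypothetical
// class with a destroying operator delete (C++20, P0722), the kind that
// isBetterThan() above prefers over a non-destroying overload:
//
//   struct Widget {
//     void operator delete(Widget *p, std::destroying_delete_t) noexcept;
//   };
//
// For 'delete w;' the destroying form is selected and becomes responsible
// for running ~Widget() itself before releasing the storage.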
1835
1836/// Determine whether a type has new-extended alignment. This may be called when
1837/// the type is incomplete (for a delete-expression with an incomplete pointee
1838/// type), in which case it will conservatively return false if the alignment is
1839/// not known.
1840static bool hasNewExtendedAlignment(Sema &S, QualType AllocType) {
1841 return S.getLangOpts().AlignedAllocation &&
1842 S.getASTContext().getTypeAlignIfKnown(AllocType) >
1843 S.getASTContext().getTargetInfo().getNewAlign();
1844}
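// Editorial note (not part of the original source): an illustrative type with
// new-extended alignment. If the target's default 'operator new' only
// guarantees __STDCPP_DEFAULT_NEW_ALIGNMENT__ (getNewAlign()), e.g. 16 bytes,
// then
//
//   struct alignas(64) CacheLine { char bytes[64]; };
//
// exceeds it, so under -faligned-allocation 'new CacheLine' is routed to the
// std::align_val_t overloads.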
1845
1846/// Select the correct "usual" deallocation function to use from a selection of
1847/// deallocation functions (either global or class-scope).
1848static UsualDeallocFnInfo resolveDeallocationOverload(
1849 Sema &S, LookupResult &R, bool WantSize, bool WantAlign,
1850 llvm::SmallVectorImpl<UsualDeallocFnInfo> *BestFns = nullptr) {
1851 UsualDeallocFnInfo Best;
1852
1853 for (auto I = R.begin(), E = R.end(); I != E; ++I) {
1854 UsualDeallocFnInfo Info(S, I.getPair());
1855 if (!Info || !isNonPlacementDeallocationFunction(S, Info.FD) ||
1856 Info.CUDAPref == SemaCUDA::CFP_Never)
1857 continue;
1858
1859 if (!Best) {
1860 Best = Info;
1861 if (BestFns)
1862 BestFns->push_back(Info);
1863 continue;
1864 }
1865
1866 if (Best.isBetterThan(Info, WantSize, WantAlign))
1867 continue;
1868
1869 // If more than one preferred function is found, all non-preferred
1870 // functions are eliminated from further consideration.
1871 if (BestFns && Info.isBetterThan(Best, WantSize, WantAlign))
1872 BestFns->clear();
1873
1874 Best = Info;
1875 if (BestFns)
1876 BestFns->push_back(Info);
1877 }
1878
1879 return Best;
1880}
1881
1882/// Determine whether a given type is a class for which 'delete[]' would call
1883/// a member 'operator delete[]' with a 'size_t' parameter. This implies that
1884/// we need to store the array size (even if the type is
1885/// trivially-destructible).
1886 static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
1887 QualType allocType) {
1888 const RecordType *record =
1889 allocType->getBaseElementTypeUnsafe()->getAs<RecordType>();
1890 if (!record) return false;
1891
1892 // Try to find an operator delete[] in class scope.
1893
1894 DeclarationName deleteName =
1895 S.Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete);
1896 LookupResult ops(S, deleteName, loc, Sema::LookupOrdinaryName);
1897 S.LookupQualifiedName(ops, record->getDecl());
1898
1899 // We're just doing this for information.
1900 ops.suppressDiagnostics();
1901
1902 // Very likely: there's no operator delete[].
1903 if (ops.empty()) return false;
1904
1905 // If it's ambiguous, it should be illegal to call operator delete[]
1906 // on this thing, so it doesn't matter if we allocate extra space or not.
1907 if (ops.isAmbiguous()) return false;
1908
1909 // C++17 [expr.delete]p10:
1910 // If the deallocation functions have class scope, the one without a
1911 // parameter of type std::size_t is selected.
1912 auto Best = resolveDeallocationOverload(
1913 S, ops, /*WantSize*/false,
1914 /*WantAlign*/hasNewExtendedAlignment(S, allocType));
1915 return Best && Best.HasSizeT;
1916}
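// Editorial note (not part of the original source): 'Packet' is a hypothetical
// class whose only usual 'operator delete[]' is the sized form, which is what
// the function above detects. An array cookie is then required even though
// the element type is trivially destructible:
//
//   struct Packet {
//     void operator delete[](void *p, std::size_t bytes) noexcept;
//   };
//   // 'new Packet[n]' must over-allocate and record n so the sized
//   // delete can be passed the number of bytes on 'delete[] p;'.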
1917
1918 ExprResult
1919 Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
1920 SourceLocation PlacementLParen, MultiExprArg PlacementArgs,
1921 SourceLocation PlacementRParen, SourceRange TypeIdParens,
1922 Declarator &D, Expr *Initializer) {
1923 std::optional<Expr *> ArraySize;
1924 // If the specified type is an array, unwrap it and save the expression.
1925 if (D.getNumTypeObjects() > 0 &&
1926 D.getTypeObject(0).Kind == DeclaratorChunk::Array) {
1927 DeclaratorChunk &Chunk = D.getTypeObject(0);
1928 if (D.getDeclSpec().hasAutoTypeSpec())
1929 return ExprError(Diag(Chunk.Loc, diag::err_new_array_of_auto)
1930 << D.getSourceRange());
1931 if (Chunk.Arr.hasStatic)
1932 return ExprError(Diag(Chunk.Loc, diag::err_static_illegal_in_new)
1933 << D.getSourceRange());
1934 if (!Chunk.Arr.NumElts && !Initializer)
1935 return ExprError(Diag(Chunk.Loc, diag::err_array_new_needs_size)
1936 << D.getSourceRange());
1937
1938 ArraySize = static_cast<Expr*>(Chunk.Arr.NumElts);
1939 D.DropFirstTypeObject();
1940 }
1941
1942 // Every dimension shall be of constant size.
1943 if (ArraySize) {
1944 for (unsigned I = 0, N = D.getNumTypeObjects(); I < N; ++I) {
1945 if (D.getTypeObject(I).Kind != DeclaratorChunk::Array)
1946 break;
1947
1948 DeclaratorChunk::ArrayTypeInfo &Array = D.getTypeObject(I).Arr;
1949 if (Expr *NumElts = (Expr *)Array.NumElts) {
1950 if (!NumElts->isTypeDependent() && !NumElts->isValueDependent()) {
1951 // FIXME: GCC permits constant folding here. We should either do so consistently
1952 // or not do so at all, rather than changing behavior in C++14 onwards.
1953 if (getLangOpts().CPlusPlus14) {
1954 // C++1y [expr.new]p6: Every constant-expression in a noptr-new-declarator
1955 // shall be a converted constant expression (5.19) of type std::size_t
1956 // and shall evaluate to a strictly positive value.
1957 llvm::APSInt Value(Context.getIntWidth(Context.getSizeType()));
1958 Array.NumElts
1961 .get();
1962 } else {
1963 Array.NumElts =
1964 VerifyIntegerConstantExpression(
1965 NumElts, nullptr, diag::err_new_array_nonconst, AllowFold)
1966 .get();
1967 }
1968 if (!Array.NumElts)
1969 return ExprError();
1970 }
1971 }
1972 }
1973 }
1974
1975 TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
1976 QualType AllocType = TInfo->getType();
1977 if (D.isInvalidType())
1978 return ExprError();
1979
1980 SourceRange DirectInitRange;
1981 if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Initializer))
1982 DirectInitRange = List->getSourceRange();
1983
1984 return BuildCXXNew(SourceRange(StartLoc, D.getEndLoc()), UseGlobal,
1985 PlacementLParen, PlacementArgs, PlacementRParen,
1986 TypeIdParens, AllocType, TInfo, ArraySize, DirectInitRange,
1987 Initializer);
1988}
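// Editorial note (not part of the original source): declarator shapes the
// code above accepts for array new. Only the outermost bound may be a
// runtime value; every inner bound must be a constant expression:
//
//   int n = 8;
//   int (*a)[5] = new int[n][5];   // OK: outer bound runtime, inner constant
//   // new int[n][k];              // error unless 'k' is a constant expression
//   // new auto[4];                // error: cannot allocate an array of 'auto'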
1989
1990 static bool isLegalArrayNewInitializer(CXXNewInitializationStyle Style,
1991 Expr *Init, bool IsCPlusPlus20) {
1992 if (!Init)
1993 return true;
1994 if (ParenListExpr *PLE = dyn_cast<ParenListExpr>(Init))
1995 return IsCPlusPlus20 || PLE->getNumExprs() == 0;
1996 if (isa<ImplicitValueInitExpr>(Init))
1997 return true;
1998 else if (CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init))
1999 return !CCE->isListInitialization() &&
2000 CCE->getConstructor()->isDefaultConstructor();
2001 else if (Style == CXXNewInitializationStyle::Braces) {
2002 assert(isa<InitListExpr>(Init) &&
2003 "Shouldn't create list CXXConstructExprs for arrays.");
2004 return true;
2005 }
2006 return false;
2007}
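// Editorial note (not part of the original source): initializers the check
// above classifies for an array new-expression:
//
//   new int[3];            // legal: no initializer
//   new int[3]();          // legal: empty parentheses
//   new int[3]{1, 2, 3};   // legal: braced-init-list (C++11)
//   new int[3](1, 2, 3);   // legal only in C++20 and later (parenthesized
//                          // aggregate initialization); rejected before that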
2008
2009bool
2010 Sema::isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const {
2011 if (!getLangOpts().AlignedAllocationUnavailable)
2012 return false;
2013 if (FD.isDefined())
2014 return false;
2015 std::optional<unsigned> AlignmentParam;
2016 if (FD.isReplaceableGlobalAllocationFunction(&AlignmentParam) &&
2017 AlignmentParam)
2018 return true;
2019 return false;
2020}
2021
2022// Emit a diagnostic if an aligned allocation/deallocation function that is not
2023// implemented in the standard library is selected.
2024 void Sema::diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
2025 SourceLocation Loc) {
2026 if (isUnavailableAlignedAllocationFunction(FD)) {
2027 const llvm::Triple &T = getASTContext().getTargetInfo().getTriple();
2028 StringRef OSName = AvailabilityAttr::getPlatformNameSourceSpelling(
2029 getASTContext().getTargetInfo().getPlatformName());
2030 VersionTuple OSVersion = alignedAllocMinVersion(T.getOS());
2031
2032 OverloadedOperatorKind Kind = FD.getDeclName().getCXXOverloadedOperator();
2033 bool IsDelete = Kind == OO_Delete || Kind == OO_Array_Delete;
2034 Diag(Loc, diag::err_aligned_allocation_unavailable)
2035 << IsDelete << FD.getType().getAsString() << OSName
2036 << OSVersion.getAsString() << OSVersion.empty();
2037 Diag(Loc, diag::note_silence_aligned_allocation_unavailable);
2038 }
2039}
2040
2041 ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
2042 SourceLocation PlacementLParen,
2043 MultiExprArg PlacementArgs,
2044 SourceLocation PlacementRParen,
2045 SourceRange TypeIdParens, QualType AllocType,
2046 TypeSourceInfo *AllocTypeInfo,
2047 std::optional<Expr *> ArraySize,
2048 SourceRange DirectInitRange, Expr *Initializer) {
2049 SourceRange TypeRange = AllocTypeInfo->getTypeLoc().getSourceRange();
2050 SourceLocation StartLoc = Range.getBegin();
2051
2052 CXXNewInitializationStyle InitStyle;
2053 if (DirectInitRange.isValid()) {
2054 assert(Initializer && "Have parens but no initializer.");
2055 InitStyle = CXXNewInitializationStyle::Parens;
2056 } else if (isa_and_nonnull<InitListExpr>(Initializer))
2057 InitStyle = CXXNewInitializationStyle::Braces;
2058 else {
2059 assert((!Initializer || isa<ImplicitValueInitExpr>(Initializer) ||
2060 isa<CXXConstructExpr>(Initializer)) &&
2061 "Initializer expression that cannot have been implicitly created.");
2062 InitStyle = CXXNewInitializationStyle::None;
2063 }
2064
2065 MultiExprArg Exprs(&Initializer, Initializer ? 1 : 0);
2066 if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Initializer)) {
2067 assert(InitStyle == CXXNewInitializationStyle::Parens &&
2068 "paren init for non-call init");
2069 Exprs = MultiExprArg(List->getExprs(), List->getNumExprs());
2070 }
2071
2072 // C++11 [expr.new]p15:
2073 // A new-expression that creates an object of type T initializes that
2074 // object as follows:
2075 InitializationKind Kind = [&] {
2076 switch (InitStyle) {
2077 // - If the new-initializer is omitted, the object is default-
2078 // initialized (8.5); if no initialization is performed,
2079 // the object has indeterminate value
2080 case CXXNewInitializationStyle::None:
2081 return InitializationKind::CreateDefault(TypeRange.getBegin());
2082 // - Otherwise, the new-initializer is interpreted according to the
2083 // initialization rules of 8.5 for direct-initialization.
2084 case CXXNewInitializationStyle::Parens:
2085 return InitializationKind::CreateDirect(TypeRange.getBegin(),
2086 DirectInitRange.getBegin(),
2087 DirectInitRange.getEnd());
2088 case CXXNewInitializationStyle::Braces:
2089 return InitializationKind::CreateDirectList(TypeRange.getBegin(),
2090 Initializer->getBeginLoc(),
2091 Initializer->getEndLoc());
2092 }
2093 llvm_unreachable("Unknown initialization kind");
2094 }();
2095
2096 // C++11 [dcl.spec.auto]p6. Deduce the type which 'auto' stands in for.
2097 auto *Deduced = AllocType->getContainedDeducedType();
2098 if (Deduced && !Deduced->isDeduced() &&
2099 isa<DeducedTemplateSpecializationType>(Deduced)) {
2100 if (ArraySize)
2101 return ExprError(
2102 Diag(*ArraySize ? (*ArraySize)->getExprLoc() : TypeRange.getBegin(),
2103 diag::err_deduced_class_template_compound_type)
2104 << /*array*/ 2
2105 << (*ArraySize ? (*ArraySize)->getSourceRange() : TypeRange));
2106
2107 InitializedEntity Entity
2108 = InitializedEntity::InitializeNew(StartLoc, AllocType);
2109 AllocType = DeduceTemplateSpecializationFromInitializer(
2110 AllocTypeInfo, Entity, Kind, Exprs);
2111 if (AllocType.isNull())
2112 return ExprError();
2113 } else if (Deduced && !Deduced->isDeduced()) {
2114 MultiExprArg Inits = Exprs;
2115 bool Braced = (InitStyle == CXXNewInitializationStyle::Braces);
2116 if (Braced) {
2117 auto *ILE = cast<InitListExpr>(Exprs[0]);
2118 Inits = MultiExprArg(ILE->getInits(), ILE->getNumInits());
2119 }
2120
2121 if (InitStyle == CXXNewInitializationStyle::None || Inits.empty())
2122 return ExprError(Diag(StartLoc, diag::err_auto_new_requires_ctor_arg)
2123 << AllocType << TypeRange);
2124 if (Inits.size() > 1) {
2125 Expr *FirstBad = Inits[1];
2126 return ExprError(Diag(FirstBad->getBeginLoc(),
2127 diag::err_auto_new_ctor_multiple_expressions)
2128 << AllocType << TypeRange);
2129 }
2130 if (Braced && !getLangOpts().CPlusPlus17)
2131 Diag(Initializer->getBeginLoc(), diag::ext_auto_new_list_init)
2132 << AllocType << TypeRange;
2133 Expr *Deduce = Inits[0];
2134 if (isa<InitListExpr>(Deduce))
2135 return ExprError(
2136 Diag(Deduce->getBeginLoc(), diag::err_auto_expr_init_paren_braces)
2137 << Braced << AllocType << TypeRange);
2138 QualType DeducedType;
2139 TemplateDeductionInfo Info(Deduce->getExprLoc());
2140 TemplateDeductionResult Result =
2141 DeduceAutoType(AllocTypeInfo->getTypeLoc(), Deduce, DeducedType, Info);
2142 if (Result != TemplateDeductionResult::Success &&
2143 Result != TemplateDeductionResult::AlreadyDiagnosed)
2144 return ExprError(Diag(StartLoc, diag::err_auto_new_deduction_failure)
2145 << AllocType << Deduce->getType() << TypeRange
2146 << Deduce->getSourceRange());
2147 if (DeducedType.isNull()) {
2148 assert(Result == TemplateDeductionResult::AlreadyDiagnosed);
2149 return ExprError();
2150 }
2151 AllocType = DeducedType;
2152 }
2153
2154 // Per C++0x [expr.new]p5, the type being constructed may be a
2155 // typedef of an array type.
2156 if (!ArraySize) {
2157 if (const ConstantArrayType *Array
2158 = Context.getAsConstantArrayType(AllocType)) {
2159 ArraySize = IntegerLiteral::Create(Context, Array->getSize(),
2160 Context.getSizeType(),
2161 TypeRange.getEnd());
2162 AllocType = Array->getElementType();
2163 }
2164 }
2165
2166 if (CheckAllocatedType(AllocType, TypeRange.getBegin(), TypeRange))
2167 return ExprError();
2168
2169 if (ArraySize && !checkArrayElementAlignment(AllocType, TypeRange.getBegin()))
2170 return ExprError();
2171
2172 // In ARC, infer 'retaining' for the allocated
2173 if (getLangOpts().ObjCAutoRefCount &&
2174 AllocType.getObjCLifetime() == Qualifiers::OCL_None &&
2175 AllocType->isObjCLifetimeType()) {
2176 AllocType = Context.getLifetimeQualifiedType(AllocType,
2177 AllocType->getObjCARCImplicitLifetime());
2178 }
2179
2180 QualType ResultType = Context.getPointerType(AllocType);
2181
2182 if (ArraySize && *ArraySize &&
2183 (*ArraySize)->getType()->isNonOverloadPlaceholderType()) {
2184 ExprResult result = CheckPlaceholderExpr(*ArraySize);
2185 if (result.isInvalid()) return ExprError();
2186 ArraySize = result.get();
2187 }
2188 // C++98 5.3.4p6: "The expression in a direct-new-declarator shall have
2189 // integral or enumeration type with a non-negative value."
2190 // C++11 [expr.new]p6: The expression [...] shall be of integral or unscoped
2191 // enumeration type, or a class type for which a single non-explicit
2192 // conversion function to integral or unscoped enumeration type exists.
2193 // C++1y [expr.new]p6: The expression [...] is implicitly converted to
2194 // std::size_t.
2195 std::optional<uint64_t> KnownArraySize;
2196 if (ArraySize && *ArraySize && !(*ArraySize)->isTypeDependent()) {
2197 ExprResult ConvertedSize;
2198 if (getLangOpts().CPlusPlus14) {
2199 assert(Context.getTargetInfo().getIntWidth() && "Builtin type of size 0?");
2200
2201 ConvertedSize = PerformImplicitConversion(*ArraySize, Context.getSizeType(),
2203
2204 if (!ConvertedSize.isInvalid() &&
2205 (*ArraySize)->getType()->getAs<RecordType>())
2206 // Diagnose the compatibility of this conversion.
2207 Diag(StartLoc, diag::warn_cxx98_compat_array_size_conversion)
2208 << (*ArraySize)->getType() << 0 << "'size_t'";
2209 } else {
2210 class SizeConvertDiagnoser : public ICEConvertDiagnoser {
2211 protected:
2212 Expr *ArraySize;
2213
2214 public:
2215 SizeConvertDiagnoser(Expr *ArraySize)
2216 : ICEConvertDiagnoser(/*AllowScopedEnumerations*/false, false, false),
2217 ArraySize(ArraySize) {}
2218
2219 SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
2220 QualType T) override {
2221 return S.Diag(Loc, diag::err_array_size_not_integral)
2222 << S.getLangOpts().CPlusPlus11 << T;
2223 }
2224
2225 SemaDiagnosticBuilder diagnoseIncomplete(
2226 Sema &S, SourceLocation Loc, QualType T) override {
2227 return S.Diag(Loc, diag::err_array_size_incomplete_type)
2228 << T << ArraySize->getSourceRange();
2229 }
2230
2231 SemaDiagnosticBuilder diagnoseExplicitConv(
2232 Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
2233 return S.Diag(Loc, diag::err_array_size_explicit_conversion) << T << ConvTy;
2234 }
2235
2236 SemaDiagnosticBuilder noteExplicitConv(
2237 Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
2238 return S.Diag(Conv->getLocation(), diag::note_array_size_conversion)
2239 << ConvTy->isEnumeralType() << ConvTy;
2240 }
2241
2242 SemaDiagnosticBuilder diagnoseAmbiguous(
2243 Sema &S, SourceLocation Loc, QualType T) override {
2244 return S.Diag(Loc, diag::err_array_size_ambiguous_conversion) << T;
2245 }
2246
2247 SemaDiagnosticBuilder noteAmbiguous(
2248 Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
2249 return S.Diag(Conv->getLocation(), diag::note_array_size_conversion)
2250 << ConvTy->isEnumeralType() << ConvTy;
2251 }
2252
2253 SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
2254 QualType T,
2255 QualType ConvTy) override {
2256 return S.Diag(Loc,
2257 S.getLangOpts().CPlusPlus11
2258 ? diag::warn_cxx98_compat_array_size_conversion
2259 : diag::ext_array_size_conversion)
2260 << T << ConvTy->isEnumeralType() << ConvTy;
2261 }
2262 } SizeDiagnoser(*ArraySize);
2263
2264 ConvertedSize = PerformContextualImplicitConversion(StartLoc, *ArraySize,
2265 SizeDiagnoser);
2266 }
2267 if (ConvertedSize.isInvalid())
2268 return ExprError();
2269
2270 ArraySize = ConvertedSize.get();
2271 QualType SizeType = (*ArraySize)->getType();
2272
2273 if (!SizeType->isIntegralOrUnscopedEnumerationType())
2274 return ExprError();
2275
2276 // C++98 [expr.new]p7:
2277 // The expression in a direct-new-declarator shall have integral type
2278 // with a non-negative value.
2279 //
2280 // Let's see if this is a constant < 0. If so, we reject it out of hand,
2281 // per CWG1464. Otherwise, if it's not a constant, we must have an
2282 // unparenthesized array type.
2283
2284 // We've already performed any required implicit conversion to integer or
2285 // unscoped enumeration type.
2286 // FIXME: Per CWG1464, we are required to check the value prior to
2287 // converting to size_t. This will never find a negative array size in
2288 // C++14 onwards, because Value is always unsigned here!
2289 if (std::optional<llvm::APSInt> Value =
2290 (*ArraySize)->getIntegerConstantExpr(Context)) {
2291 if (Value->isSigned() && Value->isNegative()) {
2292 return ExprError(Diag((*ArraySize)->getBeginLoc(),
2293 diag::err_typecheck_negative_array_size)
2294 << (*ArraySize)->getSourceRange());
2295 }
2296
2297 if (!AllocType->isDependentType()) {
2298 unsigned ActiveSizeBits =
2299 ConstantArrayType::getNumAddressingBits(Context, AllocType, *Value);
2300 if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context))
2301 return ExprError(
2302 Diag((*ArraySize)->getBeginLoc(), diag::err_array_too_large)
2303 << toString(*Value, 10) << (*ArraySize)->getSourceRange());
2304 }
2305
2306 KnownArraySize = Value->getZExtValue();
2307 } else if (TypeIdParens.isValid()) {
2308 // Can't have dynamic array size when the type-id is in parentheses.
2309 Diag((*ArraySize)->getBeginLoc(), diag::ext_new_paren_array_nonconst)
2310 << (*ArraySize)->getSourceRange()
2311 << FixItHint::CreateRemoval(TypeIdParens.getBegin())
2312 << FixItHint::CreateRemoval(TypeIdParens.getEnd());
2313
2314 TypeIdParens = SourceRange();
2315 }
2316
2317 // Note that we do *not* convert the argument in any way. It can
2318 // be signed, larger than size_t, whatever.
2319 }
2320
2321 FunctionDecl *OperatorNew = nullptr;
2322 FunctionDecl *OperatorDelete = nullptr;
2323 unsigned Alignment =
2324 AllocType->isDependentType() ? 0 : Context.getTypeAlign(AllocType);
2325 unsigned NewAlignment = Context.getTargetInfo().getNewAlign();
2326 bool PassAlignment = getLangOpts().AlignedAllocation &&
2327 Alignment > NewAlignment;
2328
2329 if (CheckArgsForPlaceholders(PlacementArgs))
2330 return ExprError();
2331
2332 AllocationFunctionScope Scope = UseGlobal ? AFS_Global : AFS_Both;
2333 if (!AllocType->isDependentType() &&
2334 !Expr::hasAnyTypeDependentArguments(PlacementArgs) &&
2335 FindAllocationFunctions(
2336 StartLoc, SourceRange(PlacementLParen, PlacementRParen), Scope, Scope,
2337 AllocType, ArraySize.has_value(), PassAlignment, PlacementArgs,
2338 OperatorNew, OperatorDelete))
2339 return ExprError();
2340
2341 // If this is an array allocation, compute whether the usual array
2342 // deallocation function for the type has a size_t parameter.
2343 bool UsualArrayDeleteWantsSize = false;
2344 if (ArraySize && !AllocType->isDependentType())
2345 UsualArrayDeleteWantsSize =
2346 doesUsualArrayDeleteWantSize(*this, StartLoc, AllocType);
2347
2348 SmallVector<Expr *, 8> AllPlaceArgs;
2349 if (OperatorNew) {
2350 auto *Proto = OperatorNew->getType()->castAs<FunctionProtoType>();
2351 VariadicCallType CallType = Proto->isVariadic() ? VariadicFunction
2352 : VariadicDoesNotApply;
2353
2354 // We've already converted the placement args, just fill in any default
2355 // arguments. Skip the first parameter because we don't have a corresponding
2356 // argument. Skip the second parameter too if we're passing in the
2357 // alignment; we've already filled it in.
2358 unsigned NumImplicitArgs = PassAlignment ? 2 : 1;
2359 if (GatherArgumentsForCall(PlacementLParen, OperatorNew, Proto,
2360 NumImplicitArgs, PlacementArgs, AllPlaceArgs,
2361 CallType))
2362 return ExprError();
2363
2364 if (!AllPlaceArgs.empty())
2365 PlacementArgs = AllPlaceArgs;
2366
2367 // We would like to perform some checking on the given `operator new` call,
2368 // but the PlacementArgs does not contain the implicit arguments,
2369 // namely allocation size and maybe allocation alignment,
2370 // so we need to conjure them.
2371
2372 QualType SizeTy = Context.getSizeType();
2373 unsigned SizeTyWidth = Context.getTypeSize(SizeTy);
2374
2375 llvm::APInt SingleEltSize(
2376 SizeTyWidth, Context.getTypeSizeInChars(AllocType).getQuantity());
2377
2378 // How many bytes do we want to allocate here?
2379 std::optional<llvm::APInt> AllocationSize;
2380 if (!ArraySize && !AllocType->isDependentType()) {
2381 // For non-array operator new, we only want to allocate one element.
2382 AllocationSize = SingleEltSize;
2383 } else if (KnownArraySize && !AllocType->isDependentType()) {
2384 // For array operator new, only deal with static array size case.
2385 bool Overflow;
2386 AllocationSize = llvm::APInt(SizeTyWidth, *KnownArraySize)
2387 .umul_ov(SingleEltSize, Overflow);
2388 (void)Overflow;
2389 assert(
2390 !Overflow &&
2391 "Expected that all the overflows would have been handled already.");
2392 }
2393
2394 IntegerLiteral AllocationSizeLiteral(
2395 Context, AllocationSize.value_or(llvm::APInt::getZero(SizeTyWidth)),
2396 SizeTy, SourceLocation());
2397 // Otherwise, if we failed to constant-fold the allocation size, we'll
2398 // just give up and pass-in something opaque, that isn't a null pointer.
2399 OpaqueValueExpr OpaqueAllocationSize(SourceLocation(), SizeTy, VK_PRValue,
2400 OK_Ordinary, /*SourceExpr=*/nullptr);
2401
2402 // Let's synthesize the alignment argument in case we will need it.
2403 // Since we *really* want to allocate these on stack, this is slightly ugly
2404 // because there might not be a `std::align_val_t` type.
2405 EnumDecl *StdAlignValT = getStdAlignValT();
2406 QualType AlignValT =
2407 StdAlignValT ? Context.getTypeDeclType(StdAlignValT) : SizeTy;
2408 IntegerLiteral AlignmentLiteral(
2409 Context,
2410 llvm::APInt(Context.getTypeSize(SizeTy),
2411 Alignment / Context.getCharWidth()),
2412 SizeTy, SourceLocation());
2413 ImplicitCastExpr DesiredAlignment(ImplicitCastExpr::OnStack, AlignValT,
2414 CK_IntegralCast, &AlignmentLiteral,
2415 VK_PRValue, FPOptionsOverride());
2416
2417 // Adjust placement args by prepending conjured size and alignment exprs.
2418 llvm::SmallVector<const Expr *, 8> CallArgs;
2419 CallArgs.reserve(NumImplicitArgs + PlacementArgs.size());
2420 CallArgs.emplace_back(AllocationSize
2421 ? static_cast<Expr *>(&AllocationSizeLiteral)
2422 : &OpaqueAllocationSize);
2423 if (PassAlignment)
2424 CallArgs.emplace_back(&DesiredAlignment);
2425 CallArgs.insert(CallArgs.end(), PlacementArgs.begin(), PlacementArgs.end());
2426
2427 DiagnoseSentinelCalls(OperatorNew, PlacementLParen, CallArgs);
2428
2429 checkCall(OperatorNew, Proto, /*ThisArg=*/nullptr, CallArgs,
2430 /*IsMemberFunction=*/false, StartLoc, Range, CallType);
2431
2432 // Warn if the type is over-aligned and is being allocated by (unaligned)
2433 // global operator new.
2434 if (PlacementArgs.empty() && !PassAlignment &&
2435 (OperatorNew->isImplicit() ||
2436 (OperatorNew->getBeginLoc().isValid() &&
2437 getSourceManager().isInSystemHeader(OperatorNew->getBeginLoc())))) {
2438 if (Alignment > NewAlignment)
2439 Diag(StartLoc, diag::warn_overaligned_type)
2440 << AllocType
2441 << unsigned(Alignment / Context.getCharWidth())
2442 << unsigned(NewAlignment / Context.getCharWidth());
2443 }
2444 }
2445
2446 // Array 'new' can't have any initializers except empty parentheses.
2447 // Initializer lists are also allowed, in C++11. Rely on the parser for the
2448 // dialect distinction.
2449 if (ArraySize && !isLegalArrayNewInitializer(InitStyle, Initializer,
2450 getLangOpts().CPlusPlus20)) {
2451 SourceRange InitRange(Exprs.front()->getBeginLoc(),
2452 Exprs.back()->getEndLoc());
2453 Diag(StartLoc, diag::err_new_array_init_args) << InitRange;
2454 return ExprError();
2455 }
2456
2457 // If we can perform the initialization, and we've not already done so,
2458 // do it now.
2459 if (!AllocType->isDependentType() &&
2460 !Expr::hasAnyTypeDependentArguments(Exprs)) {
2461 // The type we initialize is the complete type, including the array bound.
2462 QualType InitType;
2463 if (KnownArraySize)
2464 InitType = Context.getConstantArrayType(
2465 AllocType,
2466 llvm::APInt(Context.getTypeSize(Context.getSizeType()),
2467 *KnownArraySize),
2468 *ArraySize, ArraySizeModifier::Normal, 0);
2469 else if (ArraySize)
2470 InitType = Context.getIncompleteArrayType(AllocType,
2471 ArraySizeModifier::Normal, 0);
2472 else
2473 InitType = AllocType;
2474
2475 InitializedEntity Entity
2476 = InitializedEntity::InitializeNew(StartLoc, InitType);
2477 InitializationSequence InitSeq(*this, Entity, Kind, Exprs);
2478 ExprResult FullInit = InitSeq.Perform(*this, Entity, Kind, Exprs);
2479 if (FullInit.isInvalid())
2480 return ExprError();
2481
2482 // FullInit is our initializer; strip off CXXBindTemporaryExprs, because
2483 // we don't want the initialized object to be destructed.
2484 // FIXME: We should not create these in the first place.
2485 if (CXXBindTemporaryExpr *Binder =
2486 dyn_cast_or_null<CXXBindTemporaryExpr>(FullInit.get()))
2487 FullInit = Binder->getSubExpr();
2488
2489 Initializer = FullInit.get();
2490
2491 // FIXME: If we have a KnownArraySize, check that the array bound of the
2492 // initializer is no greater than that constant value.
2493
2494 if (ArraySize && !*ArraySize) {
2495 auto *CAT = Context.getAsConstantArrayType(Initializer->getType());
2496 if (CAT) {
2497 // FIXME: Track that the array size was inferred rather than explicitly
2498 // specified.
2499 ArraySize = IntegerLiteral::Create(
2500 Context, CAT->getSize(), Context.getSizeType(), TypeRange.getEnd());
2501 } else {
2502 Diag(TypeRange.getEnd(), diag::err_new_array_size_unknown_from_init)
2503 << Initializer->getSourceRange();
2504 }
2505 }
2506 }
2507
2508 // Mark the new and delete operators as referenced.
2509 if (OperatorNew) {
2510 if (DiagnoseUseOfDecl(OperatorNew, StartLoc))
2511 return ExprError();
2512 MarkFunctionReferenced(StartLoc, OperatorNew);
2513 }
2514 if (OperatorDelete) {
2515 if (DiagnoseUseOfDecl(OperatorDelete, StartLoc))
2516 return ExprError();
2517 MarkFunctionReferenced(StartLoc, OperatorDelete);
2518 }
2519
2520 return CXXNewExpr::Create(Context, UseGlobal, OperatorNew, OperatorDelete,
2521 PassAlignment, UsualArrayDeleteWantsSize,
2522 PlacementArgs, TypeIdParens, ArraySize, InitStyle,
2523 Initializer, ResultType, AllocTypeInfo, Range,
2524 DirectInitRange);
2525}
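// Editorial note (not part of the original source): how the pieces of a
// new-expression map onto the parameters handled above, assuming <new> is
// included and 'buf' is suitably sized storage:
//
//   struct S { S(int); };
//   alignas(S) unsigned char buf[sizeof(S)];
//   S *p = ::new (buf) S(42);
//   //     '::'      -> UseGlobal: look up 'operator new' in the global scope
//   //     '(buf)'   -> PlacementArgs between PlacementLParen/PlacementRParen
//   //     'S'       -> AllocType / AllocTypeInfo
//   //     '(42)'    -> Initializer; its parens give DirectInitRange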
2526
2527 bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
2528 SourceRange R) {
2529 // C++ 5.3.4p1: "[The] type shall be a complete object type, but not an
2530 // abstract class type or array thereof.
2531 if (AllocType->isFunctionType())
2532 return Diag(Loc, diag::err_bad_new_type)
2533 << AllocType << 0 << R;
2534 else if (AllocType->isReferenceType())
2535 return Diag(Loc, diag::err_bad_new_type)
2536 << AllocType << 1 << R;
2537 else if (!AllocType->isDependentType() &&
2538 RequireCompleteSizedType(
2539 Loc, AllocType, diag::err_new_incomplete_or_sizeless_type, R))
2540 return true;
2541 else if (RequireNonAbstractType(Loc, AllocType,
2542 diag::err_allocation_of_abstract_type))
2543 return true;
2544 else if (AllocType->isVariablyModifiedType())
2545 return Diag(Loc, diag::err_variably_modified_new_type)
2546 << AllocType;
2547 else if (AllocType.getAddressSpace() != LangAS::Default &&
2548 !getLangOpts().OpenCLCPlusPlus)
2549 return Diag(Loc, diag::err_address_space_qualified_new)
2550 << AllocType.getUnqualifiedType()
2551 << AllocType.getQualifiers().getAddressSpaceAttributePrintValue();
2552 else if (getLangOpts().ObjCAutoRefCount) {
2553 if (const ArrayType *AT = Context.getAsArrayType(AllocType)) {
2554 QualType BaseAllocType = Context.getBaseElementType(AT);
2555 if (BaseAllocType.getObjCLifetime() == Qualifiers::OCL_None &&
2556 BaseAllocType->isObjCLifetimeType())
2557 return Diag(Loc, diag::err_arc_new_array_without_ownership)
2558 << BaseAllocType;
2559 }
2560 }
2561
2562 return false;
2563}
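// Editorial note (not part of the original source): allocated types the
// checks above reject; 'Abstract' and 'Fwd' are illustrative only.
//
//   struct Abstract { virtual void f() = 0; };
//   struct Fwd;              // declared but not defined
//   using Fn  = void();
//   using Ref = int&;
//   // new Fn;               // error: allocating a function type
//   // new Ref;              // error: allocating a reference type
//   // new Abstract;         // error: allocating an abstract class type
//   // new Fwd;              // error: allocating an incomplete type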
2564
2565 static bool resolveAllocationOverload(
2566 Sema &S, LookupResult &R, SourceRange Range, SmallVectorImpl<Expr *> &Args,
2567 bool &PassAlignment, FunctionDecl *&Operator,
2568 OverloadCandidateSet *AlignedCandidates, Expr *AlignArg, bool Diagnose) {
2569 OverloadCandidateSet Candidates(R.getNameLoc(),
2570 OverloadCandidateSet::CSK_Normal);
2571 for (LookupResult::iterator Alloc = R.begin(), AllocEnd = R.end();
2572 Alloc != AllocEnd; ++Alloc) {
2573 // Even member operator new/delete are implicitly treated as
2574 // static, so don't use AddMemberCandidate.
2575 NamedDecl *D = (*Alloc)->getUnderlyingDecl();
2576
2577 if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(D)) {
2578 S.AddTemplateOverloadCandidate(FnTemplate, Alloc.getPair(),
2579 /*ExplicitTemplateArgs=*/nullptr, Args,
2580 Candidates,
2581 /*SuppressUserConversions=*/false);
2582 continue;
2583 }
2584
2585 FunctionDecl *Fn = cast<FunctionDecl>(D);
2586 S.AddOverloadCandidate(Fn, Alloc.getPair(), Args, Candidates,
2587 /*SuppressUserConversions=*/false);
2588 }
2589
2590 // Do the resolution.
2591 OverloadCandidateSet::iterator Best;
2592 switch (Candidates.BestViableFunction(S, R.getNameLoc(), Best)) {
2593 case OR_Success: {
2594 // Got one!
2595 FunctionDecl *FnDecl = Best->Function;
2596 if (S.CheckAllocationAccess(R.getNameLoc(), Range, R.getNamingClass(),
2597 Best->FoundDecl) == Sema::AR_inaccessible)
2598 return true;
2599
2600 Operator = FnDecl;
2601 return false;
2602 }
2603
2604 case OR_No_Viable_Function:
2605 // C++17 [expr.new]p13:
2606 // If no matching function is found and the allocated object type has
2607 // new-extended alignment, the alignment argument is removed from the
2608 // argument list, and overload resolution is performed again.
2609 if (PassAlignment) {
2610 PassAlignment = false;
2611 AlignArg = Args[1];
2612 Args.erase(Args.begin() + 1);
2613 return resolveAllocationOverload(S, R, Range, Args, PassAlignment,
2614 Operator, &Candidates, AlignArg,
2615 Diagnose);
2616 }
2617
2618 // MSVC will fall back on trying to find a matching global operator new
2619 // if operator new[] cannot be found. Also, MSVC will leak by not
2620 // generating a call to operator delete or operator delete[], but we
2621 // will not replicate that bug.
2622 // FIXME: Find out how this interacts with the std::align_val_t fallback
2623 // once MSVC implements it.
2624 if (R.getLookupName().getCXXOverloadedOperator() == OO_Array_New &&
2625 S.Context.getLangOpts().MSVCCompat) {
2626 R.clear();
2627 R.setLookupName(S.Context.DeclarationNames.getCXXOperatorName(OO_New));
2628 S.LookupQualifiedName(R, S.Context.getTranslationUnitDecl());
2629 // FIXME: This will give bad diagnostics pointing at the wrong functions.
2630 return resolveAllocationOverload(S, R, Range, Args, PassAlignment,
2631 Operator, /*Candidates=*/nullptr,
2632 /*AlignArg=*/nullptr, Diagnose);
2633 }
2634
2635 if (Diagnose) {
2636 // If this is an allocation of the form 'new (p) X' for some object
2637 // pointer p (or an expression that will decay to such a pointer),
2638 // diagnose the missing inclusion of <new>.
2639 if (!R.isClassLookup() && Args.size() == 2 &&
2640 (Args[1]->getType()->isObjectPointerType() ||
2641 Args[1]->getType()->isArrayType())) {
2642 S.Diag(R.getNameLoc(), diag::err_need_header_before_placement_new)
2643 << R.getLookupName() << Range;
2644 // Listing the candidates is unlikely to be useful; skip it.
2645 return true;
2646 }
2647
2648 // Finish checking all candidates before we note any. This checking can
2649 // produce additional diagnostics so can't be interleaved with our
2650 // emission of notes.
2651 //
2652 // For an aligned allocation, separately check the aligned and unaligned
2653 // candidates with their respective argument lists.
2654 SmallVector<OverloadCandidate*, 32> Cands;
2655 SmallVector<OverloadCandidate*, 32> AlignedCands;
2656 llvm::SmallVector<Expr*, 4> AlignedArgs;
2657 if (AlignedCandidates) {
2658 auto IsAligned = [](OverloadCandidate &C) {
2659 return C.Function->getNumParams() > 1 &&
2660 C.Function->getParamDecl(1)->getType()->isAlignValT();
2661 };
2662 auto IsUnaligned = [&](OverloadCandidate &C) { return !IsAligned(C); };
2663
2664 AlignedArgs.reserve(Args.size() + 1);
2665 AlignedArgs.push_back(Args[0]);
2666 AlignedArgs.push_back(AlignArg);
2667 AlignedArgs.append(Args.begin() + 1, Args.end());
2668 AlignedCands = AlignedCandidates->CompleteCandidates(
2669 S, OCD_AllCandidates, AlignedArgs, R.getNameLoc(), IsAligned);
2670
2671 Cands = Candidates.CompleteCandidates(S, OCD_AllCandidates, Args,
2672 R.getNameLoc(), IsUnaligned);
2673 } else {
2674 Cands = Candidates.CompleteCandidates(S, OCD_AllCandidates, Args,
2675 R.getNameLoc());
2676 }
2677
2678 S.Diag(R.getNameLoc(), diag::err_ovl_no_viable_function_in_call)
2679 << R.getLookupName() << Range;
2680 if (AlignedCandidates)
2681 AlignedCandidates->NoteCandidates(S, AlignedArgs, AlignedCands, "",
2682 R.getNameLoc());
2683 Candidates.NoteCandidates(S, Args, Cands, "", R.getNameLoc());
2684 }
2685 return true;
2686
2687 case OR_Ambiguous:
2688 if (Diagnose) {
2689 Candidates.NoteCandidates(
2690 PartialDiagnosticAt(R.getNameLoc(),
2691 S.PDiag(diag::err_ovl_ambiguous_call)
2692 << R.getLookupName() << Range),
2693 S, OCD_AmbiguousCandidates, Args);
2694 }
2695 return true;
2696
2697 case OR_Deleted: {
2698 if (Diagnose)
2700 Candidates, Best->Function, Args);
2701 return true;
2702 }
2703 }
2704 llvm_unreachable("Unreachable, bad result from BestViableFunction");
2705}
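// Editorial note (not part of the original source): a sketch of the aligned
// allocation fallback exercised above, assuming <new> and -faligned-allocation.
// For an over-aligned type the alignment is passed first; if no overload
// accepts std::align_val_t, the argument is dropped and resolution re-runs:
//
//   struct alignas(64) Big { char c[64]; };
//   void *operator new(std::size_t, std::align_val_t, int tag);  // tried first
//   void *operator new(std::size_t, int tag);                    // fallback
//   Big *b = new (7) Big;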
2706
2707 bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
2708 AllocationFunctionScope NewScope,
2709 AllocationFunctionScope DeleteScope,
2710 QualType AllocType, bool IsArray,
2711 bool &PassAlignment, MultiExprArg PlaceArgs,
2712 FunctionDecl *&OperatorNew,
2713 FunctionDecl *&OperatorDelete,
2714 bool Diagnose) {
2715 // --- Choosing an allocation function ---
2716 // C++ 5.3.4p8 - 14 & 18
2717 // 1) If looking in AFS_Global scope for allocation functions, only look in
2718 // the global scope. Else, if AFS_Class, only look in the scope of the
2719 // allocated class. If AFS_Both, look in both.
2720 // 2) If an array size is given, look for operator new[], else look for
2721 // operator new.
2722 // 3) The first argument is always size_t. Append the arguments from the
2723 // placement form.
2724
2725 SmallVector<Expr*, 8> AllocArgs;
2726 AllocArgs.reserve((PassAlignment ? 2 : 1) + PlaceArgs.size());
2727
2728 // We don't care about the actual value of these arguments.
2729 // FIXME: Should the Sema create the expression and embed it in the syntax
2730 // tree? Or should the consumer just recalculate the value?
2731 // FIXME: Using a dummy value will interact poorly with attribute enable_if.
2732 QualType SizeTy = Context.getSizeType();
2733 unsigned SizeTyWidth = Context.getTypeSize(SizeTy);
2734 IntegerLiteral Size(Context, llvm::APInt::getZero(SizeTyWidth), SizeTy,
2735 SourceLocation());
2736 AllocArgs.push_back(&Size);
2737
2738 QualType AlignValT = Context.VoidTy;
2739 if (PassAlignment) {
2740 DeclareGlobalNewDelete();
2741 AlignValT = Context.getTypeDeclType(getStdAlignValT());
2742 }
2743 CXXScalarValueInitExpr Align(AlignValT, nullptr, SourceLocation());
2744 if (PassAlignment)
2745 AllocArgs.push_back(&Align);
2746
2747 AllocArgs.insert(AllocArgs.end(), PlaceArgs.begin(), PlaceArgs.end());
2748
2749 // C++ [expr.new]p8:
2750 // If the allocated type is a non-array type, the allocation
2751 // function's name is operator new and the deallocation function's
2752 // name is operator delete. If the allocated type is an array
2753 // type, the allocation function's name is operator new[] and the
2754 // deallocation function's name is operator delete[].
2755 DeclarationName NewName = Context.DeclarationNames.getCXXOperatorName(
2756 IsArray ? OO_Array_New : OO_New);
2757
2758 QualType AllocElemType = Context.getBaseElementType(AllocType);
2759
2760 // Find the allocation function.
2761 {
2762 LookupResult R(*this, NewName, StartLoc, LookupOrdinaryName);
2763
2764 // C++1z [expr.new]p9:
2765 // If the new-expression begins with a unary :: operator, the allocation
2766 // function's name is looked up in the global scope. Otherwise, if the
2767 // allocated type is a class type T or array thereof, the allocation
2768 // function's name is looked up in the scope of T.
2769 if (AllocElemType->isRecordType() && NewScope != AFS_Global)
2770 LookupQualifiedName(R, AllocElemType->getAsCXXRecordDecl());
2771
2772 // We can see ambiguity here if the allocation function is found in
2773 // multiple base classes.
2774 if (R.isAmbiguous())
2775 return true;
2776
2777 // If this lookup fails to find the name, or if the allocated type is not
2778 // a class type, the allocation function's name is looked up in the
2779 // global scope.
2780 if (R.empty()) {
2781 if (NewScope == AFS_Class)
2782 return true;
2783
2784 LookupQualifiedName(R, Context.getTranslationUnitDecl());
2785 }
2786
2787 if (getLangOpts().OpenCLCPlusPlus && R.empty()) {
2788 if (PlaceArgs.empty()) {
2789 Diag(StartLoc, diag::err_openclcxx_not_supported) << "default new";
2790 } else {
2791 Diag(StartLoc, diag::err_openclcxx_placement_new);
2792 }
2793 return true;
2794 }
2795
2796 assert(!R.empty() && "implicitly declared allocation functions not found");
2797 assert(!R.isAmbiguous() && "global allocation functions are ambiguous");
2798
2799 // We do our own custom access checks below.
2800 R.suppressDiagnostics();
2801
2802 if (resolveAllocationOverload(*this, R, Range, AllocArgs, PassAlignment,
2803 OperatorNew, /*Candidates=*/nullptr,
2804 /*AlignArg=*/nullptr, Diagnose))
2805 return true;
2806 }
2807
2808 // We don't need an operator delete if we're running under -fno-exceptions.
2809 if (!getLangOpts().Exceptions) {
2810 OperatorDelete = nullptr;
2811 return false;
2812 }
2813
2814 // Note, the name of OperatorNew might have been changed from array to
2815 // non-array by resolveAllocationOverload.
2816 DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
2817 OperatorNew->getDeclName().getCXXOverloadedOperator() == OO_Array_New
2818 ? OO_Array_Delete
2819 : OO_Delete);
2820
2821 // C++ [expr.new]p19:
2822 //
2823 // If the new-expression begins with a unary :: operator, the
2824 // deallocation function's name is looked up in the global
2825 // scope. Otherwise, if the allocated type is a class type T or an
2826 // array thereof, the deallocation function's name is looked up in
2827 // the scope of T. If this lookup fails to find the name, or if
2828 // the allocated type is not a class type or array thereof, the
2829 // deallocation function's name is looked up in the global scope.
2830 LookupResult FoundDelete(*this, DeleteName, StartLoc, LookupOrdinaryName);
2831 if (AllocElemType->isRecordType() && DeleteScope != AFS_Global) {
2832 auto *RD =
2833 cast<CXXRecordDecl>(AllocElemType->castAs<RecordType>()->getDecl());
2834 LookupQualifiedName(FoundDelete, RD);
2835 }
2836 if (FoundDelete.isAmbiguous())
2837 return true; // FIXME: clean up expressions?
2838
2839 // Filter out any destroying operator deletes. We can't possibly call such a
2840 // function in this context, because we're handling the case where the object
2841 // was not successfully constructed.
2842 // FIXME: This is not covered by the language rules yet.
2843 {
2844 LookupResult::Filter Filter = FoundDelete.makeFilter();
2845 while (Filter.hasNext()) {
2846 auto *FD = dyn_cast<FunctionDecl>(Filter.next()->getUnderlyingDecl());
2847 if (FD && FD->isDestroyingOperatorDelete())
2848 Filter.erase();
2849 }
2850 Filter.done();
2851 }
2852
2853 bool FoundGlobalDelete = FoundDelete.empty();
2854 if (FoundDelete.empty()) {
2855 FoundDelete.clear(LookupOrdinaryName);
2856
2857 if (DeleteScope == AFS_Class)
2858 return true;
2859
2860 DeclareGlobalNewDelete();
2861 LookupQualifiedName(FoundDelete, Context.getTranslationUnitDecl());
2862 }
2863
2864 FoundDelete.suppressDiagnostics();
2865
2866 SmallVector<std::pair<DeclAccessPair, FunctionDecl *>, 2> Matches;
2867
2868 // Whether we're looking for a placement operator delete is dictated
2869 // by whether we selected a placement operator new, not by whether
2870 // we had explicit placement arguments. This matters for things like
2871 // struct A { void *operator new(size_t, int = 0); ... };
2872 // A *a = new A()
2873 //
2874 // We don't have any definition for what a "placement allocation function"
2875 // is, but we assume it's any allocation function whose
2876 // parameter-declaration-clause is anything other than (size_t).
2877 //
2878 // FIXME: Should (size_t, std::align_val_t) also be considered non-placement?
2879 // This affects whether an exception from the constructor of an overaligned
2880 // type uses the sized or non-sized form of aligned operator delete.
2881 bool isPlacementNew = !PlaceArgs.empty() || OperatorNew->param_size() != 1 ||
2882 OperatorNew->isVariadic();
2883
2884 if (isPlacementNew) {
2885 // C++ [expr.new]p20:
2886 // A declaration of a placement deallocation function matches the
2887 // declaration of a placement allocation function if it has the
2888 // same number of parameters and, after parameter transformations
2889 // (8.3.5), all parameter types except the first are
2890 // identical. [...]
2891 //
2892 // To perform this comparison, we compute the function type that
2893 // the deallocation function should have, and use that type both
2894 // for template argument deduction and for comparison purposes.
2895 QualType ExpectedFunctionType;
2896 {
2897 auto *Proto = OperatorNew->getType()->castAs<FunctionProtoType>();
2898
2899 SmallVector<QualType, 4> ArgTypes;
2900 ArgTypes.push_back(Context.VoidPtrTy);
2901 for (unsigned I = 1, N = Proto->getNumParams(); I < N; ++I)
2902 ArgTypes.push_back(Proto->getParamType(I));
2903
2904 FunctionProtoType::ExtProtoInfo EPI;
2905 // FIXME: This is not part of the standard's rule.
2906 EPI.Variadic = Proto->isVariadic();
2907
2908 ExpectedFunctionType
2909 = Context.getFunctionType(Context.VoidTy, ArgTypes, EPI);
2910 }
2911
2912 for (LookupResult::iterator D = FoundDelete.begin(),
2913 DEnd = FoundDelete.end();
2914 D != DEnd; ++D) {
2915 FunctionDecl *Fn = nullptr;
2916 if (FunctionTemplateDecl *FnTmpl =
2917 dyn_cast<FunctionTemplateDecl>((*D)->getUnderlyingDecl())) {
2918 // Perform template argument deduction to try to match the
2919 // expected function type.
2920 TemplateDeductionInfo Info(StartLoc);
2921 if (DeduceTemplateArguments(FnTmpl, nullptr, ExpectedFunctionType, Fn,
2922 Info) != TemplateDeductionResult::Success)
2923 continue;
2924 } else
2925 Fn = cast<FunctionDecl>((*D)->getUnderlyingDecl());
2926
2927 if (Context.hasSameType(adjustCCAndNoReturn(Fn->getType(),
2928 ExpectedFunctionType,
2929 /*AdjustExcpetionSpec*/true),
2930 ExpectedFunctionType))
2931 Matches.push_back(std::make_pair(D.getPair(), Fn));
2932 }
2933
2934 if (getLangOpts().CUDA)
2935 CUDA().EraseUnwantedMatches(getCurFunctionDecl(/*AllowLambda=*/true),
2936 Matches);
2937 } else {
2938 // C++1y [expr.new]p22:
2939 // For a non-placement allocation function, the normal deallocation
2940 // function lookup is used
2941 //
2942 // Per [expr.delete]p10, this lookup prefers a member operator delete
2943 // without a size_t argument, but prefers a non-member operator delete
2944 // with a size_t where possible (which it always is in this case).
2945 llvm::SmallVector<UsualDeallocFnInfo, 4> BestDeallocFns;
2946 UsualDeallocFnInfo Selected = resolveDeallocationOverload(
2947 *this, FoundDelete, /*WantSize*/ FoundGlobalDelete,
2948 /*WantAlign*/ hasNewExtendedAlignment(*this, AllocElemType),
2949 &BestDeallocFns);
2950 if (Selected)
2951 Matches.push_back(std::make_pair(Selected.Found, Selected.FD));
2952 else {
2953 // If we failed to select an operator, all remaining functions are viable
2954 // but ambiguous.
2955 for (auto Fn : BestDeallocFns)
2956 Matches.push_back(std::make_pair(Fn.Found, Fn.FD));
2957 }
2958 }
2959
2960 // C++ [expr.new]p20:
2961 // [...] If the lookup finds a single matching deallocation
2962 // function, that function will be called; otherwise, no
2963 // deallocation function will be called.
2964 if (Matches.size() == 1) {
2965 OperatorDelete = Matches[0].second;
2966
2967 // C++1z [expr.new]p23:
2968 // If the lookup finds a usual deallocation function (3.7.4.2)
2969 // with a parameter of type std::size_t and that function, considered
2970 // as a placement deallocation function, would have been
2971 // selected as a match for the allocation function, the program
2972 // is ill-formed.
2973 if (getLangOpts().CPlusPlus11 && isPlacementNew &&
2974 isNonPlacementDeallocationFunction(*this, OperatorDelete)) {
2975 UsualDeallocFnInfo Info(*this,
2976 DeclAccessPair::make(OperatorDelete, AS_public));
2977 // Core issue, per mail to core reflector, 2016-10-09:
2978 // If this is a member operator delete, and there is a corresponding
2979 // non-sized member operator delete, this isn't /really/ a sized
2980 // deallocation function, it just happens to have a size_t parameter.
2981 bool IsSizedDelete = Info.HasSizeT;
2982 if (IsSizedDelete && !FoundGlobalDelete) {
2983 auto NonSizedDelete =
2984 resolveDeallocationOverload(*this, FoundDelete, /*WantSize*/false,
2985 /*WantAlign*/Info.HasAlignValT);
2986 if (NonSizedDelete && !NonSizedDelete.HasSizeT &&
2987 NonSizedDelete.HasAlignValT == Info.HasAlignValT)
2988 IsSizedDelete = false;
2989 }
2990
2991 if (IsSizedDelete) {
2992 SourceRange R = PlaceArgs.empty()
2993 ? SourceRange()
2994 : SourceRange(PlaceArgs.front()->getBeginLoc(),
2995 PlaceArgs.back()->getEndLoc());
2996 Diag(StartLoc, diag::err_placement_new_non_placement_delete) << R;
2997 if (!OperatorDelete->isImplicit())
2998 Diag(OperatorDelete->getLocation(), diag::note_previous_decl)
2999 << DeleteName;
3000 }
3001 }
3002
3003 CheckAllocationAccess(StartLoc, Range, FoundDelete.getNamingClass(),
3004 Matches[0].first);
3005 } else if (!Matches.empty()) {
3006 // We found multiple suitable operators. Per [expr.new]p20, that means we
3007 // call no 'operator delete' function, but we should at least warn the user.
3008 // FIXME: Suppress this warning if the construction cannot throw.
3009 Diag(StartLoc, diag::warn_ambiguous_suitable_delete_function_found)
3010 << DeleteName << AllocElemType;
3011
3012 for (auto &Match : Matches)
3013 Diag(Match.second->getLocation(),
3014 diag::note_member_declared_here) << DeleteName;
3015 }
3016
3017 return false;
3018}
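// Editorial note (not part of the original source): the placement-delete
// matching rule implemented above, with hypothetical 'Arena'/'Node' types.
// If the constructor throws, the placement delete whose trailing parameters
// mirror the chosen placement new is the one that gets called:
//
//   struct Arena;
//   struct Node {
//     void *operator new(std::size_t, Arena &);  // placement allocation
//     void operator delete(void *, Arena &);     // matching placement delete
//     Node();                                    // may throw
//   };
//   // 'new (arena) Node': a throwing constructor releases the storage via
//   // Node::operator delete(void*, Arena&).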
3019
3020 void Sema::DeclareGlobalNewDelete() {
3021 if (GlobalNewDeleteDeclared)
3022 return;
3023
3024 // The implicitly declared new and delete operators
3025 // are not supported in OpenCL.
3026 if (getLangOpts().OpenCLCPlusPlus)
3027 return;
3028
3029 // C++ [basic.stc.dynamic.general]p2:
3030 // The library provides default definitions for the global allocation
3031 // and deallocation functions. Some global allocation and deallocation
3032 // functions are replaceable ([new.delete]); these are attached to the
3033 // global module ([module.unit]).
3034 if (getLangOpts().CPlusPlusModules && getCurrentModule())
3035 PushGlobalModuleFragment(SourceLocation());
3036
3037 // C++ [basic.std.dynamic]p2:
3038 // [...] The following allocation and deallocation functions (18.4) are
3039 // implicitly declared in global scope in each translation unit of a
3040 // program
3041 //
3042 // C++03:
3043 // void* operator new(std::size_t) throw(std::bad_alloc);
3044 // void* operator new[](std::size_t) throw(std::bad_alloc);
3045 // void operator delete(void*) throw();
3046 // void operator delete[](void*) throw();
3047 // C++11:
3048 // void* operator new(std::size_t);
3049 // void* operator new[](std::size_t);
3050 // void operator delete(void*) noexcept;
3051 // void operator delete[](void*) noexcept;
3052 // C++1y:
3053 // void* operator new(std::size_t);
3054 // void* operator new[](std::size_t);
3055 // void operator delete(void*) noexcept;
3056 // void operator delete[](void*) noexcept;
3057 // void operator delete(void*, std::size_t) noexcept;
3058 // void operator delete[](void*, std::size_t) noexcept;
3059 //
3060 // These implicit declarations introduce only the function names operator
3061 // new, operator new[], operator delete, operator delete[].
3062 //
3063 // Here, we need to refer to std::bad_alloc, so we will implicitly declare
3064 // "std" or "bad_alloc" as necessary to form the exception specification.
3065 // However, we do not make these implicit declarations visible to name
3066 // lookup.
3067 if (!StdBadAlloc && !getLangOpts().CPlusPlus11) {
3068 // The "std::bad_alloc" class has not yet been declared, so build it
3069 // implicitly.
3070 StdBadAlloc = CXXRecordDecl::Create(Context, TagTypeKind::Class,
3071 getOrCreateStdNamespace(),
3072 SourceLocation(), SourceLocation(),
3073 &PP.getIdentifierTable().get("bad_alloc"), nullptr);
3074 getStdBadAlloc()->setImplicit(true);
3075
3076 // The implicitly declared "std::bad_alloc" should live in global module
3077 // fragment.
3078 if (TheGlobalModuleFragment) {
3079 getStdBadAlloc()->setModuleOwnershipKind(
3080 NamedDecl::ModuleOwnershipKind::ReachableWhenImported);
3081 getStdBadAlloc()->setLocalOwningModule(TheGlobalModuleFragment);
3082 }
3083 }
3084 if (!StdAlignValT && getLangOpts().AlignedAllocation) {
3085 // The "std::align_val_t" enum class has not yet been declared, so build it
3086 // implicitly.
3087 auto *AlignValT = EnumDecl::Create(
3088 Context, getOrCreateStdNamespace(), SourceLocation(), SourceLocation(),
3089 &PP.getIdentifierTable().get("align_val_t"), nullptr, true, true, true);
3090
3091 // The implicitly declared "std::align_val_t" should live in global module
3092 // fragment.
3093 if (TheGlobalModuleFragment) {
3094 AlignValT->setModuleOwnershipKind(
3095 NamedDecl::ModuleOwnershipKind::ReachableWhenImported);
3096 AlignValT->setLocalOwningModule(TheGlobalModuleFragment);
3097 }
3098
3099 AlignValT->setIntegerType(Context.getSizeType());
3100 AlignValT->setPromotionType(Context.getSizeType());
3101 AlignValT->setImplicit(true);
3102
3103 StdAlignValT = AlignValT;
3104 }
3105
3106 GlobalNewDeleteDeclared = true;
3107
3108 QualType VoidPtr = Context.getPointerType(Context.VoidTy);
3109 QualType SizeT = Context.getSizeType();
3110
3111 auto DeclareGlobalAllocationFunctions = [&](OverloadedOperatorKind Kind,
3112 QualType Return, QualType Param) {
3113 llvm::SmallVector<QualType, 3> Params;
3114 Params.push_back(Param);
3115
3116 // Create up to four variants of the function (sized/aligned).
3117 bool HasSizedVariant = getLangOpts().SizedDeallocation &&
3118 (Kind == OO_Delete || Kind == OO_Array_Delete);
3119 bool HasAlignedVariant = getLangOpts().AlignedAllocation;
3120
3121 int NumSizeVariants = (HasSizedVariant ? 2 : 1);
3122 int NumAlignVariants = (HasAlignedVariant ? 2 : 1);
3123 for (int Sized = 0; Sized < NumSizeVariants; ++Sized) {
3124 if (Sized)
3125 Params.push_back(SizeT);
3126
3127 for (int Aligned = 0; Aligned < NumAlignVariants; ++Aligned) {
3128 if (Aligned)
3129 Params.push_back(Context.getTypeDeclType(getStdAlignValT()));
3130
3131 DeclareGlobalAllocationFunction(
3132 Context.DeclarationNames.getCXXOperatorName(Kind), Return, Params);
3133
3134 if (Aligned)
3135 Params.pop_back();
3136 }
3137 }
3138 };
3139
3140 DeclareGlobalAllocationFunctions(OO_New, VoidPtr, SizeT);
3141 DeclareGlobalAllocationFunctions(OO_Array_New, VoidPtr, SizeT);
3142 DeclareGlobalAllocationFunctions(OO_Delete, Context.VoidTy, VoidPtr);
3143 DeclareGlobalAllocationFunctions(OO_Array_Delete, Context.VoidTy, VoidPtr);
3144
3145 if (getLangOpts().CPlusPlusModules && getCurrentModule())
3146 PopGlobalModuleFragment();
3147}
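// Editorial note (not part of the original source): with -fsized-deallocation
// and -faligned-allocation both enabled, the nested loops above implicitly
// declare up to four usual variants per deallocation operator, e.g. for
// 'operator delete':
//
//   void operator delete(void *) noexcept;
//   void operator delete(void *, std::size_t) noexcept;
//   void operator delete(void *, std::align_val_t) noexcept;
//   void operator delete(void *, std::size_t, std::align_val_t) noexcept;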
3148
3149/// DeclareGlobalAllocationFunction - Declares a single implicit global
3150/// allocation function if it doesn't already exist.
3151 void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
3152 QualType Return,
3153 ArrayRef<QualType> Params) {
3154 DeclContext *GlobalCtx = Context.getTranslationUnitDecl();
3155
3156 // Check if this function is already declared.
3157 DeclContext::lookup_result R = GlobalCtx->lookup(Name);
3158 for (DeclContext::lookup_iterator Alloc = R.begin(), AllocEnd = R.end();
3159 Alloc != AllocEnd; ++Alloc) {
3160 // Only look at non-template functions, as it is the predefined,
3161 // non-templated allocation function we are trying to declare here.
3162 if (FunctionDecl *Func = dyn_cast<FunctionDecl>(*Alloc)) {
3163 if (Func->getNumParams() == Params.size()) {
3164 llvm::SmallVector<QualType, 3> FuncParams;
3165 for (auto *P : Func->parameters())
3166 FuncParams.push_back(
3167 Context.getCanonicalType(P->getType().getUnqualifiedType()));
3168 if (llvm::ArrayRef(FuncParams) == Params) {
3169 // Make the function visible to name lookup, even if we found it in
3170 // an unimported module. It either is an implicitly-declared global
3171 // allocation function, or is suppressing that function.
3172 Func->setVisibleDespiteOwningModule();
3173 return;
3174 }
3175 }
3176 }
3177 }
3178
3179 FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention(
3180 /*IsVariadic=*/false, /*IsCXXMethod=*/false, /*IsBuiltin=*/true));
3181
3182 QualType BadAllocType;
3183 bool HasBadAllocExceptionSpec
3184 = (Name.getCXXOverloadedOperator() == OO_New ||
3185 Name.getCXXOverloadedOperator() == OO_Array_New);
3186 if (HasBadAllocExceptionSpec) {
3187 if (!getLangOpts().CPlusPlus11) {
3188 BadAllocType = Context.getTypeDeclType(getStdBadAlloc());
3189 assert(StdBadAlloc && "Must have std::bad_alloc declared");
3190 EPI.ExceptionSpec.Type = EST_Dynamic;
3191 EPI.ExceptionSpec.Exceptions = llvm::ArrayRef(BadAllocType);
3192 }
3193 if (getLangOpts().NewInfallible) {
3194 EPI.ExceptionSpec.Type = EST_DynamicNone;
3195 }
3196 } else {
3197 EPI.ExceptionSpec =
3198 getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;
3199 }
3200
3201 auto CreateAllocationFunctionDecl = [&](Attr *ExtraAttr) {
3202 QualType FnType = Context.getFunctionType(Return, Params, EPI);
3203 FunctionDecl *Alloc = FunctionDecl::Create(
3204 Context, GlobalCtx, SourceLocation(), SourceLocation(), Name, FnType,
3205 /*TInfo=*/nullptr, SC_None, getCurFPFeatures().isFPConstrained(), false,
3206 true);
3207 Alloc->setImplicit();
3208 // Global allocation functions should always be visible.
3209 Alloc->setVisibleDespiteOwningModule();
3210
3211 if (HasBadAllocExceptionSpec && getLangOpts().NewInfallible &&
3212 !getLangOpts().CheckNew)
3213 Alloc->addAttr(
3214 ReturnsNonNullAttr::CreateImplicit(Context, Alloc->getLocation()));
3215
3216 // C++ [basic.stc.dynamic.general]p2:
3217 // The library provides default definitions for the global allocation
3218 // and deallocation functions. Some global allocation and deallocation
3219 // functions are replaceable ([new.delete]); these are attached to the
3220 // global module ([module.unit]).
3221 //
3222 // In the language wording, these functions are attached to the global
3223 // module all the time. But in the implementation, the global module
3224 // is only meaningful when we're in a module unit. So here we attach
3225 // these allocation functions to the global module conditionally.
3226 if (TheGlobalModuleFragment) {
3227 Alloc->setModuleOwnershipKind(
3229 Alloc->setLocalOwningModule(TheGlobalModuleFragment);
3230 }
3231
3233 Alloc->addAttr(VisibilityAttr::CreateImplicit(
3235 ? VisibilityAttr::Hidden
3237 ? VisibilityAttr::Protected
3238 : VisibilityAttr::Default));
3239
3241 for (QualType T : Params) {
3242 ParamDecls.push_back(ParmVarDecl::Create(
3243 Context, Alloc, SourceLocation(), SourceLocation(), nullptr, T,
3244 /*TInfo=*/nullptr, SC_None, nullptr));
3245 ParamDecls.back()->setImplicit();
3246 }
3247 Alloc->setParams(ParamDecls);
3248 if (ExtraAttr)
3249 Alloc->addAttr(ExtraAttr);
3252 IdResolver.tryAddTopLevelDecl(Alloc, Name);
3253 };
3254
3255 if (!LangOpts.CUDA)
3256 CreateAllocationFunctionDecl(nullptr);
3257 else {
3258 // Host and device get their own declaration so each can be
3259 // defined or re-declared independently.
3260 CreateAllocationFunctionDecl(CUDAHostAttr::CreateImplicit(Context));
3261 CreateAllocationFunctionDecl(CUDADeviceAttr::CreateImplicit(Context));
3262 }
3263}
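// Illustrative example (editorial sketch, not part of this file): a
// user-provided replacement whose parameter list matches one of the implicit
// signatures is found by the lookup above and suppresses the implicit
// declaration:

#include <cstdlib>
#include <new>

void *operator new(std::size_t Size) {
  if (void *P = std::malloc(Size ? Size : 1))
    return P;
  throw std::bad_alloc();
}

void operator delete(void *P) noexcept { std::free(P); }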
3264
3266 bool CanProvideSize,
3267 bool Overaligned,
3268 DeclarationName Name) {
3270
3271 LookupResult FoundDelete(*this, Name, StartLoc, LookupOrdinaryName);
3273
3274 // FIXME: It's possible for this to result in ambiguity, through a
3275 // user-declared variadic operator delete or the enable_if attribute. We
3276 // should probably not consider those cases to be usual deallocation
3277 // functions. But for now we just make an arbitrary choice in that case.
3278 auto Result = resolveDeallocationOverload(*this, FoundDelete, CanProvideSize,
3279 Overaligned);
3280 assert(Result.FD && "operator delete missing from global scope?");
3281 return Result.FD;
3282}
3283
3285 CXXRecordDecl *RD) {
3287
3288 FunctionDecl *OperatorDelete = nullptr;
3289 if (FindDeallocationFunction(Loc, RD, Name, OperatorDelete))
3290 return nullptr;
3291 if (OperatorDelete)
3292 return OperatorDelete;
3293
3294 // If there's no class-specific operator delete, look up the global
3295 // non-array delete.
3298 Name);
3299}
3300
3302 DeclarationName Name,
3303 FunctionDecl *&Operator, bool Diagnose,
3304 bool WantSize, bool WantAligned) {
3305 LookupResult Found(*this, Name, StartLoc, LookupOrdinaryName);
3306 // Try to find operator delete/operator delete[] in class scope.
3308
3309 if (Found.isAmbiguous())
3310 return true;
3311
3312 Found.suppressDiagnostics();
3313
3314 bool Overaligned =
3315 WantAligned || hasNewExtendedAlignment(*this, Context.getRecordType(RD));
3316
3317 // C++17 [expr.delete]p10:
3318 // If the deallocation functions have class scope, the one without a
3319 // parameter of type std::size_t is selected.
3321 resolveDeallocationOverload(*this, Found, /*WantSize*/ WantSize,
3322 /*WantAlign*/ Overaligned, &Matches);
3323
3324 // If we could find an overload, use it.
3325 if (Matches.size() == 1) {
3326 Operator = cast<CXXMethodDecl>(Matches[0].FD);
3327
3328 // FIXME: DiagnoseUseOfDecl?
3329 if (Operator->isDeleted()) {
3330 if (Diagnose) {
3331 StringLiteral *Msg = Operator->getDeletedMessage();
3332 Diag(StartLoc, diag::err_deleted_function_use)
3333 << (Msg != nullptr) << (Msg ? Msg->getString() : StringRef());
3334 NoteDeletedFunction(Operator);
3335 }
3336 return true;
3337 }
3338
3339 if (CheckAllocationAccess(StartLoc, SourceRange(), Found.getNamingClass(),
3340 Matches[0].Found, Diagnose) == AR_inaccessible)
3341 return true;
3342
3343 return false;
3344 }
3345
3346 // We found multiple suitable operators; complain about the ambiguity.
3347 // FIXME: The standard doesn't say to do this; it appears that the intent
3348 // is that this should never happen.
3349 if (!Matches.empty()) {
3350 if (Diagnose) {
3351 Diag(StartLoc, diag::err_ambiguous_suitable_delete_member_function_found)
3352 << Name << RD;
3353 for (auto &Match : Matches)
3354 Diag(Match.FD->getLocation(), diag::note_member_declared_here) << Name;
3355 }
3356 return true;
3357 }
3358
3359 // We did find operator delete/operator delete[] declarations, but
3360 // none of them were suitable.
3361 if (!Found.empty()) {
3362 if (Diagnose) {
3363 Diag(StartLoc, diag::err_no_suitable_delete_member_function_found)
3364 << Name << RD;
3365
3366 for (NamedDecl *D : Found)
3367 Diag(D->getUnderlyingDecl()->getLocation(),
3368 diag::note_member_declared_here) << Name;
3369 }
3370 return true;
3371 }
3372
3373 Operator = nullptr;
3374 return false;
3375}
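// Illustrative example (editorial sketch, not part of this file): per the
// C++17 [expr.delete]p10 rule quoted above, when a class declares both a
// sized and an unsized operator delete, the unsized one is selected:

#include <cstddef>

struct S {
  void operator delete(void *P);                   // selected by `delete p;`
  void operator delete(void *P, std::size_t Size); // not selected
};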
3376
3377namespace {
3378/// Checks whether a delete-expression and the new-expression used for
3379/// initializing the deletee have the same array form.
3380class MismatchingNewDeleteDetector {
3381public:
3382 enum MismatchResult {
3383 /// Indicates that there is no mismatch or a mismatch cannot be proven.
3384 NoMismatch,
3385 /// Indicates that a variable is initialized with a mismatching form of \a new.
3386 VarInitMismatches,
3387 /// Indicates that a member is initialized with a mismatching form of \a new.
3388 MemberInitMismatches,
3389 /// Indicates that one or more constructors' definitions could not be
3390 /// analyzed, and they will be checked again at the end of the translation unit.
3391 AnalyzeLater
3392 };
3393
3394 /// \param EndOfTU True if this is the final analysis at the end of the
3395 /// translation unit; false if this is the initial analysis at the point
3396 /// where the delete-expression was encountered.
3397 explicit MismatchingNewDeleteDetector(bool EndOfTU)
3398 : Field(nullptr), IsArrayForm(false), EndOfTU(EndOfTU),
3399 HasUndefinedConstructors(false) {}
3400
3401 /// Checks whether the pointee of a delete-expression is initialized with
3402 /// the matching form of new-expression.
3403 ///
3404 /// If the return value is \c VarInitMismatches or \c MemberInitMismatches at
3405 /// the point where the delete-expression is encountered, a warning is issued
3406 /// immediately. If the return value is \c AnalyzeLater at that point, the
3407 /// member will be analyzed again at the end of the translation unit.
3408 /// \c AnalyzeLater is returned iff at least one constructor
3409 /// couldn't be analyzed. If at least one constructor initializes the member
3410 /// with the matching form of \c new, the return value is \c NoMismatch.
3411 MismatchResult analyzeDeleteExpr(const CXXDeleteExpr *DE);
3412 /// Analyzes a class member.
3413 /// \param Field Class member to analyze.
3414 /// \param DeleteWasArrayForm Whether the delete-expression used for
3415 /// deleting \p Field was in array form.
3416 MismatchResult analyzeField(FieldDecl *Field, bool DeleteWasArrayForm);
3417 FieldDecl *Field;
3418 /// List of mismatching new-expressions used for initialization of the pointee.
3419 llvm::SmallVector<const CXXNewExpr *, 4> NewExprs;
3420 /// Indicates whether delete-expression was in array form.
3421 bool IsArrayForm;
3422
3423private:
3424 const bool EndOfTU;
3425 /// Indicates that there is at least one constructor without a body.
3426 bool HasUndefinedConstructors;
3427 /// Returns the \c CXXNewExpr from the given initialization expression.
3428 /// \param E Expression used for initializing the pointee in the delete-expression.
3429 /// E can be a single-element \c InitListExpr consisting of a new-expression.
3430 const CXXNewExpr *getNewExprFromInitListOrExpr(const Expr *E);
3431 /// Returns whether the member is initialized with a mismatching form of
3432 /// \c new, either by a member initializer or by an in-class initializer.
3433 ///
3434 /// If the bodies of all constructors are not visible at the end of the
3435 /// translation unit, or at least one constructor initializes the member with
3436 /// the matching form of \c new, a mismatch cannot be proven, and this
3437 /// function will return \c NoMismatch.
3438 MismatchResult analyzeMemberExpr(const MemberExpr *ME);
3439 /// Returns whether the variable is initialized with a mismatching form of
3440 /// \c new.
3441 ///
3442 /// If the variable is initialized with the matching form of \c new, or is not
3443 /// initialized with a \c new-expression at all, this function returns true.
3444 /// If it is initialized with a mismatching form of \c new, it returns false.
3445 /// \param D Variable to analyze.
3446 bool hasMatchingVarInit(const DeclRefExpr *D);
3447 /// Checks whether the constructor initializes pointee with mismatching
3448 /// form of \c new.
3449 ///
3450 /// Returns true if the member is initialized with the matching form of \c new
3451 /// in the member initializer list. Returns false if the member is initialized
3452 /// with a mismatching form of \c new in this constructor's initializer, the
3453 /// constructor isn't defined at the point where the delete-expression is seen,
3454 /// or the member isn't initialized by the constructor.
3455 bool hasMatchingNewInCtor(const CXXConstructorDecl *CD);
3456 /// Checks whether member is initialized with matching form of
3457 /// \c new in member initializer list.
3458 bool hasMatchingNewInCtorInit(const CXXCtorInitializer *CI);
3459 /// Checks whether member is initialized with mismatching form of \c new by
3460 /// in-class initializer.
3461 MismatchResult analyzeInClassInitializer();
3462};
3463}
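// Illustrative example (editorial sketch, not part of this file): the detector
// above backs -Wmismatched-new-delete, flagging code such as:

struct A {
  int *P = new int[4];  // array form of new (in-class initializer)
  ~A() { delete P; }    // MemberInitMismatches: should be `delete[] P;`
};

void f() {
  int *Q = new int;     // non-array form of new
  delete[] Q;           // VarInitMismatches: should be `delete Q;`
}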
3464
3465MismatchingNewDeleteDetector::MismatchResult
3466MismatchingNewDeleteDetector::analyzeDeleteExpr(const CXXDeleteExpr *DE) {
3467 NewExprs.clear();
3468 assert(DE && "Expected delete-expression");
3469 IsArrayForm = DE->isArrayForm();
3470 const Expr *E = DE->getArgument()->IgnoreParenImpCasts();
3471 if (const MemberExpr *ME = dyn_cast<const MemberExpr>(E)) {
3472 return analyzeMemberExpr(ME);
3473 } else if (const DeclRefExpr *D = dyn_cast<const DeclRefExpr>(E)) {
3474 if (!hasMatchingVarInit(D))
3475 return VarInitMismatches;
3476 }
3477 return NoMismatch;
3478}
3479
3480const CXXNewExpr *
3481MismatchingNewDeleteDetector::getNewExprFromInitListOrExpr(const Expr *E) {
3482 assert(E != nullptr && "Expected a valid initializer expression");
3483 E = E->IgnoreParenImpCasts();
3484 if (const InitListExpr *ILE = dyn_cast<const InitListExpr>(E)) {
3485 if (ILE->getNumInits() == 1)
3486 E = dyn_cast<const CXXNewExpr>(ILE->getInit(0)->IgnoreParenImpCasts());
3487 }
3488
3489 return dyn_cast_or_null<const CXXNewExpr>(E);
3490}
3491
3492bool MismatchingNewDeleteDetector::hasMatchingNewInCtorInit(
3493 const CXXCtorInitializer *CI) {
3494 const CXXNewExpr *NE = nullptr;
3495 if (Field == CI->getMember() &&
3496 (NE = getNewExprFromInitListOrExpr(CI->getInit()))) {
3497 if (NE->isArray() == IsArrayForm)
3498 return true;
3499 else
3500 NewExprs.push_back(NE);
3501 }
3502 return false;
3503}
3504
3505bool MismatchingNewDeleteDetector::hasMatchingNewInCtor(
3506 const CXXConstructorDecl *CD) {
3507 if (CD->isImplicit())
3508 return false;
3509 const FunctionDecl *Definition = CD;
3511 HasUndefinedConstructors = true;
3512 return EndOfTU;
3513 }
3514 for (const auto *CI : cast<const CXXConstructorDecl>(Definition)->inits()) {
3515 if (hasMatchingNewInCtorInit(CI))
3516 return true;
3517 }
3518 return false;
3519}
3520
3521MismatchingNewDeleteDetector::MismatchResult
3522MismatchingNewDeleteDetector::analyzeInClassInitializer() {
3523 assert(Field != nullptr && "This should be called only for members");
3524 const Expr *InitExpr = Field->getInClassInitializer();
3525 if (!InitExpr)
3526 return EndOfTU ? NoMismatch : AnalyzeLater;
3527 if (const CXXNewExpr *NE = getNewExprFromInitListOrExpr(InitExpr)) {
3528 if (NE->isArray() != IsArrayForm) {
3529 NewExprs.push_back(NE);
3530 return MemberInitMismatches;
3531 }
3532 }
3533 return NoMismatch;
3534}
3535
3536MismatchingNewDeleteDetector::MismatchResult
3537MismatchingNewDeleteDetector::analyzeField(FieldDecl *Field,
3538 bool DeleteWasArrayForm) {
3539 assert(Field != nullptr && "Analysis requires a valid class member.");
3540 this->Field = Field;
3541 IsArrayForm = DeleteWasArrayForm;
3542 const CXXRecordDecl *RD = cast<const CXXRecordDecl>(Field->getParent());
3543 for (const auto *CD : RD->ctors()) {
3544 if (hasMatchingNewInCtor(CD))
3545 return NoMismatch;
3546 }
3547 if (HasUndefinedConstructors)
3548 return EndOfTU ? NoMismatch : AnalyzeLater;
3549 if (!NewExprs.empty())
3550 return MemberInitMismatches;
3551 return Field->hasInClassInitializer() ? analyzeInClassInitializer()
3552 : NoMismatch;
3553}
3554
3555MismatchingNewDeleteDetector::MismatchResult
3556MismatchingNewDeleteDetector::analyzeMemberExpr(const MemberExpr *ME) {
3557 assert(ME != nullptr && "Expected a member expression");
3558 if (FieldDecl *F = dyn_cast<FieldDecl>(ME->getMemberDecl()))
3559 return analyzeField(F, IsArrayForm);
3560 return NoMismatch;
3561}
3562
3563bool MismatchingNewDeleteDetector::hasMatchingVarInit(const DeclRefExpr *D) {
3564 const CXXNewExpr *NE = nullptr;
3565 if (const VarDecl *VD = dyn_cast<const VarDecl>(D->getDecl())) {
3566 if (VD->hasInit() && (NE = getNewExprFromInitListOrExpr(VD->getInit())) &&
3567 NE->isArray() != IsArrayForm) {
3568 NewExprs.push_back(NE);
3569 }
3570 }
3571 return NewExprs.empty();
3572}
3573
3574static void
3576 const MismatchingNewDeleteDetector &Detector) {
3577 SourceLocation EndOfDelete = SemaRef.getLocForEndOfToken(DeleteLoc);
3578 FixItHint H;
3579 if (!Detector.IsArrayForm)
3580 H = FixItHint::CreateInsertion(EndOfDelete, "[]");
3581 else {
3583 DeleteLoc, tok::l_square, SemaRef.getSourceManager(),
3584 SemaRef.getLangOpts(), true);
3585 if (RSquare.isValid())
3586 H = FixItHint::CreateRemoval(SourceRange(EndOfDelete, RSquare));
3587 }
3588 SemaRef.Diag(DeleteLoc, diag::warn_mismatched_delete_new)
3589 << Detector.IsArrayForm << H;
3590
3591 for (const auto *NE : Detector.NewExprs)
3592 SemaRef.Diag(NE->getExprLoc(), diag::note_allocated_here)
3593 << Detector.IsArrayForm;
3594}
3595
3596void Sema::AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE) {
3597 if (Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation()))
3598 return;
3599 MismatchingNewDeleteDetector Detector(/*EndOfTU=*/false);
3600 switch (Detector.analyzeDeleteExpr(DE)) {
3601 case MismatchingNewDeleteDetector::VarInitMismatches:
3602 case MismatchingNewDeleteDetector::MemberInitMismatches: {
3603 DiagnoseMismatchedNewDelete(*this, DE->getBeginLoc(), Detector);
3604 break;
3605 }
3606 case MismatchingNewDeleteDetector::AnalyzeLater: {
3607 DeleteExprs[Detector.Field].push_back(
3608 std::make_pair(DE->getBeginLoc(), DE->isArrayForm()));
3609 break;
3610 }
3611 case MismatchingNewDeleteDetector::NoMismatch:
3612 break;
3613 }
3614}
3615
3616void Sema::AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
3617 bool DeleteWasArrayForm) {
3618 MismatchingNewDeleteDetector Detector(/*EndOfTU=*/true);
3619 switch (Detector.analyzeField(Field, DeleteWasArrayForm)) {
3620 case MismatchingNewDeleteDetector::VarInitMismatches:
3621 llvm_unreachable("This analysis should have been done for class members.");
3622 case MismatchingNewDeleteDetector::AnalyzeLater:
3623 llvm_unreachable("Analysis cannot be postponed any point beyond end of "
3624 "translation unit.");
3625 case MismatchingNewDeleteDetector::MemberInitMismatches:
3626 DiagnoseMismatchedNewDelete(*this, DeleteLoc, Detector);
3627 break;
3628 case MismatchingNewDeleteDetector::NoMismatch:
3629 break;
3630 }
3631}
3632
3634Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
3635 bool ArrayForm, Expr *ExE) {
3636 // C++ [expr.delete]p1:
3637 // The operand shall have a pointer type, or a class type having a single
3638 // non-explicit conversion function to a pointer type. The result has type
3639 // void.
3640 //
3641 // DR599 amends "pointer type" to "pointer to object type" in both cases.
3642
3643 ExprResult Ex = ExE;
3644 FunctionDecl *OperatorDelete = nullptr;
3645 bool ArrayFormAsWritten = ArrayForm;
3646 bool UsualArrayDeleteWantsSize = false;
3647
3648 if (!Ex.get()->isTypeDependent()) {
3649 // Perform lvalue-to-rvalue cast, if needed.
3650 Ex = DefaultLvalueConversion(Ex.get());
3651 if (Ex.isInvalid())
3652 return ExprError();
3653
3654 QualType Type = Ex.get()->getType();
3655
3656 class DeleteConverter : public ContextualImplicitConverter {
3657 public:
3658 DeleteConverter() : ContextualImplicitConverter(false, true) {}
3659
3660 bool match(QualType ConvType) override {
3661 // FIXME: If we have an operator T* and an operator void*, we must pick
3662 // the operator T*.
3663 if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>())
3664 if (ConvPtrType->getPointeeType()->isIncompleteOrObjectType())
3665 return true;
3666 return false;
3667 }
3668
3669 SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
3670 QualType T) override {
3671 return S.Diag(Loc, diag::err_delete_operand) << T;
3672 }
3673
3674 SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
3675 QualType T) override {
3676 return S.Diag(Loc, diag::err_delete_incomplete_class_type) << T;
3677 }
3678
3679 SemaDiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc,
3680 QualType T,
3681 QualType ConvTy) override {
3682 return S.Diag(Loc, diag::err_delete_explicit_conversion) << T << ConvTy;
3683 }
3684
3685 SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv,
3686 QualType ConvTy) override {
3687 return S.Diag(Conv->getLocation(), diag::note_delete_conversion)
3688 << ConvTy;
3689 }
3690
3691 SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
3692 QualType T) override {
3693 return S.Diag(Loc, diag::err_ambiguous_delete_operand) << T;
3694 }
3695
3696 SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
3697 QualType ConvTy) override {
3698 return S.Diag(Conv->getLocation(), diag::note_delete_conversion)
3699 << ConvTy;
3700 }
3701
3702 SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
3703 QualType T,
3704 QualType ConvTy) override {
3705 llvm_unreachable("conversion functions are permitted");
3706 }
3707 } Converter;
3708
3709 Ex = PerformContextualImplicitConversion(StartLoc, Ex.get(), Converter);
3710 if (Ex.isInvalid())
3711 return ExprError();
3712 Type = Ex.get()->getType();
3713 if (!Converter.match(Type))
3714 // FIXME: PerformContextualImplicitConversion should return ExprError
3715 // itself in this case.
3716 return ExprError();
3717
3719 QualType PointeeElem = Context.getBaseElementType(Pointee);
3720
3721 if (Pointee.getAddressSpace() != LangAS::Default &&
3722 !getLangOpts().OpenCLCPlusPlus)
3723 return Diag(Ex.get()->getBeginLoc(),
3724 diag::err_address_space_qualified_delete)
3725 << Pointee.getUnqualifiedType()
3727
3728 CXXRecordDecl *PointeeRD = nullptr;
3729 if (Pointee->isVoidType() && !isSFINAEContext()) {
3730 // The C++ standard bans deleting a pointer to a non-object type, which
3731 // effectively bans deletion of "void*". However, most compilers support
3732 // this, so we treat it as a warning unless we're in a SFINAE context.
3733 // Since C++26, however, it is diagnosed as an error.
3734 Diag(StartLoc, LangOpts.CPlusPlus26 ? diag::err_delete_incomplete
3735 : diag::ext_delete_void_ptr_operand)
3736 << (LangOpts.CPlusPlus26 ? Pointee : Type)
3737 << Ex.get()->getSourceRange();
3738 } else if (Pointee->isFunctionType() || Pointee->isVoidType() ||
3739 Pointee->isSizelessType()) {
3740 return ExprError(Diag(StartLoc, diag::err_delete_operand)
3741 << Type << Ex.get()->getSourceRange());
3742 } else if (!Pointee->isDependentType()) {
3743 // FIXME: This can result in errors if the definition was imported from a
3744 // module but is hidden.
3745 if (!RequireCompleteType(StartLoc, Pointee,
3746 LangOpts.CPlusPlus26
3747 ? diag::err_delete_incomplete
3748 : diag::warn_delete_incomplete,
3749 Ex.get())) {
3750 if (const RecordType *RT = PointeeElem->getAs<RecordType>())
3751 PointeeRD = cast<CXXRecordDecl>(RT->getDecl());
3752 }
3753 }
3754
3755 if (Pointee->isArrayType() && !ArrayForm) {
3756 Diag(StartLoc, diag::warn_delete_array_type)
3757 << Type << Ex.get()->getSourceRange()
3759 ArrayForm = true;
3760 }
3761
3763 ArrayForm ? OO_Array_Delete : OO_Delete);
3764
3765 if (PointeeRD) {
3766 if (!UseGlobal &&
3767 FindDeallocationFunction(StartLoc, PointeeRD, DeleteName,
3768 OperatorDelete))
3769 return ExprError();
3770
3771 // If we're allocating an array of records, check whether the
3772 // usual operator delete[] has a size_t parameter.
3773 if (ArrayForm) {
3774 // If the user specifically asked to use the global allocator,
3775 // we'll need to do the lookup into the class.
3776 if (UseGlobal)
3777 UsualArrayDeleteWantsSize =
3778 doesUsualArrayDeleteWantSize(*this, StartLoc, PointeeElem);
3779
3780 // Otherwise, the usual operator delete[] should be the
3781 // function we just found.
3782 else if (isa_and_nonnull<CXXMethodDecl>(OperatorDelete))
3783 UsualArrayDeleteWantsSize =
3784 UsualDeallocFnInfo(*this,
3785 DeclAccessPair::make(OperatorDelete, AS_public))
3786 .HasSizeT;
3787 }
3788
3789 if (!PointeeRD->hasIrrelevantDestructor())
3790 if (CXXDestructorDecl *Dtor = LookupDestructor(PointeeRD)) {
3791 MarkFunctionReferenced(StartLoc,
3792 const_cast<CXXDestructorDecl*>(Dtor));
3793 if (DiagnoseUseOfDecl(Dtor, StartLoc))
3794 return ExprError();
3795 }
3796
3797 CheckVirtualDtorCall(PointeeRD->getDestructor(), StartLoc,
3798 /*IsDelete=*/true, /*CallCanBeVirtual=*/true,
3799 /*WarnOnNonAbstractTypes=*/!ArrayForm,
3800 SourceLocation());
3801 }
3802
3803 if (!OperatorDelete) {
3804 if (getLangOpts().OpenCLCPlusPlus) {
3805 Diag(StartLoc, diag::err_openclcxx_not_supported) << "default delete";
3806 return ExprError();
3807 }
3808
3809 bool IsComplete = isCompleteType(StartLoc, Pointee);
3810 bool CanProvideSize =
3811 IsComplete && (!ArrayForm || UsualArrayDeleteWantsSize ||
3812 Pointee.isDestructedType());
3813 bool Overaligned = hasNewExtendedAlignment(*this, Pointee);
3814
3815 // Look for a global declaration.
3816 OperatorDelete = FindUsualDeallocationFunction(StartLoc, CanProvideSize,
3817 Overaligned, DeleteName);
3818 }
3819
3820 if (OperatorDelete->isInvalidDecl())
3821 return ExprError();
3822
3823 MarkFunctionReferenced(StartLoc, OperatorDelete);
3824
3825 // Check access and ambiguity of destructor if we're going to call it.
3826 // Note that this is required even for a virtual delete.
3827 bool IsVirtualDelete = false;
3828 if (PointeeRD) {
3829 if (CXXDestructorDecl *Dtor = LookupDestructor(PointeeRD)) {
3830 CheckDestructorAccess(Ex.get()->getExprLoc(), Dtor,
3831 PDiag(diag::err_access_dtor) << PointeeElem);
3832 IsVirtualDelete = Dtor->isVirtual();
3833 }
3834 }
3835
3836 DiagnoseUseOfDecl(OperatorDelete, StartLoc);
3837
3838 // Convert the operand to the type of the first parameter of operator
3839 // delete. This is only necessary if we selected a destroying operator
3840 // delete that we are going to call (non-virtually); converting to void*
3841 // is trivial and left to AST consumers to handle.
3842 QualType ParamType = OperatorDelete->getParamDecl(0)->getType();
3843 if (!IsVirtualDelete && !ParamType->getPointeeType()->isVoidType()) {
3844 Qualifiers Qs = Pointee.getQualifiers();
3845 if (Qs.hasCVRQualifiers()) {
3846 // Qualifiers are irrelevant to this conversion; we're only looking
3847 // for access and ambiguity.
3851 Ex = ImpCastExprToType(Ex.get(), Unqual, CK_NoOp);
3852 }
3853 Ex = PerformImplicitConversion(Ex.get(), ParamType, AA_Passing);
3854 if (Ex.isInvalid())
3855 return ExprError();
3856 }
3857 }
3858
3860 Context.VoidTy, UseGlobal, ArrayForm, ArrayFormAsWritten,
3861 UsualArrayDeleteWantsSize, OperatorDelete, Ex.get(), StartLoc);
3862 AnalyzeDeleteExprMismatch(Result);
3863 return Result;
3864}
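// Illustrative example (editorial sketch, not part of this file): the
// contextual conversion performed above lets a class-type operand with a
// single non-explicit conversion function to an object pointer type be
// deleted directly:

struct Handle {
  int *Ptr;
  operator int *() const { return Ptr; }  // used to convert the operand
};

void destroy(Handle H) {
  delete H;  // converts via operator int*(), then deallocates with ::operator delete
}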
3865
3867 bool IsDelete,
3868 FunctionDecl *&Operator) {
3869
3871 IsDelete ? OO_Delete : OO_New);
3872
3873 LookupResult R(S, NewName, TheCall->getBeginLoc(), Sema::LookupOrdinaryName);
3875 assert(!R.empty() && "implicitly declared allocation functions not found");
3876 assert(!R.isAmbiguous() && "global allocation functions are ambiguous");
3877
3878 // We do our own custom access checks below.
3880
3881 SmallVector<Expr *, 8> Args(TheCall->arguments());
3882 OverloadCandidateSet Candidates(R.getNameLoc(),
3884 for (LookupResult::iterator FnOvl = R.begin(), FnOvlEnd = R.end();
3885 FnOvl != FnOvlEnd; ++FnOvl) {
3886 // Even member operator new/delete are implicitly treated as
3887 // static, so don't use AddMemberCandidate.
3888 NamedDecl *D = (*FnOvl)->getUnderlyingDecl();
3889
3890 if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(D)) {
3891 S.AddTemplateOverloadCandidate(FnTemplate, FnOvl.getPair(),
3892 /*ExplicitTemplateArgs=*/nullptr, Args,
3893 Candidates,
3894 /*SuppressUserConversions=*/false);
3895 continue;
3896 }
3897
3898 FunctionDecl *Fn = cast<FunctionDecl>(D);
3899 S.AddOverloadCandidate(Fn, FnOvl.getPair(), Args, Candidates,
3900 /*SuppressUserConversions=*/false);
3901 }
3902
3903 SourceRange Range = TheCall->getSourceRange();
3904
3905 // Do the resolution.
3907 switch (Candidates.BestViableFunction(S, R.getNameLoc(), Best)) {
3908 case OR_Success: {
3909 // Got one!
3910 FunctionDecl *FnDecl = Best->Function;
3911 assert(R.getNamingClass() == nullptr &&
3912 "class members should not be considered");
3913
3915 S.Diag(R.getNameLoc(), diag::err_builtin_operator_new_delete_not_usual)
3916 << (IsDelete ? 1 : 0) << Range;
3917 S.Diag(FnDecl->getLocation(), diag::note_non_usual_function_declared_here)
3918 << R.getLookupName() << FnDecl->getSourceRange();
3919 return true;
3920 }
3921
3922 Operator = FnDecl;
3923 return false;
3924 }
3925
3927 Candidates.NoteCandidates(
3929 S.PDiag(diag::err_ovl_no_viable_function_in_call)
3930 << R.getLookupName() << Range),
3931 S, OCD_AllCandidates, Args);
3932 return true;
3933
3934 case OR_Ambiguous:
3935 Candidates.NoteCandidates(
3937 S.PDiag(diag::err_ovl_ambiguous_call)
3938 << R.getLookupName() << Range),
3939 S, OCD_AmbiguousCandidates, Args);
3940 return true;
3941
3942 case OR_Deleted:
3944 Candidates, Best->Function, Args);
3945 return true;
3946 }
3947 llvm_unreachable("Unreachable, bad result from BestViableFunction");
3948}
3949
3950ExprResult Sema::BuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
3951 bool IsDelete) {
3952 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
3953 if (!getLangOpts().CPlusPlus) {
3954 Diag(TheCall->getExprLoc(), diag::err_builtin_requires_language)
3955 << (IsDelete ? "__builtin_operator_delete" : "__builtin_operator_new")
3956 << "C++";
3957 return ExprError();
3958 }
3959 // CodeGen assumes it can find the global new and delete to call,
3960 // so ensure that they are declared.
3962
3963 FunctionDecl *OperatorNewOrDelete = nullptr;
3964 if (resolveBuiltinNewDeleteOverload(*this, TheCall, IsDelete,
3965 OperatorNewOrDelete))
3966 return ExprError();
3967 assert(OperatorNewOrDelete && "should be found");
3968
3969 DiagnoseUseOfDecl(OperatorNewOrDelete, TheCall->getExprLoc());
3970 MarkFunctionReferenced(TheCall->getExprLoc(), OperatorNewOrDelete);
3971
3972 TheCall->setType(OperatorNewOrDelete->getReturnType());
3973 for (unsigned i = 0; i != TheCall->getNumArgs(); ++i) {
3974 QualType ParamTy = OperatorNewOrDelete->getParamDecl(i)->getType();
3975 InitializedEntity Entity =
3978 Entity, TheCall->getArg(i)->getBeginLoc(), TheCall->getArg(i));
3979 if (Arg.isInvalid())
3980 return ExprError();
3981 TheCall->setArg(i, Arg.get());
3982 }
3983 auto Callee = dyn_cast<ImplicitCastExpr>(TheCall->getCallee());
3984 assert(Callee && Callee->getCastKind() == CK_BuiltinFnToFnPtr &&
3985 "Callee expected to be implicit cast to a builtin function pointer");
3986 Callee->setType(OperatorNewOrDelete->getType());
3987
3988 return TheCallResult;
3989}
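// Illustrative example (editorial sketch, not part of this file):
// __builtin_operator_new and __builtin_operator_delete resolve to the usual
// global allocation functions selected above; library code (e.g. libc++
// allocators) uses them so the calls can be optimized like new/delete
// expressions:

#include <cstddef>

void *allocate_bytes(std::size_t N) { return __builtin_operator_new(N); }

void deallocate_bytes(void *P) noexcept { __builtin_operator_delete(P); }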
3990
3992 bool IsDelete, bool CallCanBeVirtual,
3993 bool WarnOnNonAbstractTypes,
3994 SourceLocation DtorLoc) {
3995 if (!dtor || dtor->isVirtual() || !CallCanBeVirtual || isUnevaluatedContext())
3996 return;
3997
3998 // C++ [expr.delete]p3:
3999 // In the first alternative (delete object), if the static type of the
4000 // object to be deleted is different from its dynamic type, the static
4001 // type shall be a base class of the dynamic type of the object to be
4002 // deleted and the static type shall have a virtual destructor or the
4003 // behavior is undefined.
4004 //
4005 const CXXRecordDecl *PointeeRD = dtor->getParent();
4006 // Note: a final class cannot be derived from, so there is no issue there.
4007 if (!PointeeRD->isPolymorphic() || PointeeRD->hasAttr<FinalAttr>())
4008 return;
4009
4010 // If the superclass is in a system header, there's nothing that can be done.
4011 // The `delete` (where we emit the warning) can be in a system header;
4012 // what matters for this warning is where the deleted type is defined.
4013 if (getSourceManager().isInSystemHeader(PointeeRD->getLocation()))
4014 return;
4015
4016 QualType ClassType = dtor->getFunctionObjectParameterType();
4017 if (PointeeRD->isAbstract()) {
4018 // If the class is abstract, we warn by default, because we're
4019 // sure the code has undefined behavior.
4020 Diag(Loc, diag::warn_delete_abstract_non_virtual_dtor) << (IsDelete ? 0 : 1)
4021 << ClassType;
4022 } else if (WarnOnNonAbstractTypes) {
4023 // Otherwise, if this is not an array delete, it's a bit suspect,
4024 // but not necessarily wrong.
4025 Diag(Loc, diag::warn_delete_non_virtual_dtor) << (IsDelete ? 0 : 1)
4026 << ClassType;
4027 }
4028 if (!IsDelete) {
4029 std::string TypeStr;
4030 ClassType.getAsStringInternal(TypeStr, getPrintingPolicy());
4031 Diag(DtorLoc, diag::note_delete_non_virtual)
4032 << FixItHint::CreateInsertion(DtorLoc, TypeStr + "::");
4033 }
4034}
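// Illustrative example (editorial sketch, not part of this file): the check
// above drives -Wdelete-non-virtual-dtor and its abstract-class variant:

struct Shape {
  virtual void draw() = 0;
  ~Shape() {}  // non-virtual destructor
};

struct Circle : Shape {
  void draw() override {}
};

void destroy(Shape *S) {
  delete S;  // warns: deleting an abstract class through a non-virtual destructor
}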
4035
4037 SourceLocation StmtLoc,
4038 ConditionKind CK) {
4039 ExprResult E =
4040 CheckConditionVariable(cast<VarDecl>(ConditionVar), StmtLoc, CK);
4041 if (E.isInvalid())
4042 return ConditionError();
4043 return ConditionResult(*this, ConditionVar, MakeFullExpr(E.get(), StmtLoc),
4045}
4046
4048 SourceLocation StmtLoc,
4049 ConditionKind CK) {
4050 if (ConditionVar->isInvalidDecl())
4051 return ExprError();
4052
4053 QualType T = ConditionVar->getType();
4054
4055 // C++ [stmt.select]p2:
4056 // The declarator shall not specify a function or an array.
4057 if (T->isFunctionType())
4058 return ExprError(Diag(ConditionVar->getLocation(),
4059 diag::err_invalid_use_of_function_type)
4060 << ConditionVar->getSourceRange());
4061 else if (T->isArrayType())
4062 return ExprError(Diag(ConditionVar->getLocation(),
4063 diag::err_invalid_use_of_array_type)
4064 << ConditionVar->getSourceRange());
4065
4067 ConditionVar, ConditionVar->getType().getNonReferenceType(), VK_LValue,
4068 ConditionVar->getLocation());
4069
4070 switch (CK) {
4072 return CheckBooleanCondition(StmtLoc, Condition.get());
4073
4075 return CheckBooleanCondition(StmtLoc, Condition.get(), true);
4076
4078 return CheckSwitchCondition(StmtLoc, Condition.get());
4079 }
4080
4081 llvm_unreachable("unexpected condition kind");
4082}
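// Illustrative example (editorial sketch, not part of this file): the
// condition variables checked above come from declarations in selection and
// iteration statements:

int *lookup(int Key);

void bump(int Key) {
  if (int *P = lookup(Key))    // declared variable, converted to bool
    *P += 1;
  while (int *Q = lookup(Key)) // condition re-evaluated each iteration
    *Q -= 1;
}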
4083
4084ExprResult Sema::CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr) {
4085 // C++11 6.4p4:
4086 // The value of a condition that is an initialized declaration in a statement
4087 // other than a switch statement is the value of the declared variable
4088 // implicitly converted to type bool. If that conversion is ill-formed, the
4089 // program is ill-formed.
4090 // The value of a condition that is an expression is the value of the
4091 // expression, implicitly converted to bool.
4092 //
4093 // C++23 8.5.2p2
4094 // If the if statement is of the form if constexpr, the value of the condition
4095 // is contextually converted to bool and the converted expression shall be
4096 // a constant expression.
4097 //
4098
4100 if (!IsConstexpr || E.isInvalid() || E.get()->isValueDependent())
4101 return E;
4102
4103 // FIXME: Return this value to the caller so they don't need to recompute it.
4104 llvm::APSInt Cond;
4106 E.get(), &Cond,
4107 diag::err_constexpr_if_condition_expression_is_not_constant);
4108 return E;
4109}
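// Illustrative example (editorial sketch, not part of this file): for
// `if constexpr`, the contextually converted condition must be a constant
// expression, which the check above verifies:

template <typename T>
int sizeClass(T Value) {
  if constexpr (sizeof(T) <= sizeof(int))  // OK: constant expression
    return 1;
  else
    return 2;
  // if constexpr (Value > 0) {}           // error: not a constant expression
}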
4110
4111bool
4113 // Look inside the implicit cast, if it exists.
4114 if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(From))
4115 From = Cast->getSubExpr();
4116
4117 // A string literal (2.13.4) that is not a wide string literal can
4118 // be converted to an rvalue of type "pointer to char"; a wide
4119 // string literal can be converted to an rvalue of type "pointer
4120 // to wchar_t" (C++ 4.2p2).
4121 if (StringLiteral *StrLit = dyn_cast<StringLiteral>(From->IgnoreParens()))
4122 if (const PointerType *ToPtrType = ToType->getAs<PointerType>())
4123 if (const BuiltinType *ToPointeeType
4124 = ToPtrType->getPointeeType()->getAs<BuiltinType>()) {
4125 // This conversion is considered only when there is an
4126 // explicit appropriate pointer target type (C++ 4.2p2).
4127 if (!ToPtrType->getPointeeType().hasQualifiers()) {
4128 switch (StrLit->getKind()) {
4132 // We don't allow UTF literals to be implicitly converted
4133 break;
4135 return (ToPointeeType->getKind() == BuiltinType::Char_U ||
4136 ToPointeeType->getKind() == BuiltinType::Char_S);
4139 QualType(ToPointeeType, 0));
4141 assert(false && "Unevaluated string literal in expression");
4142 break;
4143 }
4144 }
4145 }
4146
4147 return false;
4148}
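// Illustrative example (editorial sketch, not part of this file): the
// deprecated conversion recognized above lets an ordinary or wide string
// literal bind to a pointer to non-const character type in C++98/03 only:

void legacy(char *S);
void legacyWide(wchar_t *WS);

void callers() {
  legacy("hello");     // C++03: deprecated conversion; ill-formed since C++11
  legacyWide(L"hi");   // same rule for wide string literals
  // legacy(u8"x");    // never allowed for UTF-8/16/32 literals
}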
4149
4151 SourceLocation CastLoc,
4152 QualType Ty,
4153 CastKind Kind,
4154 CXXMethodDecl *Method,
4155 DeclAccessPair FoundDecl,
4156 bool HadMultipleCandidates,
4157 Expr *From) {
4158 switch (Kind) {
4159 default: llvm_unreachable("Unhandled cast kind!");
4160 case CK_ConstructorConversion: {
4161 CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Method);
4162 SmallVector<Expr*, 8> ConstructorArgs;
4163
4164 if (S.RequireNonAbstractType(CastLoc, Ty,
4165 diag::err_allocation_of_abstract_type))
4166 return ExprError();
4167
4168 if (S.CompleteConstructorCall(Constructor, Ty, From, CastLoc,
4169 ConstructorArgs))
4170 return ExprError();
4171
4172 S.CheckConstructorAccess(CastLoc, Constructor, FoundDecl,
4174 if (S.DiagnoseUseOfDecl(Method, CastLoc))
4175 return ExprError();
4176
4178 CastLoc, Ty, FoundDecl, cast<CXXConstructorDecl>(Method),
4179 ConstructorArgs, HadMultipleCandidates,
4180 /*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false,
4182 if (Result.isInvalid())
4183 return ExprError();
4184
4185 return S.MaybeBindToTemporary(Result.getAs<Expr>());
4186 }
4187
4188 case CK_UserDefinedConversion: {
4189 assert(!From->getType()->isPointerType() && "Arg can't have pointer type!");
4190
4191 S.CheckMemberOperatorAccess(CastLoc, From, /*arg*/ nullptr, FoundDecl);
4192 if (S.DiagnoseUseOfDecl(Method, CastLoc))
4193 return ExprError();
4194
4195 // Create an implicit call expr that calls it.
4196 CXXConversionDecl *Conv = cast<CXXConversionDecl>(Method);
4197 ExprResult Result = S.BuildCXXMemberCallExpr(From, FoundDecl, Conv,
4198 HadMultipleCandidates);
4199 if (Result.isInvalid())
4200 return ExprError();
4201 // Record usage of conversion in an implicit cast.
4202 Result = ImplicitCastExpr::Create(S.Context, Result.get()->getType(),
4203 CK_UserDefinedConversion, Result.get(),
4204 nullptr, Result.get()->getValueKind(),
4206
4207 return S.MaybeBindToTemporary(Result.get());
4208 }
4209 }
4210}
4211
4214 const ImplicitConversionSequence &ICS,
4215 AssignmentAction Action,
4217 // C++ [over.match.oper]p7: [...] operands of class type are converted [...]
4219 !From->getType()->isRecordType())
4220 return From;
4221
4222 switch (ICS.getKind()) {
4224 ExprResult Res = PerformImplicitConversion(From, ToType, ICS.Standard,
4225 Action, CCK);
4226 if (Res.isInvalid())
4227 return ExprError();
4228 From = Res.get();
4229 break;
4230 }
4231
4233
4236 QualType BeforeToType;
4237 assert(FD && "no conversion function for user-defined conversion seq");
4238 if (const CXXConversionDecl *Conv = dyn_cast<CXXConversionDecl>(FD)) {
4239 CastKind = CK_UserDefinedConversion;
4240
4241 // If the user-defined conversion is specified by a conversion function,
4242 // the initial standard conversion sequence converts the source type to
4243 // the implicit object parameter of the conversion function.
4244 BeforeToType = Context.getTagDeclType(Conv->getParent());
4245 } else {
4246 const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(FD);
4247 CastKind = CK_ConstructorConversion;
4248 // Do no conversion if dealing with ... for the first conversion.
4250 // If the user-defined conversion is specified by a constructor, the
4251 // initial standard conversion sequence converts the source type to
4252 // the type required by the argument of the constructor
4253 BeforeToType = Ctor->getParamDecl(0)->getType().getNonReferenceType();
4254 }
4255 }
4256 // Watch out for ellipsis conversion.
4258 ExprResult Res =
4259 PerformImplicitConversion(From, BeforeToType,
4261 CCK);
4262 if (Res.isInvalid())
4263 return ExprError();
4264 From = Res.get();
4265 }
4266
4268 *this, From->getBeginLoc(), ToType.getNonReferenceType(), CastKind,
4269 cast<CXXMethodDecl>(FD), ICS.UserDefined.FoundConversionFunction,
4271
4272 if (CastArg.isInvalid())
4273 return ExprError();
4274
4275 From = CastArg.get();
4276
4277 // C++ [over.match.oper]p7:
4278 // [...] the second standard conversion sequence of a user-defined
4279 // conversion sequence is not applied.
4281 return From;
4282
4283 return PerformImplicitConversion(From, ToType, ICS.UserDefined.After,
4284 AA_Converting, CCK);
4285 }
4286
4288 ICS.DiagnoseAmbiguousConversion(*this, From->getExprLoc(),
4289 PDiag(diag::err_typecheck_ambiguous_condition)
4290 << From->getSourceRange());
4291 return ExprError();
4292
4295 llvm_unreachable("bad conversion");
4296
4299 CheckAssignmentConstraints(From->getExprLoc(), ToType, From->getType());
4300 bool Diagnosed = DiagnoseAssignmentResult(
4301 ConvTy == Compatible ? Incompatible : ConvTy, From->getExprLoc(),
4302 ToType, From->getType(), From, Action);
4303 assert(Diagnosed && "failed to diagnose bad conversion"); (void)Diagnosed;
4304 return ExprError();
4305 }
4306
4307 // Everything went well.
4308 return From;
4309}
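// Illustrative example (editorial sketch, not part of this file): a
// user-defined conversion sequence as dispatched above consists of an optional
// standard conversion, the conversion function or converting constructor, and
// a second standard conversion:

struct Meters {
  double Value;
  Meters(double V) : Value(V) {}            // converting constructor
  operator double() const { return Value; } // conversion function
};

double twice(Meters M) { return 2 * M; }    // Meters -> double, then multiply

void use() { double D = twice(3); }         // int -> double -> Meters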
4310
4311// adjustVectorType - Compute the intermediate cast type casting elements of the
4312// from type to the elements of the to type without resizing the vector.
4314 QualType ToType, QualType *ElTy = nullptr) {
4315 auto *ToVec = ToType->castAs<VectorType>();
4316 QualType ElType = ToVec->getElementType();
4317 if (ElTy)
4318 *ElTy = ElType;
4319 if (!FromTy->isVectorType())
4320 return ElType;
4321 auto *FromVec = FromTy->castAs<VectorType>();
4322 return Context.getExtVectorType(ElType, FromVec->getNumElements());
4323}
4324
4327 const StandardConversionSequence& SCS,
4328 AssignmentAction Action,
4330 bool CStyle = (CCK == CheckedConversionKind::CStyleCast ||
4332
4333 // Overall FIXME: we are recomputing too many types here and doing far too
4334 // much extra work. What this means is that we need to keep track of more
4335 // information that is computed when we try the implicit conversion initially,
4336 // so that we don't need to recompute anything here.
4337 QualType FromType = From->getType();
4338
4339 if (SCS.CopyConstructor) {
4340 // FIXME: When can ToType be a reference type?
4341 assert(!ToType->isReferenceType());
4342 if (SCS.Second == ICK_Derived_To_Base) {
4343 SmallVector<Expr*, 8> ConstructorArgs;
4345 cast<CXXConstructorDecl>(SCS.CopyConstructor), ToType, From,
4346 /*FIXME:ConstructLoc*/ SourceLocation(), ConstructorArgs))
4347 return ExprError();
4348 return BuildCXXConstructExpr(
4349 /*FIXME:ConstructLoc*/ SourceLocation(), ToType,
4350 SCS.FoundCopyConstructor, SCS.CopyConstructor, ConstructorArgs,
4351 /*HadMultipleCandidates*/ false,
4352 /*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false,
4354 }
4355 return BuildCXXConstructExpr(
4356 /*FIXME:ConstructLoc*/ SourceLocation(), ToType,
4358 /*HadMultipleCandidates*/ false,
4359 /*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false,
4361 }
4362
4363 // Resolve overloaded function references.
4364 if (Context.hasSameType(FromType, Context.OverloadTy)) {
4367 true, Found);
4368 if (!Fn)
4369 return ExprError();
4370
4371 if (DiagnoseUseOfDecl(Fn, From->getBeginLoc()))
4372 return ExprError();
4373
4375 if (Res.isInvalid())
4376 return ExprError();
4377
4378 // We might get back another placeholder expression if we resolved to a
4379 // builtin.
4380 Res = CheckPlaceholderExpr(Res.get());
4381 if (Res.isInvalid())
4382 return ExprError();
4383
4384 From = Res.get();
4385 FromType = From->getType();
4386 }
4387
4388 // If we're converting to an atomic type, first convert to the corresponding
4389 // non-atomic type.
4390 QualType ToAtomicType;
4391 if (const AtomicType *ToAtomic = ToType->getAs<AtomicType>()) {
4392 ToAtomicType = ToType;
4393 ToType = ToAtomic->getValueType();
4394 }
4395
4396 QualType InitialFromType = FromType;
4397 // Perform the first implicit conversion.
4398 switch (SCS.First) {
4399 case ICK_Identity:
4400 if (const AtomicType *FromAtomic = FromType->getAs<AtomicType>()) {
4401 FromType = FromAtomic->getValueType().getUnqualifiedType();
4402 From = ImplicitCastExpr::Create(Context, FromType, CK_AtomicToNonAtomic,
4403 From, /*BasePath=*/nullptr, VK_PRValue,
4405 }
4406 break;
4407
4408 case ICK_Lvalue_To_Rvalue: {
4409 assert(From->getObjectKind() != OK_ObjCProperty);
4410 ExprResult FromRes = DefaultLvalueConversion(From);
4411 if (FromRes.isInvalid())
4412 return ExprError();
4413
4414 From = FromRes.get();
4415 FromType = From->getType();
4416 break;
4417 }
4418
4420 FromType = Context.getArrayDecayedType(FromType);
4421 From = ImpCastExprToType(From, FromType, CK_ArrayToPointerDecay, VK_PRValue,
4422 /*BasePath=*/nullptr, CCK)
4423 .get();
4424 break;
4425
4427 FromType = Context.getArrayParameterType(FromType);
4428 From = ImpCastExprToType(From, FromType, CK_HLSLArrayRValue, VK_PRValue,
4429 /*BasePath=*/nullptr, CCK)
4430 .get();
4431 break;
4432
4434 FromType = Context.getPointerType(FromType);
4435 From = ImpCastExprToType(From, FromType, CK_FunctionToPointerDecay,
4436 VK_PRValue, /*BasePath=*/nullptr, CCK)
4437 .get();
4438 break;
4439
4440 default:
4441 llvm_unreachable("Improper first standard conversion");
4442 }
4443
4444 // Perform the second implicit conversion
4445 switch (SCS.Second) {
4446 case ICK_Identity:
4447 // C++ [except.spec]p5:
4448 // [For] assignment to and initialization of pointers to functions,
4449 // pointers to member functions, and references to functions: the
4450 // target entity shall allow at least the exceptions allowed by the
4451 // source value in the assignment or initialization.
4452 switch (Action) {
4453 case AA_Assigning:
4454 case AA_Initializing:
4455 // Note, function argument passing and returning are initialization.
4456 case AA_Passing:
4457 case AA_Returning:
4458 case AA_Sending:
4460 if (CheckExceptionSpecCompatibility(From, ToType))
4461 return ExprError();
4462 break;
4463
4464 case AA_Casting:
4465 case AA_Converting:
4466 // Casts and implicit conversions are not initialization, so are not
4467 // checked for exception specification mismatches.
4468 break;
4469 }
4470 // Nothing else to do.
4471 break;
4472
4475 QualType ElTy = ToType;
4476 QualType StepTy = ToType;
4477 if (ToType->isVectorType())
4478 StepTy = adjustVectorType(Context, FromType, ToType, &ElTy);
4479 if (ElTy->isBooleanType()) {
4480 assert(FromType->castAs<EnumType>()->getDecl()->isFixed() &&
4482 "only enums with fixed underlying type can promote to bool");
4483 From = ImpCastExprToType(From, StepTy, CK_IntegralToBoolean, VK_PRValue,
4484 /*BasePath=*/nullptr, CCK)
4485 .get();
4486 } else {
4487 From = ImpCastExprToType(From, StepTy, CK_IntegralCast, VK_PRValue,
4488 /*BasePath=*/nullptr, CCK)
4489 .get();
4490 }
4491 break;
4492 }
4493
4496 QualType StepTy = ToType;
4497 if (ToType->isVectorType())
4498 StepTy = adjustVectorType(Context, FromType, ToType);
4499 From = ImpCastExprToType(From, StepTy, CK_FloatingCast, VK_PRValue,
4500 /*BasePath=*/nullptr, CCK)
4501 .get();
4502 break;
4503 }
4504
4507 QualType FromEl = From->getType()->castAs<ComplexType>()->getElementType();
4508 QualType ToEl = ToType->castAs<ComplexType>()->getElementType();
4509 CastKind CK;
4510 if (FromEl->isRealFloatingType()) {
4511 if (ToEl->isRealFloatingType())
4512 CK = CK_FloatingComplexCast;
4513 else
4514 CK = CK_FloatingComplexToIntegralComplex;
4515 } else if (ToEl->isRealFloatingType()) {
4516 CK = CK_IntegralComplexToFloatingComplex;
4517 } else {
4518 CK = CK_IntegralComplexCast;
4519 }
4520 From = ImpCastExprToType(From, ToType, CK, VK_PRValue, /*BasePath=*/nullptr,
4521 CCK)
4522 .get();
4523 break;
4524 }
4525
4526 case ICK_Floating_Integral: {
4527 QualType ElTy = ToType;
4528 QualType StepTy = ToType;
4529 if (ToType->isVectorType())
4530 StepTy = adjustVectorType(Context, FromType, ToType, &ElTy);
4531 if (ElTy->isRealFloatingType())
4532 From = ImpCastExprToType(From, StepTy, CK_IntegralToFloating, VK_PRValue,
4533 /*BasePath=*/nullptr, CCK)
4534 .get();
4535 else
4536 From = ImpCastExprToType(From, StepTy, CK_FloatingToIntegral, VK_PRValue,
4537 /*BasePath=*/nullptr, CCK)
4538 .get();
4539 break;
4540 }
4541
4543 assert((FromType->isFixedPointType() || ToType->isFixedPointType()) &&
4544 "Attempting implicit fixed point conversion without a fixed "
4545 "point operand");
4546 if (FromType->isFloatingType())
4547 From = ImpCastExprToType(From, ToType, CK_FloatingToFixedPoint,
4548 VK_PRValue,
4549 /*BasePath=*/nullptr, CCK).get();
4550 else if (ToType->isFloatingType())
4551 From = ImpCastExprToType(From, ToType, CK_FixedPointToFloating,
4552 VK_PRValue,
4553 /*BasePath=*/nullptr, CCK).get();
4554 else if (FromType->isIntegralType(Context))
4555 From = ImpCastExprToType(From, ToType, CK_IntegralToFixedPoint,
4556 VK_PRValue,
4557 /*BasePath=*/nullptr, CCK).get();
4558 else if (ToType->isIntegralType(Context))
4559 From = ImpCastExprToType(From, ToType, CK_FixedPointToIntegral,
4560 VK_PRValue,
4561 /*BasePath=*/nullptr, CCK).get();
4562 else if (ToType->isBooleanType())
4563 From = ImpCastExprToType(From, ToType, CK_FixedPointToBoolean,
4564 VK_PRValue,
4565 /*BasePath=*/nullptr, CCK).get();
4566 else
4567 From = ImpCastExprToType(From, ToType, CK_FixedPointCast,
4568 VK_PRValue,
4569 /*BasePath=*/nullptr, CCK).get();
4570 break;
4571
4573 From = ImpCastExprToType(From, ToType, CK_NoOp, From->getValueKind(),
4574 /*BasePath=*/nullptr, CCK).get();
4575 break;
4576
4579 if (SCS.IncompatibleObjC && Action != AA_Casting) {
4580 // Diagnose incompatible Objective-C conversions
4581 if (Action == AA_Initializing || Action == AA_Assigning)
4582 Diag(From->getBeginLoc(),
4583 diag::ext_typecheck_convert_incompatible_pointer)
4584 << ToType << From->getType() << Action << From->getSourceRange()
4585 << 0;
4586 else
4587 Diag(From->getBeginLoc(),
4588 diag::ext_typecheck_convert_incompatible_pointer)
4589 << From->getType() << ToType << Action << From->getSourceRange()
4590 << 0;
4591
4592 if (From->getType()->isObjCObjectPointerType() &&
4593 ToType->isObjCObjectPointerType())
4595 } else if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() &&
4596 !ObjC().CheckObjCARCUnavailableWeakConversion(ToType,
4597 From->getType())) {
4598 if (Action == AA_Initializing)
4599 Diag(From->getBeginLoc(), diag::err_arc_weak_unavailable_assign);
4600 else
4601 Diag(From->getBeginLoc(), diag::err_arc_convesion_of_weak_unavailable)
4602 << (Action == AA_Casting) << From->getType() << ToType
4603 << From->getSourceRange();
4604 }
4605
4606 // Defer address space conversion to the third conversion.
4607 QualType FromPteeType = From->getType()->getPointeeType();
4608 QualType ToPteeType = ToType->getPointeeType();
4609 QualType NewToType = ToType;
4610 if (!FromPteeType.isNull() && !ToPteeType.isNull() &&
4611 FromPteeType.getAddressSpace() != ToPteeType.getAddressSpace()) {
4612 NewToType = Context.removeAddrSpaceQualType(ToPteeType);
4613 NewToType = Context.getAddrSpaceQualType(NewToType,
4614 FromPteeType.getAddressSpace());
4615 if (ToType->isObjCObjectPointerType())
4616 NewToType = Context.getObjCObjectPointerType(NewToType);
4617 else if (ToType->isBlockPointerType())
4618 NewToType = Context.getBlockPointerType(NewToType);
4619 else
4620 NewToType = Context.getPointerType(NewToType);
4621 }
4622
4623 CastKind Kind;
4624 CXXCastPath BasePath;
4625 if (CheckPointerConversion(From, NewToType, Kind, BasePath, CStyle))
4626 return ExprError();
4627
4628 // Make sure we extend blocks if necessary.
4629 // FIXME: doing this here is really ugly.
4630 if (Kind == CK_BlockPointerToObjCPointerCast) {
4631 ExprResult E = From;
4633 From = E.get();
4634 }
4636 ObjC().CheckObjCConversion(SourceRange(), NewToType, From, CCK);
4637 From = ImpCastExprToType(From, NewToType, Kind, VK_PRValue, &BasePath, CCK)
4638 .get();
4639 break;
4640 }
4641
4642 case ICK_Pointer_Member: {
4643 CastKind Kind;
4644 CXXCastPath BasePath;
4645 if (CheckMemberPointerConversion(From, ToType, Kind, BasePath, CStyle))
4646 return ExprError();
4647 if (CheckExceptionSpecCompatibility(From, ToType))
4648 return ExprError();
4649
4650 // We may not have been able to figure out what this member pointer resolved
4651 // to up until this exact point. Attempt to lock in its inheritance model.
4653 (void)isCompleteType(From->getExprLoc(), From->getType());
4654 (void)isCompleteType(From->getExprLoc(), ToType);
4655 }
4656
4657 From =
4658 ImpCastExprToType(From, ToType, Kind, VK_PRValue, &BasePath, CCK).get();
4659 break;
4660 }
4661
4663 // Perform half-to-boolean conversion via float.
4664 if (From->getType()->isHalfType()) {
4665 From = ImpCastExprToType(From, Context.FloatTy, CK_FloatingCast).get();
4666 FromType = Context.FloatTy;
4667 }
4668 QualType ElTy = FromType;
4669 QualType StepTy = ToType;
4670 if (FromType->isVectorType()) {
4671 if (getLangOpts().HLSL)
4672 StepTy = adjustVectorType(Context, FromType, ToType);
4673 ElTy = FromType->castAs<VectorType>()->getElementType();
4674 }
4675
4676 From = ImpCastExprToType(From, StepTy, ScalarTypeToBooleanCastKind(ElTy),
4677 VK_PRValue,
4678 /*BasePath=*/nullptr, CCK)
4679 .get();
4680 break;
4681 }
4682
4683 case ICK_Derived_To_Base: {
4684 CXXCastPath BasePath;
4686 From->getType(), ToType.getNonReferenceType(), From->getBeginLoc(),
4687 From->getSourceRange(), &BasePath, CStyle))
4688 return ExprError();
4689
4690 From = ImpCastExprToType(From, ToType.getNonReferenceType(),
4691 CK_DerivedToBase, From->getValueKind(),
4692 &BasePath, CCK).get();
4693 break;
4694 }
4695
4697 From = ImpCastExprToType(From, ToType, CK_BitCast, VK_PRValue,
4698 /*BasePath=*/nullptr, CCK)
4699 .get();
4700 break;
4701
4704 From = ImpCastExprToType(From, ToType, CK_BitCast, VK_PRValue,
4705 /*BasePath=*/nullptr, CCK)
4706 .get();
4707 break;
4708
4709 case ICK_Vector_Splat: {
4710 // Vector splat from any arithmetic type to a vector.
4711 Expr *Elem = prepareVectorSplat(ToType, From).get();
4712 From = ImpCastExprToType(Elem, ToType, CK_VectorSplat, VK_PRValue,
4713 /*BasePath=*/nullptr, CCK)
4714 .get();
4715 break;
4716 }
4717
4718 case ICK_Complex_Real:
4719 // Case 1. x -> _Complex y
4720 if (const ComplexType *ToComplex = ToType->getAs<ComplexType>()) {
4721 QualType ElType = ToComplex->getElementType();
4722 bool isFloatingComplex = ElType->isRealFloatingType();
4723
4724 // x -> y
4725 if (Context.hasSameUnqualifiedType(ElType, From->getType())) {
4726 // do nothing
4727 } else if (From->getType()->isRealFloatingType()) {
4728 From = ImpCastExprToType(From, ElType,
4729 isFloatingComplex ? CK_FloatingCast : CK_FloatingToIntegral).get();
4730 } else {
4731 assert(From->getType()->isIntegerType());
4732 From = ImpCastExprToType(From, ElType,
4733 isFloatingComplex ? CK_IntegralToFloating : CK_IntegralCast).get();
4734 }
4735 // y -> _Complex y
4736 From = ImpCastExprToType(From, ToType,
4737 isFloatingComplex ? CK_FloatingRealToComplex
4738 : CK_IntegralRealToComplex).get();
4739
4740 // Case 2. _Complex x -> y
4741 } else {
4742 auto *FromComplex = From->getType()->castAs<ComplexType>();
4743 QualType ElType = FromComplex->getElementType();
4744 bool isFloatingComplex = ElType->isRealFloatingType();
4745
4746 // _Complex x -> x
4747 From = ImpCastExprToType(From, ElType,
4748 isFloatingComplex ? CK_FloatingComplexToReal
4749 : CK_IntegralComplexToReal,
4750 VK_PRValue, /*BasePath=*/nullptr, CCK)
4751 .get();
4752
4753 // x -> y
4754 if (Context.hasSameUnqualifiedType(ElType, ToType)) {
4755 // do nothing
4756 } else if (ToType->isRealFloatingType()) {
4757 From = ImpCastExprToType(From, ToType,
4758 isFloatingComplex ? CK_FloatingCast
4759 : CK_IntegralToFloating,
4760 VK_PRValue, /*BasePath=*/nullptr, CCK)
4761 .get();
4762 } else {
4763 assert(ToType->isIntegerType());
4764 From = ImpCastExprToType(From, ToType,
4765 isFloatingComplex ? CK_FloatingToIntegral
4766 : CK_IntegralCast,
4767 VK_PRValue, /*BasePath=*/nullptr, CCK)
4768 .get();
4769 }