1//===--- SemaExprCXX.cpp - Semantic Analysis for Expressions --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// Implements semantic analysis for C++ expressions.
11///
12//===----------------------------------------------------------------------===//
13
14#include "TreeTransform.h"
15#include "TypeLocBuilder.h"
17#include "clang/AST/ASTLambda.h"
19#include "clang/AST/CharUnits.h"
20#include "clang/AST/DeclObjC.h"
21#include "clang/AST/ExprCXX.h"
23#include "clang/AST/ExprObjC.h"
25#include "clang/AST/Type.h"
26#include "clang/AST/TypeLoc.h"
34#include "clang/Sema/DeclSpec.h"
37#include "clang/Sema/Lookup.h"
39#include "clang/Sema/Scope.h"
41#include "clang/Sema/SemaCUDA.h"
44#include "clang/Sema/SemaObjC.h"
45#include "clang/Sema/SemaPPC.h"
46#include "clang/Sema/Template.h"
48#include "llvm/ADT/APInt.h"
49#include "llvm/ADT/STLExtras.h"
50#include "llvm/ADT/STLForwardCompat.h"
51#include "llvm/ADT/StringExtras.h"
52#include "llvm/Support/ErrorHandling.h"
53#include "llvm/Support/TypeSize.h"
54#include <optional>
55using namespace clang;
56using namespace sema;
57
58ParsedType Sema::getInheritingConstructorName(CXXScopeSpec &SS,
59 SourceLocation NameLoc,
60 const IdentifierInfo &Name) {
61 NestedNameSpecifier *NNS = SS.getScopeRep();
62
63 // Convert the nested-name-specifier into a type.
64 QualType Type;
65 switch (NNS->getKind()) {
66 case NestedNameSpecifier::TypeSpec:
67 case NestedNameSpecifier::TypeSpecWithTemplate:
68 Type = QualType(NNS->getAsType(), 0);
69 break;
70
71 case NestedNameSpecifier::Identifier:
72 // Strip off the last layer of the nested-name-specifier and build a
73 // typename type for it.
74 assert(NNS->getAsIdentifier() == &Name && "not a constructor name");
75 Type = Context.getDependentNameType(ElaboratedTypeKeyword::None,
76 NNS->getPrefix(), NNS->getAsIdentifier());
77 break;
78
79 case NestedNameSpecifier::Global:
80 case NestedNameSpecifier::Super:
81 case NestedNameSpecifier::Namespace:
82 case NestedNameSpecifier::NamespaceAlias:
83 llvm_unreachable("Nested name specifier is not a type for inheriting ctor");
84 }
85
86 // This reference to the type is located entirely at the location of the
87 // final identifier in the qualified-id.
88 return CreateParsedType(Type,
89 Context.getTrivialTypeSourceInfo(Type, NameLoc));
90}
91
92ParsedType Sema::getConstructorName(const IdentifierInfo &II,
93 SourceLocation NameLoc, Scope *S,
94 CXXScopeSpec &SS, bool EnteringContext) {
95 CXXRecordDecl *CurClass = getCurrentClass(S, &SS);
96 assert(CurClass && &II == CurClass->getIdentifier() &&
97 "not a constructor name");
98
99 // When naming a constructor as a member of a dependent context (e.g., in a
100 // friend declaration or an inherited constructor declaration), form an
101 // unresolved "typename" type.
102 if (CurClass->isDependentContext() && !EnteringContext && SS.getScopeRep()) {
104 SS.getScopeRep(), &II);
105 return ParsedType::make(T);
106 }
107
108 if (SS.isNotEmpty() && RequireCompleteDeclContext(SS, CurClass))
109 return ParsedType();
110
111 // Find the injected-class-name declaration. Note that we make no attempt to
112 // diagnose cases where the injected-class-name is shadowed: the only
113 // declaration that can validly shadow the injected-class-name is a
114 // non-static data member, and if the class contains both a non-static data
115 // member and a constructor then it is ill-formed (we check that in
116 // CheckCompletedCXXClass).
117 CXXRecordDecl *InjectedClassName = nullptr;
118 for (NamedDecl *ND : CurClass->lookup(&II)) {
119 auto *RD = dyn_cast<CXXRecordDecl>(ND);
120 if (RD && RD->isInjectedClassName()) {
121 InjectedClassName = RD;
122 break;
123 }
124 }
125 if (!InjectedClassName) {
126 if (!CurClass->isInvalidDecl()) {
127 // FIXME: RequireCompleteDeclContext doesn't check dependent contexts
128 // properly. Work around it here for now.
130 diag::err_incomplete_nested_name_spec) << CurClass << SS.getRange();
131 }
132 return ParsedType();
133 }
134
135 QualType T = Context.getTypeDeclType(InjectedClassName);
136 DiagnoseUseOfDecl(InjectedClassName, NameLoc);
137 MarkAnyDeclReferenced(NameLoc, InjectedClassName, /*OdrUse=*/false);
138
139 return ParsedType::make(T);
140}
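// For illustration, the injected-class-name found above is what lets a
// qualified constructor name resolve to the class, e.g.:
//
//   struct S { S(); };
//   S::S() {}   // the 'S' after '::' is found via the injected-class-name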
141
142ParsedType Sema::getDestructorName(const IdentifierInfo &II,
143 SourceLocation NameLoc, Scope *S,
144 CXXScopeSpec &SS, ParsedType ObjectTypePtr,
145 bool EnteringContext) {
146 // Determine where to perform name lookup.
147
148 // FIXME: This area of the standard is very messy, and the current
149 // wording is rather unclear about which scopes we search for the
150 // destructor name; see core issues 399 and 555. Issue 399 in
151 // particular shows where the current description of destructor name
152 // lookup is completely out of line with existing practice, e.g.,
153 // this appears to be ill-formed:
154 //
155 // namespace N {
156 // template <typename T> struct S {
157 // ~S();
158 // };
159 // }
160 //
161 // void f(N::S<int>* s) {
162 // s->N::S<int>::~S();
163 // }
164 //
165 // See also PR6358 and PR6359.
166 //
167 // For now, we accept all the cases in which the name given could plausibly
168 // be interpreted as a correct destructor name, issuing off-by-default
169 // extension diagnostics on the cases that don't strictly conform to the
170 // C++20 rules. This basically means we always consider looking in the
171 // nested-name-specifier prefix, the complete nested-name-specifier, and
172 // the scope, and accept if we find the expected type in any of the three
173 // places.
174
175 if (SS.isInvalid())
176 return nullptr;
177
178 // Whether we've failed with a diagnostic already.
179 bool Failed = false;
180
183
184 // If we have an object type, it's because we are in a
185 // pseudo-destructor-expression or a member access expression, and
186 // we know what type we're looking for.
187 QualType SearchType =
188 ObjectTypePtr ? GetTypeFromParser(ObjectTypePtr) : QualType();
189
190 auto CheckLookupResult = [&](LookupResult &Found) -> ParsedType {
191 auto IsAcceptableResult = [&](NamedDecl *D) -> bool {
192 auto *Type = dyn_cast<TypeDecl>(D->getUnderlyingDecl());
193 if (!Type)
194 return false;
195
196 if (SearchType.isNull() || SearchType->isDependentType())
197 return true;
198
200 return Context.hasSameUnqualifiedType(T, SearchType);
201 };
202
203 unsigned NumAcceptableResults = 0;
204 for (NamedDecl *D : Found) {
205 if (IsAcceptableResult(D))
206 ++NumAcceptableResults;
207
208 // Don't list a class twice in the lookup failure diagnostic if it's
209 // found by both its injected-class-name and by the name in the enclosing
210 // scope.
211 if (auto *RD = dyn_cast<CXXRecordDecl>(D))
212 if (RD->isInjectedClassName())
213 D = cast<NamedDecl>(RD->getParent());
214
215 if (FoundDeclSet.insert(D).second)
216 FoundDecls.push_back(D);
217 }
218
219 // As an extension, attempt to "fix" an ambiguity by erasing all non-type
220 // results, and all non-matching results if we have a search type. It's not
221 // clear what the right behavior is if destructor lookup hits an ambiguity,
222 // but other compilers do generally accept at least some kinds of
223 // ambiguity.
224 if (Found.isAmbiguous() && NumAcceptableResults == 1) {
225 Diag(NameLoc, diag::ext_dtor_name_ambiguous);
226 LookupResult::Filter F = Found.makeFilter();
227 while (F.hasNext()) {
228 NamedDecl *D = F.next();
229 if (auto *TD = dyn_cast<TypeDecl>(D->getUnderlyingDecl()))
230 Diag(D->getLocation(), diag::note_destructor_type_here)
232 else
233 Diag(D->getLocation(), diag::note_destructor_nontype_here);
234
235 if (!IsAcceptableResult(D))
236 F.erase();
237 }
238 F.done();
239 }
240
241 if (Found.isAmbiguous())
242 Failed = true;
243
244 if (TypeDecl *Type = Found.getAsSingle<TypeDecl>()) {
245 if (IsAcceptableResult(Type)) {
247 MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
248 return CreateParsedType(
251 }
252 }
253
254 return nullptr;
255 };
256
257 bool IsDependent = false;
258
259 auto LookupInObjectType = [&]() -> ParsedType {
260 if (Failed || SearchType.isNull())
261 return nullptr;
262
263 IsDependent |= SearchType->isDependentType();
264
265 LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
266 DeclContext *LookupCtx = computeDeclContext(SearchType);
267 if (!LookupCtx)
268 return nullptr;
269 LookupQualifiedName(Found, LookupCtx);
270 return CheckLookupResult(Found);
271 };
272
273 auto LookupInNestedNameSpec = [&](CXXScopeSpec &LookupSS) -> ParsedType {
274 if (Failed)
275 return nullptr;
276
277 IsDependent |= isDependentScopeSpecifier(LookupSS);
278 DeclContext *LookupCtx = computeDeclContext(LookupSS, EnteringContext);
279 if (!LookupCtx)
280 return nullptr;
281
282 LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
283 if (RequireCompleteDeclContext(LookupSS, LookupCtx)) {
284 Failed = true;
285 return nullptr;
286 }
287 LookupQualifiedName(Found, LookupCtx);
288 return CheckLookupResult(Found);
289 };
290
291 auto LookupInScope = [&]() -> ParsedType {
292 if (Failed || !S)
293 return nullptr;
294
295 LookupResult Found(*this, &II, NameLoc, LookupDestructorName);
296 LookupName(Found, S);
297 return CheckLookupResult(Found);
298 };
299
300 // C++2a [basic.lookup.qual]p6:
301 // In a qualified-id of the form
302 //
303 // nested-name-specifier[opt] type-name :: ~ type-name
304 //
305 // the second type-name is looked up in the same scope as the first.
306 //
307 // We interpret this as meaning that if you do a dual-scope lookup for the
308 // first name, you also do a dual-scope lookup for the second name, per
309 // C++ [basic.lookup.classref]p4:
310 //
311 // If the id-expression in a class member access is a qualified-id of the
312 // form
313 //
314 // class-name-or-namespace-name :: ...
315 //
316 // the class-name-or-namespace-name following the . or -> is first looked
317 // up in the class of the object expression and the name, if found, is used.
318 // Otherwise, it is looked up in the context of the entire
319 // postfix-expression.
320 //
321 // This looks in the same scopes as for an unqualified destructor name:
322 //
323 // C++ [basic.lookup.classref]p3:
324 // If the unqualified-id is ~ type-name, the type-name is looked up
325 // in the context of the entire postfix-expression. If the type T
326 // of the object expression is of a class type C, the type-name is
327 // also looked up in the scope of class C. At least one of the
328 // lookups shall find a name that refers to cv T.
329 //
330 // FIXME: The intent is unclear here. Should type-name::~type-name look in
331 // the scope anyway if it finds a non-matching name declared in the class?
332 // If both lookups succeed and find a dependent result, which result should
333 // we retain? (Same question for p->~type-name().)
334
335 if (NestedNameSpecifier *Prefix =
336 SS.isSet() ? SS.getScopeRep()->getPrefix() : nullptr) {
337 // This is
338 //
339 // nested-name-specifier type-name :: ~ type-name
340 //
341 // Look for the second type-name in the nested-name-specifier.
342 CXXScopeSpec PrefixSS;
343 PrefixSS.Adopt(NestedNameSpecifierLoc(Prefix, SS.location_data()));
344 if (ParsedType T = LookupInNestedNameSpec(PrefixSS))
345 return T;
346 } else {
347 // This is one of
348 //
349 // type-name :: ~ type-name
350 // ~ type-name
351 //
352 // Look in the scope and (if any) the object type.
353 if (ParsedType T = LookupInScope())
354 return T;
355 if (ParsedType T = LookupInObjectType())
356 return T;
357 }
358
359 if (Failed)
360 return nullptr;
361
362 if (IsDependent) {
363 // We didn't find our type, but that's OK: it's dependent anyway.
364
365 // FIXME: What if we have no nested-name-specifier?
366 QualType T =
368 SS.getWithLocInContext(Context), II, NameLoc);
369 return ParsedType::make(T);
370 }
371
372 // The remaining cases are all non-standard extensions imitating the behavior
373 // of various other compilers.
374 unsigned NumNonExtensionDecls = FoundDecls.size();
375
376 if (SS.isSet()) {
377 // For compatibility with older broken C++ rules and existing code,
378 //
379 // nested-name-specifier :: ~ type-name
380 //
381 // also looks for type-name within the nested-name-specifier.
382 if (ParsedType T = LookupInNestedNameSpec(SS)) {
383 Diag(SS.getEndLoc(), diag::ext_dtor_named_in_wrong_scope)
384 << SS.getRange()
386 ("::" + II.getName()).str());
387 return T;
388 }
389
390 // For compatibility with other compilers and older versions of Clang,
391 //
392 // nested-name-specifier type-name :: ~ type-name
393 //
394 // also looks for type-name in the scope. Unfortunately, we can't
395 // reasonably apply this fallback for dependent nested-name-specifiers.
396 if (SS.isValid() && SS.getScopeRep()->getPrefix()) {
397 if (ParsedType T = LookupInScope()) {
398 Diag(SS.getEndLoc(), diag::ext_qualified_dtor_named_in_lexical_scope)
400 Diag(FoundDecls.back()->getLocation(), diag::note_destructor_type_here)
402 return T;
403 }
404 }
405 }
406
407 // We didn't find anything matching; tell the user what we did find (if
408 // anything).
409
410 // Don't tell the user about declarations we shouldn't have found.
411 FoundDecls.resize(NumNonExtensionDecls);
412
413 // List types before non-types.
414 std::stable_sort(FoundDecls.begin(), FoundDecls.end(),
415 [](NamedDecl *A, NamedDecl *B) {
416 return isa<TypeDecl>(A->getUnderlyingDecl()) >
417 isa<TypeDecl>(B->getUnderlyingDecl());
418 });
419
420 // Suggest a fixit to properly name the destroyed type.
421 auto MakeFixItHint = [&]{
422 const CXXRecordDecl *Destroyed = nullptr;
423 // FIXME: If we have a scope specifier, suggest its last component?
424 if (!SearchType.isNull())
425 Destroyed = SearchType->getAsCXXRecordDecl();
426 else if (S)
427 Destroyed = dyn_cast_or_null<CXXRecordDecl>(S->getEntity());
428 if (Destroyed)
430 Destroyed->getNameAsString());
431 return FixItHint();
432 };
433
434 if (FoundDecls.empty()) {
435 // FIXME: Attempt typo-correction?
436 Diag(NameLoc, diag::err_undeclared_destructor_name)
437 << &II << MakeFixItHint();
438 } else if (!SearchType.isNull() && FoundDecls.size() == 1) {
439 if (auto *TD = dyn_cast<TypeDecl>(FoundDecls[0]->getUnderlyingDecl())) {
440 assert(!SearchType.isNull() &&
441 "should only reject a type result if we have a search type");
443 Diag(NameLoc, diag::err_destructor_expr_type_mismatch)
444 << T << SearchType << MakeFixItHint();
445 } else {
446 Diag(NameLoc, diag::err_destructor_expr_nontype)
447 << &II << MakeFixItHint();
448 }
449 } else {
450 Diag(NameLoc, SearchType.isNull() ? diag::err_destructor_name_nontype
451 : diag::err_destructor_expr_mismatch)
452 << &II << SearchType << MakeFixItHint();
453 }
454
455 for (NamedDecl *FoundD : FoundDecls) {
456 if (auto *TD = dyn_cast<TypeDecl>(FoundD->getUnderlyingDecl()))
457 Diag(FoundD->getLocation(), diag::note_destructor_type_here)
459 else
460 Diag(FoundD->getLocation(), diag::note_destructor_nontype_here)
461 << FoundD;
462 }
463
464 return nullptr;
465}
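// For illustration (types invented), the mismatch diagnostics emitted above
// cover cases such as:
//
//   struct A { ~A(); };
//   struct B { ~B(); };
//   void f(A *a) { a->~B(); }   // error: the destroyed type 'B' does not
//                               // match the object type 'A'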
466
467ParsedType Sema::getDestructorTypeForDecltype(const DeclSpec &DS,
468 ParsedType ObjectType) {
470 return nullptr;
471
473 Diag(DS.getTypeSpecTypeLoc(), diag::err_decltype_auto_invalid);
474 return nullptr;
475 }
476
478 "unexpected type in getDestructorType");
480
481 // If we know the type of the object, check that the correct destructor
482 // type was named now; we can give better diagnostics this way.
483 QualType SearchType = GetTypeFromParser(ObjectType);
484 if (!SearchType.isNull() && !SearchType->isDependentType() &&
485 !Context.hasSameUnqualifiedType(T, SearchType)) {
486 Diag(DS.getTypeSpecTypeLoc(), diag::err_destructor_expr_type_mismatch)
487 << T << SearchType;
488 return nullptr;
489 }
490
491 return ParsedType::make(T);
492}
493
494bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
495 const UnqualifiedId &Name, bool IsUDSuffix) {
496 assert(Name.getKind() == UnqualifiedIdKind::IK_LiteralOperatorId);
497 if (!IsUDSuffix) {
498 // [over.literal] p8
499 //
500 // double operator""_Bq(long double); // OK: not a reserved identifier
501 // double operator"" _Bq(long double); // ill-formed, no diagnostic required
502 const IdentifierInfo *II = Name.Identifier;
504 SourceLocation Loc = Name.getEndLoc();
506 if (auto Hint = FixItHint::CreateReplacement(
507 Name.getSourceRange(),
508 (StringRef("operator\"\"") + II->getName()).str());
509 isReservedInAllContexts(Status)) {
510 Diag(Loc, diag::warn_reserved_extern_symbol)
511 << II << static_cast<int>(Status) << Hint;
512 } else {
513 Diag(Loc, diag::warn_deprecated_literal_operator_id) << II << Hint;
514 }
515 }
516 }
517
518 if (!SS.isValid())
519 return false;
520
521 switch (SS.getScopeRep()->getKind()) {
525 // Per C++11 [over.literal]p2, literal operators can only be declared at
526 // namespace scope. Therefore, this unqualified-id cannot name anything.
527 // Reject it early, because we have no AST representation for this in the
528 // case where the scope is dependent.
529 Diag(Name.getBeginLoc(), diag::err_literal_operator_id_outside_namespace)
530 << SS.getScopeRep();
531 return true;
532
537 return false;
538 }
539
540 llvm_unreachable("unknown nested name specifier kind");
541}
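// A sketch of how the branches above fire (suffix names chosen arbitrarily):
//
//   double operator""_x(long double);    // ud-suffix spelling: nothing to check
//   double operator"" _x(long double);   // deprecated spelling: '_x' is not
//                                        // reserved in all contexts, so the
//                                        // deprecation warning fires
//   double operator"" _X(long double);   // '_X' is reserved in all contexts,
//                                        // so the reserved-identifier warning
//                                        // fires instead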
542
543ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
544 SourceLocation TypeidLoc,
545 TypeSourceInfo *Operand,
546 SourceLocation RParenLoc) {
547 // C++ [expr.typeid]p4:
548 // The top-level cv-qualifiers of the lvalue expression or the type-id
549 // that is the operand of typeid are always ignored.
550 // If the type of the type-id is a class type or a reference to a class
551 // type, the class shall be completely-defined.
552 Qualifiers Quals;
553 QualType T
554 = Context.getUnqualifiedArrayType(Operand->getType().getNonReferenceType(),
555 Quals);
556 if (T->getAs<RecordType>() &&
557 RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid))
558 return ExprError();
559
561 return ExprError(Diag(TypeidLoc, diag::err_variably_modified_typeid) << T);
562
563 if (CheckQualifiedFunctionForTypeId(T, TypeidLoc))
564 return ExprError();
565
566 return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), Operand,
567 SourceRange(TypeidLoc, RParenLoc));
568}
569
570ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
571 SourceLocation TypeidLoc,
572 Expr *E,
573 SourceLocation RParenLoc) {
574 bool WasEvaluated = false;
575 if (E && !E->isTypeDependent()) {
576 if (E->hasPlaceholderType()) {
578 if (result.isInvalid()) return ExprError();
579 E = result.get();
580 }
581
582 QualType T = E->getType();
583 if (const RecordType *RecordT = T->getAs<RecordType>()) {
584 CXXRecordDecl *RecordD = cast<CXXRecordDecl>(RecordT->getDecl());
585 // C++ [expr.typeid]p3:
586 // [...] If the type of the expression is a class type, the class
587 // shall be completely-defined.
588 if (RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid))
589 return ExprError();
590
591 // C++ [expr.typeid]p3:
592 // When typeid is applied to an expression other than a glvalue of a
593 // polymorphic class type [...] [the] expression is an unevaluated
594 // operand. [...]
595 if (RecordD->isPolymorphic() && E->isGLValue()) {
596 if (isUnevaluatedContext()) {
597 // The operand was processed in unevaluated context, switch the
598 // context and recheck the subexpression.
600 if (Result.isInvalid())
601 return ExprError();
602 E = Result.get();
603 }
604
605 // We require a vtable to query the type at run time.
606 MarkVTableUsed(TypeidLoc, RecordD);
607 WasEvaluated = true;
608 }
609 }
610
612 if (Result.isInvalid())
613 return ExprError();
614 E = Result.get();
615
616 // C++ [expr.typeid]p4:
617 // [...] If the type of the type-id is a reference to a possibly
618 // cv-qualified type, the result of the typeid expression refers to a
619 // std::type_info object representing the cv-unqualified referenced
620 // type.
621 Qualifiers Quals;
622 QualType UnqualT = Context.getUnqualifiedArrayType(T, Quals);
623 if (!Context.hasSameType(T, UnqualT)) {
624 T = UnqualT;
625 E = ImpCastExprToType(E, UnqualT, CK_NoOp, E->getValueKind()).get();
626 }
627 }
628
630 return ExprError(Diag(TypeidLoc, diag::err_variably_modified_typeid)
631 << E->getType());
632 else if (!inTemplateInstantiation() &&
633 E->HasSideEffects(Context, WasEvaluated)) {
634 // The expression operand for typeid is in an unevaluated expression
635 // context, so side effects could result in unintended consequences.
636 Diag(E->getExprLoc(), WasEvaluated
637 ? diag::warn_side_effects_typeid
638 : diag::warn_side_effects_unevaluated_context);
639 }
640
641 return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), E,
642 SourceRange(TypeidLoc, RParenLoc));
643}
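// For illustration, the evaluated/unevaluated distinction handled above:
//
//   struct Poly { virtual ~Poly(); };
//   void f(Poly *p, int *q) {
//     typeid(*p);   // glvalue of polymorphic class type: the operand is
//                   // evaluated, a vtable is required, and the expression
//                   // can throw std::bad_typeid for a null 'p'
//     typeid(*q);   // non-polymorphic operand: unevaluated
//   }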
644
645/// ActOnCXXTypeidOfType - Parse typeid( type-id ) or typeid (expression);
648 bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
649 // typeid is not supported in OpenCL.
650 if (getLangOpts().OpenCLCPlusPlus) {
651 return ExprError(Diag(OpLoc, diag::err_openclcxx_not_supported)
652 << "typeid");
653 }
654
655 // Find the std::type_info type.
656 if (!getStdNamespace())
657 return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));
658
659 if (!CXXTypeInfoDecl) {
660 IdentifierInfo *TypeInfoII = &PP.getIdentifierTable().get("type_info");
661 LookupResult R(*this, TypeInfoII, SourceLocation(), LookupTagName);
664 // Microsoft's typeinfo doesn't have type_info in std but in the global
665 // namespace if _HAS_EXCEPTIONS is defined to 0. See PR13153.
666 if (!CXXTypeInfoDecl && LangOpts.MSVCCompat) {
669 }
670 if (!CXXTypeInfoDecl)
671 return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));
672 }
673
674 if (!getLangOpts().RTTI) {
675 return ExprError(Diag(OpLoc, diag::err_no_typeid_with_fno_rtti));
676 }
677
679
680 if (isType) {
681 // The operand is a type; handle it as such.
682 TypeSourceInfo *TInfo = nullptr;
684 &TInfo);
685 if (T.isNull())
686 return ExprError();
687
688 if (!TInfo)
689 TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc);
690
691 return BuildCXXTypeId(TypeInfoType, OpLoc, TInfo, RParenLoc);
692 }
693
694 // The operand is an expression.
696 BuildCXXTypeId(TypeInfoType, OpLoc, (Expr *)TyOrExpr, RParenLoc);
697
698 if (!getLangOpts().RTTIData && !Result.isInvalid())
699 if (auto *CTE = dyn_cast<CXXTypeidExpr>(Result.get()))
700 if (CTE->isPotentiallyEvaluated() && !CTE->isMostDerived(Context))
701 Diag(OpLoc, diag::warn_no_typeid_with_rtti_disabled)
702 << (getDiagnostics().getDiagnosticOptions().getFormat() ==
704 return Result;
705}
706
707/// Grabs __declspec(uuid()) off a type, or returns 0 if we cannot resolve to
708/// a single GUID.
709static void
712 // Optionally remove one level of pointer, reference or array indirection.
713 const Type *Ty = QT.getTypePtr();
714 if (QT->isPointerOrReferenceType())
715 Ty = QT->getPointeeType().getTypePtr();
716 else if (QT->isArrayType())
717 Ty = Ty->getBaseElementTypeUnsafe();
718
719 const auto *TD = Ty->getAsTagDecl();
720 if (!TD)
721 return;
722
723 if (const auto *Uuid = TD->getMostRecentDecl()->getAttr<UuidAttr>()) {
724 UuidAttrs.insert(Uuid);
725 return;
726 }
727
728 // __uuidof can grab UUIDs from template arguments.
729 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(TD)) {
730 const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
731 for (const TemplateArgument &TA : TAL.asArray()) {
732 const UuidAttr *UuidForTA = nullptr;
733 if (TA.getKind() == TemplateArgument::Type)
734 getUuidAttrOfType(SemaRef, TA.getAsType(), UuidAttrs);
735 else if (TA.getKind() == TemplateArgument::Declaration)
736 getUuidAttrOfType(SemaRef, TA.getAsDecl()->getType(), UuidAttrs);
737
738 if (UuidForTA)
739 UuidAttrs.insert(UuidForTA);
740 }
741 }
742}
743
745 SourceLocation TypeidLoc,
746 TypeSourceInfo *Operand,
747 SourceLocation RParenLoc) {
748 MSGuidDecl *Guid = nullptr;
749 if (!Operand->getType()->isDependentType()) {
751 getUuidAttrOfType(*this, Operand->getType(), UuidAttrs);
752 if (UuidAttrs.empty())
753 return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid));
754 if (UuidAttrs.size() > 1)
755 return ExprError(Diag(TypeidLoc, diag::err_uuidof_with_multiple_guids));
756 Guid = UuidAttrs.back()->getGuidDecl();
757 }
758
759 return new (Context)
760 CXXUuidofExpr(Type, Operand, Guid, SourceRange(TypeidLoc, RParenLoc));
761}
762
764 Expr *E, SourceLocation RParenLoc) {
765 MSGuidDecl *Guid = nullptr;
766 if (!E->getType()->isDependentType()) {
768 // A null pointer results in {00000000-0000-0000-0000-000000000000}.
770 } else {
772 getUuidAttrOfType(*this, E->getType(), UuidAttrs);
773 if (UuidAttrs.empty())
774 return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid));
775 if (UuidAttrs.size() > 1)
776 return ExprError(Diag(TypeidLoc, diag::err_uuidof_with_multiple_guids));
777 Guid = UuidAttrs.back()->getGuidDecl();
778 }
779 }
780
781 return new (Context)
782 CXXUuidofExpr(Type, E, Guid, SourceRange(TypeidLoc, RParenLoc));
783}
784
785/// ActOnCXXUuidof - Parse __uuidof( type-id ) or __uuidof (expression);
788 bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
789 QualType GuidType = Context.getMSGuidType();
790 GuidType.addConst();
791
792 if (isType) {
793 // The operand is a type; handle it as such.
794 TypeSourceInfo *TInfo = nullptr;
796 &TInfo);
797 if (T.isNull())
798 return ExprError();
799
800 if (!TInfo)
801 TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc);
802
803 return BuildCXXUuidof(GuidType, OpLoc, TInfo, RParenLoc);
804 }
805
806 // The operand is an expression.
807 return BuildCXXUuidof(GuidType, OpLoc, (Expr*)TyOrExpr, RParenLoc);
808}
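// A minimal illustration of the Microsoft extension handled above (the GUID
// string is made up):
//
//   struct __declspec(uuid("01234567-89ab-cdef-0123-456789abcdef")) IThing;
//   const GUID &a = __uuidof(IThing);   // GUID taken from the uuid attribute
//   const GUID &b = __uuidof(0);        // null pointer constant: all-zero GUID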
809
812 assert((Kind == tok::kw_true || Kind == tok::kw_false) &&
813 "Unknown C++ Boolean value!");
814 return new (Context)
815 CXXBoolLiteralExpr(Kind == tok::kw_true, Context.BoolTy, OpLoc);
816}
817
821}
822
825 bool IsThrownVarInScope = false;
826 if (Ex) {
827 // C++0x [class.copymove]p31:
828 // When certain criteria are met, an implementation is allowed to omit the
829 // copy/move construction of a class object [...]
830 //
831 // - in a throw-expression, when the operand is the name of a
832 // non-volatile automatic object (other than a function or catch-
833 // clause parameter) whose scope does not extend beyond the end of the
834 // innermost enclosing try-block (if there is one), the copy/move
835 // operation from the operand to the exception object (15.1) can be
836 // omitted by constructing the automatic object directly into the
837 // exception object
838 if (const auto *DRE = dyn_cast<DeclRefExpr>(Ex->IgnoreParens()))
839 if (const auto *Var = dyn_cast<VarDecl>(DRE->getDecl());
840 Var && Var->hasLocalStorage() &&
841 !Var->getType().isVolatileQualified()) {
842 for (; S; S = S->getParent()) {
843 if (S->isDeclScope(Var)) {
844 IsThrownVarInScope = true;
845 break;
846 }
847
848 // FIXME: Many of the scope checks here seem incorrect.
849 if (S->getFlags() &
852 break;
853 }
854 }
855 }
856
857 return BuildCXXThrow(OpLoc, Ex, IsThrownVarInScope);
858}
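// For illustration, the scope check above is what allows the copy/move to be
// elided in code like (class name invented):
//
//   void f() {
//     Widget w;
//     throw w;   // 'w' is a non-volatile automatic object still in scope, so
//   }            // it may be constructed directly into the exception object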
859
861 bool IsThrownVarInScope) {
862 const llvm::Triple &T = Context.getTargetInfo().getTriple();
863 const bool IsOpenMPGPUTarget =
864 getLangOpts().OpenMPIsTargetDevice && (T.isNVPTX() || T.isAMDGCN());
865 // Don't report an error if 'throw' is used in system headers or in an OpenMP
866 // target region compiled for a GPU architecture.
867 if (!IsOpenMPGPUTarget && !getLangOpts().CXXExceptions &&
868 !getSourceManager().isInSystemHeader(OpLoc) && !getLangOpts().CUDA) {
869 // Delay error emission for the OpenMP device code.
870 targetDiag(OpLoc, diag::err_exceptions_disabled) << "throw";
871 }
872
873 // In OpenMP target regions, we replace 'throw' with a trap on GPU targets.
874 if (IsOpenMPGPUTarget)
875 targetDiag(OpLoc, diag::warn_throw_not_valid_on_target) << T.str();
876
877 // Exceptions aren't allowed in CUDA device code.
878 if (getLangOpts().CUDA)
879 CUDA().DiagIfDeviceCode(OpLoc, diag::err_cuda_device_exceptions)
880 << "throw" << llvm::to_underlying(CUDA().CurrentTarget());
881
882 if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope())
883 Diag(OpLoc, diag::err_omp_simd_region_cannot_use_stmt) << "throw";
884
885 // Exceptions that escape a compute construct are ill-formed.
886 if (getLangOpts().OpenACC && getCurScope() &&
887 getCurScope()->isInOpenACCComputeConstructScope(Scope::TryScope))
888 Diag(OpLoc, diag::err_acc_branch_in_out_compute_construct)
889 << /*throw*/ 2 << /*out of*/ 0;
890
891 if (Ex && !Ex->isTypeDependent()) {
892 // Initialize the exception result. This implicitly weeds out
893 // abstract types or types with inaccessible copy constructors.
894
895 // C++0x [class.copymove]p31:
896 // When certain criteria are met, an implementation is allowed to omit the
897 // copy/move construction of a class object [...]
898 //
899 // - in a throw-expression, when the operand is the name of a
900 // non-volatile automatic object (other than a function or
901 // catch-clause
902 // parameter) whose scope does not extend beyond the end of the
903 // innermost enclosing try-block (if there is one), the copy/move
904 // operation from the operand to the exception object (15.1) can be
905 // omitted by constructing the automatic object directly into the
906 // exception object
907 NamedReturnInfo NRInfo =
908 IsThrownVarInScope ? getNamedReturnInfo(Ex) : NamedReturnInfo();
909
910 QualType ExceptionObjectTy = Context.getExceptionObjectType(Ex->getType());
911 if (CheckCXXThrowOperand(OpLoc, ExceptionObjectTy, Ex))
912 return ExprError();
913
914 InitializedEntity Entity =
915 InitializedEntity::InitializeException(OpLoc, ExceptionObjectTy);
916 ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRInfo, Ex);
917 if (Res.isInvalid())
918 return ExprError();
919 Ex = Res.get();
920 }
921
922 // PPC MMA non-pointer types are not allowed as throw expr types.
923 if (Ex && Context.getTargetInfo().getTriple().isPPC64())
924 PPC().CheckPPCMMAType(Ex->getType(), Ex->getBeginLoc());
925
926 return new (Context)
927 CXXThrowExpr(Ex, Context.VoidTy, OpLoc, IsThrownVarInScope);
928}
929
930static void
932 llvm::DenseMap<CXXRecordDecl *, unsigned> &SubobjectsSeen,
933 llvm::SmallPtrSetImpl<CXXRecordDecl *> &VBases,
934 llvm::SetVector<CXXRecordDecl *> &PublicSubobjectsSeen,
935 bool ParentIsPublic) {
936 for (const CXXBaseSpecifier &BS : RD->bases()) {
937 CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
938 bool NewSubobject;
939 // Virtual bases constitute the same subobject. Non-virtual bases are
940 // always distinct subobjects.
941 if (BS.isVirtual())
942 NewSubobject = VBases.insert(BaseDecl).second;
943 else
944 NewSubobject = true;
945
946 if (NewSubobject)
947 ++SubobjectsSeen[BaseDecl];
948
949 // Only add subobjects which have public access throughout the entire chain.
950 bool PublicPath = ParentIsPublic && BS.getAccessSpecifier() == AS_public;
951 if (PublicPath)
952 PublicSubobjectsSeen.insert(BaseDecl);
953
954 // Recurse on to each base subobject.
955 collectPublicBases(BaseDecl, SubobjectsSeen, VBases, PublicSubobjectsSeen,
956 PublicPath);
957 }
958}
959
962 llvm::DenseMap<CXXRecordDecl *, unsigned> SubobjectsSeen;
963 llvm::SmallSet<CXXRecordDecl *, 2> VBases;
964 llvm::SetVector<CXXRecordDecl *> PublicSubobjectsSeen;
965 SubobjectsSeen[RD] = 1;
966 PublicSubobjectsSeen.insert(RD);
967 collectPublicBases(RD, SubobjectsSeen, VBases, PublicSubobjectsSeen,
968 /*ParentIsPublic=*/true);
969
970 for (CXXRecordDecl *PublicSubobject : PublicSubobjectsSeen) {
971 // Skip ambiguous objects.
972 if (SubobjectsSeen[PublicSubobject] > 1)
973 continue;
974
975 Objects.push_back(PublicSubobject);
976 }
977}
978
980 QualType ExceptionObjectTy, Expr *E) {
981 // If the type of the exception would be an incomplete type or a pointer
982 // to an incomplete type other than (cv) void the program is ill-formed.
983 QualType Ty = ExceptionObjectTy;
984 bool isPointer = false;
985 if (const PointerType* Ptr = Ty->getAs<PointerType>()) {
986 Ty = Ptr->getPointeeType();
987 isPointer = true;
988 }
989
990 // Cannot throw WebAssembly reference type.
992 Diag(ThrowLoc, diag::err_wasm_reftype_tc) << 0 << E->getSourceRange();
993 return true;
994 }
995
996 // Cannot throw WebAssembly table.
997 if (isPointer && Ty.isWebAssemblyReferenceType()) {
998 Diag(ThrowLoc, diag::err_wasm_table_art) << 2 << E->getSourceRange();
999 return true;
1000 }
1001
1002 if (!isPointer || !Ty->isVoidType()) {
1003 if (RequireCompleteType(ThrowLoc, Ty,
1004 isPointer ? diag::err_throw_incomplete_ptr
1005 : diag::err_throw_incomplete,
1006 E->getSourceRange()))
1007 return true;
1008
1009 if (!isPointer && Ty->isSizelessType()) {
1010 Diag(ThrowLoc, diag::err_throw_sizeless) << Ty << E->getSourceRange();
1011 return true;
1012 }
1013
1014 if (RequireNonAbstractType(ThrowLoc, ExceptionObjectTy,
1015 diag::err_throw_abstract_type, E))
1016 return true;
1017 }
1018
1019 // If the exception has class type, we need additional handling.
1021 if (!RD)
1022 return false;
1023
1024 // If we are throwing a polymorphic class type or pointer thereof,
1025 // exception handling will make use of the vtable.
1026 MarkVTableUsed(ThrowLoc, RD);
1027
1028 // If a pointer is thrown, the referenced object will not be destroyed.
1029 if (isPointer)
1030 return false;
1031
1032 // If the class has a destructor, we must be able to call it.
1033 if (!RD->hasIrrelevantDestructor()) {
1037 PDiag(diag::err_access_dtor_exception) << Ty);
1039 return true;
1040 }
1041 }
1042
1043 // The MSVC ABI creates a list of all types which can catch the exception
1044 // object. This list also references the appropriate copy constructor to call
1045 // if the object is caught by value and has a non-trivial copy constructor.
1047 // We are only interested in the public, unambiguous bases contained within
1048 // the exception object. Bases which are ambiguous or otherwise
1049 // inaccessible are not catchable types.
1050 llvm::SmallVector<CXXRecordDecl *, 2> UnambiguousPublicSubobjects;
1051 getUnambiguousPublicSubobjects(RD, UnambiguousPublicSubobjects);
1052
1053 for (CXXRecordDecl *Subobject : UnambiguousPublicSubobjects) {
1054 // Attempt to lookup the copy constructor. Various pieces of machinery
1055 // will spring into action, like template instantiation, which means this
1056 // cannot be a simple walk of the class's decls. Instead, we must perform
1057 // lookup and overload resolution.
1058 CXXConstructorDecl *CD = LookupCopyingConstructor(Subobject, 0);
1059 if (!CD || CD->isDeleted())
1060 continue;
1061
1062 // Mark the constructor referenced as it is used by this throw expression.
1064
1065 // Skip this copy constructor if it is trivial, we don't need to record it
1066 // in the catchable type data.
1067 if (CD->isTrivial())
1068 continue;
1069
1070 // The copy constructor is non-trivial, create a mapping from this class
1071 // type to this constructor.
1072 // N.B. The selection of copy constructor is not sensitive to this
1073 // particular throw-site. Lookup will be performed at the catch-site to
1074 // ensure that the copy constructor is, in fact, accessible (via
1075 // friendship or any other means).
1077
1078 // We don't keep the instantiated default argument expressions around so
1079 // we must rebuild them here.
1080 for (unsigned I = 1, E = CD->getNumParams(); I != E; ++I) {
1081 if (CheckCXXDefaultArgExpr(ThrowLoc, CD, CD->getParamDecl(I)))
1082 return true;
1083 }
1084 }
1085 }
1086
1087 // Under the Itanium C++ ABI, memory for the exception object is allocated by
1088 // the runtime with no ability for the compiler to request additional
1089 // alignment. Warn if the exception type requires alignment beyond the minimum
1090 // guaranteed by the target C++ runtime.
1092 CharUnits TypeAlign = Context.getTypeAlignInChars(Ty);
1093 CharUnits ExnObjAlign = Context.getExnObjectAlignment();
1094 if (ExnObjAlign < TypeAlign) {
1095 Diag(ThrowLoc, diag::warn_throw_underaligned_obj);
1096 Diag(ThrowLoc, diag::note_throw_underaligned_obj)
1097 << Ty << (unsigned)TypeAlign.getQuantity()
1098 << (unsigned)ExnObjAlign.getQuantity();
1099 }
1100 }
1101 if (!isPointer && getLangOpts().AssumeNothrowExceptionDtor) {
1102 if (CXXDestructorDecl *Dtor = RD->getDestructor()) {
1103 auto Ty = Dtor->getType();
1104 if (auto *FT = Ty.getTypePtr()->getAs<FunctionProtoType>()) {
1105 if (!isUnresolvedExceptionSpec(FT->getExceptionSpecType()) &&
1106 !FT->isNothrow())
1107 Diag(ThrowLoc, diag::err_throw_object_throwing_dtor) << RD;
1108 }
1109 }
1110 }
1111
1112 return false;
1113}
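// Illustrative operands rejected or warned about above (types invented):
//
//   struct Incomplete;
//   void f(Incomplete &r) { throw r; }    // error: incomplete type thrown
//
//   struct alignas(64) Overaligned {};
//   void g() { throw Overaligned(); }     // may warn if the C++ runtime only
//                                         // guarantees a smaller alignment
//                                         // for the exception object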
1114
1116 ArrayRef<FunctionScopeInfo *> FunctionScopes, QualType ThisTy,
1117 DeclContext *CurSemaContext, ASTContext &ASTCtx) {
1118
1119 QualType ClassType = ThisTy->getPointeeType();
1120 LambdaScopeInfo *CurLSI = nullptr;
1121 DeclContext *CurDC = CurSemaContext;
1122
1123 // Iterate through the stack of lambdas starting from the innermost lambda to
1124 // the outermost lambda, checking if '*this' is ever captured by copy - since
1125 // that could change the cv-qualifiers of the '*this' object.
1126 // The object referred to by '*this' starts out with the cv-qualifiers of its
1127 // member function. We then start with the innermost lambda and iterate
1128 // outward checking to see if any lambda performs a by-copy capture of '*this'
1129 // - and if so, any nested lambda must respect the 'constness' of that
1130 // capturing lambda's call operator.
1131 //
1132
1133 // Since the FunctionScopeInfo stack is representative of the lexical
1134 // nesting of the lambda expressions during initial parsing (and is the best
1135 // place for querying information about captures about lambdas that are
1136 // partially processed) and perhaps during instantiation of function templates
1137 // that contain lambda expressions that need to be transformed BUT not
1138 // necessarily during instantiation of a nested generic lambda's function call
1139 // operator (which might even be instantiated at the end of the TU) - at which
1140 // time the DeclContext tree is mature enough to query capture information
1141 // reliably - we use a two pronged approach to walk through all the lexically
1142 // enclosing lambda expressions:
1143 //
1144 // 1) Climb down the FunctionScopeInfo stack as long as each item represents
1145 // a Lambda (i.e. LambdaScopeInfo) AND each LSI's 'closure-type' is lexically
1146 // enclosed by the call-operator of the LSI below it on the stack (while
1147 // tracking the enclosing DC for step 2 if needed). Note the topmost LSI on
1148 // the stack represents the innermost lambda.
1149 //
1150 // 2) If we run out of enclosing LSI's, check if the enclosing DeclContext
1151 // represents a lambda's call operator. If it does, we must be instantiating
1152 // a generic lambda's call operator (represented by the Current LSI, and
1153 // should be the only scenario where an inconsistency between the LSI and the
1154 // DeclContext should occur), so climb out the DeclContexts if they
1155 // represent lambdas, while querying the corresponding closure types
1156 // regarding capture information.
1157
1158 // 1) Climb down the function scope info stack.
1159 for (int I = FunctionScopes.size();
1160 I-- && isa<LambdaScopeInfo>(FunctionScopes[I]) &&
1161 (!CurLSI || !CurLSI->Lambda || CurLSI->Lambda->getDeclContext() ==
1162 cast<LambdaScopeInfo>(FunctionScopes[I])->CallOperator);
1163 CurDC = getLambdaAwareParentOfDeclContext(CurDC)) {
1164 CurLSI = cast<LambdaScopeInfo>(FunctionScopes[I]);
1165
1166 if (!CurLSI->isCXXThisCaptured())
1167 continue;
1168
1169 auto C = CurLSI->getCXXThisCapture();
1170
1171 if (C.isCopyCapture()) {
1172 if (CurLSI->lambdaCaptureShouldBeConst())
1173 ClassType.addConst();
1174 return ASTCtx.getPointerType(ClassType);
1175 }
1176 }
1177
1178 // 2) We've run out of ScopeInfos but check 1. if CurDC is a lambda (which
1179 // can happen during instantiation of its nested generic lambda call
1180 // operator); 2. if we're in a lambda scope (lambda body).
1181 if (CurLSI && isLambdaCallOperator(CurDC)) {
1183 "While computing 'this' capture-type for a generic lambda, when we "
1184 "run out of enclosing LSI's, yet the enclosing DC is a "
1185 "lambda-call-operator we must be (i.e. Current LSI) in a generic "
1186 "lambda call oeprator");
1187 assert(CurDC == getLambdaAwareParentOfDeclContext(CurLSI->CallOperator));
1188
1189 auto IsThisCaptured =
1190 [](CXXRecordDecl *Closure, bool &IsByCopy, bool &IsConst) {
1191 IsConst = false;
1192 IsByCopy = false;
1193 for (auto &&C : Closure->captures()) {
1194 if (C.capturesThis()) {
1195 if (C.getCaptureKind() == LCK_StarThis)
1196 IsByCopy = true;
1197 if (Closure->getLambdaCallOperator()->isConst())
1198 IsConst = true;
1199 return true;
1200 }
1201 }
1202 return false;
1203 };
1204
1205 bool IsByCopyCapture = false;
1206 bool IsConstCapture = false;
1207 CXXRecordDecl *Closure = cast<CXXRecordDecl>(CurDC->getParent());
1208 while (Closure &&
1209 IsThisCaptured(Closure, IsByCopyCapture, IsConstCapture)) {
1210 if (IsByCopyCapture) {
1211 if (IsConstCapture)
1212 ClassType.addConst();
1213 return ASTCtx.getPointerType(ClassType);
1214 }
1215 Closure = isLambdaCallOperator(Closure->getParent())
1216 ? cast<CXXRecordDecl>(Closure->getParent()->getParent())
1217 : nullptr;
1218 }
1219 }
1220 return ThisTy;
1221}
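// A small illustration of the adjustment performed above:
//
//   struct S {
//     void f() {
//       [*this] {             // non-mutable lambda capturing '*this' by copy
//         // inside the body, 'this' designates the captured copy, which is
//         // const here, so its type is adjusted to 'const S *'
//       };
//     }
//   };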
1222
1226
1227 if (CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(DC)) {
1228 if (method && method->isImplicitObjectMemberFunction())
1229 ThisTy = method->getThisType().getNonReferenceType();
1230 }
1231
1233 inTemplateInstantiation() && isa<CXXRecordDecl>(DC)) {
1234
1235 // This is a lambda call operator that is being instantiated as a default
1236 // initializer. DC must point to the enclosing class type, so we can recover
1237 // the 'this' type from it.
1238 QualType ClassTy = Context.getTypeDeclType(cast<CXXRecordDecl>(DC));
1239 // There are no cv-qualifiers for 'this' within default initializers,
1240 // per [expr.prim.general]p4.
1241 ThisTy = Context.getPointerType(ClassTy);
1242 }
1243
1244 // If we are within a lambda's call operator, the cv-qualifiers of 'this'
1245 // might need to be adjusted if the lambda or any of its enclosing lambda's
1246 // captures '*this' by copy.
1247 if (!ThisTy.isNull() && isLambdaCallOperator(CurContext))
1250 return ThisTy;
1251}
1252
1254 Decl *ContextDecl,
1255 Qualifiers CXXThisTypeQuals,
1256 bool Enabled)
1257 : S(S), OldCXXThisTypeOverride(S.CXXThisTypeOverride), Enabled(false)
1258{
1259 if (!Enabled || !ContextDecl)
1260 return;
1261
1262 CXXRecordDecl *Record = nullptr;
1263 if (ClassTemplateDecl *Template = dyn_cast<ClassTemplateDecl>(ContextDecl))
1264 Record = Template->getTemplatedDecl();
1265 else
1266 Record = cast<CXXRecordDecl>(ContextDecl);
1267
1269 T = S.getASTContext().getQualifiedType(T, CXXThisTypeQuals);
1270
1272 S.Context.getLangOpts().HLSL ? T : S.Context.getPointerType(T);
1273
1274 this->Enabled = true;
1275}
1276
1277
1279 if (Enabled) {
1280 S.CXXThisTypeOverride = OldCXXThisTypeOverride;
1281 }
1282}
1283
1285 SourceLocation DiagLoc = LSI->IntroducerRange.getEnd();
1286 assert(!LSI->isCXXThisCaptured());
1287 // [=, this] {}; // until C++20: Error: this when = is the default
1289 !Sema.getLangOpts().CPlusPlus20)
1290 return;
1291 Sema.Diag(DiagLoc, diag::note_lambda_this_capture_fixit)
1293 DiagLoc, LSI->NumExplicitCaptures > 0 ? ", this" : "this");
1294}
1295
1297 bool BuildAndDiagnose, const unsigned *const FunctionScopeIndexToStopAt,
1298 const bool ByCopy) {
1299 // We don't need to capture this in an unevaluated context.
1300 if (isUnevaluatedContext() && !Explicit)
1301 return true;
1302
1303 assert((!ByCopy || Explicit) && "cannot implicitly capture *this by value");
1304
1305 const int MaxFunctionScopesIndex = FunctionScopeIndexToStopAt
1306 ? *FunctionScopeIndexToStopAt
1307 : FunctionScopes.size() - 1;
1308
1309 // Check that we can capture the *enclosing object* (referred to by '*this')
1310 // by the capturing-entity/closure (lambda/block/etc) at
1311 // MaxFunctionScopesIndex-deep on the FunctionScopes stack.
1312
1313 // Note: The *enclosing object* can only be captured by-value by a
1314 // closure that is a lambda, using the explicit notation:
1315 // [*this] { ... }.
1316 // Every other capture of the *enclosing object* results in its by-reference
1317 // capture.
1318
1319 // For a closure 'L' (at MaxFunctionScopesIndex in the FunctionScopes
1320 // stack), we can capture the *enclosing object* only if:
1321 // - 'L' has an explicit byref or byval capture of the *enclosing object*
1322 // - or, 'L' has an implicit capture.
1323 // AND
1324 // -- there is no enclosing closure
1325 // -- or, there is some enclosing closure 'E' that has already captured the
1326 // *enclosing object*, and every intervening closure (if any) between 'E'
1327 // and 'L' can implicitly capture the *enclosing object*.
1328 // -- or, every enclosing closure can implicitly capture the
1329 // *enclosing object*
1330
1331
1332 unsigned NumCapturingClosures = 0;
1333 for (int idx = MaxFunctionScopesIndex; idx >= 0; idx--) {
1334 if (CapturingScopeInfo *CSI =
1335 dyn_cast<CapturingScopeInfo>(FunctionScopes[idx])) {
1336 if (CSI->CXXThisCaptureIndex != 0) {
1337 // 'this' is already being captured; there isn't anything more to do.
1338 CSI->Captures[CSI->CXXThisCaptureIndex - 1].markUsed(BuildAndDiagnose);
1339 break;
1340 }
1341 LambdaScopeInfo *LSI = dyn_cast<LambdaScopeInfo>(CSI);
1343 // This context can't implicitly capture 'this'; fail out.
1344 if (BuildAndDiagnose) {
1346 Diag(Loc, diag::err_this_capture)
1347 << (Explicit && idx == MaxFunctionScopesIndex);
1348 if (!Explicit)
1349 buildLambdaThisCaptureFixit(*this, LSI);
1350 }
1351 return true;
1352 }
1353 if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByref ||
1354 CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByval ||
1355 CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_Block ||
1356 CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_CapturedRegion ||
1357 (Explicit && idx == MaxFunctionScopesIndex)) {
1358 // Regarding (Explicit && idx == MaxFunctionScopesIndex): only the first
1359 // iteration through can be an explicit capture, all enclosing closures,
1360 // if any, must perform implicit captures.
1361
1362 // This closure can capture 'this'; continue looking upwards.
1363 NumCapturingClosures++;
1364 continue;
1365 }
1366 // This context can't implicitly capture 'this'; fail out.
1367 if (BuildAndDiagnose) {
1369 Diag(Loc, diag::err_this_capture)
1370 << (Explicit && idx == MaxFunctionScopesIndex);
1371 }
1372 if (!Explicit)
1373 buildLambdaThisCaptureFixit(*this, LSI);
1374 return true;
1375 }
1376 break;
1377 }
1378 if (!BuildAndDiagnose) return false;
1379
1380 // If we got here, then the closure at MaxFunctionScopesIndex on the
1381 // FunctionScopes stack, can capture the *enclosing object*, so capture it
1382 // (including implicit by-reference captures in any enclosing closures).
1383
1384 // In the loop below, respect the ByCopy flag only for the closure requesting
1385 // the capture (i.e. first iteration through the loop below). Ignore it for
1386 // all enclosing closures up to NumCapturingClosures (since they must be
1387 // implicitly capturing the *enclosing object* by reference (see loop
1388 // above)).
1389 assert((!ByCopy ||
1390 isa<LambdaScopeInfo>(FunctionScopes[MaxFunctionScopesIndex])) &&
1391 "Only a lambda can capture the enclosing object (referred to by "
1392 "*this) by copy");
1393 QualType ThisTy = getCurrentThisType();
1394 for (int idx = MaxFunctionScopesIndex; NumCapturingClosures;
1395 --idx, --NumCapturingClosures) {
1396 CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FunctionScopes[idx]);
1397
1398 // The type of the corresponding data member (not a 'this' pointer if 'by
1399 // copy').
1400 QualType CaptureType = ByCopy ? ThisTy->getPointeeType() : ThisTy;
1401
1402 bool isNested = NumCapturingClosures > 1;
1403 CSI->addThisCapture(isNested, Loc, CaptureType, ByCopy);
1404 }
1405 return false;
1406}
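// For illustration, the capture rules enforced above:
//
//   struct S {
//     int n;
//     void f() {
//       [&] { return n; };      // OK: 'this' is captured implicitly
//       [this] { return n; };   // OK: explicit capture of 'this'
//       [] { return n; };       // error: 'this' cannot be implicitly captured
//     }
//   };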
1407
1409 // C++20 [expr.prim.this]p1:
1410 // The keyword this names a pointer to the object for which an
1411 // implicit object member function is invoked or a non-static
1412 // data member's initializer is evaluated.
1413 QualType ThisTy = getCurrentThisType();
1414
1415 if (CheckCXXThisType(Loc, ThisTy))
1416 return ExprError();
1417
1418 return BuildCXXThisExpr(Loc, ThisTy, /*IsImplicit=*/false);
1419}
1420
1422 if (!Type.isNull())
1423 return false;
1424
1425 // C++20 [expr.prim.this]p3:
1426 // If a declaration declares a member function or member function template
1427 // of a class X, the expression this is a prvalue of type
1428 // "pointer to cv-qualifier-seq X" wherever X is the current class between
1429 // the optional cv-qualifier-seq and the end of the function-definition,
1430 // member-declarator, or declarator. It shall not appear within the
1431 // declaration of either a static member function or an explicit object
1432 // member function of the current class (although its type and value
1433 // category are defined within such member functions as they are within
1434 // an implicit object member function).
1436 const auto *Method = dyn_cast<CXXMethodDecl>(DC);
1437 if (Method && Method->isExplicitObjectMemberFunction()) {
1438 Diag(Loc, diag::err_invalid_this_use) << 1;
1440 Diag(Loc, diag::err_invalid_this_use) << 1;
1441 } else {
1442 Diag(Loc, diag::err_invalid_this_use) << 0;
1443 }
1444 return true;
1445}
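// Illustrative uses rejected above:
//
//   struct S {
//     static void f() { (void)this; }      // error: 'this' in a static
//                                          // member function
//     void g(this S self) { (void)this; }  // error: 'this' in an explicit
//                                          // object member function
//   };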
1446
1448 bool IsImplicit) {
1449 auto *This = CXXThisExpr::Create(Context, Loc, Type, IsImplicit);
1450 MarkThisReferenced(This);
1451 return This;
1452}
1453
1455 CheckCXXThisCapture(This->getExprLoc());
1456 if (This->isTypeDependent())
1457 return;
1458
1459 // Check if 'this' is captured by value in a lambda with a dependent explicit
1460 // object parameter, and mark it as type-dependent as well if so.
1461 auto IsDependent = [&]() {
1462 for (auto *Scope : llvm::reverse(FunctionScopes)) {
1463 auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope);
1464 if (!LSI)
1465 continue;
1466
1467 if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext) &&
1468 LSI->AfterParameterList)
1469 return false;
1470
1471 // If this lambda captures 'this' by value, then 'this' is dependent iff
1472 // this lambda has a dependent explicit object parameter. If we can't
1473 // determine whether it does (e.g. because the CXXMethodDecl's type is
1474 // null), assume it doesn't.
1475 if (LSI->isCXXThisCaptured()) {
1476 if (!LSI->getCXXThisCapture().isCopyCapture())
1477 continue;
1478
1479 const auto *MD = LSI->CallOperator;
1480 if (MD->getType().isNull())
1481 return false;
1482
1483 const auto *Ty = MD->getType()->getAs<FunctionProtoType>();
1484 return Ty && MD->isExplicitObjectMemberFunction() &&
1485 Ty->getParamType(0)->isDependentType();
1486 }
1487 }
1488 return false;
1489 }();
1490
1491 This->setCapturedByCopyInLambdaWithExplicitObjectParameter(IsDependent);
1492}
1493
1495 // If we're outside the body of a member function, then we'll have a specified
1496 // type for 'this'.
1498 return false;
1499
1500 // Determine whether we're looking into a class that's currently being
1501 // defined.
1502 CXXRecordDecl *Class = BaseType->getAsCXXRecordDecl();
1503 return Class && Class->isBeingDefined();
1504}
1505
1508 SourceLocation LParenOrBraceLoc,
1509 MultiExprArg exprs,
1510 SourceLocation RParenOrBraceLoc,
1511 bool ListInitialization) {
1512 if (!TypeRep)
1513 return ExprError();
1514
1515 TypeSourceInfo *TInfo;
1516 QualType Ty = GetTypeFromParser(TypeRep, &TInfo);
1517 if (!TInfo)
1519
1520 auto Result = BuildCXXTypeConstructExpr(TInfo, LParenOrBraceLoc, exprs,
1521 RParenOrBraceLoc, ListInitialization);
1522 // Avoid creating a non-type-dependent expression that contains typos.
1523 // Non-type-dependent expressions are liable to be discarded without
1524 // checking for embedded typos.
1525 if (!Result.isInvalid() && Result.get()->isInstantiationDependent() &&
1526 !Result.get()->isTypeDependent())
1528 else if (Result.isInvalid())
1530 RParenOrBraceLoc, exprs, Ty);
1531 return Result;
1532}
1533
1536 SourceLocation LParenOrBraceLoc,
1537 MultiExprArg Exprs,
1538 SourceLocation RParenOrBraceLoc,
1539 bool ListInitialization) {
1540 QualType Ty = TInfo->getType();
1541 SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc();
1542
1543 assert((!ListInitialization || Exprs.size() == 1) &&
1544 "List initialization must have exactly one expression.");
1545 SourceRange FullRange = SourceRange(TyBeginLoc, RParenOrBraceLoc);
1546
1547 InitializedEntity Entity =
1549 InitializationKind Kind =
1550 Exprs.size()
1551 ? ListInitialization
1553 TyBeginLoc, LParenOrBraceLoc, RParenOrBraceLoc)
1554 : InitializationKind::CreateDirect(TyBeginLoc, LParenOrBraceLoc,
1555 RParenOrBraceLoc)
1556 : InitializationKind::CreateValue(TyBeginLoc, LParenOrBraceLoc,
1557 RParenOrBraceLoc);
1558
1559 // C++17 [expr.type.conv]p1:
1560 // If the type is a placeholder for a deduced class type, [...perform class
1561 // template argument deduction...]
1562 // C++23:
1563 // Otherwise, if the type contains a placeholder type, it is replaced by the
1564 // type determined by placeholder type deduction.
1565 DeducedType *Deduced = Ty->getContainedDeducedType();
1566 if (Deduced && !Deduced->isDeduced() &&
1567 isa<DeducedTemplateSpecializationType>(Deduced)) {
1569 Kind, Exprs);
1570 if (Ty.isNull())
1571 return ExprError();
1572 Entity = InitializedEntity::InitializeTemporary(TInfo, Ty);
1573 } else if (Deduced && !Deduced->isDeduced()) {
1574 MultiExprArg Inits = Exprs;
1575 if (ListInitialization) {
1576 auto *ILE = cast<InitListExpr>(Exprs[0]);
1577 Inits = MultiExprArg(ILE->getInits(), ILE->getNumInits());
1578 }
1579
1580 if (Inits.empty())
1581 return ExprError(Diag(TyBeginLoc, diag::err_auto_expr_init_no_expression)
1582 << Ty << FullRange);
1583 if (Inits.size() > 1) {
1584 Expr *FirstBad = Inits[1];
1585 return ExprError(Diag(FirstBad->getBeginLoc(),
1586 diag::err_auto_expr_init_multiple_expressions)
1587 << Ty << FullRange);
1588 }
1589 if (getLangOpts().CPlusPlus23) {
1590 if (Ty->getAs<AutoType>())
1591 Diag(TyBeginLoc, diag::warn_cxx20_compat_auto_expr) << FullRange;
1592 }
1593 Expr *Deduce = Inits[0];
1594 if (isa<InitListExpr>(Deduce))
1595 return ExprError(
1596 Diag(Deduce->getBeginLoc(), diag::err_auto_expr_init_paren_braces)
1597 << ListInitialization << Ty << FullRange);
1599 TemplateDeductionInfo Info(Deduce->getExprLoc());
1601 DeduceAutoType(TInfo->getTypeLoc(), Deduce, DeducedType, Info);
1604 return ExprError(Diag(TyBeginLoc, diag::err_auto_expr_deduction_failure)
1605 << Ty << Deduce->getType() << FullRange
1606 << Deduce->getSourceRange());
1607 if (DeducedType.isNull()) {
1609 return ExprError();
1610 }
1611
1612 Ty = DeducedType;
1613 Entity = InitializedEntity::InitializeTemporary(TInfo, Ty);
1614 }
1615
1618 Context, Ty.getNonReferenceType(), TInfo, LParenOrBraceLoc, Exprs,
1619 RParenOrBraceLoc, ListInitialization);
1620
1621 // C++ [expr.type.conv]p1:
1622 // If the expression list is a parenthesized single expression, the type
1623 // conversion expression is equivalent (in definedness, and if defined in
1624 // meaning) to the corresponding cast expression.
1625 if (Exprs.size() == 1 && !ListInitialization &&
1626 !isa<InitListExpr>(Exprs[0])) {
1627 Expr *Arg = Exprs[0];
1628 return BuildCXXFunctionalCastExpr(TInfo, Ty, LParenOrBraceLoc, Arg,
1629 RParenOrBraceLoc);
1630 }
1631
1632 // For an expression of the form T(), T shall not be an array type.
1633 QualType ElemTy = Ty;
1634 if (Ty->isArrayType()) {
1635 if (!ListInitialization)
1636 return ExprError(Diag(TyBeginLoc, diag::err_value_init_for_array_type)
1637 << FullRange);
1638 ElemTy = Context.getBaseElementType(Ty);
1639 }
1640
1641 // Only construct objects with object types.
1642 // The standard doesn't explicitly forbid function types here, but that's an
1643 // obvious oversight, as there's no way to dynamically construct a function
1644 // in general.
1645 if (Ty->isFunctionType())
1646 return ExprError(Diag(TyBeginLoc, diag::err_init_for_function_type)
1647 << Ty << FullRange);
1648
1649 // C++17 [expr.type.conv]p2:
1650 // If the type is cv void and the initializer is (), the expression is a
1651 // prvalue of the specified type that performs no initialization.
1652 if (!Ty->isVoidType() &&
1653 RequireCompleteType(TyBeginLoc, ElemTy,
1654 diag::err_invalid_incomplete_type_use, FullRange))
1655 return ExprError();
1656
1657 // Otherwise, the expression is a prvalue of the specified type whose
1658 // result object is direct-initialized (11.6) with the initializer.
1659 InitializationSequence InitSeq(*this, Entity, Kind, Exprs);
1660 ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Exprs);
1661
1662 if (Result.isInvalid())
1663 return Result;
1664
1665 Expr *Inner = Result.get();
1666 if (CXXBindTemporaryExpr *BTE = dyn_cast_or_null<CXXBindTemporaryExpr>(Inner))
1667 Inner = BTE->getSubExpr();
1668 if (auto *CE = dyn_cast<ConstantExpr>(Inner);
1669 CE && CE->isImmediateInvocation())
1670 Inner = CE->getSubExpr();
1671 if (!isa<CXXTemporaryObjectExpr>(Inner) &&
1672 !isa<CXXScalarValueInitExpr>(Inner)) {
1673 // If we created a CXXTemporaryObjectExpr, that node also represents the
1674 // functional cast. Otherwise, create an explicit cast to represent
1675 // the syntactic form of a functional-style cast that was used here.
1676 //
1677 // FIXME: Creating a CXXFunctionalCastExpr around a CXXConstructExpr
1678 // would give a more consistent AST representation than using a
1679 // CXXTemporaryObjectExpr. It's also weird that the functional cast
1680 // is sometimes handled by initialization and sometimes not.
1681 QualType ResultType = Result.get()->getType();
1682 SourceRange Locs = ListInitialization
1683 ? SourceRange()
1684 : SourceRange(LParenOrBraceLoc, RParenOrBraceLoc);
1685 Result = CXXFunctionalCastExpr::Create(
1686 Context, ResultType, Expr::getValueKindForType(Ty), TInfo, CK_NoOp,
1687 Result.get(), /*Path=*/nullptr, CurFPFeatureOverrides(),
1688 Locs.getBegin(), Locs.getEnd());
1689 }
1690
1691 return Result;
1692}
1693
1694 bool Sema::isUsualDeallocationFunction(const CXXMethodDecl *Method) {
1695 // [CUDA] Ignore this function, if we can't call it.
1696 const FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
1697 if (getLangOpts().CUDA) {
1698 auto CallPreference = CUDA().IdentifyPreference(Caller, Method);
1699 // If it's not callable at all, it's not the right function.
1700 if (CallPreference < SemaCUDA::CFP_WrongSide)
1701 return false;
1702 if (CallPreference == SemaCUDA::CFP_WrongSide) {
1703 // Maybe. We have to check if there are better alternatives.
1704 DeclContext::lookup_result R =
1705 Method->getDeclContext()->lookup(Method->getDeclName());
1706 for (const auto *D : R) {
1707 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
1708 if (CUDA().IdentifyPreference(Caller, FD) > SemaCUDA::CFP_WrongSide)
1709 return false;
1710 }
1711 }
1712 // We've found no better variants.
1713 }
1714 }
1715
1716 SmallVector<const FunctionDecl *, 4> PreventedBy;
1717 bool Result = Method->isUsualDeallocationFunction(PreventedBy);
1718
1719 if (Result || !getLangOpts().CUDA || PreventedBy.empty())
1720 return Result;
1721
1722 // In case of CUDA, return true if none of the 1-argument deallocator
1723 // functions are actually callable.
1724 return llvm::none_of(PreventedBy, [&](const FunctionDecl *FD) {
1725 assert(FD->getNumParams() == 1 &&
1726 "Only single-operand functions should be in PreventedBy");
1727 return CUDA().IdentifyPreference(Caller, FD) >= SemaCUDA::CFP_HostDevice;
1728 });
1729}
1730
1731/// Determine whether the given function is a non-placement
1732/// deallocation function.
1733 static bool isNonPlacementDeallocationFunction(Sema &S, FunctionDecl *FD) {
1734 if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FD))
1735 return S.isUsualDeallocationFunction(Method);
1736
1737 if (FD->getOverloadedOperator() != OO_Delete &&
1738 FD->getOverloadedOperator() != OO_Array_Delete)
1739 return false;
1740
1741 unsigned UsualParams = 1;
1742
1743 if (S.getLangOpts().SizedDeallocation && UsualParams < FD->getNumParams() &&
1744 S.Context.hasSameUnqualifiedType(
1745 FD->getParamDecl(UsualParams)->getType(),
1746 S.Context.getSizeType()))
1747 ++UsualParams;
1748
1749 if (S.getLangOpts().AlignedAllocation && UsualParams < FD->getNumParams() &&
1750 S.Context.hasSameUnqualifiedType(
1751 FD->getParamDecl(UsualParams)->getType(),
1752 S.Context.getTypeDeclType(S.getStdAlignValT())))
1753 ++UsualParams;
1754
1755 return UsualParams == FD->getNumParams();
1756}
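// Illustrative note (assumed forms, not from the clang sources): with sized
// and aligned deallocation enabled, the parameter-counting logic above treats
// the following global signatures as usual (non-placement) deallocation
// functions:
//   void operator delete(void *);
//   void operator delete(void *, std::size_t);
//   void operator delete(void *, std::align_val_t);
//   void operator delete(void *, std::size_t, std::align_val_t);
// Anything carrying additional placement parameters is not "usual".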
1757
1758namespace {
1759 struct UsualDeallocFnInfo {
1760 UsualDeallocFnInfo() : Found(), FD(nullptr) {}
1761 UsualDeallocFnInfo(Sema &S, DeclAccessPair Found)
1762 : Found(Found), FD(dyn_cast<FunctionDecl>(Found->getUnderlyingDecl())),
1763 Destroying(false), HasSizeT(false), HasAlignValT(false),
1764 CUDAPref(SemaCUDA::CFP_Native) {
1765 // A function template declaration is never a usual deallocation function.
1766 if (!FD)
1767 return;
1768 unsigned NumBaseParams = 1;
1769 if (FD->isDestroyingOperatorDelete()) {
1770 Destroying = true;
1771 ++NumBaseParams;
1772 }
1773
1774 if (NumBaseParams < FD->getNumParams() &&
1775 S.Context.hasSameUnqualifiedType(
1776 FD->getParamDecl(NumBaseParams)->getType(),
1777 S.Context.getSizeType())) {
1778 ++NumBaseParams;
1779 HasSizeT = true;
1780 }
1781
1782 if (NumBaseParams < FD->getNumParams() &&
1783 FD->getParamDecl(NumBaseParams)->getType()->isAlignValT()) {
1784 ++NumBaseParams;
1785 HasAlignValT = true;
1786 }
1787
1788 // In CUDA, determine how much we'd like / dislike to call this.
1789 if (S.getLangOpts().CUDA)
1790 CUDAPref = S.CUDA().IdentifyPreference(
1791 S.getCurFunctionDecl(/*AllowLambda=*/true), FD);
1792 }
1793
1794 explicit operator bool() const { return FD; }
1795
1796 bool isBetterThan(const UsualDeallocFnInfo &Other, bool WantSize,
1797 bool WantAlign) const {
1798 // C++ P0722:
1799 // A destroying operator delete is preferred over a non-destroying
1800 // operator delete.
1801 if (Destroying != Other.Destroying)
1802 return Destroying;
1803
1804 // C++17 [expr.delete]p10:
1805 // If the type has new-extended alignment, a function with a parameter
1806 // of type std::align_val_t is preferred; otherwise a function without
1807 // such a parameter is preferred
1808 if (HasAlignValT != Other.HasAlignValT)
1809 return HasAlignValT == WantAlign;
1810
1811 if (HasSizeT != Other.HasSizeT)
1812 return HasSizeT == WantSize;
1813
1814 // Use CUDA call preference as a tiebreaker.
1815 return CUDAPref > Other.CUDAPref;
1816 }
1817
1818 DeclAccessPair Found;
1819 FunctionDecl *FD;
1820 bool Destroying, HasSizeT, HasAlignValT;
1821 SemaCUDA::CUDAFunctionPreference CUDAPref;
1822 };
1823}
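// Illustrative note (hypothetical user code, not from the clang sources):
// given a class that declares several member deallocation functions,
//   struct T {
//     void operator delete(T *, std::destroying_delete_t);   // (1)
//     void operator delete(void *, std::align_val_t);        // (2)
//     void operator delete(void *, std::size_t);             // (3)
//     void operator delete(void *);                          // (4)
//   };
// isBetterThan() prefers (1) unconditionally, then (2) or (3) depending on
// whether the deleted type wants new-extended alignment or a size, with (4)
// as the fallback and CUDA call preference only as a tiebreaker.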
1824
1825/// Determine whether a type has new-extended alignment. This may be called when
1826/// the type is incomplete (for a delete-expression with an incomplete pointee
1827/// type), in which case it will conservatively return false if the alignment is
1828/// not known.
1829static bool hasNewExtendedAlignment(Sema &S, QualType AllocType) {
1830 return S.getLangOpts().AlignedAllocation &&
1831 S.getASTContext().getTypeAlignIfKnown(AllocType) >
1832 S.getASTContext().getTargetInfo().getNewAlign();
1833}
1834
1835/// Select the correct "usual" deallocation function to use from a selection of
1836/// deallocation functions (either global or class-scope).
1837static UsualDeallocFnInfo resolveDeallocationOverload(
1838 Sema &S, LookupResult &R, bool WantSize, bool WantAlign,
1839 llvm::SmallVectorImpl<UsualDeallocFnInfo> *BestFns = nullptr) {
1840 UsualDeallocFnInfo Best;
1841
1842 for (auto I = R.begin(), E = R.end(); I != E; ++I) {
1843 UsualDeallocFnInfo Info(S, I.getPair());
1844 if (!Info || !isNonPlacementDeallocationFunction(S, Info.FD) ||
1845 Info.CUDAPref == SemaCUDA::CFP_Never)
1846 continue;
1847
1848 if (!Best) {
1849 Best = Info;
1850 if (BestFns)
1851 BestFns->push_back(Info);
1852 continue;
1853 }
1854
1855 if (Best.isBetterThan(Info, WantSize, WantAlign))
1856 continue;
1857
1858 // If more than one preferred function is found, all non-preferred
1859 // functions are eliminated from further consideration.
1860 if (BestFns && Info.isBetterThan(Best, WantSize, WantAlign))
1861 BestFns->clear();
1862
1863 Best = Info;
1864 if (BestFns)
1865 BestFns->push_back(Info);
1866 }
1867
1868 return Best;
1869}
1870
1871/// Determine whether a given type is a class for which 'delete[]' would call
1872/// a member 'operator delete[]' with a 'size_t' parameter. This implies that
1873/// we need to store the array size (even if the type is
1874/// trivially-destructible).
1875 static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
1876 QualType allocType) {
1877 const RecordType *record =
1878 allocType->getBaseElementTypeUnsafe()->getAs<RecordType>();
1879 if (!record) return false;
1880
1881 // Try to find an operator delete[] in class scope.
1882
1883 DeclarationName deleteName =
1884 S.Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete);
1885 LookupResult ops(S, deleteName, loc, Sema::LookupOrdinaryName);
1886 S.LookupQualifiedName(ops, record->getDecl());
1887
1888 // We're just doing this for information.
1889 ops.suppressDiagnostics();
1890
1891 // Very likely: there's no operator delete[].
1892 if (ops.empty()) return false;
1893
1894 // If it's ambiguous, it should be illegal to call operator delete[]
1895 // on this thing, so it doesn't matter if we allocate extra space or not.
1896 if (ops.isAmbiguous()) return false;
1897
1898 // C++17 [expr.delete]p10:
1899 // If the deallocation functions have class scope, the one without a
1900 // parameter of type std::size_t is selected.
1901 auto Best = resolveDeallocationOverload(
1902 S, ops, /*WantSize*/false,
1903 /*WantAlign*/hasNewExtendedAlignment(S, allocType));
1904 return Best && Best.HasSizeT;
1905}
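// Illustrative note (hypothetical user code, not from the clang sources): for
//   struct S { void operator delete[](void *, std::size_t); };
//   S *p = new S[8];
// the selected class-scope operator delete[] takes a size, so the new[]
// expression must store the element count (an array cookie) even though S is
// trivially destructible.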
1906
1907 ExprResult
1908 Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
1909 SourceLocation PlacementLParen, MultiExprArg PlacementArgs,
1910 SourceLocation PlacementRParen, SourceRange TypeIdParens,
1911 Declarator &D, Expr *Initializer) {
1912 std::optional<Expr *> ArraySize;
1913 // If the specified type is an array, unwrap it and save the expression.
1914 if (D.getNumTypeObjects() > 0 &&
1915 D.getTypeObject(0).Kind == DeclaratorChunk::Array) {
1916 DeclaratorChunk &Chunk = D.getTypeObject(0);
1917 if (D.getDeclSpec().hasAutoTypeSpec())
1918 return ExprError(Diag(Chunk.Loc, diag::err_new_array_of_auto)
1919 << D.getSourceRange());
1920 if (Chunk.Arr.hasStatic)
1921 return ExprError(Diag(Chunk.Loc, diag::err_static_illegal_in_new)
1922 << D.getSourceRange());
1923 if (!Chunk.Arr.NumElts && !Initializer)
1924 return ExprError(Diag(Chunk.Loc, diag::err_array_new_needs_size)
1925 << D.getSourceRange());
1926
1927 ArraySize = static_cast<Expr*>(Chunk.Arr.NumElts);
1928 D.DropFirstTypeObject();
1929 }
1930
1931 // Every dimension shall be of constant size.
1932 if (ArraySize) {
1933 for (unsigned I = 0, N = D.getNumTypeObjects(); I < N; ++I) {
1934 if (D.getTypeObject(I).Kind != DeclaratorChunk::Array)
1935 break;
1936
1937 DeclaratorChunk::ArrayTypeInfo &Array = D.getTypeObject(I).Arr;
1938 if (Expr *NumElts = (Expr *)Array.NumElts) {
1939 if (!NumElts->isTypeDependent() && !NumElts->isValueDependent()) {
1940 // FIXME: GCC permits constant folding here. We should either do so consistently
1941 // or not do so at all, rather than changing behavior in C++14 onwards.
1942 if (getLangOpts().CPlusPlus14) {
1943 // C++1y [expr.new]p6: Every constant-expression in a noptr-new-declarator
1944 // shall be a converted constant expression (5.19) of type std::size_t
1945 // and shall evaluate to a strictly positive value.
1946 llvm::APSInt Value(Context.getIntWidth(Context.getSizeType()));
1947 Array.NumElts
1948 = CheckConvertedConstantExpression(NumElts, Context.getSizeType(),
1949 Value, CCEK_ArrayBound)
1950 .get();
1951 } else {
1952 Array.NumElts =
1953 VerifyIntegerConstantExpression(
1954 NumElts, nullptr, diag::err_new_array_nonconst, AllowFold)
1955 .get();
1956 }
1957 if (!Array.NumElts)
1958 return ExprError();
1959 }
1960 }
1961 }
1962 }
1963
1964 TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
1965 QualType AllocType = TInfo->getType();
1966 if (D.isInvalidType())
1967 return ExprError();
1968
1969 SourceRange DirectInitRange;
1970 if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Initializer))
1971 DirectInitRange = List->getSourceRange();
1972
1973 return BuildCXXNew(SourceRange(StartLoc, D.getEndLoc()), UseGlobal,
1974 PlacementLParen, PlacementArgs, PlacementRParen,
1975 TypeIdParens, AllocType, TInfo, ArraySize, DirectInitRange,
1976 Initializer);
1977}
1978
1979 static bool isLegalArrayNewInitializer(CXXNewInitializationStyle Style,
1980 Expr *Init, bool IsCPlusPlus20) {
1981 if (!Init)
1982 return true;
1983 if (ParenListExpr *PLE = dyn_cast<ParenListExpr>(Init))
1984 return IsCPlusPlus20 || PLE->getNumExprs() == 0;
1985 if (isa<ImplicitValueInitExpr>(Init))
1986 return true;
1987 else if (CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init))
1988 return !CCE->isListInitialization() &&
1989 CCE->getConstructor()->isDefaultConstructor();
1990 else if (Style == CXXNewInitializationStyle::Braces) {
1991 assert(isa<InitListExpr>(Init) &&
1992 "Shouldn't create list CXXConstructExprs for arrays.");
1993 return true;
1994 }
1995 return false;
1996}
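// Illustrative note (hypothetical user code, not from the clang sources):
//   new int[3];          // OK, no initializer
//   new int[3]();        // OK, empty parentheses
//   new int[3]{1, 2, 3}; // OK in C++11, brace-enclosed initializer
//   new int[3](1, 2, 3); // rejected before C++20 (parenthesized arguments)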
1997
1998bool
1999 Sema::isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const {
2000 if (!getLangOpts().AlignedAllocationUnavailable)
2001 return false;
2002 if (FD.isDefined())
2003 return false;
2004 std::optional<unsigned> AlignmentParam;
2005 if (FD.isReplaceableGlobalAllocationFunction(&AlignmentParam) &&
2006 AlignmentParam)
2007 return true;
2008 return false;
2009}
2010
2011// Emit a diagnostic if an aligned allocation/deallocation function that is not
2012// implemented in the standard library is selected.
2013 void Sema::diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
2014 SourceLocation Loc) {
2015 if (isUnavailableAlignedAllocationFunction(FD)) {
2016 const llvm::Triple &T = getASTContext().getTargetInfo().getTriple();
2017 StringRef OSName = AvailabilityAttr::getPlatformNameSourceSpelling(
2018 getASTContext().getTargetInfo().getPlatformName());
2019 VersionTuple OSVersion = alignedAllocMinVersion(T.getOS());
2020
2021 OverloadedOperatorKind Kind = FD.getDeclName().getCXXOverloadedOperator();
2022 bool IsDelete = Kind == OO_Delete || Kind == OO_Array_Delete;
2023 Diag(Loc, diag::err_aligned_allocation_unavailable)
2024 << IsDelete << FD.getType().getAsString() << OSName
2025 << OSVersion.getAsString() << OSVersion.empty();
2026 Diag(Loc, diag::note_silence_aligned_allocation_unavailable);
2027 }
2028}
2029
2030 ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
2031 SourceLocation PlacementLParen,
2032 MultiExprArg PlacementArgs,
2033 SourceLocation PlacementRParen,
2034 SourceRange TypeIdParens, QualType AllocType,
2035 TypeSourceInfo *AllocTypeInfo,
2036 std::optional<Expr *> ArraySize,
2037 SourceRange DirectInitRange, Expr *Initializer) {
2038 SourceRange TypeRange = AllocTypeInfo->getTypeLoc().getSourceRange();
2039 SourceLocation StartLoc = Range.getBegin();
2040
2041 CXXNewInitializationStyle InitStyle;
2042 if (DirectInitRange.isValid()) {
2043 assert(Initializer && "Have parens but no initializer.");
2044 InitStyle = CXXNewInitializationStyle::Parens;
2045 } else if (isa_and_nonnull<InitListExpr>(Initializer))
2046 InitStyle = CXXNewInitializationStyle::Braces;
2047 else {
2048 assert((!Initializer || isa<ImplicitValueInitExpr>(Initializer) ||
2049 isa<CXXConstructExpr>(Initializer)) &&
2050 "Initializer expression that cannot have been implicitly created.");
2052 }
2053
2054 MultiExprArg Exprs(&Initializer, Initializer ? 1 : 0);
2055 if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Initializer)) {
2056 assert(InitStyle == CXXNewInitializationStyle::Parens &&
2057 "paren init for non-call init");
2058 Exprs = MultiExprArg(List->getExprs(), List->getNumExprs());
2059 }
2060
2061 // C++11 [expr.new]p15:
2062 // A new-expression that creates an object of type T initializes that
2063 // object as follows:
2064 InitializationKind Kind = [&] {
2065 switch (InitStyle) {
2066 // - If the new-initializer is omitted, the object is default-
2067 // initialized (8.5); if no initialization is performed,
2068 // the object has indeterminate value
2069 case CXXNewInitializationStyle::None:
2070 return InitializationKind::CreateDefault(TypeRange.getBegin());
2071 // - Otherwise, the new-initializer is interpreted according to the
2072 // initialization rules of 8.5 for direct-initialization.
2073 case CXXNewInitializationStyle::Parens:
2074 return InitializationKind::CreateDirect(TypeRange.getBegin(),
2075 DirectInitRange.getBegin(),
2076 DirectInitRange.getEnd());
2077 case CXXNewInitializationStyle::Braces:
2078 return InitializationKind::CreateDirectList(TypeRange.getBegin(),
2079 Initializer->getBeginLoc(),
2080 Initializer->getEndLoc());
2081 }
2082 llvm_unreachable("Unknown initialization kind");
2083 }();
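// Illustrative note (hypothetical user code, not from the clang sources): the
// mapping above corresponds to
//   new T;        // CXXNewInitializationStyle::None   -> default-initialization
//   new T(a, b);  // CXXNewInitializationStyle::Parens -> direct-initialization
//   new T{a, b};  // CXXNewInitializationStyle::Braces -> direct-list-initialization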
2084
2085 // C++11 [dcl.spec.auto]p6. Deduce the type which 'auto' stands in for.
2086 auto *Deduced = AllocType->getContainedDeducedType();
2087 if (Deduced && !Deduced->isDeduced() &&
2088 isa<DeducedTemplateSpecializationType>(Deduced)) {
2089 if (ArraySize)
2090 return ExprError(
2091 Diag(*ArraySize ? (*ArraySize)->getExprLoc() : TypeRange.getBegin(),
2092 diag::err_deduced_class_template_compound_type)
2093 << /*array*/ 2
2094 << (*ArraySize ? (*ArraySize)->getSourceRange() : TypeRange));
2095
2096 InitializedEntity Entity
2097 = InitializedEntity::InitializeNew(StartLoc, AllocType);
2098 AllocType = DeduceTemplateSpecializationFromInitializer(
2099 AllocTypeInfo, Entity, Kind, Exprs);
2100 if (AllocType.isNull())
2101 return ExprError();
2102 } else if (Deduced && !Deduced->isDeduced()) {
2103 MultiExprArg Inits = Exprs;
2104 bool Braced = (InitStyle == CXXNewInitializationStyle::Braces);
2105 if (Braced) {
2106 auto *ILE = cast<InitListExpr>(Exprs[0]);
2107 Inits = MultiExprArg(ILE->getInits(), ILE->getNumInits());
2108 }
2109
2110 if (InitStyle == CXXNewInitializationStyle::None || Inits.empty())
2111 return ExprError(Diag(StartLoc, diag::err_auto_new_requires_ctor_arg)
2112 << AllocType << TypeRange);
2113 if (Inits.size() > 1) {
2114 Expr *FirstBad = Inits[1];
2115 return ExprError(Diag(FirstBad->getBeginLoc(),
2116 diag::err_auto_new_ctor_multiple_expressions)
2117 << AllocType << TypeRange);
2118 }
2119 if (Braced && !getLangOpts().CPlusPlus17)
2120 Diag(Initializer->getBeginLoc(), diag::ext_auto_new_list_init)
2121 << AllocType << TypeRange;
2122 Expr *Deduce = Inits[0];
2123 if (isa<InitListExpr>(Deduce))
2124 return ExprError(
2125 Diag(Deduce->getBeginLoc(), diag::err_auto_expr_init_paren_braces)
2126 << Braced << AllocType << TypeRange);
2127 QualType DeducedType;
2128 TemplateDeductionInfo Info(Deduce->getExprLoc());
2129 TemplateDeductionResult Result =
2130 DeduceAutoType(AllocTypeInfo->getTypeLoc(), Deduce, DeducedType, Info);
2131 if (Result != TemplateDeductionResult::Success &&
2132 Result != TemplateDeductionResult::AlreadyDiagnosed)
2133 return ExprError(Diag(StartLoc, diag::err_auto_new_deduction_failure)
2134 << AllocType << Deduce->getType() << TypeRange
2135 << Deduce->getSourceRange());
2136 if (DeducedType.isNull()) {
2137 assert(Result == TemplateDeductionResult::AlreadyDiagnosed);
2138 return ExprError();
2139 }
2140 AllocType = DeducedType;
2141 }
2142
2143 // Per C++0x [expr.new]p5, the type being constructed may be a
2144 // typedef of an array type.
2145 if (!ArraySize) {
2146 if (const ConstantArrayType *Array
2147 = Context.getAsConstantArrayType(AllocType)) {
2148 ArraySize = IntegerLiteral::Create(Context, Array->getSize(),
2149 Context.getSizeType(),
2150 TypeRange.getEnd());
2151 AllocType = Array->getElementType();
2152 }
2153 }
2154
2155 if (CheckAllocatedType(AllocType, TypeRange.getBegin(), TypeRange))
2156 return ExprError();
2157
2158 if (ArraySize && !checkArrayElementAlignment(AllocType, TypeRange.getBegin()))
2159 return ExprError();
2160
2161 // In ARC, infer 'retaining' for the allocated
2162 if (getLangOpts().ObjCAutoRefCount &&
2163 AllocType.getObjCLifetime() == Qualifiers::OCL_None &&
2164 AllocType->isObjCLifetimeType()) {
2165 AllocType = Context.getLifetimeQualifiedType(AllocType,
2166 AllocType->getObjCARCImplicitLifetime());
2167 }
2168
2169 QualType ResultType = Context.getPointerType(AllocType);
2170
2171 if (ArraySize && *ArraySize &&
2172 (*ArraySize)->getType()->isNonOverloadPlaceholderType()) {
2173 ExprResult result = CheckPlaceholderExpr(*ArraySize);
2174 if (result.isInvalid()) return ExprError();
2175 ArraySize = result.get();
2176 }
2177 // C++98 5.3.4p6: "The expression in a direct-new-declarator shall have
2178 // integral or enumeration type with a non-negative value."
2179 // C++11 [expr.new]p6: The expression [...] shall be of integral or unscoped
2180 // enumeration type, or a class type for which a single non-explicit
2181 // conversion function to integral or unscoped enumeration type exists.
2182 // C++1y [expr.new]p6: The expression [...] is implicitly converted to
2183 // std::size_t.
2184 std::optional<uint64_t> KnownArraySize;
2185 if (ArraySize && *ArraySize && !(*ArraySize)->isTypeDependent()) {
2186 ExprResult ConvertedSize;
2187 if (getLangOpts().CPlusPlus14) {
2188 assert(Context.getTargetInfo().getIntWidth() && "Builtin type of size 0?");
2189
2190 ConvertedSize = PerformImplicitConversion(*ArraySize, Context.getSizeType(),
2191 AA_Converting);
2192
2193 if (!ConvertedSize.isInvalid() &&
2194 (*ArraySize)->getType()->getAs<RecordType>())
2195 // Diagnose the compatibility of this conversion.
2196 Diag(StartLoc, diag::warn_cxx98_compat_array_size_conversion)
2197 << (*ArraySize)->getType() << 0 << "'size_t'";
2198 } else {
2199 class SizeConvertDiagnoser : public ICEConvertDiagnoser {
2200 protected:
2201 Expr *ArraySize;
2202
2203 public:
2204 SizeConvertDiagnoser(Expr *ArraySize)
2205 : ICEConvertDiagnoser(/*AllowScopedEnumerations*/false, false, false),
2206 ArraySize(ArraySize) {}
2207
2208 SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
2209 QualType T) override {
2210 return S.Diag(Loc, diag::err_array_size_not_integral)
2211 << S.getLangOpts().CPlusPlus11 << T;
2212 }
2213
2214 SemaDiagnosticBuilder diagnoseIncomplete(
2215 Sema &S, SourceLocation Loc, QualType T) override {
2216 return S.Diag(Loc, diag::err_array_size_incomplete_type)
2217 << T << ArraySize->getSourceRange();
2218 }
2219
2220 SemaDiagnosticBuilder diagnoseExplicitConv(
2221 Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
2222 return S.Diag(Loc, diag::err_array_size_explicit_conversion) << T << ConvTy;
2223 }
2224
2225 SemaDiagnosticBuilder noteExplicitConv(
2226 Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
2227 return S.Diag(Conv->getLocation(), diag::note_array_size_conversion)
2228 << ConvTy->isEnumeralType() << ConvTy;
2229 }
2230
2231 SemaDiagnosticBuilder diagnoseAmbiguous(
2232 Sema &S, SourceLocation Loc, QualType T) override {
2233 return S.Diag(Loc, diag::err_array_size_ambiguous_conversion) << T;
2234 }
2235
2236 SemaDiagnosticBuilder noteAmbiguous(
2237 Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
2238 return S.Diag(Conv->getLocation(), diag::note_array_size_conversion)
2239 << ConvTy->isEnumeralType() << ConvTy;
2240 }
2241
2242 SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
2243 QualType T,
2244 QualType ConvTy) override {
2245 return S.Diag(Loc,
2246 S.getLangOpts().CPlusPlus11
2247 ? diag::warn_cxx98_compat_array_size_conversion
2248 : diag::ext_array_size_conversion)
2249 << T << ConvTy->isEnumeralType() << ConvTy;
2250 }
2251 } SizeDiagnoser(*ArraySize);
2252
2253 ConvertedSize = PerformContextualImplicitConversion(StartLoc, *ArraySize,
2254 SizeDiagnoser);
2255 }
2256 if (ConvertedSize.isInvalid())
2257 return ExprError();
2258
2259 ArraySize = ConvertedSize.get();
2260 QualType SizeType = (*ArraySize)->getType();
2261
2262 if (!SizeType->isIntegralOrUnscopedEnumerationType())
2263 return ExprError();
2264
2265 // C++98 [expr.new]p7:
2266 // The expression in a direct-new-declarator shall have integral type
2267 // with a non-negative value.
2268 //
2269 // Let's see if this is a constant < 0. If so, we reject it out of hand,
2270 // per CWG1464. Otherwise, if it's not a constant, we must have an
2271 // unparenthesized array type.
2272
2273 // We've already performed any required implicit conversion to integer or
2274 // unscoped enumeration type.
2275 // FIXME: Per CWG1464, we are required to check the value prior to
2276 // converting to size_t. This will never find a negative array size in
2277 // C++14 onwards, because Value is always unsigned here!
2278 if (std::optional<llvm::APSInt> Value =
2279 (*ArraySize)->getIntegerConstantExpr(Context)) {
2280 if (Value->isSigned() && Value->isNegative()) {
2281 return ExprError(Diag((*ArraySize)->getBeginLoc(),
2282 diag::err_typecheck_negative_array_size)
2283 << (*ArraySize)->getSourceRange());
2284 }
2285
2286 if (!AllocType->isDependentType()) {
2287 unsigned ActiveSizeBits =
2288 ConstantArrayType::getNumAddressingBits(Context, AllocType, *Value);
2289 if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context))
2290 return ExprError(
2291 Diag((*ArraySize)->getBeginLoc(), diag::err_array_too_large)
2292 << toString(*Value, 10) << (*ArraySize)->getSourceRange());
2293 }
2294
2295 KnownArraySize = Value->getZExtValue();
2296 } else if (TypeIdParens.isValid()) {
2297 // Can't have dynamic array size when the type-id is in parentheses.
2298 Diag((*ArraySize)->getBeginLoc(), diag::ext_new_paren_array_nonconst)
2299 << (*ArraySize)->getSourceRange()
2300 << FixItHint::CreateRemoval(TypeIdParens.getBegin())
2301 << FixItHint::CreateRemoval(TypeIdParens.getEnd());
2302
2303 TypeIdParens = SourceRange();
2304 }
2305
2306 // Note that we do *not* convert the argument in any way. It can
2307 // be signed, larger than size_t, whatever.
2308 }
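// Illustrative note (hypothetical user code, not from the clang sources):
//   new int[-1];   // constant and negative: rejected per CWG1464
//   new int[n];    // non-constant bound: accepted, value checked at run time
//   new (int[n]);  // non-constant bound inside a parenthesized type-id:
//                  // diagnosed above with a fix-it removing the parentheses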
2309
2310 FunctionDecl *OperatorNew = nullptr;
2311 FunctionDecl *OperatorDelete = nullptr;
2312 unsigned Alignment =
2313 AllocType->isDependentType() ? 0 : Context.getTypeAlign(AllocType);
2314 unsigned NewAlignment = Context.getTargetInfo().getNewAlign();
2315 bool PassAlignment = getLangOpts().AlignedAllocation &&
2316 Alignment > NewAlignment;
2317
2318 if (CheckArgsForPlaceholders(PlacementArgs))
2319 return ExprError();
2320
2321 AllocationFunctionScope Scope = UseGlobal ? AFS_Global : AFS_Both;
2322 if (!AllocType->isDependentType() &&
2323 !Expr::hasAnyTypeDependentArguments(PlacementArgs) &&
2324 FindAllocationFunctions(
2325 StartLoc, SourceRange(PlacementLParen, PlacementRParen), Scope, Scope,
2326 AllocType, ArraySize.has_value(), PassAlignment, PlacementArgs,
2327 OperatorNew, OperatorDelete))
2328 return ExprError();
2329
2330 // If this is an array allocation, compute whether the usual array
2331 // deallocation function for the type has a size_t parameter.
2332 bool UsualArrayDeleteWantsSize = false;
2333 if (ArraySize && !AllocType->isDependentType())
2334 UsualArrayDeleteWantsSize =
2335 doesUsualArrayDeleteWantSize(*this, StartLoc, AllocType);
2336
2337 SmallVector<Expr *, 8> AllPlaceArgs;
2338 if (OperatorNew) {
2339 auto *Proto = OperatorNew->getType()->castAs<FunctionProtoType>();
2340 VariadicCallType CallType = Proto->isVariadic() ? VariadicFunction
2341 : VariadicDoesNotApply;
2342
2343 // We've already converted the placement args, just fill in any default
2344 // arguments. Skip the first parameter because we don't have a corresponding
2345 // argument. Skip the second parameter too if we're passing in the
2346 // alignment; we've already filled it in.
2347 unsigned NumImplicitArgs = PassAlignment ? 2 : 1;
2348 if (GatherArgumentsForCall(PlacementLParen, OperatorNew, Proto,
2349 NumImplicitArgs, PlacementArgs, AllPlaceArgs,
2350 CallType))
2351 return ExprError();
2352
2353 if (!AllPlaceArgs.empty())
2354 PlacementArgs = AllPlaceArgs;
2355
2356 // We would like to perform some checking on the given `operator new` call,
2357 // but the PlacementArgs does not contain the implicit arguments,
2358 // namely allocation size and maybe allocation alignment,
2359 // so we need to conjure them.
2360
2361 QualType SizeTy = Context.getSizeType();
2362 unsigned SizeTyWidth = Context.getTypeSize(SizeTy);
2363
2364 llvm::APInt SingleEltSize(
2365 SizeTyWidth, Context.getTypeSizeInChars(AllocType).getQuantity());
2366
2367 // How many bytes do we want to allocate here?
2368 std::optional<llvm::APInt> AllocationSize;
2369 if (!ArraySize && !AllocType->isDependentType()) {
2370 // For non-array operator new, we only want to allocate one element.
2371 AllocationSize = SingleEltSize;
2372 } else if (KnownArraySize && !AllocType->isDependentType()) {
2373 // For array operator new, only deal with static array size case.
2374 bool Overflow;
2375 AllocationSize = llvm::APInt(SizeTyWidth, *KnownArraySize)
2376 .umul_ov(SingleEltSize, Overflow);
2377 (void)Overflow;
2378 assert(
2379 !Overflow &&
2380 "Expected that all the overflows would have been handled already.");
2381 }
2382
2383 IntegerLiteral AllocationSizeLiteral(
2384 Context, AllocationSize.value_or(llvm::APInt::getZero(SizeTyWidth)),
2385 SizeTy, SourceLocation());
2386 // Otherwise, if we failed to constant-fold the allocation size, we'll
2387 // just give up and pass-in something opaque, that isn't a null pointer.
2388 OpaqueValueExpr OpaqueAllocationSize(SourceLocation(), SizeTy, VK_PRValue,
2389 OK_Ordinary, /*SourceExpr=*/nullptr);
2390
2391 // Let's synthesize the alignment argument in case we will need it.
2392 // Since we *really* want to allocate these on stack, this is slightly ugly
2393 // because there might not be a `std::align_val_t` type.
2394 EnumDecl *StdAlignValT = getStdAlignValT();
2395 QualType AlignValT =
2396 StdAlignValT ? Context.getTypeDeclType(StdAlignValT) : SizeTy;
2397 IntegerLiteral AlignmentLiteral(
2398 Context,
2399 llvm::APInt(Context.getTypeSize(SizeTy),
2400 Alignment / Context.getCharWidth()),
2401 SizeTy, SourceLocation());
2402 ImplicitCastExpr DesiredAlignment(ImplicitCastExpr::OnStack, AlignValT,
2403 CK_IntegralCast, &AlignmentLiteral,
2404 VK_PRValue, FPOptionsOverride());
2405
2406 // Adjust placement args by prepending conjured size and alignment exprs.
2407 llvm::SmallVector<const Expr *, 8> CallArgs;
2408 CallArgs.reserve(NumImplicitArgs + PlacementArgs.size());
2409 CallArgs.emplace_back(AllocationSize
2410 ? static_cast<Expr *>(&AllocationSizeLiteral)
2411 : &OpaqueAllocationSize);
2412 if (PassAlignment)
2413 CallArgs.emplace_back(&DesiredAlignment);
2414 CallArgs.insert(CallArgs.end(), PlacementArgs.begin(), PlacementArgs.end());
2415
2416 DiagnoseSentinelCalls(OperatorNew, PlacementLParen, CallArgs);
2417
2418 checkCall(OperatorNew, Proto, /*ThisArg=*/nullptr, CallArgs,
2419 /*IsMemberFunction=*/false, StartLoc, Range, CallType);
2420
2421 // Warn if the type is over-aligned and is being allocated by (unaligned)
2422 // global operator new.
2423 if (PlacementArgs.empty() && !PassAlignment &&
2424 (OperatorNew->isImplicit() ||
2425 (OperatorNew->getBeginLoc().isValid() &&
2426 getSourceManager().isInSystemHeader(OperatorNew->getBeginLoc())))) {
2427 if (Alignment > NewAlignment)
2428 Diag(StartLoc, diag::warn_overaligned_type)
2429 << AllocType
2430 << unsigned(Alignment / Context.getCharWidth())
2431 << unsigned(NewAlignment / Context.getCharWidth());
2432 }
2433 }
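// Illustrative note (hypothetical user code, not from the clang sources): when
// aligned allocation is not being used, something like
//   struct alignas(64) Vec { float v[16]; };
//   Vec *p = new Vec;
// goes through the default-aligned global operator new, so the
// warn_overaligned_type diagnostic above fires for the 64-byte requirement.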
2434
2435 // Array 'new' can't have any initializers except empty parentheses.
2436 // Initializer lists are also allowed, in C++11. Rely on the parser for the
2437 // dialect distinction.
2438 if (ArraySize && !isLegalArrayNewInitializer(InitStyle, Initializer,
2439 getLangOpts().CPlusPlus20)) {
2440 SourceRange InitRange(Exprs.front()->getBeginLoc(),
2441 Exprs.back()->getEndLoc());
2442 Diag(StartLoc, diag::err_new_array_init_args) << InitRange;
2443 return ExprError();
2444 }
2445
2446 // If we can perform the initialization, and we've not already done so,
2447 // do it now.
2448 if (!AllocType->isDependentType() &&
2449 !Expr::hasAnyTypeDependentArguments(Exprs)) {
2450 // The type we initialize is the complete type, including the array bound.
2451 QualType InitType;
2452 if (KnownArraySize)
2453 InitType = Context.getConstantArrayType(
2454 AllocType,
2455 llvm::APInt(Context.getTypeSize(Context.getSizeType()),
2456 *KnownArraySize),
2457 *ArraySize, ArraySizeModifier::Normal, 0);
2458 else if (ArraySize)
2459 InitType = Context.getIncompleteArrayType(AllocType,
2460 ArraySizeModifier::Normal, 0);
2461 else
2462 InitType = AllocType;
2463
2464 InitializedEntity Entity
2465 = InitializedEntity::InitializeNew(StartLoc, InitType);
2466 InitializationSequence InitSeq(*this, Entity, Kind, Exprs);
2467 ExprResult FullInit = InitSeq.Perform(*this, Entity, Kind, Exprs);
2468 if (FullInit.isInvalid())
2469 return ExprError();
2470
2471 // FullInit is our initializer; strip off CXXBindTemporaryExprs, because
2472 // we don't want the initialized object to be destructed.
2473 // FIXME: We should not create these in the first place.
2474 if (CXXBindTemporaryExpr *Binder =
2475 dyn_cast_or_null<CXXBindTemporaryExpr>(FullInit.get()))
2476 FullInit = Binder->getSubExpr();
2477
2478 Initializer = FullInit.get();
2479
2480 // FIXME: If we have a KnownArraySize, check that the array bound of the
2481 // initializer is no greater than that constant value.
2482
2483 if (ArraySize && !*ArraySize) {
2484 auto *CAT = Context.getAsConstantArrayType(Initializer->getType());
2485 if (CAT) {
2486 // FIXME: Track that the array size was inferred rather than explicitly
2487 // specified.
2488 ArraySize = IntegerLiteral::Create(
2489 Context, CAT->getSize(), Context.getSizeType(), TypeRange.getEnd());
2490 } else {
2491 Diag(TypeRange.getEnd(), diag::err_new_array_size_unknown_from_init)
2492 << Initializer->getSourceRange();
2493 }
2494 }
2495 }
2496
2497 // Mark the new and delete operators as referenced.
2498 if (OperatorNew) {
2499 if (DiagnoseUseOfDecl(OperatorNew, StartLoc))
2500 return ExprError();
2501 MarkFunctionReferenced(StartLoc, OperatorNew);
2502 }
2503 if (OperatorDelete) {
2504 if (DiagnoseUseOfDecl(OperatorDelete, StartLoc))
2505 return ExprError();
2506 MarkFunctionReferenced(StartLoc, OperatorDelete);
2507 }
2508
2509 return CXXNewExpr::Create(Context, UseGlobal, OperatorNew, OperatorDelete,
2510 PassAlignment, UsualArrayDeleteWantsSize,
2511 PlacementArgs, TypeIdParens, ArraySize, InitStyle,
2512 Initializer, ResultType, AllocTypeInfo, Range,
2513 DirectInitRange);
2514}
2515
2516 bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
2517 SourceRange R) {
2518 // C++ 5.3.4p1: "[The] type shall be a complete object type, but not an
2519 // abstract class type or array thereof.
2520 if (AllocType->isFunctionType())
2521 return Diag(Loc, diag::err_bad_new_type)
2522 << AllocType << 0 << R;
2523 else if (AllocType->isReferenceType())
2524 return Diag(Loc, diag::err_bad_new_type)
2525 << AllocType << 1 << R;
2526 else if (!AllocType->isDependentType() &&
2527 RequireCompleteSizedType(
2528 Loc, AllocType, diag::err_new_incomplete_or_sizeless_type, R))
2529 return true;
2530 else if (RequireNonAbstractType(Loc, AllocType,
2531 diag::err_allocation_of_abstract_type))
2532 return true;
2533 else if (AllocType->isVariablyModifiedType())
2534 return Diag(Loc, diag::err_variably_modified_new_type)
2535 << AllocType;
2536 else if (AllocType.getAddressSpace() != LangAS::Default &&
2537 !getLangOpts().OpenCLCPlusPlus)
2538 return Diag(Loc, diag::err_address_space_qualified_new)
2539 << AllocType.getUnqualifiedType()
2540 << AllocType.getQualifiers().getAddressSpaceAttributePrintValue();
2541 else if (getLangOpts().ObjCAutoRefCount) {
2542 if (const ArrayType *AT = Context.getAsArrayType(AllocType)) {
2543 QualType BaseAllocType = Context.getBaseElementType(AT);
2544 if (BaseAllocType.getObjCLifetime() == Qualifiers::OCL_None &&
2545 BaseAllocType->isObjCLifetimeType())
2546 return Diag(Loc, diag::err_arc_new_array_without_ownership)
2547 << BaseAllocType;
2548 }
2549 }
2550
2551 return false;
2552}
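// Illustrative note (hypothetical user code, not from the clang sources):
//   using Fn = void();
//   using IR = int &;
//   new Fn;        // err_bad_new_type: function type
//   new IR;        // err_bad_new_type: reference type
//   new Abstract;  // err_allocation_of_abstract_type for an abstract class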
2553
2554 static bool resolveAllocationOverload(
2555 Sema &S, LookupResult &R, SourceRange Range, SmallVectorImpl<Expr *> &Args,
2556 bool &PassAlignment, FunctionDecl *&Operator,
2557 OverloadCandidateSet *AlignedCandidates, Expr *AlignArg, bool Diagnose) {
2558 OverloadCandidateSet Candidates(R.getNameLoc(),
2559 OverloadCandidateSet::CSK_Normal);
2560 for (LookupResult::iterator Alloc = R.begin(), AllocEnd = R.end();
2561 Alloc != AllocEnd; ++Alloc) {
2562 // Even member operator new/delete are implicitly treated as
2563 // static, so don't use AddMemberCandidate.
2564 NamedDecl *D = (*Alloc)->getUnderlyingDecl();
2565
2566 if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(D)) {
2567 S.AddTemplateOverloadCandidate(FnTemplate, Alloc.getPair(),
2568 /*ExplicitTemplateArgs=*/nullptr, Args,
2569 Candidates,
2570 /*SuppressUserConversions=*/false);
2571 continue;
2572 }
2573
2574 FunctionDecl *Fn = cast<FunctionDecl>(D);
2575 S.AddOverloadCandidate(Fn, Alloc.getPair(), Args, Candidates,
2576 /*SuppressUserConversions=*/false);
2577 }
2578
2579 // Do the resolution.
2580 OverloadCandidateSet::iterator Best;
2581 switch (Candidates.BestViableFunction(S, R.getNameLoc(), Best)) {
2582 case OR_Success: {
2583 // Got one!
2584 FunctionDecl *FnDecl = Best->Function;
2585 if (S.CheckAllocationAccess(R.getNameLoc(), Range, R.getNamingClass(),
2586 Best->FoundDecl) == Sema::AR_inaccessible)
2587 return true;
2588
2589 Operator = FnDecl;
2590 return false;
2591 }
2592
2593 case OR_No_Viable_Function:
2594 // C++17 [expr.new]p13:
2595 // If no matching function is found and the allocated object type has
2596 // new-extended alignment, the alignment argument is removed from the
2597 // argument list, and overload resolution is performed again.
2598 if (PassAlignment) {
2599 PassAlignment = false;
2600 AlignArg = Args[1];
2601 Args.erase(Args.begin() + 1);
2602 return resolveAllocationOverload(S, R, Range, Args, PassAlignment,
2603 Operator, &Candidates, AlignArg,
2604 Diagnose);
2605 }
2606
2607 // MSVC will fall back on trying to find a matching global operator new
2608 // if operator new[] cannot be found. Also, MSVC will leak by not
2609 // generating a call to operator delete or operator delete[], but we
2610 // will not replicate that bug.
2611 // FIXME: Find out how this interacts with the std::align_val_t fallback
2612 // once MSVC implements it.
2613 if (R.getLookupName().getCXXOverloadedOperator() == OO_Array_New &&
2614 S.Context.getLangOpts().MSVCCompat) {
2615 R.clear();
2616 R.setLookupName(S.Context.DeclarationNames.getCXXOperatorName(OO_New));
2617 S.LookupQualifiedName(R, S.Context.getTranslationUnitDecl());
2618 // FIXME: This will give bad diagnostics pointing at the wrong functions.
2619 return resolveAllocationOverload(S, R, Range, Args, PassAlignment,
2620 Operator, /*Candidates=*/nullptr,
2621 /*AlignArg=*/nullptr, Diagnose);
2622 }
2623
2624 if (Diagnose) {
2625 // If this is an allocation of the form 'new (p) X' for some object
2626 // pointer p (or an expression that will decay to such a pointer),
2627 // diagnose the missing inclusion of <new>.
2628 if (!R.isClassLookup() && Args.size() == 2 &&
2629 (Args[1]->getType()->isObjectPointerType() ||
2630 Args[1]->getType()->isArrayType())) {
2631 S.Diag(R.getNameLoc(), diag::err_need_header_before_placement_new)
2632 << R.getLookupName() << Range;
2633 // Listing the candidates is unlikely to be useful; skip it.
2634 return true;
2635 }
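// Illustrative note (hypothetical user code, not from the clang sources):
//   char buf[sizeof(Widget)];
//   Widget *w = new (buf) Widget;  // without #include <new> there is no viable
//                                  // placement operator new, so the diagnostic
//                                  // above points at the missing header instead
//                                  // of listing every candidate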
2636
2637 // Finish checking all candidates before we note any. This checking can
2638 // produce additional diagnostics so can't be interleaved with our
2639 // emission of notes.
2640 //
2641 // For an aligned allocation, separately check the aligned and unaligned
2642 // candidates with their respective argument lists.
2643 SmallVector<OverloadCandidate*, 32> Cands;
2644 SmallVector<OverloadCandidate*, 32> AlignedCands;
2645 llvm::SmallVector<Expr*, 4> AlignedArgs;
2646 if (AlignedCandidates) {
2647 auto IsAligned = [](OverloadCandidate &C) {
2648 return C.Function->getNumParams() > 1 &&
2649 C.Function->getParamDecl(1)->getType()->isAlignValT();
2650 };
2651 auto IsUnaligned = [&](OverloadCandidate &C) { return !IsAligned(C); };
2652
2653 AlignedArgs.reserve(Args.size() + 1);
2654 AlignedArgs.push_back(Args[0]);
2655 AlignedArgs.push_back(AlignArg);
2656 AlignedArgs.append(Args.begin() + 1, Args.end());
2657 AlignedCands = AlignedCandidates->CompleteCandidates(
2658 S, OCD_AllCandidates, AlignedArgs, R.getNameLoc(), IsAligned);
2659
2660 Cands = Candidates.CompleteCandidates(S, OCD_AllCandidates, Args,
2661 R.getNameLoc(), IsUnaligned);
2662 } else {
2663 Cands = Candidates.CompleteCandidates(S, OCD_AllCandidates, Args,
2664 R.getNameLoc());
2665 }
2666
2667 S.Diag(R.getNameLoc(), diag::err_ovl_no_viable_function_in_call)
2668 << R.getLookupName() << Range;
2669 if (AlignedCandidates)
2670 AlignedCandidates->NoteCandidates(S, AlignedArgs, AlignedCands, "",
2671 R.getNameLoc());
2672 Candidates.NoteCandidates(S, Args, Cands, "", R.getNameLoc());
2673 }
2674 return true;
2675
2676 case OR_Ambiguous:
2677 if (Diagnose) {
2678 Candidates.NoteCandidates(
2679 PartialDiagnosticAt(R.getNameLoc(),
2680 S.PDiag(diag::err_ovl_ambiguous_call)
2681 << R.getLookupName() << Range),
2682 S, OCD_AmbiguousCandidates, Args);
2683 }
2684 return true;
2685
2686 case OR_Deleted: {
2687 if (Diagnose)
2688 S.DiagnoseUseOfDeletedFunction(R.getNameLoc(), Range, R.getLookupName(),
2689 Candidates, Best->Function, Args);
2690 return true;
2691 }
2692 }
2693 llvm_unreachable("Unreachable, bad result from BestViableFunction");
2694}
2695
2696 bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
2697 AllocationFunctionScope NewScope,
2698 AllocationFunctionScope DeleteScope,
2699 QualType AllocType, bool IsArray,
2700 bool &PassAlignment, MultiExprArg PlaceArgs,
2701 FunctionDecl *&OperatorNew,
2702 FunctionDecl *&OperatorDelete,
2703 bool Diagnose) {
2704 // --- Choosing an allocation function ---
2705 // C++ 5.3.4p8 - 14 & 18
2706 // 1) If looking in AFS_Global scope for allocation functions, only look in
2707 // the global scope. Else, if AFS_Class, only look in the scope of the
2708 // allocated class. If AFS_Both, look in both.
2709 // 2) If an array size is given, look for operator new[], else look for
2710 // operator new.
2711 // 3) The first argument is always size_t. Append the arguments from the
2712 // placement form.
2713
2714 SmallVector<Expr*, 8> AllocArgs;
2715 AllocArgs.reserve((PassAlignment ? 2 : 1) + PlaceArgs.size());
2716
2717 // We don't care about the actual value of these arguments.
2718 // FIXME: Should the Sema create the expression and embed it in the syntax
2719 // tree? Or should the consumer just recalculate the value?
2720 // FIXME: Using a dummy value will interact poorly with attribute enable_if.
2721 QualType SizeTy = Context.getSizeType();
2722 unsigned SizeTyWidth = Context.getTypeSize(SizeTy);
2723 IntegerLiteral Size(Context, llvm::APInt::getZero(SizeTyWidth), SizeTy,
2724 SourceLocation());
2725 AllocArgs.push_back(&Size);
2726
2727 QualType AlignValT = Context.VoidTy;
2728 if (PassAlignment) {
2729 DeclareGlobalNewDelete();
2730 AlignValT = Context.getTypeDeclType(getStdAlignValT());
2731 }
2732 CXXScalarValueInitExpr Align(AlignValT, nullptr, SourceLocation());
2733 if (PassAlignment)
2734 AllocArgs.push_back(&Align);
2735
2736 AllocArgs.insert(AllocArgs.end(), PlaceArgs.begin(), PlaceArgs.end());
2737
2738 // C++ [expr.new]p8:
2739 // If the allocated type is a non-array type, the allocation
2740 // function's name is operator new and the deallocation function's
2741 // name is operator delete. If the allocated type is an array
2742 // type, the allocation function's name is operator new[] and the
2743 // deallocation function's name is operator delete[].
2744 DeclarationName NewName = Context.DeclarationNames.getCXXOperatorName(
2745 IsArray ? OO_Array_New : OO_New);
2746
2747 QualType AllocElemType = Context.getBaseElementType(AllocType);
2748
2749 // Find the allocation function.
2750 {
2751 LookupResult R(*this, NewName, StartLoc, LookupOrdinaryName);
2752
2753 // C++1z [expr.new]p9:
2754 // If the new-expression begins with a unary :: operator, the allocation
2755 // function's name is looked up in the global scope. Otherwise, if the
2756 // allocated type is a class type T or array thereof, the allocation
2757 // function's name is looked up in the scope of T.
2758 if (AllocElemType->isRecordType() && NewScope != AFS_Global)
2759 LookupQualifiedName(R, AllocElemType->getAsCXXRecordDecl());
2760
2761 // We can see ambiguity here if the allocation function is found in
2762 // multiple base classes.
2763 if (R.isAmbiguous())
2764 return true;
2765
2766 // If this lookup fails to find the name, or if the allocated type is not
2767 // a class type, the allocation function's name is looked up in the
2768 // global scope.
2769 if (R.empty()) {
2770 if (NewScope == AFS_Class)
2771 return true;
2772
2773 LookupQualifiedName(R, Context.getTranslationUnitDecl());
2774 }
2775
2776 if (getLangOpts().OpenCLCPlusPlus && R.empty()) {
2777 if (PlaceArgs.empty()) {
2778 Diag(StartLoc, diag::err_openclcxx_not_supported) << "default new";
2779 } else {
2780 Diag(StartLoc, diag::err_openclcxx_placement_new);
2781 }
2782 return true;
2783 }
2784
2785 assert(!R.empty() && "implicitly declared allocation functions not found");
2786 assert(!R.isAmbiguous() && "global allocation functions are ambiguous");
2787
2788 // We do our own custom access checks below.
2789 R.suppressDiagnostics();
2790
2791 if (resolveAllocationOverload(*this, R, Range, AllocArgs, PassAlignment,
2792 OperatorNew, /*Candidates=*/nullptr,
2793 /*AlignArg=*/nullptr, Diagnose))
2794 return true;
2795 }
2796
2797 // We don't need an operator delete if we're running under -fno-exceptions.
2798 if (!getLangOpts().Exceptions) {
2799 OperatorDelete = nullptr;
2800 return false;
2801 }
2802
2803 // Note, the name of OperatorNew might have been changed from array to
2804 // non-array by resolveAllocationOverload.
2805 DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
2806 OperatorNew->getDeclName().getCXXOverloadedOperator() == OO_Array_New
2807 ? OO_Array_Delete
2808 : OO_Delete);
2809
2810 // C++ [expr.new]p19:
2811 //
2812 // If the new-expression begins with a unary :: operator, the
2813 // deallocation function's name is looked up in the global
2814 // scope. Otherwise, if the allocated type is a class type T or an
2815 // array thereof, the deallocation function's name is looked up in
2816 // the scope of T. If this lookup fails to find the name, or if
2817 // the allocated type is not a class type or array thereof, the
2818 // deallocation function's name is looked up in the global scope.
2819 LookupResult FoundDelete(*this, DeleteName, StartLoc, LookupOrdinaryName);
2820 if (AllocElemType->isRecordType() && DeleteScope != AFS_Global) {
2821 auto *RD =
2822 cast<CXXRecordDecl>(AllocElemType->castAs<RecordType>()->getDecl());
2823 LookupQualifiedName(FoundDelete, RD);
2824 }
2825 if (FoundDelete.isAmbiguous())
2826 return true; // FIXME: clean up expressions?
2827
2828 // Filter out any destroying operator deletes. We can't possibly call such a
2829 // function in this context, because we're handling the case where the object
2830 // was not successfully constructed.
2831 // FIXME: This is not covered by the language rules yet.
2832 {
2833 LookupResult::Filter Filter = FoundDelete.makeFilter();
2834 while (Filter.hasNext()) {
2835 auto *FD = dyn_cast<FunctionDecl>(Filter.next()->getUnderlyingDecl());
2836 if (FD && FD->isDestroyingOperatorDelete())
2837 Filter.erase();
2838 }
2839 Filter.done();
2840 }
2841
2842 bool FoundGlobalDelete = FoundDelete.empty();
2843 if (FoundDelete.empty()) {
2844 FoundDelete.clear(LookupOrdinaryName);
2845
2846 if (DeleteScope == AFS_Class)
2847 return true;
2848
2848
2849 DeclareGlobalNewDelete();
2850 LookupQualifiedName(FoundDelete, Context.getTranslationUnitDecl());
2851 }
2852
2853 FoundDelete.suppressDiagnostics();
2854
2855 llvm::SmallVector<std::pair<DeclAccessPair, FunctionDecl *>, 2> Matches;
2856
2857 // Whether we're looking for a placement operator delete is dictated
2858 // by whether we selected a placement operator new, not by whether
2859 // we had explicit placement arguments. This matters for things like
2860 // struct A { void *operator new(size_t, int = 0); ... };
2861 // A *a = new A()
2862 //
2863 // We don't have any definition for what a "placement allocation function"
2864 // is, but we assume it's any allocation function whose
2865 // parameter-declaration-clause is anything other than (size_t).
2866 //
2867 // FIXME: Should (size_t, std::align_val_t) also be considered non-placement?
2868 // This affects whether an exception from the constructor of an overaligned
2869 // type uses the sized or non-sized form of aligned operator delete.
2870 bool isPlacementNew = !PlaceArgs.empty() || OperatorNew->param_size() != 1 ||
2871 OperatorNew->isVariadic();
2872
2873 if (isPlacementNew) {
2874 // C++ [expr.new]p20:
2875 // A declaration of a placement deallocation function matches the
2876 // declaration of a placement allocation function if it has the
2877 // same number of parameters and, after parameter transformations
2878 // (8.3.5), all parameter types except the first are
2879 // identical. [...]
2880 //
2881 // To perform this comparison, we compute the function type that
2882 // the deallocation function should have, and use that type both
2883 // for template argument deduction and for comparison purposes.
2884 QualType ExpectedFunctionType;
2885 {
2886 auto *Proto = OperatorNew->getType()->castAs<FunctionProtoType>();
2887
2888 SmallVector<QualType, 4> ArgTypes;
2889 ArgTypes.push_back(Context.VoidPtrTy);
2890 for (unsigned I = 1, N = Proto->getNumParams(); I < N; ++I)
2891 ArgTypes.push_back(Proto->getParamType(I));
2892
2893 FunctionProtoType::ExtProtoInfo EPI;
2894 // FIXME: This is not part of the standard's rule.
2895 EPI.Variadic = Proto->isVariadic();
2896
2897 ExpectedFunctionType
2898 = Context.getFunctionType(Context.VoidTy, ArgTypes, EPI);
2899 }
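// Illustrative note (hypothetical user code, not from the clang sources): for
//   struct A {
//     void *operator new(std::size_t, int cookie);
//     void operator delete(void *, int cookie);  // matching placement delete
//   };
// the expected deallocation type built above is void(void *, int); the second
// declaration matches it, so that operator delete is the one invoked if A's
// constructor throws during 'new (42) A'.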
2900
2901 for (LookupResult::iterator D = FoundDelete.begin(),
2902 DEnd = FoundDelete.end();
2903 D != DEnd; ++D) {
2904 FunctionDecl *Fn = nullptr;
2905 if (FunctionTemplateDecl *FnTmpl =
2906 dyn_cast<FunctionTemplateDecl>((*D)->getUnderlyingDecl())) {
2907 // Perform template argument deduction to try to match the
2908 // expected function type.
2909 TemplateDeductionInfo Info(StartLoc);
2910 if (DeduceTemplateArguments(FnTmpl, nullptr, ExpectedFunctionType, Fn,
2911 Info) != TemplateDeductionResult::Success)
2912 continue;
2913 } else
2914 Fn = cast<FunctionDecl>((*D)->getUnderlyingDecl());
2915
2916 if (Context.hasSameType(adjustCCAndNoReturn(Fn->getType(),
2917 ExpectedFunctionType,
2918 /*AdjustExceptionSpec*/true),
2919 ExpectedFunctionType))
2920 Matches.push_back(std::make_pair(D.getPair(), Fn));
2921 }
2922
2923 if (getLangOpts().CUDA)
2924 CUDA().EraseUnwantedMatches(getCurFunctionDecl(/*AllowLambda=*/true),
2925 Matches);
2926 } else {
2927 // C++1y [expr.new]p22:
2928 // For a non-placement allocation function, the normal deallocation
2929 // function lookup is used
2930 //
2931 // Per [expr.delete]p10, this lookup prefers a member operator delete
2932 // without a size_t argument, but prefers a non-member operator delete
2933 // with a size_t where possible (which it always is in this case).
2934 llvm::SmallVector<UsualDeallocFnInfo, 4> BestDeallocFns;
2935 UsualDeallocFnInfo Selected = resolveDeallocationOverload(
2936 *this, FoundDelete, /*WantSize*/ FoundGlobalDelete,
2937 /*WantAlign*/ hasNewExtendedAlignment(*this, AllocElemType),
2938 &BestDeallocFns);
2939 if (Selected)
2940 Matches.push_back(std::make_pair(Selected.Found, Selected.FD));
2941 else {
2942 // If we failed to select an operator, all remaining functions are viable
2943 // but ambiguous.
2944 for (auto Fn : BestDeallocFns)
2945 Matches.push_back(std::make_pair(Fn.Found, Fn.FD));
2946 }
2947 }
2948
2949 // C++ [expr.new]p20:
2950 // [...] If the lookup finds a single matching deallocation
2951 // function, that function will be called; otherwise, no
2952 // deallocation function will be called.
2953 if (Matches.size() == 1) {
2954 OperatorDelete = Matches[0].second;
2955
2956 // C++1z [expr.new]p23:
2957 // If the lookup finds a usual deallocation function (3.7.4.2)
2958 // with a parameter of type std::size_t and that function, considered
2959 // as a placement deallocation function, would have been
2960 // selected as a match for the allocation function, the program
2961 // is ill-formed.
2962 if (getLangOpts().CPlusPlus11 && isPlacementNew &&
2963 isNonPlacementDeallocationFunction(*this, OperatorDelete)) {
2964 UsualDeallocFnInfo Info(*this,
2965 DeclAccessPair::make(OperatorDelete, AS_public));
2966 // Core issue, per mail to core reflector, 2016-10-09:
2967 // If this is a member operator delete, and there is a corresponding
2968 // non-sized member operator delete, this isn't /really/ a sized
2969 // deallocation function, it just happens to have a size_t parameter.
2970 bool IsSizedDelete = Info.HasSizeT;
2971 if (IsSizedDelete && !FoundGlobalDelete) {
2972 auto NonSizedDelete =
2973 resolveDeallocationOverload(*this, FoundDelete, /*WantSize*/false,
2974 /*WantAlign*/Info.HasAlignValT);
2975 if (NonSizedDelete && !NonSizedDelete.HasSizeT &&
2976 NonSizedDelete.HasAlignValT == Info.HasAlignValT)
2977 IsSizedDelete = false;
2978 }
2979
2980 if (IsSizedDelete) {
2981 SourceRange R = PlaceArgs.empty()
2982 ? SourceRange()
2983 : SourceRange(PlaceArgs.front()->getBeginLoc(),
2984 PlaceArgs.back()->getEndLoc());
2985 Diag(StartLoc, diag::err_placement_new_non_placement_delete) << R;
2986 if (!OperatorDelete->isImplicit())
2987 Diag(OperatorDelete->getLocation(), diag::note_previous_decl)
2988 << DeleteName;
2989 }
2990 }
2991
2992 CheckAllocationAccess(StartLoc, Range, FoundDelete.getNamingClass(),
2993 Matches[0].first);
2994 } else if (!Matches.empty()) {
2995 // We found multiple suitable operators. Per [expr.new]p20, that means we
2996 // call no 'operator delete' function, but we should at least warn the user.
2997 // FIXME: Suppress this warning if the construction cannot throw.
2998 Diag(StartLoc, diag::warn_ambiguous_suitable_delete_function_found)
2999 << DeleteName << AllocElemType;
3000
3001 for (auto &Match : Matches)
3002 Diag(Match.second->getLocation(),
3003 diag::note_member_declared_here) << DeleteName;
3004 }
3005
3006 return false;
3007}
3008
3009 void Sema::DeclareGlobalNewDelete() {
3010 if (GlobalNewDeleteDeclared)
3011 return;
3012
3013 // The implicitly declared new and delete operators
3014 // are not supported in OpenCL.
3015 if (getLangOpts().OpenCLCPlusPlus)
3016 return;
3017
3018 // C++ [basic.stc.dynamic.general]p2:
3019 // The library provides default definitions for the global allocation
3020 // and deallocation functions. Some global allocation and deallocation
3021 // functions are replaceable ([new.delete]); these are attached to the
3022 // global module ([module.unit]).
3023 if (getLangOpts().CPlusPlusModules && getCurrentModule())
3024 PushGlobalModuleFragment(SourceLocation());
3025
3026 // C++ [basic.std.dynamic]p2:
3027 // [...] The following allocation and deallocation functions (18.4) are
3028 // implicitly declared in global scope in each translation unit of a
3029 // program
3030 //
3031 // C++03:
3032 // void* operator new(std::size_t) throw(std::bad_alloc);
3033 // void* operator new[](std::size_t) throw(std::bad_alloc);
3034 // void operator delete(void*) throw();
3035 // void operator delete[](void*) throw();
3036 // C++11:
3037 // void* operator new(std::size_t);
3038 // void* operator new[](std::size_t);
3039 // void operator delete(void*) noexcept;
3040 // void operator delete[](void*) noexcept;
3041 // C++1y:
3042 // void* operator new(std::size_t);
3043 // void* operator new[](std::size_t);
3044 // void operator delete(void*) noexcept;
3045 // void operator delete[](void*) noexcept;
3046 // void operator delete(void*, std::size_t) noexcept;
3047 // void operator delete[](void*, std::size_t) noexcept;
3048 //
3049 // These implicit declarations introduce only the function names operator
3050 // new, operator new[], operator delete, operator delete[].
3051 //
3052 // Here, we need to refer to std::bad_alloc, so we will implicitly declare
3053 // "std" or "bad_alloc" as necessary to form the exception specification.
3054 // However, we do not make these implicit declarations visible to name
3055 // lookup.
3056 if (!StdBadAlloc && !getLangOpts().CPlusPlus11) {
3057 // The "std::bad_alloc" class has not yet been declared, so build it
3058 // implicitly.
3059 StdBadAlloc = CXXRecordDecl::Create(Context, TagTypeKind::Class,
3060 getOrCreateStdNamespace(),
3061 SourceLocation(), SourceLocation(),
3062 &PP.getIdentifierTable().get("bad_alloc"), nullptr);
3063 getStdBadAlloc()->setImplicit(true);
3064
3065 // The implicitly declared "std::bad_alloc" should live in global module
3066 // fragment.
3067 if (TheGlobalModuleFragment) {
3068 getStdBadAlloc()->setModuleOwnershipKind(
3069 Decl::ModuleOwnershipKind::ReachableWhenImported);
3070 getStdBadAlloc()->setLocalOwningModule(TheGlobalModuleFragment);
3071 }
3072 }
3073 if (!StdAlignValT && getLangOpts().AlignedAllocation) {
3074 // The "std::align_val_t" enum class has not yet been declared, so build it
3075 // implicitly.
3076 auto *AlignValT = EnumDecl::Create(
3077 Context, getOrCreateStdNamespace(), SourceLocation(), SourceLocation(),
3078 &PP.getIdentifierTable().get("align_val_t"), nullptr, true, true, true);
3079
3080 // The implicitly declared "std::align_val_t" should live in global module
3081 // fragment.
3082 if (TheGlobalModuleFragment) {
3083 AlignValT->setModuleOwnershipKind(
3084 Decl::ModuleOwnershipKind::ReachableWhenImported);
3085 AlignValT->setLocalOwningModule(TheGlobalModuleFragment);
3086 }
3087
3088 AlignValT->setIntegerType(Context.getSizeType());
3089 AlignValT->setPromotionType(Context.getSizeType());
3090 AlignValT->setImplicit(true);
3091
3092 StdAlignValT = AlignValT;
3093 }
3094
3095 GlobalNewDeleteDeclared = true;
3096
3097 QualType VoidPtr = Context.getPointerType(Context.VoidTy);
3098 QualType SizeT = Context.getSizeType();
3099
3100 auto DeclareGlobalAllocationFunctions = [&](OverloadedOperatorKind Kind,
3101 QualType Return, QualType Param) {
3102 llvm::SmallVector<QualType, 3> Params;
3103 Params.push_back(Param);
3104
3105 // Create up to four variants of the function (sized/aligned).
3106 bool HasSizedVariant = getLangOpts().SizedDeallocation &&
3107 (Kind == OO_Delete || Kind == OO_Array_Delete);
3108 bool HasAlignedVariant = getLangOpts().AlignedAllocation;
3109
3110 int NumSizeVariants = (HasSizedVariant ? 2 : 1);
3111 int NumAlignVariants = (HasAlignedVariant ? 2 : 1);
3112 for (int Sized = 0; Sized < NumSizeVariants; ++Sized) {
3113 if (Sized)
3114 Params.push_back(SizeT);
3115
3116 for (int Aligned = 0; Aligned < NumAlignVariants; ++Aligned) {
3117 if (Aligned)
3118 Params.push_back(Context.getTypeDeclType(getStdAlignValT()));
3119
3120 DeclareGlobalAllocationFunction(
3121 Context.DeclarationNames.getCXXOperatorName(Kind), Return, Params);
3122
3123 if (Aligned)
3124 Params.pop_back();
3125 }
3126 }
3127 };
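// Illustrative note (assumed output, not from the clang sources): with both
// -fsized-deallocation and -faligned-allocation in effect, the loop above
// declares up to four implicit variants per deallocation operator, e.g. for
// operator delete:
//   void operator delete(void *) noexcept;
//   void operator delete(void *, std::align_val_t) noexcept;
//   void operator delete(void *, std::size_t) noexcept;
//   void operator delete(void *, std::size_t, std::align_val_t) noexcept;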
3128
3129 DeclareGlobalAllocationFunctions(OO_New, VoidPtr, SizeT);
3130 DeclareGlobalAllocationFunctions(OO_Array_New, VoidPtr, SizeT);
3131 DeclareGlobalAllocationFunctions(OO_Delete, Context.VoidTy, VoidPtr);
3132 DeclareGlobalAllocationFunctions(OO_Array_Delete, Context.VoidTy, VoidPtr);
3133
3134 if (getLangOpts().CPlusPlusModules && getCurrentModule())
3135 PopGlobalModuleFragment();
3136}
3137
3138/// DeclareGlobalAllocationFunction - Declares a single implicit global
3139/// allocation function if it doesn't already exist.
3140 void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
3141 QualType Return,
3142 ArrayRef<QualType> Params) {
3143 DeclContext *GlobalCtx = Context.getTranslationUnitDecl();
3144
3145 // Check if this function is already declared.
3146 DeclContext::lookup_result R = GlobalCtx->lookup(Name);
3147 for (DeclContext::lookup_iterator Alloc = R.begin(), AllocEnd = R.end();
3148 Alloc != AllocEnd; ++Alloc) {
3149 // Only look at non-template functions, as it is the predefined,
3150 // non-templated allocation function we are trying to declare here.
3151 if (FunctionDecl *Func = dyn_cast<FunctionDecl>(*Alloc)) {
3152 if (Func->getNumParams() == Params.size()) {
3153 llvm::SmallVector<QualType, 3> FuncParams;
3154 for (auto *P : Func->parameters())
3155 FuncParams.push_back(
3156 Context.getCanonicalType(P->getType().getUnqualifiedType()));
3157 if (llvm::ArrayRef(FuncParams) == Params) {
3158 // Make the function visible to name lookup, even if we found it in
3159 // an unimported module. It either is an implicitly-declared global
3160 // allocation function, or is suppressing that function.
3161 Func->setVisibleDespiteOwningModule();
3162 return;
3163 }
3164 }
3165 }
3166 }
3167
3168 FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention(
3169 /*IsVariadic=*/false, /*IsCXXMethod=*/false, /*IsBuiltin=*/true));
3170
3171 QualType BadAllocType;
3172 bool HasBadAllocExceptionSpec
3173 = (Name.getCXXOverloadedOperator() == OO_New ||
3174 Name.getCXXOverloadedOperator() == OO_Array_New);
3175 if (HasBadAllocExceptionSpec) {
3176 if (!getLangOpts().CPlusPlus11) {
3177 BadAllocType = Context.getTypeDeclType(getStdBadAlloc());
3178 assert(StdBadAlloc && "Must have std::bad_alloc declared");
3179 EPI.ExceptionSpec.Type = EST_Dynamic;
3180 EPI.ExceptionSpec.Exceptions = llvm::ArrayRef(BadAllocType);
3181 }
3182 if (getLangOpts().NewInfallible) {
3183 EPI.ExceptionSpec.Type = EST_DynamicNone;
3184 }
3185 } else {
3186 EPI.ExceptionSpec =
3187 getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;
3188 }
3189
3190 auto CreateAllocationFunctionDecl = [&](Attr *ExtraAttr) {
3191 QualType FnType = Context.getFunctionType(Return, Params, EPI);
3192 FunctionDecl *Alloc = FunctionDecl::Create(
3193 Context, GlobalCtx, SourceLocation(), SourceLocation(), Name, FnType,
3194 /*TInfo=*/nullptr, SC_None, getCurFPFeatures().isFPConstrained(), false,
3195 true);
3196 Alloc->setImplicit();
3197 // Global allocation functions should always be visible.
3198 Alloc->setVisibleDespiteOwningModule();
3199
3200 if (HasBadAllocExceptionSpec && getLangOpts().NewInfallible &&
3201 !getLangOpts().CheckNew)
3202 Alloc->addAttr(
3203 ReturnsNonNullAttr::CreateImplicit(Context, Alloc->getLocation()));
3204
3205 // C++ [basic.stc.dynamic.general]p2:
3206 // The library provides default definitions for the global allocation
3207 // and deallocation functions. Some global allocation and deallocation
3208 // functions are replaceable ([new.delete]); these are attached to the
3209 // global module ([module.unit]).
3210 //
3211 // In the language wording, these functions are attached to the global
3212 // module all the time. But in the implementation, the global module
3213 // is only meaningful when we're in a module unit. So here we attach
3214 // these allocation functions to global module conditionally.
3215 if (TheGlobalModuleFragment) {
3216 Alloc->setModuleOwnershipKind(
3217 Decl::ModuleOwnershipKind::ReachableWhenImported);
3218 Alloc->setLocalOwningModule(TheGlobalModuleFragment);
3219 }
3220
3222 Alloc->addAttr(VisibilityAttr::CreateImplicit(
3224 ? VisibilityAttr::Hidden
3226 ? VisibilityAttr::Protected
3227 : VisibilityAttr::Default));
3228
3229 SmallVector<ParmVarDecl *, 3> ParamDecls;
3230 for (QualType T : Params) {
3231 ParamDecls.push_back(ParmVarDecl::Create(
3232 Context, Alloc, SourceLocation(), SourceLocation(), nullptr, T,
3233 /*TInfo=*/nullptr, SC_None, nullptr));
3234 ParamDecls.back()->setImplicit();
3235 }
3236 Alloc->setParams(ParamDecls);
3237 if (ExtraAttr)
3238 Alloc->addAttr(ExtraAttr);
3241 IdResolver.tryAddTopLevelDecl(Alloc, Name);
3242 };
3243
3244 if (!LangOpts.CUDA)
3245 CreateAllocationFunctionDecl(nullptr);
3246 else {
3247 // Host and device get their own declaration so each can be
3248 // defined or re-declared independently.
3249 CreateAllocationFunctionDecl(CUDAHostAttr::CreateImplicit(Context));
3250 CreateAllocationFunctionDecl(CUDADeviceAttr::CreateImplicit(Context));
3251 }
3252}
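// Illustrative sketch (added for exposition, not lines of SemaExprCXX.cpp):
// at the user level, the implicit declarations produced by the code above
// correspond roughly to
//
//   void *operator new(std::size_t);
//   void *operator new[](std::size_t);
//   void operator delete(void *) noexcept;
//   void operator delete[](void *) noexcept;
//
// with the exact exception specifications, and any sized or aligned variants,
// depending on the language options handled by the EPI logic above.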
3253
3254FunctionDecl *Sema::FindUsualDeallocationFunction(SourceLocation StartLoc,
3255 bool CanProvideSize,
3256 bool Overaligned,
3257 DeclarationName Name) {
3258 DeclareGlobalNewDelete();
3259
3260 LookupResult FoundDelete(*this, Name, StartLoc, LookupOrdinaryName);
3261 LookupQualifiedName(FoundDelete, Context.getTranslationUnitDecl());
3262
3263 // FIXME: It's possible for this to result in ambiguity, through a
3264 // user-declared variadic operator delete or the enable_if attribute. We
3265 // should probably not consider those cases to be usual deallocation
3266 // functions. But for now we just make an arbitrary choice in that case.
3267 auto Result = resolveDeallocationOverload(*this, FoundDelete, CanProvideSize,
3268 Overaligned);
3269 assert(Result.FD && "operator delete missing from global scope?");
3270 return Result.FD;
3271}
3272
3273FunctionDecl *Sema::FindDeallocationFunctionForDestructor(SourceLocation Loc,
3274 CXXRecordDecl *RD) {
3275 DeclarationName Name =
3276 Context.DeclarationNames.getCXXOperatorName(OO_Delete);
3277 FunctionDecl *OperatorDelete = nullptr;
3278 if (FindDeallocationFunction(Loc, RD, Name, OperatorDelete))
3279 return nullptr;
3280 if (OperatorDelete)
3281 return OperatorDelete;
3282
3283 // If there's no class-specific operator delete, look up the global
3284 // non-array delete.
3285 return FindUsualDeallocationFunction(
3286 Loc, true, hasNewExtendedAlignment(*this, Context.getRecordType(RD)),
3287 Name);
3288}
3289
3290bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
3291 DeclarationName Name,
3292 FunctionDecl *&Operator, bool Diagnose,
3293 bool WantSize, bool WantAligned) {
3294 LookupResult Found(*this, Name, StartLoc, LookupOrdinaryName);
3295 // Try to find operator delete/operator delete[] in class scope.
3296 LookupQualifiedName(Found, RD);
3297
3298 if (Found.isAmbiguous())
3299 return true;
3300
3301 Found.suppressDiagnostics();
3302
3303 bool Overaligned =
3304 WantAligned || hasNewExtendedAlignment(*this, Context.getRecordType(RD));
3305
3306 // C++17 [expr.delete]p10:
3307 // If the deallocation functions have class scope, the one without a
3308 // parameter of type std::size_t is selected.
3309 llvm::SmallVector<UsualDeallocFnInfo, 4> Matches;
3310 resolveDeallocationOverload(*this, Found, /*WantSize*/ WantSize,
3311 /*WantAlign*/ Overaligned, &Matches);
3312
3313 // If we could find an overload, use it.
3314 if (Matches.size() == 1) {
3315 Operator = cast<CXXMethodDecl>(Matches[0].FD);
3316
3317 // FIXME: DiagnoseUseOfDecl?
3318 if (Operator->isDeleted()) {
3319 if (Diagnose) {
3320 StringLiteral *Msg = Operator->getDeletedMessage();
3321 Diag(StartLoc, diag::err_deleted_function_use)
3322 << (Msg != nullptr) << (Msg ? Msg->getString() : StringRef());
3323 NoteDeletedFunction(Operator);
3324 }
3325 return true;
3326 }
3327
3328 if (CheckAllocationAccess(StartLoc, SourceRange(), Found.getNamingClass(),
3329 Matches[0].Found, Diagnose) == AR_inaccessible)
3330 return true;
3331
3332 return false;
3333 }
3334
3335 // We found multiple suitable operators; complain about the ambiguity.
3336 // FIXME: The standard doesn't say to do this; it appears that the intent
3337 // is that this should never happen.
3338 if (!Matches.empty()) {
3339 if (Diagnose) {
3340 Diag(StartLoc, diag::err_ambiguous_suitable_delete_member_function_found)
3341 << Name << RD;
3342 for (auto &Match : Matches)
3343 Diag(Match.FD->getLocation(), diag::note_member_declared_here) << Name;
3344 }
3345 return true;
3346 }
3347
3348 // We did find operator delete/operator delete[] declarations, but
3349 // none of them were suitable.
3350 if (!Found.empty()) {
3351 if (Diagnose) {
3352 Diag(StartLoc, diag::err_no_suitable_delete_member_function_found)
3353 << Name << RD;
3354
3355 for (NamedDecl *D : Found)
3356 Diag(D->getUnderlyingDecl()->getLocation(),
3357 diag::note_member_declared_here) << Name;
3358 }
3359 return true;
3360 }
3361
3362 Operator = nullptr;
3363 return false;
3364}
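// Illustrative example (hypothetical user code, added for exposition): when a
// class declares both a sized and an unsized usual operator delete, the
// C++17 [expr.delete]p10 rule applied above picks the one without the
// std::size_t parameter.
//
//   struct S {
//     void operator delete(void *) noexcept;               // selected
//     void operator delete(void *, std::size_t) noexcept;  // not selected
//   };
//   void f(S *s) { delete s; }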
3365
3366namespace {
3367/// Checks whether a delete-expression and the new-expression used for
3368/// initializing the deletee have the same array form.
3369class MismatchingNewDeleteDetector {
3370public:
3371 enum MismatchResult {
3372 /// Indicates that there is no mismatch or a mismatch cannot be proven.
3373 NoMismatch,
3374 /// Indicates that variable is initialized with mismatching form of \a new.
3375 VarInitMismatches,
3376 /// Indicates that member is initialized with mismatching form of \a new.
3377 MemberInitMismatches,
3378 /// Indicates that one or more constructors' definitions could not be
3379 /// analyzed, and they will be checked again at the end of the translation unit.
3380 AnalyzeLater
3381 };
3382
3383 /// \param EndOfTU True if this is the final analysis at the end of the
3384 /// translation unit. False if this is the initial analysis at the point the
3385 /// delete-expression was encountered.
3386 explicit MismatchingNewDeleteDetector(bool EndOfTU)
3387 : Field(nullptr), IsArrayForm(false), EndOfTU(EndOfTU),
3388 HasUndefinedConstructors(false) {}
3389
3390 /// Checks whether pointee of a delete-expression is initialized with
3391 /// matching form of new-expression.
3392 ///
3393 /// If the return value is \c VarInitMismatches or \c MemberInitMismatches at
3394 /// the point where the delete-expression is encountered, then a warning is
3395 /// issued immediately. If the return value is \c AnalyzeLater at that point,
3396 /// then the member will be analyzed at the end of the translation unit.
3397 /// \c AnalyzeLater is returned iff at least one constructor couldn't be
3398 /// analyzed. If at least one constructor initializes the member with the
3399 /// matching form of \c new, the return value is \c NoMismatch.
3400 MismatchResult analyzeDeleteExpr(const CXXDeleteExpr *DE);
3401 /// Analyzes a class member.
3402 /// \param Field Class member to analyze.
3403 /// \param DeleteWasArrayForm Array form-ness of the delete-expression used
3404 /// for deleting the \p Field.
3405 MismatchResult analyzeField(FieldDecl *Field, bool DeleteWasArrayForm);
3406 FieldDecl *Field;
3407 /// List of mismatching new-expressions used for initialization of the pointee.
3408 llvm::SmallVector<const CXXNewExpr *, 4> NewExprs;
3409 /// Indicates whether delete-expression was in array form.
3410 bool IsArrayForm;
3411
3412private:
3413 const bool EndOfTU;
3414 /// Indicates that there is at least one constructor without a body.
3415 bool HasUndefinedConstructors;
3416 /// Returns \c CXXNewExpr from given initialization expression.
3417 /// \param E Expression used for initializing pointee in delete-expression.
3418 /// E can be a single-element \c InitListExpr consisting of new-expression.
3419 const CXXNewExpr *getNewExprFromInitListOrExpr(const Expr *E);
3420 /// Returns whether member is initialized with mismatching form of
3421 /// \c new either by the member initializer or in-class initialization.
3422 ///
3423 /// If the bodies of all constructors are not visible at the end of the
3424 /// translation unit, or at least one constructor initializes the member with
3425 /// the matching form of \c new, a mismatch cannot be proven, and this
3426 /// function will return \c NoMismatch.
3427 MismatchResult analyzeMemberExpr(const MemberExpr *ME);
3428 /// Returns whether variable is initialized with mismatching form of
3429 /// \c new.
3430 ///
3431 /// If variable is initialized with matching form of \c new or variable is not
3432 /// initialized with a \c new expression, this function will return true.
3433 /// If variable is initialized with mismatching form of \c new, returns false.
3434 /// \param D Variable to analyze.
3435 bool hasMatchingVarInit(const DeclRefExpr *D);
3436 /// Checks whether the constructor initializes pointee with mismatching
3437 /// form of \c new.
3438 ///
3439 /// Returns true if the member is initialized with the matching form of \c new
3440 /// in the member initializer list. Returns false if the member is initialized
3441 /// with a mismatching form of \c new in this constructor's initializer, if the
3442 /// constructor isn't defined at the point where the delete-expression is seen,
3443 /// or if the member isn't initialized by the constructor.
3444 bool hasMatchingNewInCtor(const CXXConstructorDecl *CD);
3445 /// Checks whether member is initialized with matching form of
3446 /// \c new in member initializer list.
3447 bool hasMatchingNewInCtorInit(const CXXCtorInitializer *CI);
3448 /// Checks whether member is initialized with mismatching form of \c new by
3449 /// in-class initializer.
3450 MismatchResult analyzeInClassInitializer();
3451};
3452}
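// Illustrative example (hypothetical user code, added for exposition) of the
// mismatches the detector above reports under -Wmismatched-new-delete:
//
//   struct A {
//     int *P = new int[4];   // array new in an in-class initializer
//     ~A() { delete P; }     // non-array delete -> MemberInitMismatches
//   };
//   void f() {
//     int *Q = new int;      // non-array new
//     delete[] Q;            // array delete -> VarInitMismatches
//   }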
3453
3454MismatchingNewDeleteDetector::MismatchResult
3455MismatchingNewDeleteDetector::analyzeDeleteExpr(const CXXDeleteExpr *DE) {
3456 NewExprs.clear();
3457 assert(DE && "Expected delete-expression");
3458 IsArrayForm = DE->isArrayForm();
3459 const Expr *E = DE->getArgument()->IgnoreParenImpCasts();
3460 if (const MemberExpr *ME = dyn_cast<const MemberExpr>(E)) {
3461 return analyzeMemberExpr(ME);
3462 } else if (const DeclRefExpr *D = dyn_cast<const DeclRefExpr>(E)) {
3463 if (!hasMatchingVarInit(D))
3464 return VarInitMismatches;
3465 }
3466 return NoMismatch;
3467}
3468
3469const CXXNewExpr *
3470MismatchingNewDeleteDetector::getNewExprFromInitListOrExpr(const Expr *E) {
3471 assert(E != nullptr && "Expected a valid initializer expression");
3472 E = E->IgnoreParenImpCasts();
3473 if (const InitListExpr *ILE = dyn_cast<const InitListExpr>(E)) {
3474 if (ILE->getNumInits() == 1)
3475 E = dyn_cast<const CXXNewExpr>(ILE->getInit(0)->IgnoreParenImpCasts());
3476 }
3477
3478 return dyn_cast_or_null<const CXXNewExpr>(E);
3479}
3480
3481bool MismatchingNewDeleteDetector::hasMatchingNewInCtorInit(
3482 const CXXCtorInitializer *CI) {
3483 const CXXNewExpr *NE = nullptr;
3484 if (Field == CI->getMember() &&
3485 (NE = getNewExprFromInitListOrExpr(CI->getInit()))) {
3486 if (NE->isArray() == IsArrayForm)
3487 return true;
3488 else
3489 NewExprs.push_back(NE);
3490 }
3491 return false;
3492}
3493
3494bool MismatchingNewDeleteDetector::hasMatchingNewInCtor(
3495 const CXXConstructorDecl *CD) {
3496 if (CD->isImplicit())
3497 return false;
3498 const FunctionDecl *Definition = CD;
3499 if (!CD->isThisDeclarationADefinition() && !CD->isDefined(Definition)) {
3500 HasUndefinedConstructors = true;
3501 return EndOfTU;
3502 }
3503 for (const auto *CI : cast<const CXXConstructorDecl>(Definition)->inits()) {
3504 if (hasMatchingNewInCtorInit(CI))
3505 return true;
3506 }
3507 return false;
3508}
3509
3510MismatchingNewDeleteDetector::MismatchResult
3511MismatchingNewDeleteDetector::analyzeInClassInitializer() {
3512 assert(Field != nullptr && "This should be called only for members");
3513 const Expr *InitExpr = Field->getInClassInitializer();
3514 if (!InitExpr)
3515 return EndOfTU ? NoMismatch : AnalyzeLater;
3516 if (const CXXNewExpr *NE = getNewExprFromInitListOrExpr(InitExpr)) {
3517 if (NE->isArray() != IsArrayForm) {
3518 NewExprs.push_back(NE);
3519 return MemberInitMismatches;
3520 }
3521 }
3522 return NoMismatch;
3523}
3524
3525MismatchingNewDeleteDetector::MismatchResult
3526MismatchingNewDeleteDetector::analyzeField(FieldDecl *Field,
3527 bool DeleteWasArrayForm) {
3528 assert(Field != nullptr && "Analysis requires a valid class member.");
3529 this->Field = Field;
3530 IsArrayForm = DeleteWasArrayForm;
3531 const CXXRecordDecl *RD = cast<const CXXRecordDecl>(Field->getParent());
3532 for (const auto *CD : RD->ctors()) {
3533 if (hasMatchingNewInCtor(CD))
3534 return NoMismatch;
3535 }
3536 if (HasUndefinedConstructors)
3537 return EndOfTU ? NoMismatch : AnalyzeLater;
3538 if (!NewExprs.empty())
3539 return MemberInitMismatches;
3540 return Field->hasInClassInitializer() ? analyzeInClassInitializer()
3541 : NoMismatch;
3542}
3543
3544MismatchingNewDeleteDetector::MismatchResult
3545MismatchingNewDeleteDetector::analyzeMemberExpr(const MemberExpr *ME) {
3546 assert(ME != nullptr && "Expected a member expression");
3547 if (FieldDecl *F = dyn_cast<FieldDecl>(ME->getMemberDecl()))
3548 return analyzeField(F, IsArrayForm);
3549 return NoMismatch;
3550}
3551
3552bool MismatchingNewDeleteDetector::hasMatchingVarInit(const DeclRefExpr *D) {
3553 const CXXNewExpr *NE = nullptr;
3554 if (const VarDecl *VD = dyn_cast<const VarDecl>(D->getDecl())) {
3555 if (VD->hasInit() && (NE = getNewExprFromInitListOrExpr(VD->getInit())) &&
3556 NE->isArray() != IsArrayForm) {
3557 NewExprs.push_back(NE);
3558 }
3559 }
3560 return NewExprs.empty();
3561}
3562
3563static void
3564DiagnoseMismatchedNewDelete(Sema &SemaRef, SourceLocation DeleteLoc,
3565 const MismatchingNewDeleteDetector &Detector) {
3566 SourceLocation EndOfDelete = SemaRef.getLocForEndOfToken(DeleteLoc);
3567 FixItHint H;
3568 if (!Detector.IsArrayForm)
3569 H = FixItHint::CreateInsertion(EndOfDelete, "[]");
3570 else {
3571 SourceLocation RSquare = Lexer::findLocationAfterToken(
3572 DeleteLoc, tok::l_square, SemaRef.getSourceManager(),
3573 SemaRef.getLangOpts(), true);
3574 if (RSquare.isValid())
3575 H = FixItHint::CreateRemoval(SourceRange(EndOfDelete, RSquare));
3576 }
3577 SemaRef.Diag(DeleteLoc, diag::warn_mismatched_delete_new)
3578 << Detector.IsArrayForm << H;
3579
3580 for (const auto *NE : Detector.NewExprs)
3581 SemaRef.Diag(NE->getExprLoc(), diag::note_allocated_here)
3582 << Detector.IsArrayForm;
3583}
3584
3585void Sema::AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE) {
3586 if (Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation()))
3587 return;
3588 MismatchingNewDeleteDetector Detector(/*EndOfTU=*/false);
3589 switch (Detector.analyzeDeleteExpr(DE)) {
3590 case MismatchingNewDeleteDetector::VarInitMismatches:
3591 case MismatchingNewDeleteDetector::MemberInitMismatches: {
3592 DiagnoseMismatchedNewDelete(*this, DE->getBeginLoc(), Detector);
3593 break;
3594 }
3595 case MismatchingNewDeleteDetector::AnalyzeLater: {
3596 DeleteExprs[Detector.Field].push_back(
3597 std::make_pair(DE->getBeginLoc(), DE->isArrayForm()));
3598 break;
3599 }
3600 case MismatchingNewDeleteDetector::NoMismatch:
3601 break;
3602 }
3603}
3604
3605void Sema::AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
3606 bool DeleteWasArrayForm) {
3607 MismatchingNewDeleteDetector Detector(/*EndOfTU=*/true);
3608 switch (Detector.analyzeField(Field, DeleteWasArrayForm)) {
3609 case MismatchingNewDeleteDetector::VarInitMismatches:
3610 llvm_unreachable("This analysis should have been done for class members.");
3611 case MismatchingNewDeleteDetector::AnalyzeLater:
3612 llvm_unreachable("Analysis cannot be postponed any point beyond end of "
3613 "translation unit.");
3614 case MismatchingNewDeleteDetector::MemberInitMismatches:
3615 DiagnoseMismatchedNewDelete(*this, DeleteLoc, Detector);
3616 break;
3617 case MismatchingNewDeleteDetector::NoMismatch:
3618 break;
3619 }
3620}
3621
3622ExprResult
3623Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
3624 bool ArrayForm, Expr *ExE) {
3625 // C++ [expr.delete]p1:
3626 // The operand shall have a pointer type, or a class type having a single
3627 // non-explicit conversion function to a pointer type. The result has type
3628 // void.
3629 //
3630 // DR599 amends "pointer type" to "pointer to object type" in both cases.
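// For illustration (hypothetical user code, added for exposition), both of
// the following are accepted here; the second relies on the single
// non-explicit conversion function to a pointer type mentioned above:
//
//   void f(int *p) { delete p; }
//   struct Handle { operator int*() const; };
//   void g(Handle h) { delete h; }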
3631
3632 ExprResult Ex = ExE;
3633 FunctionDecl *OperatorDelete = nullptr;
3634 bool ArrayFormAsWritten = ArrayForm;
3635 bool UsualArrayDeleteWantsSize = false;
3636
3637 if (!Ex.get()->isTypeDependent()) {
3638 // Perform lvalue-to-rvalue cast, if needed.
3639 Ex = DefaultLvalueConversion(Ex.get());
3640 if (Ex.isInvalid())
3641 return ExprError();
3642
3643 QualType Type = Ex.get()->getType();
3644
3645 class DeleteConverter : public ContextualImplicitConverter {
3646 public:
3647 DeleteConverter() : ContextualImplicitConverter(false, true) {}
3648
3649 bool match(QualType ConvType) override {
3650 // FIXME: If we have an operator T* and an operator void*, we must pick
3651 // the operator T*.
3652 if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>())
3653 if (ConvPtrType->getPointeeType()->isIncompleteOrObjectType())
3654 return true;
3655 return false;
3656 }
3657
3658 SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
3659 QualType T) override {
3660 return S.Diag(Loc, diag::err_delete_operand) << T;
3661 }
3662
3663 SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
3664 QualType T) override {
3665 return S.Diag(Loc, diag::err_delete_incomplete_class_type) << T;
3666 }
3667
3668 SemaDiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc,
3669 QualType T,
3670 QualType ConvTy) override {
3671 return S.Diag(Loc, diag::err_delete_explicit_conversion) << T << ConvTy;
3672 }
3673
3674 SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv,
3675 QualType ConvTy) override {
3676 return S.Diag(Conv->getLocation(), diag::note_delete_conversion)
3677 << ConvTy;
3678 }
3679
3680 SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
3681 QualType T) override {
3682 return S.Diag(Loc, diag::err_ambiguous_delete_operand) << T;
3683 }
3684
3685 SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
3686 QualType ConvTy) override {
3687 return S.Diag(Conv->getLocation(), diag::note_delete_conversion)
3688 << ConvTy;
3689 }
3690
3691 SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
3692 QualType T,
3693 QualType ConvTy) override {
3694 llvm_unreachable("conversion functions are permitted");
3695 }
3696 } Converter;
3697
3698 Ex = PerformContextualImplicitConversion(StartLoc, Ex.get(), Converter);
3699 if (Ex.isInvalid())
3700 return ExprError();
3701 Type = Ex.get()->getType();
3702 if (!Converter.match(Type))
3703 // FIXME: PerformContextualImplicitConversion should return ExprError
3704 // itself in this case.
3705 return ExprError();
3706
3707 QualType Pointee = Type->castAs<PointerType>()->getPointeeType();
3708 QualType PointeeElem = Context.getBaseElementType(Pointee);
3709
3710 if (Pointee.getAddressSpace() != LangAS::Default &&
3711 !getLangOpts().OpenCLCPlusPlus)
3712 return Diag(Ex.get()->getBeginLoc(),
3713 diag::err_address_space_qualified_delete)
3714 << Pointee.getUnqualifiedType()
3715 << Ex.get()->getSourceRange();
3716
3717 CXXRecordDecl *PointeeRD = nullptr;
3718 if (Pointee->isVoidType() && !isSFINAEContext()) {
3719 // The C++ standard bans deleting a pointer to a non-object type, which
3720 // effectively bans deletion of "void*". However, most compilers support
3721 // this, so we treat it as a warning unless we're in a SFINAE context.
3722 // Since C++26, however, this is ill-formed and we diagnose it as an error.
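// For illustration (hypothetical user code, added for exposition):
//
//   void g(void *p) { delete p; }  // ext_delete_void_ptr_operand warning
//                                  // before C++26, err_delete_incomplete
//                                  // error in C++26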
3723 Diag(StartLoc, LangOpts.CPlusPlus26 ? diag::err_delete_incomplete
3724 : diag::ext_delete_void_ptr_operand)
3725 << (LangOpts.CPlusPlus26 ? Pointee : Type)
3726 << Ex.get()->getSourceRange();
3727 } else if (Pointee->isFunctionType() || Pointee->isVoidType() ||
3728 Pointee->isSizelessType()) {
3729 return ExprError(Diag(StartLoc, diag::err_delete_operand)
3730 << Type << Ex.get()->getSourceRange());
3731 } else if (!Pointee->isDependentType()) {
3732 // FIXME: This can result in errors if the definition was imported from a
3733 // module but is hidden.
3734 if (!RequireCompleteType(StartLoc, Pointee,
3735 LangOpts.CPlusPlus26
3736 ? diag::err_delete_incomplete
3737 : diag::warn_delete_incomplete,
3738 Ex.get())) {
3739 if (const RecordType *RT = PointeeElem->getAs<RecordType>())
3740 PointeeRD = cast<CXXRecordDecl>(RT->getDecl());
3741 }
3742 }
3743
3744 if (Pointee->isArrayType() && !ArrayForm) {
3745 Diag(StartLoc, diag::warn_delete_array_type)
3746 << Type << Ex.get()->getSourceRange()
3748 ArrayForm = true;
3749 }
3750
3751 DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
3752 ArrayForm ? OO_Array_Delete : OO_Delete);
3753
3754 if (PointeeRD) {
3755 if (!UseGlobal &&
3756 FindDeallocationFunction(StartLoc, PointeeRD, DeleteName,
3757 OperatorDelete))
3758 return ExprError();
3759
3760 // If we're deleting an array of records, check whether the
3761 // usual operator delete[] has a size_t parameter.
3762 if (ArrayForm) {
3763 // If the user specifically asked to use the global allocator,
3764 // we'll need to do the lookup into the class.
3765 if (UseGlobal)
3766 UsualArrayDeleteWantsSize =
3767 doesUsualArrayDeleteWantSize(*this, StartLoc, PointeeElem);
3768
3769 // Otherwise, the usual operator delete[] should be the
3770 // function we just found.
3771 else if (isa_and_nonnull<CXXMethodDecl>(OperatorDelete))
3772 UsualArrayDeleteWantsSize =
3773 UsualDeallocFnInfo(*this,
3774 DeclAccessPair::make(OperatorDelete, AS_public))
3775 .HasSizeT;
3776 }
3777
3778 if (!PointeeRD->hasIrrelevantDestructor())
3779 if (CXXDestructorDecl *Dtor = LookupDestructor(PointeeRD)) {
3780 MarkFunctionReferenced(StartLoc,
3781 const_cast<CXXDestructorDecl*>(Dtor));
3782 if (DiagnoseUseOfDecl(Dtor, StartLoc))
3783 return ExprError();
3784 }
3785
3786 CheckVirtualDtorCall(PointeeRD->getDestructor(), StartLoc,
3787 /*IsDelete=*/true, /*CallCanBeVirtual=*/true,
3788 /*WarnOnNonAbstractTypes=*/!ArrayForm,
3789 SourceLocation());
3790 }
3791
3792 if (!OperatorDelete) {
3793 if (getLangOpts().OpenCLCPlusPlus) {
3794 Diag(StartLoc, diag::err_openclcxx_not_supported) << "default delete";
3795 return ExprError();
3796 }
3797
3798 bool IsComplete = isCompleteType(StartLoc, Pointee);
3799 bool CanProvideSize =
3800 IsComplete && (!ArrayForm || UsualArrayDeleteWantsSize ||
3801 Pointee.isDestructedType());
3802 bool Overaligned = hasNewExtendedAlignment(*this, Pointee);
3803
3804 // Look for a global declaration.
3805 OperatorDelete = FindUsualDeallocationFunction(StartLoc, CanProvideSize,
3806 Overaligned, DeleteName);
3807 }
3808
3809 if (OperatorDelete->isInvalidDecl())
3810 return ExprError();
3811
3812 MarkFunctionReferenced(StartLoc, OperatorDelete);
3813
3814 // Check access and ambiguity of destructor if we're going to call it.
3815 // Note that this is required even for a virtual delete.
3816 bool IsVirtualDelete = false;
3817 if (PointeeRD) {
3818 if (CXXDestructorDecl *Dtor = LookupDestructor(PointeeRD)) {
3819 CheckDestructorAccess(Ex.get()->getExprLoc(), Dtor,
3820 PDiag(diag::err_access_dtor) << PointeeElem);
3821 IsVirtualDelete = Dtor->isVirtual();
3822 }
3823 }
3824
3825 DiagnoseUseOfDecl(OperatorDelete, StartLoc);
3826
3827 // Convert the operand to the type of the first parameter of operator
3828 // delete. This is only necessary if we selected a destroying operator
3829 // delete that we are going to call (non-virtually); converting to void*
3830 // is trivial and left to AST consumers to handle.
3831 QualType ParamType = OperatorDelete->getParamDecl(0)->getType();
3832 if (!IsVirtualDelete && !ParamType->getPointeeType()->isVoidType()) {
3833 Qualifiers Qs = Pointee.getQualifiers();
3834 if (Qs.hasCVRQualifiers()) {
3835 // Qualifiers are irrelevant to this conversion; we're only looking
3836 // for access and ambiguity.
3840 Ex = ImpCastExprToType(Ex.get(), Unqual, CK_NoOp);
3841 }
3842 Ex = PerformImplicitConversion(Ex.get(), ParamType, AA_Passing);
3843 if (Ex.isInvalid())
3844 return ExprError();
3845 }
3846 }
3847
3848 CXXDeleteExpr *Result = new (Context) CXXDeleteExpr(
3849 Context.VoidTy, UseGlobal, ArrayForm, ArrayFormAsWritten,
3850 UsualArrayDeleteWantsSize, OperatorDelete, Ex.get(), StartLoc);
3851 AnalyzeDeleteExprMismatch(Result);
3852 return Result;
3853}
3854
3855static bool resolveBuiltinNewDeleteOverload(Sema &S, CallExpr *TheCall,
3856 bool IsDelete,
3857 FunctionDecl *&Operator) {
3858
3859 DeclarationName NewName = S.Context.DeclarationNames.getCXXOperatorName(
3860 IsDelete ? OO_Delete : OO_New);
3861
3862 LookupResult R(S, NewName, TheCall->getBeginLoc(), Sema::LookupOrdinaryName);
3863 S.LookupQualifiedName(R, S.Context.getTranslationUnitDecl());
3864 assert(!R.empty() && "implicitly declared allocation functions not found");
3865 assert(!R.isAmbiguous() && "global allocation functions are ambiguous");
3866
3867 // We do our own custom access checks below.
3869
3870 SmallVector<Expr *, 8> Args(TheCall->arguments());
3871 OverloadCandidateSet Candidates(R.getNameLoc(),
3873 for (LookupResult::iterator FnOvl = R.begin(), FnOvlEnd = R.end();
3874 FnOvl != FnOvlEnd; ++FnOvl) {
3875 // Even member operator new/delete are implicitly treated as
3876 // static, so don't use AddMemberCandidate.
3877 NamedDecl *D = (*FnOvl)->getUnderlyingDecl();
3878
3879 if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(D)) {
3880 S.AddTemplateOverloadCandidate(FnTemplate, FnOvl.getPair(),
3881 /*ExplicitTemplateArgs=*/nullptr, Args,
3882 Candidates,
3883 /*SuppressUserConversions=*/false);
3884 continue;
3885 }
3886
3887 FunctionDecl *Fn = cast<FunctionDecl>(D);
3888 S.AddOverloadCandidate(Fn, FnOvl.getPair(), Args, Candidates,
3889 /*SuppressUserConversions=*/false);
3890 }
3891
3892 SourceRange Range = TheCall->getSourceRange();
3893
3894 // Do the resolution.
3895 OverloadCandidateSet::iterator Best;
3896 switch (Candidates.BestViableFunction(S, R.getNameLoc(), Best)) {
3897 case OR_Success: {
3898 // Got one!
3899 FunctionDecl *FnDecl = Best->Function;
3900 assert(R.getNamingClass() == nullptr &&
3901 "class members should not be considered");
3902
3903 if (!FnDecl->isReplaceableGlobalAllocationFunction()) {
3904 S.Diag(R.getNameLoc(), diag::err_builtin_operator_new_delete_not_usual)
3905 << (IsDelete ? 1 : 0) << Range;
3906 S.Diag(FnDecl->getLocation(), diag::note_non_usual_function_declared_here)
3907 << R.getLookupName() << FnDecl->getSourceRange();
3908 return true;
3909 }
3910
3911 Operator = FnDecl;
3912 return false;
3913 }
3914
3915 case OR_No_Viable_Function:
3916 Candidates.NoteCandidates(
3917 PartialDiagnosticAt(R.getNameLoc(),
3918 S.PDiag(diag::err_ovl_no_viable_function_in_call)
3919 << R.getLookupName() << Range),
3920 S, OCD_AllCandidates, Args);
3921 return true;
3922
3923 case OR_Ambiguous:
3924 Candidates.NoteCandidates(
3925 PartialDiagnosticAt(R.getNameLoc(),
3926 S.PDiag(diag::err_ovl_ambiguous_call)
3927 << R.getLookupName() << Range),
3928 S, OCD_AmbiguousCandidates, Args);
3929 return true;
3930
3931 case OR_Deleted:
3932 S.DiagnoseUseOfDeletedFunction(R.getNameLoc(), Range, R.getLookupName(),
3933 Candidates, Best->Function, Args);
3934 return true;
3935 }
3936 llvm_unreachable("Unreachable, bad result from BestViableFunction");
3937}
3938
3939ExprResult Sema::BuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
3940 bool IsDelete) {
3941 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
3942 if (!getLangOpts().CPlusPlus) {
3943 Diag(TheCall->getExprLoc(), diag::err_builtin_requires_language)
3944 << (IsDelete ? "__builtin_operator_delete" : "__builtin_operator_new")
3945 << "C++";
3946 return ExprError();
3947 }
3948 // CodeGen assumes it can find the global new and delete to call,
3949 // so ensure that they are declared.
3950 DeclareGlobalNewDelete();
3951
3952 FunctionDecl *OperatorNewOrDelete = nullptr;
3953 if (resolveBuiltinNewDeleteOverload(*this, TheCall, IsDelete,
3954 OperatorNewOrDelete))
3955 return ExprError();
3956 assert(OperatorNewOrDelete && "should be found");
3957
3958 DiagnoseUseOfDecl(OperatorNewOrDelete, TheCall->getExprLoc());
3959 MarkFunctionReferenced(TheCall->getExprLoc(), OperatorNewOrDelete);
3960
3961 TheCall->setType(OperatorNewOrDelete->getReturnType());
3962 for (unsigned i = 0; i != TheCall->getNumArgs(); ++i) {
3963 QualType ParamTy = OperatorNewOrDelete->getParamDecl(i)->getType();
3964 InitializedEntity Entity =
3967 Entity, TheCall->getArg(i)->getBeginLoc(), TheCall->getArg(i));
3968 if (Arg.isInvalid())
3969 return ExprError();
3970 TheCall->setArg(i, Arg.get());
3971 }
3972 auto Callee = dyn_cast<ImplicitCastExpr>(TheCall->getCallee());
3973 assert(Callee && Callee->getCastKind() == CK_BuiltinFnToFnPtr &&
3974 "Callee expected to be implicit cast to a builtin function pointer");
3975 Callee->setType(OperatorNewOrDelete->getType());
3976
3977 return TheCallResult;
3978}
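// Illustrative usage (hypothetical user code, added for exposition); as
// enforced above, these builtins are only available in C++:
//
//   void *p = __builtin_operator_new(32);  // resolves to a usual ::operator new
//   __builtin_operator_delete(p);          // resolves to the matching ::operator delete
//
// Candidates that are not replaceable global allocation functions are
// rejected with err_builtin_operator_new_delete_not_usual.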
3979
3981 bool IsDelete, bool CallCanBeVirtual,
3982 bool WarnOnNonAbstractTypes,
3983 SourceLocation DtorLoc) {
3984 if (!dtor || dtor->isVirtual() || !CallCanBeVirtual || isUnevaluatedContext())
3985 return;
3986
3987 // C++ [expr.delete]p3:
3988 // In the first alternative (delete object), if the static type of the
3989 // object to be deleted is different from its dynamic type, the static
3990 // type shall be a base class of the dynamic type of the object to be
3991 // deleted and the static type shall have a virtual destructor or the
3992 // behavior is undefined.
3993 //
3994 const CXXRecordDecl *PointeeRD = dtor->getParent();
3995 // Note: a final class cannot be derived from, so there is no issue there.
3996 if (!PointeeRD->isPolymorphic() || PointeeRD->hasAttr<FinalAttr>())
3997 return;
3998
3999 // If the superclass is in a system header, there's nothing that can be done.
4000 // The `delete` (where we emit the warning) can be in a system header;
4001 // what matters for this warning is where the deleted type is defined.
4002 if (getSourceManager().isInSystemHeader(PointeeRD->getLocation()))
4003 return;
4004
4005 QualType ClassType = dtor->getFunctionObjectParameterType();
4006 if (PointeeRD->isAbstract()) {
4007 // If the class is abstract, we warn by default, because we're
4008 // sure the code has undefined behavior.
4009 Diag(Loc, diag::warn_delete_abstract_non_virtual_dtor) << (IsDelete ? 0 : 1)
4010 << ClassType;
4011 } else if (WarnOnNonAbstractTypes) {
4012 // Otherwise, if this is not an array delete, it's a bit suspect,
4013 // but not necessarily wrong.
4014 Diag(Loc, diag::warn_delete_non_virtual_dtor) << (IsDelete ? 0 : 1)
4015 << ClassType;
4016 }
4017 if (!IsDelete) {
4018 std::string TypeStr;
4019 ClassType.getAsStringInternal(TypeStr, getPrintingPolicy());
4020 Diag(DtorLoc, diag::note_delete_non_virtual)
4021 << FixItHint::CreateInsertion(DtorLoc, TypeStr + "::");
4022 }
4023}
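// Illustrative example (hypothetical user code, added for exposition) of what
// CheckVirtualDtorCall diagnoses:
//
//   struct Base { virtual void g(); ~Base(); };  // polymorphic, non-virtual dtor
//   struct Derived : Base { int X; };
//   void f(Base *b) { delete b; }                // -Wdelete-non-virtual-dtor
//
// If Base were abstract, the stronger -Wdelete-abstract-non-virtual-dtor
// diagnostic is emitted instead, since the behavior is certainly undefined.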
4024
4026 SourceLocation StmtLoc,
4027 ConditionKind CK) {
4028 ExprResult E =
4029 CheckConditionVariable(cast<VarDecl>(ConditionVar), StmtLoc, CK);
4030 if (E.isInvalid())
4031 return ConditionError();
4032 return ConditionResult(*this, ConditionVar, MakeFullExpr(E.get(), StmtLoc),
4034}
4035
4037 SourceLocation StmtLoc,
4038 ConditionKind CK) {
4039 if (ConditionVar->isInvalidDecl())
4040 return ExprError();
4041
4042 QualType T = ConditionVar->getType();
4043
4044 // C++ [stmt.select]p2:
4045 // The declarator shall not specify a function or an array.
4046 if (T->isFunctionType())
4047 return ExprError(Diag(ConditionVar->getLocation(),
4048 diag::err_invalid_use_of_function_type)
4049 << ConditionVar->getSourceRange());
4050 else if (T->isArrayType())
4051 return ExprError(Diag(ConditionVar->getLocation(),
4052 diag::err_invalid_use_of_array_type)
4053 << ConditionVar->getSourceRange());
4054
4056 ConditionVar, ConditionVar->getType().getNonReferenceType(), VK_LValue,
4057 ConditionVar->getLocation());
4058
4059 switch (CK) {
4061 return CheckBooleanCondition(StmtLoc, Condition.get());
4062
4064 return CheckBooleanCondition(StmtLoc, Condition.get(), true);
4065
4067 return CheckSwitchCondition(StmtLoc, Condition.get());
4068 }
4069
4070 llvm_unreachable("unexpected condition kind");
4071}
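// Illustrative example (hypothetical user code, added for exposition) of the
// condition variables handled above; the declared variable is converted
// according to the ConditionKind (boolean, constexpr boolean, or switch):
//
//   if (Derived *d = dynamic_cast<Derived *>(b)) use(*d);
//   switch (int k = next()) { case 0: break; default: break; }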
4072
4073ExprResult Sema::CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr) {
4074 // C++11 6.4p4:
4075 // The value of a condition that is an initialized declaration in a statement
4076 // other than a switch statement is the value of the declared variable
4077 // implicitly converted to type bool. If that conversion is ill-formed, the
4078 // program is ill-formed.
4079 // The value of a condition that is an expression is the value of the
4080 // expression, implicitly converted to bool.
4081 //
4082 // C++23 8.5.2p2
4083 // If the if statement is of the form if constexpr, the value of the condition
4084 // is contextually converted to bool and the converted expression shall be
4085 // a constant expression.
4086 //
4087
4088 ExprResult E = PerformContextuallyConvertToBool(CondExpr);
4089 if (!IsConstexpr || E.isInvalid() || E.get()->isValueDependent())
4090 return E;
4091
4092 // FIXME: Return this value to the caller so they don't need to recompute it.
4093 llvm::APSInt Cond;
4094 E = VerifyIntegerConstantExpression(
4095 E.get(), &Cond,
4096 diag::err_constexpr_if_condition_expression_is_not_constant);
4097 return E;
4098}
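// Illustrative example (hypothetical user code, added for exposition): for
// "if constexpr" the converted condition must additionally be a constant
// expression, which is what the check above enforces:
//
//   template <typename T> int f(T t) {
//     if constexpr (sizeof(T) > 4) return 1;  // OK: constant expression
//     if constexpr (t > 0) return 2;          // error: not a constant expression
//     return 0;
//   }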
4099
4100bool
4102 // Look inside the implicit cast, if it exists.
4103 if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(From))
4104 From = Cast->getSubExpr();
4105
4106 // A string literal (2.13.4) that is not a wide string literal can
4107 // be converted to an rvalue of type "pointer to char"; a wide
4108 // string literal can be converted to an rvalue of type "pointer
4109 // to wchar_t" (C++ 4.2p2).
4110 if (StringLiteral *StrLit = dyn_cast<StringLiteral>(From->IgnoreParens()))
4111 if (const PointerType *ToPtrType = ToType->getAs<PointerType>())
4112 if (const BuiltinType *ToPointeeType
4113 = ToPtrType->getPointeeType()->getAs<BuiltinType>()) {
4114 // This conversion is considered only when there is an
4115 // explicit appropriate pointer target type (C++ 4.2p2).
4116 if (!ToPtrType->getPointeeType().hasQualifiers()) {
4117 switch (StrLit->getKind()) {
4121 // We don't allow UTF literals to be implicitly converted
4122 break;
4124 return (ToPointeeType->getKind() == BuiltinType::Char_U ||
4125 ToPointeeType->getKind() == BuiltinType::Char_S);
4128 QualType(ToPointeeType, 0));
4130 assert(false && "Unevaluated string literal in expression");
4131 break;
4132 }
4133 }
4134 }
4135
4136 return false;
4137}
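// Illustrative example (hypothetical user code, added for exposition) of the
// conversion classified above: an ordinary or wide string literal binding to
// a pointer to non-const character type, which C++98 4.2p2 allowed and C++11
// removed:
//
//   char *p = "hello";      // uses this conversion in C++98/03
//   wchar_t *w = L"wide";   // likewise for wide string literals
//   char *u = u8"utf-8";    // never allowed: UTF literals are excluded above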
4138
4140 SourceLocation CastLoc,
4141 QualType Ty,
4142 CastKind Kind,
4143 CXXMethodDecl *Method,
4144 DeclAccessPair FoundDecl,
4145 bool HadMultipleCandidates,
4146 Expr *From) {
4147 switch (Kind) {
4148 default: llvm_unreachable("Unhandled cast kind!");
4149 case CK_ConstructorConversion: {
4150 CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Method);
4151 SmallVector<Expr*, 8> ConstructorArgs;
4152
4153 if (S.RequireNonAbstractType(CastLoc, Ty,
4154 diag::err_allocation_of_abstract_type))
4155 return ExprError();
4156
4157 if (S.CompleteConstructorCall(Constructor, Ty, From, CastLoc,
4158 ConstructorArgs))
4159 return ExprError();
4160
4161 S.CheckConstructorAccess(CastLoc, Constructor, FoundDecl,
4163 if (S.DiagnoseUseOfDecl(Method, CastLoc))
4164 return ExprError();
4165
4167 CastLoc, Ty, FoundDecl, cast<CXXConstructorDecl>(Method),
4168 ConstructorArgs, HadMultipleCandidates,
4169 /*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false,
4171 if (Result.isInvalid())
4172 return ExprError();
4173
4174 return S.MaybeBindToTemporary(Result.getAs<Expr>());
4175 }
4176
4177 case CK_UserDefinedConversion: {
4178 assert(!From->getType()->isPointerType() && "Arg can't have pointer type!");
4179
4180 S.CheckMemberOperatorAccess(CastLoc, From, /*arg*/ nullptr, FoundDecl);
4181 if (S.DiagnoseUseOfDecl(Method, CastLoc))
4182 return ExprError();
4183
4184 // Create an implicit call expr that calls it.
4185 CXXConversionDecl *Conv = cast<CXXConversionDecl>(Method);
4186 ExprResult Result = S.BuildCXXMemberCallExpr(From, FoundDecl, Conv,
4187 HadMultipleCandidates);
4188 if (Result.isInvalid())
4189 return ExprError();
4190 // Record usage of conversion in an implicit cast.
4191 Result = ImplicitCastExpr::Create(S.Context, Result.get()->getType(),
4192 CK_UserDefinedConversion, Result.get(),
4193 nullptr, Result.get()->getValueKind(),
4195
4196 return S.MaybeBindToTemporary(Result.get());
4197 }
4198 }
4199}
4200
4203 const ImplicitConversionSequence &ICS,
4204 AssignmentAction Action,
4206 // C++ [over.match.oper]p7: [...] operands of class type are converted [...]
4208 !From->getType()->isRecordType())
4209 return From;
4210
4211 switch (ICS.getKind()) {
4213 ExprResult Res = PerformImplicitConversion(From, ToType, ICS.Standard,
4214 Action, CCK);
4215 if (Res.isInvalid())
4216 return ExprError();
4217 From = Res.get();
4218 break;
4219 }
4220
4222
4225 QualType BeforeToType;
4226 assert(FD && "no conversion function for user-defined conversion seq");
4227 if (const CXXConversionDecl *Conv = dyn_cast<CXXConversionDecl>(FD)) {
4228 CastKind = CK_UserDefinedConversion;
4229
4230 // If the user-defined conversion is specified by a conversion function,
4231 // the initial standard conversion sequence converts the source type to
4232 // the implicit object parameter of the conversion function.
4233 BeforeToType = Context.getTagDeclType(Conv->getParent());
4234 } else {
4235 const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(FD);
4236 CastKind = CK_ConstructorConversion;
4237 // Do no conversion if dealing with ... for the first conversion.
4239 // If the user-defined conversion is specified by a constructor, the
4240 // initial standard conversion sequence converts the source type to
4241 // the type required by the argument of the constructor
4242 BeforeToType = Ctor->getParamDecl(0)->getType().getNonReferenceType();
4243 }
4244 }
4245 // Watch out for ellipsis conversion.
4247 ExprResult Res =
4248 PerformImplicitConversion(From, BeforeToType,
4250 CCK);
4251 if (Res.isInvalid())
4252 return ExprError();
4253 From = Res.get();
4254 }
4255
4257 *this, From->getBeginLoc(), ToType.getNonReferenceType(), CastKind,
4258 cast<CXXMethodDecl>(FD), ICS.UserDefined.FoundConversionFunction,
4260
4261 if (CastArg.isInvalid())
4262 return ExprError();
4263
4264 From = CastArg.get();
4265
4266 // C++ [over.match.oper]p7:
4267 // [...] the second standard conversion sequence of a user-defined
4268 // conversion sequence is not applied.
4270 return From;
4271
4272 return PerformImplicitConversion(From, ToType, ICS.UserDefined.After,
4273 AA_Converting, CCK);
4274 }
4275
4277 ICS.DiagnoseAmbiguousConversion(*this, From->getExprLoc(),
4278 PDiag(diag::err_typecheck_ambiguous_condition)
4279 << From->getSourceRange());
4280 return ExprError();
4281
4284 llvm_unreachable("bad conversion");
4285
4288 CheckAssignmentConstraints(From->getExprLoc(), ToType, From->getType());
4289 bool Diagnosed = DiagnoseAssignmentResult(
4290 ConvTy == Compatible ? Incompatible : ConvTy, From->getExprLoc(),
4291 ToType, From->getType(), From, Action);
4292 assert(Diagnosed && "failed to diagnose bad conversion"); (void)Diagnosed;
4293 return ExprError();
4294 }
4295
4296 // Everything went well.
4297 return From;
4298}
4299
4300 // adjustVectorType - Compute the intermediate cast type for casting the elements
4301 // of the from type to the elements of the to type without resizing the vector.
4303 QualType ToType, QualType *ElTy = nullptr) {
4304 auto *ToVec = ToType->castAs<VectorType>();
4305 QualType ElType = ToVec->getElementType();
4306 if (ElTy)
4307 *ElTy = ElType;
4308 if (!FromTy->isVectorType())
4309 return ElType;
4310 auto *FromVec = FromTy->castAs<VectorType>();
4311 return Context.getExtVectorType(ElType, FromVec->getNumElements());
4312}
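// Worked example (added for exposition): converting a
// float __attribute__((ext_vector_type(4))) value to an
// int __attribute__((ext_vector_type(4))) target makes adjustVectorType
// return an ext_vector_type(4) of the target's element type (int), so the
// element conversion is applied without changing the number of elements;
// for a scalar source it returns just the target's element type.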
4313
4316 const StandardConversionSequence& SCS,
4317 AssignmentAction Action,
4319 bool CStyle = (CCK == CheckedConversionKind::CStyleCast ||
4321
4322 // Overall FIXME: we are recomputing too many types here and doing far too
4323 // much extra work. What this means is that we need to keep track of more
4324 // information that is computed when we try the implicit conversion initially,
4325 // so that we don't need to recompute anything here.
4326 QualType FromType = From->getType();
4327
4328 if (SCS.CopyConstructor) {
4329 // FIXME: When can ToType be a reference type?
4330 assert(!ToType->isReferenceType());
4331 if (SCS.Second == ICK_Derived_To_Base) {
4332 SmallVector<Expr*, 8> ConstructorArgs;
4334 cast<CXXConstructorDecl>(SCS.CopyConstructor), ToType, From,
4335 /*FIXME:ConstructLoc*/ SourceLocation(), ConstructorArgs))
4336 return ExprError();
4337 return BuildCXXConstructExpr(
4338 /*FIXME:ConstructLoc*/ SourceLocation(), ToType,
4339 SCS.FoundCopyConstructor, SCS.CopyConstructor, ConstructorArgs,
4340 /*HadMultipleCandidates*/ false,
4341 /*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false,
4343 }
4344 return BuildCXXConstructExpr(
4345 /*FIXME:ConstructLoc*/ SourceLocation(), ToType,
4347 /*HadMultipleCandidates*/ false,
4348 /*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false,
4350 }
4351
4352 // Resolve overloaded function references.
4353 if (Context.hasSameType(FromType, Context.OverloadTy)) {
4354 DeclAccessPair Found;
4355 FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(From, ToType,
4356 true, Found);
4357 if (!Fn)
4358 return ExprError();
4359
4360 if (DiagnoseUseOfDecl(Fn, From->getBeginLoc()))
4361 return ExprError();
4362
4363 ExprResult Res = FixOverloadedFunctionReference(From, Found, Fn);
4364 if (Res.isInvalid())
4365 return ExprError();
4366
4367 // We might get back another placeholder expression if we resolved to a
4368 // builtin.
4369 Res = CheckPlaceholderExpr(Res.get());
4370 if (Res.isInvalid())
4371 return ExprError();
4372
4373 From = Res.get();
4374 FromType = From->getType();
4375 }
4376
4377 // If we're converting to an atomic type, first convert to the corresponding
4378 // non-atomic type.
4379 QualType ToAtomicType;
4380 if (const AtomicType *ToAtomic = ToType->getAs<AtomicType>()) {
4381 ToAtomicType = ToType;
4382 ToType = ToAtomic->getValueType();
4383 }
4384
4385 QualType InitialFromType = FromType;
4386 // Perform the first implicit conversion.
4387 switch (SCS.First) {
4388 case ICK_Identity:
4389 if (const AtomicType *FromAtomic = FromType->getAs<AtomicType>()) {
4390 FromType = FromAtomic->getValueType().getUnqualifiedType();
4391 From = ImplicitCastExpr::Create(Context, FromType, CK_AtomicToNonAtomic,
4392 From, /*BasePath=*/nullptr, VK_PRValue,
4394 }
4395 break;
4396
4397 case ICK_Lvalue_To_Rvalue: {
4398 assert(From->getObjectKind() != OK_ObjCProperty);
4399 ExprResult FromRes = DefaultLvalueConversion(From);
4400 if (FromRes.isInvalid())
4401 return ExprError();
4402
4403 From = FromRes.get();
4404 FromType = From->getType();
4405 break;
4406 }
4407
4409 FromType = Context.getArrayDecayedType(FromType);
4410 From = ImpCastExprToType(From, FromType, CK_ArrayToPointerDecay, VK_PRValue,
4411 /*BasePath=*/nullptr, CCK)
4412 .get();
4413 break;
4414
4416 FromType = Context.getArrayParameterType(FromType);
4417 From = ImpCastExprToType(From, FromType, CK_HLSLArrayRValue, VK_PRValue,
4418 /*BasePath=*/nullptr, CCK)
4419 .get();
4420 break;
4421
4423 FromType = Context.getPointerType(FromType);
4424 From = ImpCastExprToType(From, FromType, CK_FunctionToPointerDecay,
4425 VK_PRValue, /*BasePath=*/nullptr, CCK)
4426 .get();
4427 break;
4428
4429 default:
4430 llvm_unreachable("Improper first standard conversion");
4431 }
4432
4433 // Perform the second implicit conversion
4434 switch (SCS.Second) {
4435 case ICK_Identity:
4436 // C++ [except.spec]p5:
4437 // [For] assignment to and initialization of pointers to functions,
4438 // pointers to member functions, and references to functions: the
4439 // target entity shall allow at least the exceptions allowed by the
4440 // source value in the assignment or initialization.
4441 switch (Action) {
4442 case AA_Assigning:
4443 case AA_Initializing:
4444 // Note, function argument passing and returning are initialization.
4445 case AA_Passing:
4446 case AA_Returning:
4447 case AA_Sending:
4449 if (CheckExceptionSpecCompatibility(From, ToType))
4450 return ExprError();
4451 break;
4452
4453 case AA_Casting:
4454 case AA_Converting:
4455 // Casts and implicit conversions are not initialization, so are not
4456 // checked for exception specification mismatches.
4457 break;
4458 }
4459 // Nothing else to do.
4460 break;
4461
4464 QualType ElTy = ToType;
4465 QualType StepTy = ToType;
4466 if (ToType->isVectorType())
4467 StepTy = adjustVectorType(Context, FromType, ToType, &ElTy);
4468 if (ElTy->isBooleanType()) {
4469 assert(FromType->castAs<EnumType>()->getDecl()->isFixed() &&
4471 "only enums with fixed underlying type can promote to bool");
4472 From = ImpCastExprToType(From, StepTy, CK_IntegralToBoolean, VK_PRValue,
4473 /*BasePath=*/nullptr, CCK)
4474 .get();
4475 } else {
4476 From = ImpCastExprToType(From, StepTy, CK_IntegralCast, VK_PRValue,
4477 /*BasePath=*/nullptr, CCK)
4478 .get();
4479 }
4480 break;
4481 }
4482
4485 QualType StepTy = ToType;
4486 if (ToType->isVectorType())
4487 StepTy = adjustVectorType(Context, FromType, ToType);
4488 From = ImpCastExprToType(From, StepTy, CK_FloatingCast, VK_PRValue,
4489 /*BasePath=*/nullptr, CCK)
4490 .get();
4491 break;
4492 }
4493
4496 QualType FromEl = From->getType()->castAs<ComplexType>()->getElementType();
4497 QualType ToEl = ToType->castAs<ComplexType>()->getElementType();
4498 CastKind CK;
4499 if (FromEl->isRealFloatingType()) {
4500 if (ToEl->isRealFloatingType())
4501 CK = CK_FloatingComplexCast;
4502 else
4503 CK = CK_FloatingComplexToIntegralComplex;
4504 } else if (ToEl->isRealFloatingType()) {
4505 CK = CK_IntegralComplexToFloatingComplex;
4506 } else {
4507 CK = CK_IntegralComplexCast;
4508 }
4509 From = ImpCastExprToType(From, ToType, CK, VK_PRValue, /*BasePath=*/nullptr,
4510 CCK)
4511 .get();
4512 break;
4513 }
4514
4515 case ICK_Floating_Integral: {
4516 QualType ElTy = ToType;
4517 QualType StepTy = ToType;
4518 if (ToType->isVectorType())
4519 StepTy = adjustVectorType(Context, FromType, ToType, &ElTy);
4520 if (ElTy->isRealFloatingType())
4521 From = ImpCastExprToType(From, StepTy, CK_IntegralToFloating, VK_PRValue,
4522 /*BasePath=*/nullptr, CCK)
4523 .get();
4524 else
4525 From = ImpCastExprToType(From, StepTy, CK_FloatingToIntegral, VK_PRValue,
4526 /*BasePath=*/nullptr, CCK)
4527 .get();
4528 break;
4529 }
4530
4532 assert((FromType->isFixedPointType() || ToType->isFixedPointType()) &&
4533 "Attempting implicit fixed point conversion without a fixed "
4534 "point operand");
4535 if (FromType->isFloatingType())
4536 From = ImpCastExprToType(From, ToType, CK_FloatingToFixedPoint,
4537 VK_PRValue,
4538 /*BasePath=*/nullptr, CCK).get();
4539 else if (ToType->isFloatingType())
4540 From = ImpCastExprToType(From, ToType, CK_FixedPointToFloating,
4541 VK_PRValue,
4542 /*BasePath=*/nullptr, CCK).get();
4543 else if (FromType->isIntegralType(Context))
4544 From = ImpCastExprToType(From, ToType, CK_IntegralToFixedPoint,
4545 VK_PRValue,
4546 /*BasePath=*/nullptr, CCK).get();
4547 else if (ToType->isIntegralType(Context))
4548 From = ImpCastExprToType(From, ToType, CK_FixedPointToIntegral,
4549 VK_PRValue,
4550 /*BasePath=*/nullptr, CCK).get();
4551 else if (ToType->isBooleanType())
4552 From = ImpCastExprToType(From, ToType, CK_FixedPointToBoolean,
4553 VK_PRValue,
4554 /*BasePath=*/nullptr, CCK).get();
4555 else
4556 From = ImpCastExprToType(From, ToType, CK_FixedPointCast,
4557 VK_PRValue,
4558 /*BasePath=*/nullptr, CCK).get();
4559 break;
4560
4562 From = ImpCastExprToType(From, ToType, CK_NoOp, From->getValueKind(),
4563 /*BasePath=*/nullptr, CCK).get();
4564 break;
4565
4568 if (SCS.IncompatibleObjC && Action != AA_Casting) {
4569 // Diagnose incompatible Objective-C conversions
4570 if (Action == AA_Initializing || Action == AA_Assigning)
4571 Diag(From->getBeginLoc(),
4572 diag::ext_typecheck_convert_incompatible_pointer)
4573 << ToType << From->getType() << Action << From->getSourceRange()
4574 << 0;
4575 else
4576 Diag(From->getBeginLoc(),
4577 diag::ext_typecheck_convert_incompatible_pointer)
4578 << From->getType() << ToType << Action << From->getSourceRange()
4579 << 0;
4580
4581 if (From->getType()->isObjCObjectPointerType() &&
4582 ToType->isObjCObjectPointerType())
4584 } else if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() &&
4585 !ObjC().CheckObjCARCUnavailableWeakConversion(ToType,
4586 From->getType())) {
4587 if (Action == AA_Initializing)
4588 Diag(From->getBeginLoc(), diag::err_arc_weak_unavailable_assign);
4589 else
4590 Diag(From->getBeginLoc(), diag::err_arc_convesion_of_weak_unavailable)
4591 << (Action == AA_Casting) << From->getType() << ToType
4592 << From->getSourceRange();
4593 }
4594
4595 // Defer address space conversion to the third conversion.
4596 QualType FromPteeType = From->getType()->getPointeeType();
4597 QualType ToPteeType = ToType->getPointeeType();
4598 QualType NewToType = ToType;
4599 if (!FromPteeType.isNull() && !ToPteeType.isNull() &&
4600 FromPteeType.getAddressSpace() != ToPteeType.getAddressSpace()) {
4601 NewToType = Context.removeAddrSpaceQualType(ToPteeType);
4602 NewToType = Context.getAddrSpaceQualType(NewToType,
4603 FromPteeType.getAddressSpace());
4604 if (ToType->isObjCObjectPointerType())
4605 NewToType = Context.getObjCObjectPointerType(NewToType);
4606 else if (ToType->isBlockPointerType())
4607 NewToType = Context.getBlockPointerType(NewToType);
4608 else
4609 NewToType = Context.getPointerType(NewToType);
4610 }
4611
4612 CastKind Kind;
4613 CXXCastPath BasePath;
4614 if (CheckPointerConversion(From, NewToType, Kind, BasePath, CStyle))
4615 return ExprError();
4616
4617 // Make sure we extend blocks if necessary.
4618 // FIXME: doing this here is really ugly.
4619 if (Kind == CK_BlockPointerToObjCPointerCast) {
4620 ExprResult E = From;
4622 From = E.get();
4623 }
4625 ObjC().CheckObjCConversion(SourceRange(), NewToType, From, CCK);
4626 From = ImpCastExprToType(From, NewToType, Kind, VK_PRValue, &BasePath, CCK)
4627 .get();
4628 break;
4629 }
4630
4631 case ICK_Pointer_Member: {
4632 CastKind Kind;
4633 CXXCastPath BasePath;
4634 if (CheckMemberPointerConversion(From, ToType, Kind, BasePath, CStyle))
4635 return ExprError();
4636 if (CheckExceptionSpecCompatibility(From, ToType))
4637 return ExprError();
4638
4639 // We may not have been able to figure out what this member pointer resolved
4640 // to up until this exact point. Attempt to lock in its inheritance model.
4642 (void)isCompleteType(From->getExprLoc(), From->getType());
4643 (void)isCompleteType(From->getExprLoc(), ToType);
4644 }
4645
4646 From =
4647 ImpCastExprToType(From, ToType, Kind, VK_PRValue, &BasePath, CCK).get();
4648 break;
4649 }
4650
4652 // Perform half-to-boolean conversion via float.
4653 if (From->getType()->isHalfType()) {
4654 From = ImpCastExprToType(From, Context.FloatTy, CK_FloatingCast).get();
4655 FromType = Context.FloatTy;
4656 }
4657 QualType ElTy = FromType;
4658 QualType StepTy = ToType;
4659 if (FromType->isVectorType()) {
4660 if (getLangOpts().HLSL)
4661 StepTy = adjustVectorType(Context, FromType, ToType);
4662 ElTy = FromType->castAs<VectorType>()->getElementType();
4663 }
4664
4665 From = ImpCastExprToType(From, StepTy, ScalarTypeToBooleanCastKind(ElTy),
4666 VK_PRValue,
4667 /*BasePath=*/nullptr, CCK)
4668 .get();
4669 break;
4670 }
4671
4672 case ICK_Derived_To_Base: {
4673 CXXCastPath BasePath;
4675 From->getType(), ToType.getNonReferenceType(), From->getBeginLoc(),
4676 From->getSourceRange(), &BasePath, CStyle))
4677 return ExprError();
4678
4679 From = ImpCastExprToType(From, ToType.getNonReferenceType(),
4680 CK_DerivedToBase, From->getValueKind(),
4681 &BasePath, CCK).get();
4682 break;
4683 }
4684
4686 From = ImpCastExprToType(From, ToType, CK_BitCast, VK_PRValue,
4687 /*BasePath=*/nullptr, CCK)
4688 .get();
4689 break;
4690
4693 From = ImpCastExprToType(From, ToType, CK_BitCast, VK_PRValue,
4694 /*BasePath=*/nullptr, CCK)
4695 .get();
4696 break;
4697
4698 case ICK_Vector_Splat: {
4699 // Vector splat from any arithmetic type to a vector.
4700 Expr *Elem = prepareVectorSplat(ToType, From).get();
4701 From = ImpCastExprToType(Elem, ToType, CK_VectorSplat, VK_PRValue,
4702 /*BasePath=*/nullptr, CCK)
4703 .get();
4704 break;
4705 }
4706
4707 case ICK_Complex_Real:
4708 // Case 1. x -> _Complex y
4709 if (const ComplexType *ToComplex = ToType->getAs<ComplexType>()) {
4710 QualType ElType = ToComplex->getElementType();
4711 bool isFloatingComplex = ElType->isRealFloatingType();
4712
4713 // x -> y
4714 if (Context.hasSameUnqualifiedType(ElType, From->getType())) {
4715 // do nothing
4716 } else if (From->getType()->isRealFloatingType()) {
4717 From = ImpCastExprToType(From, ElType,
4718 isFloatingComplex ? CK_FloatingCast : CK_FloatingToIntegral).get();
4719 } else {
4720 assert(From->getType()->isIntegerType());
4721 From = ImpCastExprToType(From, ElType,
4722 isFloatingComplex ? CK_IntegralToFloating : CK_IntegralCast).get();
4723 }
4724 // y -> _Complex y
4725 From = ImpCastExprToType(From, ToType,
4726 isFloatingComplex ? CK_FloatingRealToComplex
4727 : CK_IntegralRealToComplex).get();
4728
4729 // Case 2. _Complex x -> y
4730 } else {
4731 auto *FromComplex = From->getType()->castAs<ComplexType>();
4732 QualType ElType = FromComplex->getElementType();
4733 bool isFloatingComplex = ElType->isRealFloatingType();
4734
4735 // _Complex x -> x
4736 From = ImpCastExprToType(From, ElType,
4737 isFloatingComplex ? CK_FloatingComplexToReal
4738 : CK_IntegralComplexToReal,
4739 VK_PRValue, /*BasePath=*/nullptr, CCK)
4740 .get();
4741
4742 // x -> y
4743 if (Context.hasSameUnqualifiedType(ElType, ToType)) {
4744 // do nothing
4745 } else if (ToType->isRealFloatingType()) {
4746 From = ImpCastExprToType(From, ToType,
4747 isFloatingComplex ? CK_FloatingCast
4748 : CK_IntegralToFloating,
4749 VK_PRValue, /*BasePath=*/nullptr, CCK)
4750 .get();
4751 } else {
4752 assert(ToType->isIntegerType());
4753 From = ImpCastExprToType(From, ToType,
4754 isFloatingComplex ? CK_FloatingToIntegral
4755 : CK_IntegralCast,
4756 VK_PRValue, /*BasePath=*/nullptr, CCK)
4757 .get();
4758 }
4759 }
4760 break;
4761
4763 LangAS AddrSpaceL =
4765 LangAS AddrSpaceR =
4767 assert(Qualifiers::isAddressSpaceSupersetOf(AddrSpaceL, AddrSpaceR) &&
4768 "Invalid cast");
4769