clang 20.0.0git
SemaChecking.cpp
Go to the documentation of this file.
1//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements extra semantic analysis beyond what is enforced
10// by the C type system.
11//
12//===----------------------------------------------------------------------===//
13
14#include "clang/AST/APValue.h"
16#include "clang/AST/Attr.h"
18#include "clang/AST/CharUnits.h"
19#include "clang/AST/Decl.h"
20#include "clang/AST/DeclBase.h"
21#include "clang/AST/DeclCXX.h"
22#include "clang/AST/DeclObjC.h"
25#include "clang/AST/Expr.h"
26#include "clang/AST/ExprCXX.h"
27#include "clang/AST/ExprObjC.h"
31#include "clang/AST/NSAPI.h"
35#include "clang/AST/Stmt.h"
37#include "clang/AST/Type.h"
38#include "clang/AST/TypeLoc.h"
44#include "clang/Basic/LLVM.h"
57#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
59#include "clang/Sema/Lookup.h"
61#include "clang/Sema/Scope.h"
63#include "clang/Sema/Sema.h"
65#include "clang/Sema/SemaARM.h"
66#include "clang/Sema/SemaBPF.h"
67#include "clang/Sema/SemaHLSL.h"
71#include "clang/Sema/SemaMIPS.h"
73#include "clang/Sema/SemaObjC.h"
75#include "clang/Sema/SemaPPC.h"
78#include "clang/Sema/SemaWasm.h"
79#include "clang/Sema/SemaX86.h"
80#include "llvm/ADT/APFloat.h"
81#include "llvm/ADT/APInt.h"
82#include "llvm/ADT/APSInt.h"
83#include "llvm/ADT/ArrayRef.h"
84#include "llvm/ADT/DenseMap.h"
85#include "llvm/ADT/FoldingSet.h"
86#include "llvm/ADT/STLExtras.h"
87#include "llvm/ADT/SmallBitVector.h"
88#include "llvm/ADT/SmallPtrSet.h"
89#include "llvm/ADT/SmallString.h"
90#include "llvm/ADT/SmallVector.h"
91#include "llvm/ADT/StringExtras.h"
92#include "llvm/ADT/StringRef.h"
93#include "llvm/ADT/StringSet.h"
94#include "llvm/ADT/StringSwitch.h"
95#include "llvm/Support/AtomicOrdering.h"
96#include "llvm/Support/Casting.h"
97#include "llvm/Support/Compiler.h"
98#include "llvm/Support/ConvertUTF.h"
99#include "llvm/Support/ErrorHandling.h"
100#include "llvm/Support/Format.h"
101#include "llvm/Support/Locale.h"
102#include "llvm/Support/MathExtras.h"
103#include "llvm/Support/SaveAndRestore.h"
104#include "llvm/Support/raw_ostream.h"
105#include "llvm/TargetParser/RISCVTargetParser.h"
106#include "llvm/TargetParser/Triple.h"
107#include <algorithm>
108#include <bitset>
109#include <cassert>
110#include <cctype>
111#include <cstddef>
112#include <cstdint>
113#include <functional>
114#include <limits>
115#include <optional>
116#include <string>
117#include <tuple>
118#include <utility>
119
120using namespace clang;
121using namespace sema;
122
124 unsigned ByteNo) const {
125 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
127}
128
129static constexpr unsigned short combineFAPK(Sema::FormatArgumentPassingKind A,
131 return (A << 8) | B;
132}
133
134bool Sema::checkArgCountAtLeast(CallExpr *Call, unsigned MinArgCount) {
135 unsigned ArgCount = Call->getNumArgs();
136 if (ArgCount >= MinArgCount)
137 return false;
138
139 return Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
140 << 0 /*function call*/ << MinArgCount << ArgCount
141 << /*is non object*/ 0 << Call->getSourceRange();
142}
143
144bool Sema::checkArgCountAtMost(CallExpr *Call, unsigned MaxArgCount) {
145 unsigned ArgCount = Call->getNumArgs();
146 if (ArgCount <= MaxArgCount)
147 return false;
148 return Diag(Call->getEndLoc(), diag::err_typecheck_call_too_many_args_at_most)
149 << 0 /*function call*/ << MaxArgCount << ArgCount
150 << /*is non object*/ 0 << Call->getSourceRange();
151}
152
153bool Sema::checkArgCountRange(CallExpr *Call, unsigned MinArgCount,
154 unsigned MaxArgCount) {
155 return checkArgCountAtLeast(Call, MinArgCount) ||
156 checkArgCountAtMost(Call, MaxArgCount);
157}
158
/// Diagnose a call that does not provide exactly \p DesiredArgCount
/// arguments. Returns true iff an error was emitted.
bool Sema::checkArgCount(CallExpr *Call, unsigned DesiredArgCount) {
  unsigned ArgCount = Call->getNumArgs();
  if (ArgCount == DesiredArgCount)
    return false;

  if (checkArgCountAtLeast(Call, DesiredArgCount))
    return true;
  assert(ArgCount > DesiredArgCount && "should have diagnosed this");

  // Highlight all the excess arguments.
  SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(),
                    Call->getArg(ArgCount - 1)->getEndLoc());

  // Use the computed range of the excess arguments rather than the original
  // Call->getArg(1)->getSourceRange(): indexing argument 1 is out of bounds
  // when DesiredArgCount == 0 and exactly one argument was supplied, and it
  // highlights the wrong argument in the other cases.
  return Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
         << 0 /*function call*/ << DesiredArgCount << ArgCount
         << /*is non object*/ 0 << Range;
}
176
/// Check the arguments to __builtin_verbose_trap: every non-value-dependent
/// argument must evaluate to a constant string, and none may contain '$'
/// (reserved as a field separator in the encoded trap reason).
/// Returns true when all arguments are valid.
static bool checkBuiltinVerboseTrap(CallExpr *Call, Sema &S) {
  // NOTE(review): the function header was lost in extraction; the name and
  // parameter order are reconstructed from upstream SemaChecking.cpp.
  bool HasError = false;

  for (unsigned I = 0; I < Call->getNumArgs(); ++I) {
    Expr *Arg = Call->getArg(I);

    if (Arg->isValueDependent())
      continue;

    std::optional<std::string> ArgString = Arg->tryEvaluateString(S.Context);
    int DiagMsgKind = -1;
    // Arguments must be pointers to constant strings and cannot use '$'.
    if (!ArgString.has_value())
      DiagMsgKind = 0;
    else if (ArgString->find('$') != std::string::npos)
      DiagMsgKind = 1;

    if (DiagMsgKind >= 0) {
      S.Diag(Arg->getBeginLoc(), diag::err_builtin_verbose_trap_arg)
          << DiagMsgKind << Arg->getSourceRange();
      HasError = true;
    }
  }

  return !HasError;
}
203
205 if (Value->isTypeDependent())
206 return false;
207
208 InitializedEntity Entity =
212 if (Result.isInvalid())
213 return true;
214 Value = Result.get();
215 return false;
216}
217
218/// Check that the first argument to __builtin_annotation is an integer
219/// and the second argument is a non-wide string literal.
220static bool BuiltinAnnotation(Sema &S, CallExpr *TheCall) {
221 if (S.checkArgCount(TheCall, 2))
222 return true;
223
224 // First argument should be an integer.
225 Expr *ValArg = TheCall->getArg(0);
226 QualType Ty = ValArg->getType();
227 if (!Ty->isIntegerType()) {
228 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
229 << ValArg->getSourceRange();
230 return true;
231 }
232
233 // Second argument should be a constant string.
234 Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
235 StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
236 if (!Literal || !Literal->isOrdinary()) {
237 S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
238 << StrArg->getSourceRange();
239 return true;
240 }
241
242 TheCall->setType(Ty);
243 return false;
244}
245
246static bool BuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
247 // We need at least one argument.
248 if (TheCall->getNumArgs() < 1) {
249 S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
250 << 0 << 1 << TheCall->getNumArgs() << /*is non object*/ 0
251 << TheCall->getCallee()->getSourceRange();
252 return true;
253 }
254
255 // All arguments should be wide string literals.
256 for (Expr *Arg : TheCall->arguments()) {
257 auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
258 if (!Literal || !Literal->isWide()) {
259 S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
260 << Arg->getSourceRange();
261 return true;
262 }
263 }
264
265 return false;
266}
267
268/// Check that the argument to __builtin_addressof is a glvalue, and set the
269/// result type to the corresponding pointer type.
270static bool BuiltinAddressof(Sema &S, CallExpr *TheCall) {
271 if (S.checkArgCount(TheCall, 1))
272 return true;
273
274 ExprResult Arg(TheCall->getArg(0));
275 QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
276 if (ResultType.isNull())
277 return true;
278
279 TheCall->setArg(0, Arg.get());
280 TheCall->setType(ResultType);
281 return false;
282}
283
284/// Check that the argument to __builtin_function_start is a function.
285static bool BuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
286 if (S.checkArgCount(TheCall, 1))
287 return true;
288
290 if (Arg.isInvalid())
291 return true;
292
293 TheCall->setArg(0, Arg.get());
294 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
296
297 if (!FD) {
298 S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
299 << TheCall->getSourceRange();
300 return true;
301 }
302
303 return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
304 TheCall->getBeginLoc());
305}
306
307/// Check the number of arguments and set the result type to
308/// the argument type.
309static bool BuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
310 if (S.checkArgCount(TheCall, 1))
311 return true;
312
313 TheCall->setType(TheCall->getArg(0)->getType());
314 return false;
315}
316
317/// Check that the value argument for __builtin_is_aligned(value, alignment) and
318/// __builtin_aligned_{up,down}(value, alignment) is an integer or a pointer
319/// type (but not a function pointer) and that the alignment is a power-of-two.
320static bool BuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
321 if (S.checkArgCount(TheCall, 2))
322 return true;
323
324 clang::Expr *Source = TheCall->getArg(0);
325 bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;
326
327 auto IsValidIntegerType = [](QualType Ty) {
328 return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
329 };
330 QualType SrcTy = Source->getType();
331 // We should also be able to use it with arrays (but not functions!).
332 if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
333 SrcTy = S.Context.getDecayedType(SrcTy);
334 }
335 if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
336 SrcTy->isFunctionPointerType()) {
337 // FIXME: this is not quite the right error message since we don't allow
338 // floating point types, or member pointers.
339 S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
340 << SrcTy;
341 return true;
342 }
343
344 clang::Expr *AlignOp = TheCall->getArg(1);
345 if (!IsValidIntegerType(AlignOp->getType())) {
346 S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
347 << AlignOp->getType();
348 return true;
349 }
350 Expr::EvalResult AlignResult;
351 unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
352 // We can't check validity of alignment if it is value dependent.
353 if (!AlignOp->isValueDependent() &&
354 AlignOp->EvaluateAsInt(AlignResult, S.Context,
356 llvm::APSInt AlignValue = AlignResult.Val.getInt();
357 llvm::APSInt MaxValue(
358 llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
359 if (AlignValue < 1) {
360 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
361 return true;
362 }
363 if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
364 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
365 << toString(MaxValue, 10);
366 return true;
367 }
368 if (!AlignValue.isPowerOf2()) {
369 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
370 return true;
371 }
372 if (AlignValue == 1) {
373 S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
374 << IsBooleanAlignBuiltin;
375 }
376 }
377
380 SourceLocation(), Source);
381 if (SrcArg.isInvalid())
382 return true;
383 TheCall->setArg(0, SrcArg.get());
384 ExprResult AlignArg =
386 S.Context, AlignOp->getType(), false),
387 SourceLocation(), AlignOp);
388 if (AlignArg.isInvalid())
389 return true;
390 TheCall->setArg(1, AlignArg.get());
391 // For align_up/align_down, the return type is the same as the (potentially
392 // decayed) argument type including qualifiers. For is_aligned(), the result
393 // is always bool.
394 TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
395 return false;
396}
397
/// Type-check __builtin_{add,sub,mul}_overflow (and their C23 ckd_* macro
/// spellings): the first two arguments must be integers, the third a pointer
/// to a non-const integer. Returns true on error.
static bool BuiltinOverflow(Sema &S, CallExpr *TheCall, unsigned BuiltinID) {
  if (S.checkArgCount(TheCall, 3))
    return true;

  std::pair<unsigned, const char *> Builtins[] = {
      {Builtin::BI__builtin_add_overflow, "ckd_add"},
      {Builtin::BI__builtin_sub_overflow, "ckd_sub"},
      {Builtin::BI__builtin_mul_overflow, "ckd_mul"},
  };

  // Detect whether the builtin was reached via one of the <stdckdint.h>
  // ckd_* macros; those impose stricter type requirements below.
  // NOTE(review): the Lexer::getImmediateMacroName line was lost in
  // extraction and is reconstructed from upstream SemaChecking.cpp.
  bool CkdOperation =
      llvm::any_of(Builtins, [&](const std::pair<unsigned, const char *> &P) {
        return BuiltinID == P.first && TheCall->getExprLoc().isMacroID() &&
               Lexer::getImmediateMacroName(TheCall->getExprLoc(),
                                            S.getSourceManager(),
                                            S.getLangOpts()) == P.second;
      });

  auto ValidCkdIntType = [](QualType QT) {
    // A valid checked integer type is an integer type other than a plain char,
    // bool, a bit-precise type, or an enumeration type.
    if (const auto *BT = QT.getCanonicalType()->getAs<BuiltinType>())
      return (BT->getKind() >= BuiltinType::Short &&
              BT->getKind() <= BuiltinType::Int128) ||
             (BT->getKind() >= BuiltinType::UShort &&
              BT->getKind() <= BuiltinType::UInt128) ||
             BT->getKind() == BuiltinType::UChar ||
             BT->getKind() == BuiltinType::SChar;
    return false;
  };

  // First two arguments should be integers.
  // NOTE(review): the conversion line was lost in extraction; reconstructed
  // from upstream.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg =
        S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    bool IsValid = CkdOperation ? ValidCkdIntType(Ty) : Ty->isIntegerType();
    if (!IsValid) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << CkdOperation << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg =
        S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy || !PtrTy->getPointeeType()->isIntegerType() ||
        (!ValidCkdIntType(PtrTy->getPointeeType()) && CkdOperation) ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << CkdOperation << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed bit-precise integer args larger than 128 bits to mul
  // function until we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_bit_int_max_size)
               << 128;
    }
  }

  return false;
}
481
namespace {
// Helper for __builtin_dump_struct: synthesizes calls to a user-supplied
// printf-like function that dump a record's bases and fields, and bundles the
// synthesized expressions into a PseudoObjectExpr via buildWrapper().
struct BuiltinDumpStructGenerator {
  Sema &S;
  CallExpr *TheCall;
  SourceLocation Loc = TheCall->getBeginLoc();
  // NOTE(review): a member declaration was lost in extraction here --
  // `Actions` (used throughout) is presumably a SmallVector<Expr *, N>
  // collecting the synthesized expressions; confirm against upstream.
  DiagnosticErrorTrap ErrorTracker;
  PrintingPolicy Policy;

  BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall)
      : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()),
        Policy(S.Context.getPrintingPolicy()) {
    // Anonymous tag locations would leak source paths into the output.
    Policy.AnonymousTagLocations = false;
  }

  // Wrap Inner in an OpaqueValueExpr so it can be referenced repeatedly
  // without re-evaluation; the OVE is recorded as one of the actions.
  Expr *makeOpaqueValueExpr(Expr *Inner) {
    auto *OVE = new (S.Context)
        OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(),
                        Inner->getObjectKind(), Inner);
    Actions.push_back(OVE);
    return OVE;
  }

  // Build an expression for the string literal Str.
  Expr *getStringLiteral(llvm::StringRef Str) {
    // NOTE(review): the line constructing `Lit` (the StringLiteral node) was
    // lost in extraction; confirm against upstream.
    // Wrap the literal in parentheses to attach a source location.
    return new (S.Context) ParenExpr(Loc, Loc, Lit);
  }

  // Synthesize a call to the print function (argument 1 of the builtin),
  // forwarding the builtin's trailing arguments, then Format, then Exprs.
  // Returns true on error.
  bool callPrintFunction(llvm::StringRef Format,
                         llvm::ArrayRef<Expr *> Exprs = {}) {
    // NOTE(review): the declaration of `Args` (a SmallVector<Expr *, N>) was
    // lost in extraction.
    assert(TheCall->getNumArgs() >= 2);
    Args.reserve((TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size());
    Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end());
    Args.push_back(getStringLiteral(Format));
    Args.insert(Args.end(), Exprs.begin(), Exprs.end());

    // Register a note to explain why we're performing the call.
    // NOTE(review): the lines declaring and initializing the code-synthesis
    // context `Ctx` (and pushing it) were lost in extraction.
    Ctx.CallArgs = Args.data();
    Ctx.NumCallArgs = Args.size();

    ExprResult RealCall =
        S.BuildCallExpr(/*Scope=*/nullptr, TheCall->getArg(1),
                        TheCall->getBeginLoc(), Args, TheCall->getRParenLoc());

    // NOTE(review): the matching pop of the synthesis context was lost in
    // extraction.
    if (!RealCall.isInvalid())
      Actions.push_back(RealCall.get());
    // Bail out if we've hit any errors, even if we managed to build the
    // call. We don't want to produce more than one error.
    return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred();
  }

  // Return a string-literal expression of Depth levels of indentation, or
  // null at depth 0.
  Expr *getIndentString(unsigned Depth) {
    if (!Depth)
      return nullptr;

    // NOTE(review): the declaration of `Indent` (a SmallString buffer) was
    // lost in extraction.
    Indent.resize(Depth * Policy.Indentation, ' ');
    return getStringLiteral(Indent);
  }

  // NOTE(review): the signature line of this member (pretty-printing a
  // QualType as a string literal, e.g. `Expr *getTypeString(QualType T) {`)
  // was lost in extraction.
    return getStringLiteral(T.getAsString(Policy));
  }

  // Append a printf conversion specifier suitable for values of type T to
  // Str. Returns false if no specifier could be determined.
  bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) {
    llvm::raw_svector_ostream OS(Str);

    // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather
    // than trying to print a single character.
    if (auto *BT = T->getAs<BuiltinType>()) {
      switch (BT->getKind()) {
      case BuiltinType::Bool:
        OS << "%d";
        return true;
      case BuiltinType::Char_U:
      case BuiltinType::UChar:
        OS << "%hhu";
        return true;
      case BuiltinType::Char_S:
      case BuiltinType::SChar:
        OS << "%hhd";
        return true;
      default:
        break;
      }
    }

    // NOTE(review): the declaration of `Specifier` (an
    // analyze_printf::PrintfSpecifier) was lost in extraction.
    if (Specifier.fixType(T, S.getLangOpts(), S.Context, /*IsObjCLiteral=*/false)) {
      // We were able to guess how to format this.
      if (Specifier.getConversionSpecifier().getKind() ==
          analyze_printf::PrintfConversionSpecifier::sArg) {
        // Wrap double-quotes around a '%s' specifier and limit its maximum
        // length. Ideally we'd also somehow escape special characters in the
        // contents but printf doesn't support that.
        // FIXME: '%s' formatting is not safe in general.
        OS << '"';
        Specifier.setPrecision(analyze_printf::OptionalAmount(32u));
        Specifier.toString(OS);
        OS << '"';
        // FIXME: It would be nice to include a '...' if the string doesn't fit
        // in the length limit.
      } else {
        Specifier.toString(OS);
      }
      return true;
    }

    if (T->isPointerType()) {
      // Format all pointers with '%p'.
      OS << "%p";
      return true;
    }

    return false;
  }

  // Print the record's type name followed by its value (used for bases and
  // the top-level record, which have no field name).
  bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) {
    Expr *IndentLit = getIndentString(Depth);
    Expr *TypeLit = getTypeString(S.Context.getRecordType(RD));
    if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit})
                  : callPrintFunction("%s", {TypeLit}))
      return true;

    return dumpRecordValue(RD, E, IndentLit, Depth);
  }

  // Dump a record value. E should be a pointer or lvalue referring to an RD.
  bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent,
                       unsigned Depth) {
    // FIXME: Decide what to do if RD is a union. At least we should probably
    // turn off printing `const char*` members with `%s`, because that is very
    // likely to crash if that's not the active member. Whatever we decide, we
    // should document it.

    // Build an OpaqueValueExpr so we can refer to E more than once without
    // triggering re-evaluation.
    Expr *RecordArg = makeOpaqueValueExpr(E);
    bool RecordArgIsPtr = RecordArg->getType()->isPointerType();

    if (callPrintFunction(" {\n"))
      return true;

    // Dump each base class, regardless of whether they're aggregates.
    if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &Base : CXXRD->bases()) {
        QualType BaseType =
            RecordArgIsPtr ? S.Context.getPointerType(Base.getType())
                           : S.Context.getLValueReferenceType(Base.getType());
        // NOTE(review): the head of the expression building `BasePtr` (a
        // cast of RecordArg to BaseType) was lost in extraction.
            RecordArg);
        if (BasePtr.isInvalid() ||
            dumpUnnamedRecord(Base.getType()->getAsRecordDecl(), BasePtr.get(),
                              Depth + 1))
          return true;
      }
    }

    Expr *FieldIndentArg = getIndentString(Depth + 1);

    // Dump each field.
    for (auto *D : RD->decls()) {
      auto *IFD = dyn_cast<IndirectFieldDecl>(D);
      auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D);
      if (!FD || FD->isUnnamedBitField() || FD->isAnonymousStructOrUnion())
        continue;

      llvm::SmallString<20> Format = llvm::StringRef("%s%s %s ");
      llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg,
                                           getTypeString(FD->getType()),
                                           getStringLiteral(FD->getName())};

      if (FD->isBitField()) {
        Format += ": %zu ";
        // NOTE(review): the declaration of `SizeT` (the target's size_t
        // type) was lost in extraction.
        llvm::APInt BitWidth(S.Context.getIntWidth(SizeT),
                             FD->getBitWidthValue(S.Context));
        Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc));
      }

      Format += "=";

      // NOTE(review): the head of the expression building `Field` (a member
      // reference via either the indirect-field or direct-field path) was
      // lost in extraction.
          CXXScopeSpec(), Loc, IFD,
          DeclAccessPair::make(IFD, AS_public), RecordArg, Loc)
          RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD,
          DeclarationNameInfo(FD->getDeclName(), Loc));
      if (Field.isInvalid())
        return true;

      auto *InnerRD = FD->getType()->getAsRecordDecl();
      auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD);
      if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) {
        // Recursively print the values of members of aggregate record type.
        if (callPrintFunction(Format, Args) ||
            dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1))
          return true;
      } else {
        Format += " ";
        if (appendFormatSpecifier(FD->getType(), Format)) {
          // We know how to print this field.
          Args.push_back(Field.get());
        } else {
          // We don't know how to print this field. Print out its address
          // with a format specifier that a smart tool will be able to
          // recognize and treat specially.
          Format += "*%p";
          ExprResult FieldAddr =
              S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get());
          if (FieldAddr.isInvalid())
            return true;
          Args.push_back(FieldAddr.get());
        }
        Format += "\n";
        if (callPrintFunction(Format, Args))
          return true;
      }
    }

    return RecordIndent ? callPrintFunction("%s}\n", RecordIndent)
                        : callPrintFunction("}\n");
  }

  // Bundle the collected actions into a PseudoObjectExpr and copy its type
  // and value kind onto the original builtin call.
  Expr *buildWrapper() {
    // NOTE(review): the trailing argument of PseudoObjectExpr::Create was
    // lost in extraction.
    auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions,
    TheCall->setType(Wrapper->getType());
    TheCall->setValueKind(Wrapper->getValueKind());
    return Wrapper;
  }
};
} // namespace
725
727 if (S.checkArgCountAtLeast(TheCall, 2))
728 return ExprError();
729
730 ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0));
731 if (PtrArgResult.isInvalid())
732 return ExprError();
733 TheCall->setArg(0, PtrArgResult.get());
734
735 // First argument should be a pointer to a struct.
736 QualType PtrArgType = PtrArgResult.get()->getType();
737 if (!PtrArgType->isPointerType() ||
738 !PtrArgType->getPointeeType()->isRecordType()) {
739 S.Diag(PtrArgResult.get()->getBeginLoc(),
740 diag::err_expected_struct_pointer_argument)
741 << 1 << TheCall->getDirectCallee() << PtrArgType;
742 return ExprError();
743 }
744 QualType Pointee = PtrArgType->getPointeeType();
745 const RecordDecl *RD = Pointee->getAsRecordDecl();
746 // Try to instantiate the class template as appropriate; otherwise, access to
747 // its data() may lead to a crash.
748 if (S.RequireCompleteType(PtrArgResult.get()->getBeginLoc(), Pointee,
749 diag::err_incomplete_type))
750 return ExprError();
751 // Second argument is a callable, but we can't fully validate it until we try
752 // calling it.
753 QualType FnArgType = TheCall->getArg(1)->getType();
754 if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() &&
755 !FnArgType->isBlockPointerType() &&
756 !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) {
757 auto *BT = FnArgType->getAs<BuiltinType>();
758 switch (BT ? BT->getKind() : BuiltinType::Void) {
759 case BuiltinType::Dependent:
760 case BuiltinType::Overload:
761 case BuiltinType::BoundMember:
762 case BuiltinType::PseudoObject:
763 case BuiltinType::UnknownAny:
764 case BuiltinType::BuiltinFn:
765 // This might be a callable.
766 break;
767
768 default:
769 S.Diag(TheCall->getArg(1)->getBeginLoc(),
770 diag::err_expected_callable_argument)
771 << 2 << TheCall->getDirectCallee() << FnArgType;
772 return ExprError();
773 }
774 }
775
776 BuiltinDumpStructGenerator Generator(S, TheCall);
777
778 // Wrap parentheses around the given pointer. This is not necessary for
779 // correct code generation, but it means that when we pretty-print the call
780 // arguments in our diagnostics we will produce '(&s)->n' instead of the
781 // incorrect '&s->n'.
782 Expr *PtrArg = PtrArgResult.get();
783 PtrArg = new (S.Context)
784 ParenExpr(PtrArg->getBeginLoc(),
785 S.getLocForEndOfToken(PtrArg->getEndLoc()), PtrArg);
786 if (Generator.dumpUnnamedRecord(RD, PtrArg, 0))
787 return ExprError();
788
789 return Generator.buildWrapper();
790}
791
792static bool BuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
793 if (S.checkArgCount(BuiltinCall, 2))
794 return true;
795
796 SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
797 Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
798 Expr *Call = BuiltinCall->getArg(0);
799 Expr *Chain = BuiltinCall->getArg(1);
800
801 if (Call->getStmtClass() != Stmt::CallExprClass) {
802 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
803 << Call->getSourceRange();
804 return true;
805 }
806
807 auto CE = cast<CallExpr>(Call);
808 if (CE->getCallee()->getType()->isBlockPointerType()) {
809 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
810 << Call->getSourceRange();
811 return true;
812 }
813
814 const Decl *TargetDecl = CE->getCalleeDecl();
815 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
816 if (FD->getBuiltinID()) {
817 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
818 << Call->getSourceRange();
819 return true;
820 }
821
822 if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
823 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
824 << Call->getSourceRange();
825 return true;
826 }
827
828 ExprResult ChainResult = S.UsualUnaryConversions(Chain);
829 if (ChainResult.isInvalid())
830 return true;
831 if (!ChainResult.get()->getType()->isPointerType()) {
832 S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
833 << Chain->getSourceRange();
834 return true;
835 }
836
837 QualType ReturnTy = CE->getCallReturnType(S.Context);
838 QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
839 QualType BuiltinTy = S.Context.getFunctionType(
840 ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
841 QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);
842
843 Builtin =
844 S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();
845
846 BuiltinCall->setType(CE->getType());
847 BuiltinCall->setValueKind(CE->getValueKind());
848 BuiltinCall->setObjectKind(CE->getObjectKind());
849 BuiltinCall->setCallee(Builtin);
850 BuiltinCall->setArg(1, ChainResult.get());
851
852 return false;
853}
854
855namespace {
856
857class ScanfDiagnosticFormatHandler
859 // Accepts the argument index (relative to the first destination index) of the
860 // argument whose size we want.
861 using ComputeSizeFunction =
862 llvm::function_ref<std::optional<llvm::APSInt>(unsigned)>;
863
864 // Accepts the argument index (relative to the first destination index), the
865 // destination size, and the source size).
866 using DiagnoseFunction =
867 llvm::function_ref<void(unsigned, unsigned, unsigned)>;
868
869 ComputeSizeFunction ComputeSizeArgument;
870 DiagnoseFunction Diagnose;
871
872public:
873 ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
874 DiagnoseFunction Diagnose)
875 : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}
876
877 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
878 const char *StartSpecifier,
879 unsigned specifierLen) override {
880 if (!FS.consumesDataArgument())
881 return true;
882
883 unsigned NulByte = 0;
884 switch ((FS.getConversionSpecifier().getKind())) {
885 default:
886 return true;
889 NulByte = 1;
890 break;
892 break;
893 }
894
895 analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
896 if (FW.getHowSpecified() !=
897 analyze_format_string::OptionalAmount::HowSpecified::Constant)
898 return true;
899
900 unsigned SourceSize = FW.getConstantAmount() + NulByte;
901
902 std::optional<llvm::APSInt> DestSizeAPS =
903 ComputeSizeArgument(FS.getArgIndex());
904 if (!DestSizeAPS)
905 return true;
906
907 unsigned DestSize = DestSizeAPS->getZExtValue();
908
909 if (DestSize < SourceSize)
910 Diagnose(FS.getArgIndex(), DestSize, SourceSize);
911
912 return true;
913 }
914};
915
916class EstimateSizeFormatHandler
918 size_t Size;
919 /// Whether the format string contains Linux kernel's format specifier
920 /// extension.
921 bool IsKernelCompatible = true;
922
923public:
924 EstimateSizeFormatHandler(StringRef Format)
925 : Size(std::min(Format.find(0), Format.size()) +
926 1 /* null byte always written by sprintf */) {}
927
928 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
929 const char *, unsigned SpecifierLen,
930 const TargetInfo &) override {
931
932 const size_t FieldWidth = computeFieldWidth(FS);
933 const size_t Precision = computePrecision(FS);
934
935 // The actual format.
936 switch (FS.getConversionSpecifier().getKind()) {
937 // Just a char.
940 Size += std::max(FieldWidth, (size_t)1);
941 break;
942 // Just an integer.
952 Size += std::max(FieldWidth, Precision);
953 break;
954
955 // %g style conversion switches between %f or %e style dynamically.
956 // %g removes trailing zeros, and does not print decimal point if there are
957 // no digits that follow it. Thus %g can print a single digit.
958 // FIXME: If it is alternative form:
959 // For g and G conversions, trailing zeros are not removed from the result.
962 Size += 1;
963 break;
964
965 // Floating point number in the form '[+]ddd.ddd'.
968 Size += std::max(FieldWidth, 1 /* integer part */ +
969 (Precision ? 1 + Precision
970 : 0) /* period + decimal */);
971 break;
972
973 // Floating point number in the form '[-]d.ddde[+-]dd'.
976 Size +=
977 std::max(FieldWidth,
978 1 /* integer part */ +
979 (Precision ? 1 + Precision : 0) /* period + decimal */ +
980 1 /* e or E letter */ + 2 /* exponent */);
981 break;
982
983 // Floating point number in the form '[-]0xh.hhhhp±dd'.
986 Size +=
987 std::max(FieldWidth,
988 2 /* 0x */ + 1 /* integer part */ +
989 (Precision ? 1 + Precision : 0) /* period + decimal */ +
990 1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
991 break;
992
993 // Just a string.
996 Size += FieldWidth;
997 break;
998
999 // Just a pointer in the form '0xddd'.
1001 // Linux kernel has its own extesion for `%p` specifier.
1002 // Kernel Document:
1003 // https://docs.kernel.org/core-api/printk-formats.html#pointer-types
1004 IsKernelCompatible = false;
1005 Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
1006 break;
1007
1008 // A plain percent.
1010 Size += 1;
1011 break;
1012
1013 default:
1014 break;
1015 }
1016
1017 Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();
1018
1019 if (FS.hasAlternativeForm()) {
1020 switch (FS.getConversionSpecifier().getKind()) {
1021 // For o conversion, it increases the precision, if and only if necessary,
1022 // to force the first digit of the result to be a zero
1023 // (if the value and precision are both 0, a single 0 is printed)
1025 // For b conversion, a nonzero result has 0b prefixed to it.
1027 // For x (or X) conversion, a nonzero result has 0x (or 0X) prefixed to
1028 // it.
1031 // Note: even when the prefix is added, if
1032 // (prefix_width <= FieldWidth - formatted_length) holds,
1033 // the prefix does not increase the format
1034 // size. e.g.(("%#3x", 0xf) is "0xf")
1035
1036 // If the result is zero, o, b, x, X adds nothing.
1037 break;
1038 // For a, A, e, E, f, F, g, and G conversions,
1039 // the result of converting a floating-point number always contains a
1040 // decimal-point
1049 Size += (Precision ? 0 : 1);
1050 break;
1051 // For other conversions, the behavior is undefined.
1052 default:
1053 break;
1054 }
1055 }
1056 assert(SpecifierLen <= Size && "no underflow");
1057 Size -= SpecifierLen;
1058 return true;
1059 }
1060
1061 size_t getSizeLowerBound() const { return Size; }
1062 bool isKernelCompatible() const { return IsKernelCompatible; }
1063
1064private:
  /// Returns the field width requested by the specifier, or 0 when none is
  /// available as a constant.
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    // NOTE(review): getConstantAmount() is called unconditionally here; this
    // looks like it should be guarded by a check that the width was specified
    // as a constant (FW.getHowSpecified()) — verify against upstream.
    FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }
1072
  /// Returns the precision to assume for the specifier: the explicit constant
  /// precision when one was written, otherwise a per-conversion default.
  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
    switch (FW.getHowSpecified()) {
      // NOTE(review): the 'case' labels of this outer switch and several
      // 'case' labels of the inner switch (which select the default precision
      // per conversion specifier) appear to be missing from this copy —
      // verify against upstream before relying on this logic.
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
        Precision = 1;
        break;
        Precision = 1;
        break;
        Precision = 6;
        break;
        Precision = 1;
        break;
      }
      break;
      // Explicit constant precision written in the format string.
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
1117};
1118
1119} // namespace
1120
1121static bool ProcessFormatStringLiteral(const Expr *FormatExpr,
1122 StringRef &FormatStrRef, size_t &StrLen,
1123 ASTContext &Context) {
1124 if (const auto *Format = dyn_cast<StringLiteral>(FormatExpr);
1125 Format && (Format->isOrdinary() || Format->isUTF8())) {
1126 FormatStrRef = Format->getString();
1127 const ConstantArrayType *T =
1128 Context.getAsConstantArrayType(Format->getType());
1129 assert(T && "String literal not of constant array type!");
1130 size_t TypeSize = T->getZExtSize();
1131 // In case there's a null byte somewhere.
1132 StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
1133 return true;
1134 }
1135 return false;
1136}
1137
// Diagnose fortified (_FORTIFY_SOURCE-style) memory/string/printf builtins
// whose compile-time-known source size exceeds the destination object size.
// Emits warn_fortify_* / warn_builtin_chk_overflow / warn_format_* style
// diagnostics via DiagRuntimeBehavior; does nothing when sizes are unknown.
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
    // NOTE(review): the continuation of this condition is missing from this
    // copy (likely a dependent-context check) — verify against upstream.
    return;

  // When the callee carries diagnose_as_builtin, check it as if it were the
  // referenced builtin, translating argument indices accordingly.
  bool UseDABAttr = false;
  const FunctionDecl *UseDecl = FD;

  const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
  if (DABAttr) {
    UseDecl = DABAttr->getFunction();
    assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
    UseDABAttr = true;
  }

  unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true);

  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  // Maps an index into the builtin's parameters onto an index into the
  // actual call's arguments (identity unless diagnose_as_builtin remaps).
  auto TranslateIndex = [&](unsigned Index) -> std::optional<unsigned> {
    // If we refer to a diagnose_as_builtin attribute, we need to change the
    // argument index to refer to the arguments of the called function. Unless
    // the index is out of bounds, which presumably means it's a variadic
    // function.
    if (!UseDABAttr)
      return Index;
    unsigned DABIndices = DABAttr->argIndices_size();
    unsigned NewIndex = Index < DABIndices
                            ? DABAttr->argIndices_begin()[Index]
                            : Index - DABIndices + FD->getNumParams();
    if (NewIndex >= TheCall->getNumArgs())
      return std::nullopt;
    return NewIndex;
  };

  // Evaluates an explicit size argument (e.g. the object-size operand of a
  // __*_chk builtin) as an unsigned integer, if it is a constant.
  auto ComputeExplicitObjectSizeArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;
    Expr *SizeArg = TheCall->getArg(NewIndex);
    // NOTE(review): 'Result' is used here without a visible declaration;
    // an 'Expr::EvalResult Result;' line appears to be missing — verify.
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return std::nullopt;
    llvm::APSInt Integer = Result.Val.getInt();
    Integer.setIsUnsigned(true);
    return Integer;
  };

  // Computes the __builtin_object_size of the pointer argument at Index.
  auto ComputeSizeArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
    int BOSType = 0;
    // This check can fail for variadic functions.
    if (Index < FD->getNumParams()) {
      if (const auto *POS =
              FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
        BOSType = POS->getType();
    }

    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;

    if (NewIndex >= TheCall->getNumArgs())
      return std::nullopt;

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    // NOTE(review): 'Result' is used here without a visible declaration
    // (likely 'uint64_t Result;') — verify against upstream.
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return std::nullopt;

    // Get the object size in the target's size_t width.
    return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  };

  // Computes strlen(arg) + 1 for a constant-string argument at Index.
  auto ComputeStrLenArgument =
      [&](unsigned Index) -> std::optional<llvm::APSInt> {
    std::optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return std::nullopt;
    unsigned NewIndex = *IndexOptional;

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    // NOTE(review): 'Result' is used here without a visible declaration
    // (likely 'uint64_t Result;') — verify against upstream.
    if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
      return std::nullopt;
    // Add 1 for null byte.
    return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
  };

  std::optional<llvm::APSInt> SourceSize;
  std::optional<llvm::APSInt> DestinationSize;
  unsigned DiagID = 0;
  bool IsChkVariant = false;

  // Produces a user-facing name for the builtin, stripping the __builtin_
  // (and _chk, for the checked variants) decoration.
  auto GetFunctionName = [&]() {
    StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
    // Skim off the details of whichever builtin was called to produce a better
    // diagnostic, as it's unlikely that the user wrote the __builtin
    // explicitly.
    if (IsChkVariant) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
      FunctionName = FunctionName.drop_back(std::strlen("_chk"));
    } else {
      FunctionName.consume_front("__builtin_");
    }
    return FunctionName;
  };

  // Per-builtin selection of how SourceSize/DestinationSize are computed and
  // which diagnostic to emit on overflow.
  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin_strcpy:
  case Builtin::BIstrcpy: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BI__builtin___strcpy_chk: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(2);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIscanf:
  case Builtin::BIfscanf:
  case Builtin::BIsscanf: {
    unsigned FormatIndex = 1;
    unsigned DataIndex = 2;
    if (BuiltinID == Builtin::BIscanf) {
      FormatIndex = 0;
      DataIndex = 1;
    }

    const auto *FormatExpr =
        TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    StringRef FormatStrRef;
    size_t StrLen;
    if (!ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context))
      return;

    // Invoked by the scanf format handler for each overflowing %Ns spec.
    auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
                        unsigned SourceSize) {
      DiagID = diag::warn_fortify_scanf_overflow;
      unsigned Index = ArgIndex + DataIndex;
      StringRef FunctionName = GetFunctionName();
      DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall,
                          PDiag(DiagID) << FunctionName << (Index + 1)
                                        << DestSize << SourceSize);
    };

    auto ShiftedComputeSizeArgument = [&](unsigned Index) {
      return ComputeSizeArgument(Index + DataIndex);
    };
    ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose);
    const char *FormatBytes = FormatStrRef.data();
    // NOTE(review): the head of the ParseScanfString(...) call appears to be
    // missing from this copy — verify against upstream.
                                  FormatBytes + StrLen, getLangOpts(),

    // Unlike the other cases, in this one we have already issued the diagnostic
    // here, so no need to continue (because unlike the other cases, here the
    // diagnostic refers to the argument number).
    return;
  }

  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    StringRef FormatStrRef;
    size_t StrLen;
    if (ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context)) {
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      // NOTE(review): the head of the ParsePrintfString(...) call appears to
      // be missing from this copy — verify against upstream.
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = H.isKernelCompatible()
                     ? diag::warn_format_overflow
                     : diag::warn_format_overflow_non_kprintf;
        SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                         .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          DestinationSize = ComputeExplicitObjectSizeArgument(2);
          IsChkVariant = true;
        } else {
          DestinationSize = ComputeSizeArgument(0);
        }
        break;
      }
    }
    return;
  }
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    // The checked variants carry both sizes explicitly as trailing args.
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
    DestinationSize =
        ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    IsChkVariant = true;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(3);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    const auto *FormatExpr = TheCall->getArg(2)->IgnoreParenImpCasts();
    StringRef FormatStrRef;
    size_t StrLen;
    if (SourceSize &&
        ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context)) {
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      // NOTE(review): the head of the ParsePrintfString(...) call appears to
      // be missing from this copy — verify against upstream.
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), /*isFreeBSDKPrintf=*/false)) {
        llvm::APSInt FormatSize =
            llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                .extOrTrunc(SizeTypeWidth);
        // Warn about truncation when the format needs more than the
        // specified buffer size (a size of 0 is a legal probe, not an error).
        if (FormatSize > *SourceSize && *SourceSize != 0) {
          unsigned TruncationDiagID =
              H.isKernelCompatible() ? diag::warn_format_truncation
                                     : diag::warn_format_truncation_non_kprintf;
          SmallString<16> SpecifiedSizeStr;
          SmallString<16> FormatSizeStr;
          SourceSize->toString(SpecifiedSizeStr, /*Radix=*/10);
          FormatSize.toString(FormatSizeStr, /*Radix=*/10);
          DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                              PDiag(TruncationDiagID)
                                  << GetFunctionName() << SpecifiedSizeStr
                                  << FormatSizeStr);
        }
      }
    }
    DestinationSize = ComputeSizeArgument(0);
  }
  }

  // Only diagnose when both sizes are known and source strictly exceeds
  // destination.
  if (!SourceSize || !DestinationSize ||
      llvm::APSInt::compareValues(*SourceSize, *DestinationSize) <= 0)
    return;

  StringRef FunctionName = GetFunctionName();

  SmallString<16> DestinationStr;
  SmallString<16> SourceStr;
  DestinationSize->toString(DestinationStr, /*Radix=*/10);
  SourceSize->toString(SourceStr, /*Radix=*/10);
  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << DestinationStr << SourceStr);
}
1457
1458static bool BuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
1459 Scope::ScopeFlags NeededScopeFlags,
1460 unsigned DiagID) {
1461 // Scopes aren't available during instantiation. Fortunately, builtin
1462 // functions cannot be template args so they cannot be formed through template
1463 // instantiation. Therefore checking once during the parse is sufficient.
1464 if (SemaRef.inTemplateInstantiation())
1465 return false;
1466
1467 Scope *S = SemaRef.getCurScope();
1468 while (S && !S->isSEHExceptScope())
1469 S = S->getParent();
1470 if (!S || !(S->getFlags() & NeededScopeFlags)) {
1471 auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
1472 SemaRef.Diag(TheCall->getExprLoc(), DiagID)
1473 << DRE->getDecl()->getIdentifier();
1474 return true;
1475 }
1476
1477 return false;
1478}
1479
// In OpenCL, __builtin_alloca_* should return a pointer to address space
// that corresponds to the stack address space i.e private address space.
static void builtinAllocaAddrSpace(Sema &S, CallExpr *TheCall) {
  QualType RT = TheCall->getType();
  assert((RT->isPointerType() && !(RT->getPointeeType().hasAddressSpace())) &&
         "__builtin_alloca has invalid address space");

  RT = RT->getPointeeType();
  // NOTE(review): a line requalifying RT with the private/stack address
  // space (e.g. via getAddrSpaceQualType) appears to be missing here — as
  // written, the call's type is rebuilt without any address-space change.
  // Verify against upstream.
  TheCall->setType(S.Context.getPointerType(RT));
}
1491
namespace {
// Kind of operand being validated by the pointer-authentication builtin
// checks below; selects which operand types (pointer and/or integer) are
// acceptable in checkPointerAuthValue.
enum PointerAuthOpKind {
  PAO_Strip,         // value operand of ptrauth_strip
  PAO_Sign,          // value operand of sign operations
  PAO_Auth,          // value operand of auth operations
  PAO_SignGeneric,   // data operand of generic signing
  PAO_Discriminator, // discriminator operand
  PAO_BlendPointer,  // pointer operand of blend_discriminator
  PAO_BlendInteger   // integer operand of blend_discriminator
};
}
1503
1505 if (getLangOpts().PointerAuthIntrinsics)
1506 return false;
1507
1508 Diag(Loc, diag::err_ptrauth_disabled) << Range;
1509 return true;
1510}
1511
1514}
1515
1516static bool checkPointerAuthKey(Sema &S, Expr *&Arg) {
1517 // Convert it to type 'int'.
1518 if (convertArgumentToType(S, Arg, S.Context.IntTy))
1519 return true;
1520
1521 // Value-dependent expressions are okay; wait for template instantiation.
1522 if (Arg->isValueDependent())
1523 return false;
1524
1525 unsigned KeyValue;
1526 return S.checkConstantPointerAuthKey(Arg, KeyValue);
1527}
1528
1530 // Attempt to constant-evaluate the expression.
1531 std::optional<llvm::APSInt> KeyValue = Arg->getIntegerConstantExpr(Context);
1532 if (!KeyValue) {
1533 Diag(Arg->getExprLoc(), diag::err_expr_not_ice)
1534 << 0 << Arg->getSourceRange();
1535 return true;
1536 }
1537
1538 // Ask the target to validate the key parameter.
1539 if (!Context.getTargetInfo().validatePointerAuthKey(*KeyValue)) {
1541 {
1542 llvm::raw_svector_ostream Str(Value);
1543 Str << *KeyValue;
1544 }
1545
1546 Diag(Arg->getExprLoc(), diag::err_ptrauth_invalid_key)
1547 << Value << Arg->getSourceRange();
1548 return true;
1549 }
1550
1551 Result = KeyValue->getZExtValue();
1552 return false;
1553}
1554
1555static std::pair<const ValueDecl *, CharUnits>
1557 // Must evaluate as a pointer.
1559 if (!E->EvaluateAsRValue(Result, S.Context) || !Result.Val.isLValue())
1560 return {nullptr, CharUnits()};
1561
1562 const auto *BaseDecl =
1563 Result.Val.getLValueBase().dyn_cast<const ValueDecl *>();
1564 if (!BaseDecl)
1565 return {nullptr, CharUnits()};
1566
1567 return {BaseDecl, Result.Val.getLValueOffset()};
1568}
1569
// Validate one value operand of a pointer-authentication builtin: the
// pointer being stripped/signed/authenticated, a blend operand, or a
// discriminator. Coerces \p Arg to the expected type; when \p RequireConstant
// is set (ptrauth_sign_constant), additionally requires a constant-foldable
// form. Returns true on error.
static bool checkPointerAuthValue(Sema &S, Expr *&Arg, PointerAuthOpKind OpKind,
                                  bool RequireConstant = false) {
  if (Arg->hasPlaceholderType()) {
    // NOTE(review): 'R' has no visible declaration; a line such as
    // 'ExprResult R = S.CheckPlaceholderExpr(Arg);' appears to be missing
    // here — verify against upstream.
    if (R.isInvalid())
      return true;
    Arg = R.get();
  }

  // Which operand kinds accept a pointer-typed value.
  auto AllowsPointer = [](PointerAuthOpKind OpKind) {
    return OpKind != PAO_BlendInteger;
  };
  // Which operand kinds accept an integer-typed value.
  auto AllowsInteger = [](PointerAuthOpKind OpKind) {
    return OpKind == PAO_Discriminator || OpKind == PAO_BlendInteger ||
           OpKind == PAO_SignGeneric;
  };

  // Require the value to have the right range of type.
  QualType ExpectedTy;
  if (AllowsPointer(OpKind) && Arg->getType()->isPointerType()) {
    ExpectedTy = Arg->getType().getUnqualifiedType();
  } else if (AllowsPointer(OpKind) && Arg->getType()->isNullPtrType()) {
    ExpectedTy = S.Context.VoidPtrTy;
  // NOTE(review): the condition's continuation (presumably an
  // integral-or-enumeration type check) appears to be missing from this
  // copy — verify against upstream.
  } else if (AllowsInteger(OpKind) &&
    ExpectedTy = S.Context.getUIntPtrType();

  } else {
    // Diagnose the failures.
    S.Diag(Arg->getExprLoc(), diag::err_ptrauth_value_bad_type)
        << unsigned(OpKind == PAO_Discriminator ? 1
                    : OpKind == PAO_BlendPointer ? 2
                    : OpKind == PAO_BlendInteger ? 3
                                                 : 0)
        << unsigned(AllowsInteger(OpKind) ? (AllowsPointer(OpKind) ? 2 : 1) : 0)
        << Arg->getType() << Arg->getSourceRange();
    return true;
  }

  // Convert to that type. This should just be an lvalue-to-rvalue
  // conversion.
  if (convertArgumentToType(S, Arg, ExpectedTy))
    return true;

  if (!RequireConstant) {
    // Warn about null pointers for non-generic sign and auth operations.
    // NOTE(review): the null-pointer-constant check that completes this
    // condition appears to be missing from this copy — verify upstream.
    if ((OpKind == PAO_Sign || OpKind == PAO_Auth) &&
      S.Diag(Arg->getExprLoc(), OpKind == PAO_Sign
                                    ? diag::warn_ptrauth_sign_null_pointer
                                    : diag::warn_ptrauth_auth_null_pointer)
          << Arg->getSourceRange();
    }

    return false;
  }

  // Perform special checking on the arguments to ptrauth_sign_constant.

  // The main argument.
  if (OpKind == PAO_Sign) {
    // Require the value we're signing to have a special form.
    auto [BaseDecl, Offset] = findConstantBaseAndOffset(S, Arg);
    bool Invalid;

    // Must be rooted in a declaration reference.
    if (!BaseDecl)
      Invalid = true;

    // If it's a function declaration, we can't have an offset.
    else if (isa<FunctionDecl>(BaseDecl))
      Invalid = !Offset.isZero();

    // Otherwise we're fine.
    else
      Invalid = false;

    if (Invalid)
      S.Diag(Arg->getExprLoc(), diag::err_ptrauth_bad_constant_pointer);
    return Invalid;
  }

  // The discriminator argument.
  assert(OpKind == PAO_Discriminator);

  // Must be a pointer or integer or blend thereof.
  Expr *Pointer = nullptr;
  Expr *Integer = nullptr;
  if (auto *Call = dyn_cast<CallExpr>(Arg->IgnoreParens())) {
    if (Call->getBuiltinCallee() ==
        Builtin::BI__builtin_ptrauth_blend_discriminator) {
      Pointer = Call->getArg(0);
      Integer = Call->getArg(1);
    }
  }
  if (!Pointer && !Integer) {
    if (Arg->getType()->isPointerType())
      Pointer = Arg;
    else
      Integer = Arg;
  }

  // Check the pointer.
  bool Invalid = false;
  if (Pointer) {
    assert(Pointer->getType()->isPointerType());

    // TODO: if we're initializing a global, check that the address is
    // somehow related to what we're initializing. This probably will
    // never really be feasible and we'll have to catch it at link-time.
    auto [BaseDecl, Offset] = findConstantBaseAndOffset(S, Pointer);
    if (!BaseDecl || !isa<VarDecl>(BaseDecl))
      Invalid = true;
  }

  // Check the integer.
  if (Integer) {
    assert(Integer->getType()->isIntegerType());
    if (!Integer->isEvaluatable(S.Context))
      Invalid = true;
  }

  if (Invalid)
    S.Diag(Arg->getExprLoc(), diag::err_ptrauth_bad_constant_discriminator);
  return Invalid;
}
1696
1698 if (S.checkArgCount(Call, 2))
1699 return ExprError();
1701 return ExprError();
1702 if (checkPointerAuthValue(S, Call->getArgs()[0], PAO_Strip) ||
1703 checkPointerAuthKey(S, Call->getArgs()[1]))
1704 return ExprError();
1705
1706 Call->setType(Call->getArgs()[0]->getType());
1707 return Call;
1708}
1709
1711 if (S.checkArgCount(Call, 2))
1712 return ExprError();
1714 return ExprError();
1715 if (checkPointerAuthValue(S, Call->getArgs()[0], PAO_BlendPointer) ||
1716 checkPointerAuthValue(S, Call->getArgs()[1], PAO_BlendInteger))
1717 return ExprError();
1718
1719 Call->setType(S.Context.getUIntPtrType());
1720 return Call;
1721}
1722
1724 if (S.checkArgCount(Call, 2))
1725 return ExprError();
1727 return ExprError();
1728 if (checkPointerAuthValue(S, Call->getArgs()[0], PAO_SignGeneric) ||
1729 checkPointerAuthValue(S, Call->getArgs()[1], PAO_Discriminator))
1730 return ExprError();
1731
1732 Call->setType(S.Context.getUIntPtrType());
1733 return Call;
1734}
1735
1737 PointerAuthOpKind OpKind,
1738 bool RequireConstant) {
1739 if (S.checkArgCount(Call, 3))
1740 return ExprError();
1742 return ExprError();
1743 if (checkPointerAuthValue(S, Call->getArgs()[0], OpKind, RequireConstant) ||
1744 checkPointerAuthKey(S, Call->getArgs()[1]) ||
1745 checkPointerAuthValue(S, Call->getArgs()[2], PAO_Discriminator,
1746 RequireConstant))
1747 return ExprError();
1748
1749 Call->setType(Call->getArgs()[0]->getType());
1750 return Call;
1751}
1752
1754 if (S.checkArgCount(Call, 5))
1755 return ExprError();
1757 return ExprError();
1758 if (checkPointerAuthValue(S, Call->getArgs()[0], PAO_Auth) ||
1759 checkPointerAuthKey(S, Call->getArgs()[1]) ||
1760 checkPointerAuthValue(S, Call->getArgs()[2], PAO_Discriminator) ||
1761 checkPointerAuthKey(S, Call->getArgs()[3]) ||
1762 checkPointerAuthValue(S, Call->getArgs()[4], PAO_Discriminator))
1763 return ExprError();
1764
1765 Call->setType(Call->getArgs()[0]->getType());
1766 return Call;
1767}
1768
1771 return ExprError();
1772
1773 // We've already performed normal call type-checking.
1774 const Expr *Arg = Call->getArg(0)->IgnoreParenImpCasts();
1775
1776 // Operand must be an ordinary or UTF-8 string literal.
1777 const auto *Literal = dyn_cast<StringLiteral>(Arg);
1778 if (!Literal || Literal->getCharByteWidth() != 1) {
1779 S.Diag(Arg->getExprLoc(), diag::err_ptrauth_string_not_literal)
1780 << (Literal ? 1 : 0) << Arg->getSourceRange();
1781 return ExprError();
1782 }
1783
1784 return Call;
1785}
1786
1788 if (S.checkArgCount(TheCall, 1))
1789 return ExprError();
1790
1791 // Compute __builtin_launder's parameter type from the argument.
1792 // The parameter type is:
1793 // * The type of the argument if it's not an array or function type,
1794 // Otherwise,
1795 // * The decayed argument type.
1796 QualType ParamTy = [&]() {
1797 QualType ArgTy = TheCall->getArg(0)->getType();
1798 if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
1799 return S.Context.getPointerType(Ty->getElementType());
1800 if (ArgTy->isFunctionType()) {
1801 return S.Context.getPointerType(ArgTy);
1802 }
1803 return ArgTy;
1804 }();
1805
1806 TheCall->setType(ParamTy);
1807
1808 auto DiagSelect = [&]() -> std::optional<unsigned> {
1809 if (!ParamTy->isPointerType())
1810 return 0;
1811 if (ParamTy->isFunctionPointerType())
1812 return 1;
1813 if (ParamTy->isVoidPointerType())
1814 return 2;
1815 return std::optional<unsigned>{};
1816 }();
1817 if (DiagSelect) {
1818 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
1819 << *DiagSelect << TheCall->getSourceRange();
1820 return ExprError();
1821 }
1822
1823 // We either have an incomplete class type, or we have a class template
1824 // whose instantiation has not been forced. Example:
1825 //
1826 // template <class T> struct Foo { T value; };
1827 // Foo<int> *p = nullptr;
1828 // auto *d = __builtin_launder(p);
1829 if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
1830 diag::err_incomplete_type))
1831 return ExprError();
1832
1833 assert(ParamTy->getPointeeType()->isObjectType() &&
1834 "Unhandled non-object pointer case");
1835
1836 InitializedEntity Entity =
1838 ExprResult Arg =
1839 S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
1840 if (Arg.isInvalid())
1841 return ExprError();
1842 TheCall->setArg(0, Arg.get());
1843
1844 return TheCall;
1845}
1846
1847// Emit an error and return true if the current object format type is in the
1848// list of unsupported types.
1850 Sema &S, unsigned BuiltinID, CallExpr *TheCall,
1851 ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) {
1852 llvm::Triple::ObjectFormatType CurObjFormat =
1853 S.getASTContext().getTargetInfo().getTriple().getObjectFormat();
1854 if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) {
1855 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
1856 << TheCall->getSourceRange();
1857 return true;
1858 }
1859 return false;
1860}
1861
1862// Emit an error and return true if the current architecture is not in the list
1863// of supported architectures.
1864static bool
1866 ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
1867 llvm::Triple::ArchType CurArch =
1868 S.getASTContext().getTargetInfo().getTriple().getArch();
1869 if (llvm::is_contained(SupportedArchs, CurArch))
1870 return false;
1871 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
1872 << TheCall->getSourceRange();
1873 return true;
1874}
1875
1876static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
1877 SourceLocation CallSiteLoc);
1878
1879bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
1880 CallExpr *TheCall) {
1881 switch (TI.getTriple().getArch()) {
1882 default:
1883 // Some builtins don't require additional checking, so just consider these
1884 // acceptable.
1885 return false;
1886 case llvm::Triple::arm:
1887 case llvm::Triple::armeb:
1888 case llvm::Triple::thumb:
1889 case llvm::Triple::thumbeb:
1890 return ARM().CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
1891 case llvm::Triple::aarch64:
1892 case llvm::Triple::aarch64_32:
1893 case llvm::Triple::aarch64_be:
1894 return ARM().CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
1895 case llvm::Triple::bpfeb:
1896 case llvm::Triple::bpfel:
1897 return BPF().CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
1898 case llvm::Triple::hexagon:
1899 return Hexagon().CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
1900 case llvm::Triple::mips:
1901 case llvm::Triple::mipsel:
1902 case llvm::Triple::mips64:
1903 case llvm::Triple::mips64el:
1904 return MIPS().CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
1905 case llvm::Triple::systemz:
1906 return SystemZ().CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
1907 case llvm::Triple::x86:
1908 case llvm::Triple::x86_64:
1909 return X86().CheckBuiltinFunctionCall(TI, BuiltinID, TheCall);
1910 case llvm::Triple::ppc:
1911 case llvm::Triple::ppcle:
1912 case llvm::Triple::ppc64:
1913 case llvm::Triple::ppc64le:
1914 return PPC().CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
1915 case llvm::Triple::amdgcn:
1916 return AMDGPU().CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
1917 case llvm::Triple::riscv32:
1918 case llvm::Triple::riscv64:
1919 return RISCV().CheckBuiltinFunctionCall(TI, BuiltinID, TheCall);
1920 case llvm::Triple::loongarch32:
1921 case llvm::Triple::loongarch64:
1922 return LoongArch().CheckLoongArchBuiltinFunctionCall(TI, BuiltinID,
1923 TheCall);
1924 case llvm::Triple::wasm32:
1925 case llvm::Triple::wasm64:
1926 return Wasm().CheckWebAssemblyBuiltinFunctionCall(TI, BuiltinID, TheCall);
1927 case llvm::Triple::nvptx:
1928 case llvm::Triple::nvptx64:
1929 return NVPTX().CheckNVPTXBuiltinFunctionCall(TI, BuiltinID, TheCall);
1930 }
1931}
1932
1933// Check if \p Ty is a valid type for the elementwise math builtins. If it is
1934// not a valid type, emit an error message and return true. Otherwise return
1935// false.
1937 QualType ArgTy, int ArgIndex) {
1938 if (!ArgTy->getAs<VectorType>() &&
1940 return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
1941 << ArgIndex << /* vector, integer or float ty*/ 0 << ArgTy;
1942 }
1943
1944 return false;
1945}
1946
1948 QualType ArgTy, int ArgIndex) {
1949 QualType EltTy = ArgTy;
1950 if (auto *VecTy = EltTy->getAs<VectorType>())
1951 EltTy = VecTy->getElementType();
1952
1953 if (!EltTy->isRealFloatingType()) {
1954 return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
1955 << ArgIndex << /* vector or float ty*/ 5 << ArgTy;
1956 }
1957
1958 return false;
1959}
1960
1961/// BuiltinCpu{Supports|Is} - Handle __builtin_cpu_{supports|is}(char *).
1962/// This checks that the target supports the builtin and that the string
1963/// argument is constant and valid.
1964static bool BuiltinCpu(Sema &S, const TargetInfo &TI, CallExpr *TheCall,
1965 const TargetInfo *AuxTI, unsigned BuiltinID) {
1966 assert((BuiltinID == Builtin::BI__builtin_cpu_supports ||
1967 BuiltinID == Builtin::BI__builtin_cpu_is) &&
1968 "Expecting __builtin_cpu_...");
1969
1970 bool IsCPUSupports = BuiltinID == Builtin::BI__builtin_cpu_supports;
1971 const TargetInfo *TheTI = &TI;
1972 auto SupportsBI = [=](const TargetInfo *TInfo) {
1973 return TInfo && ((IsCPUSupports && TInfo->supportsCpuSupports()) ||
1974 (!IsCPUSupports && TInfo->supportsCpuIs()));
1975 };
1976 if (!SupportsBI(&TI) && SupportsBI(AuxTI))
1977 TheTI = AuxTI;
1978
1979 if ((!IsCPUSupports && !TheTI->supportsCpuIs()) ||
1980 (IsCPUSupports && !TheTI->supportsCpuSupports()))
1981 return S.Diag(TheCall->getBeginLoc(),
1982 TI.getTriple().isOSAIX()
1983 ? diag::err_builtin_aix_os_unsupported
1984 : diag::err_builtin_target_unsupported)
1985 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
1986
1987 Expr *Arg = TheCall->getArg(0)->IgnoreParenImpCasts();
1988 // Check if the argument is a string literal.
1989 if (!isa<StringLiteral>(Arg))
1990 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
1991 << Arg->getSourceRange();
1992
1993 // Check the contents of the string.
1994 StringRef Feature = cast<StringLiteral>(Arg)->getString();
1995 if (IsCPUSupports && !TheTI->validateCpuSupports(Feature)) {
1996 S.Diag(TheCall->getBeginLoc(), diag::warn_invalid_cpu_supports)
1997 << Arg->getSourceRange();
1998 return false;
1999 }
2000 if (!IsCPUSupports && !TheTI->validateCpuIs(Feature))
2001 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
2002 << Arg->getSourceRange();
2003 return false;
2004}
2005
2006/// Checks that __builtin_popcountg was called with a single argument, which is
2007/// an unsigned integer.
2008static bool BuiltinPopcountg(Sema &S, CallExpr *TheCall) {
2009 if (S.checkArgCount(TheCall, 1))
2010 return true;
2011
2012 ExprResult ArgRes = S.DefaultLvalueConversion(TheCall->getArg(0));
2013 if (ArgRes.isInvalid())
2014 return true;
2015
2016 Expr *Arg = ArgRes.get();
2017 TheCall->setArg(0, Arg);
2018
2019 QualType ArgTy = Arg->getType();
2020
2021 if (!ArgTy->isUnsignedIntegerType()) {
2022 S.Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2023 << 1 << /*unsigned integer ty*/ 7 << ArgTy;
2024 return true;
2025 }
2026 return false;
2027}
2028
2029/// Checks that __builtin_{clzg,ctzg} was called with a first argument, which is
2030/// an unsigned integer, and an optional second argument, which is promoted to
2031/// an 'int'.
2032static bool BuiltinCountZeroBitsGeneric(Sema &S, CallExpr *TheCall) {
2033 if (S.checkArgCountRange(TheCall, 1, 2))
2034 return true;
2035
2036 ExprResult Arg0Res = S.DefaultLvalueConversion(TheCall->getArg(0));
2037 if (Arg0Res.isInvalid())
2038 return true;
2039
2040 Expr *Arg0 = Arg0Res.get();
2041 TheCall->setArg(0, Arg0);
2042
2043 QualType Arg0Ty = Arg0->getType();
2044
2045 if (!Arg0Ty->isUnsignedIntegerType()) {
2046 S.Diag(Arg0->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2047 << 1 << /*unsigned integer ty*/ 7 << Arg0Ty;
2048 return true;
2049 }
2050
2051 if (TheCall->getNumArgs() > 1) {
2052 ExprResult Arg1Res = S.UsualUnaryConversions(TheCall->getArg(1));
2053 if (Arg1Res.isInvalid())
2054 return true;
2055
2056 Expr *Arg1 = Arg1Res.get();
2057 TheCall->setArg(1, Arg1);
2058
2059 QualType Arg1Ty = Arg1->getType();
2060
2061 if (!Arg1Ty->isSpecificBuiltinType(BuiltinType::Int)) {
2062 S.Diag(Arg1->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2063 << 2 << /*'int' ty*/ 8 << Arg1Ty;
2064 return true;
2065 }
2066 }
2067
2068 return false;
2069}
2070
2072Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
2073 CallExpr *TheCall) {
2074 ExprResult TheCallResult(TheCall);
2075
2076 // Find out if any arguments are required to be integer constant expressions.
2077 unsigned ICEArguments = 0;
2079 Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
2080 if (Error != ASTContext::GE_None)
2081 ICEArguments = 0; // Don't diagnose previously diagnosed errors.
2082
2083 // If any arguments are required to be ICE's, check and diagnose.
2084 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
2085 // Skip arguments not required to be ICE's.
2086 if ((ICEArguments & (1 << ArgNo)) == 0) continue;
2087
2088 llvm::APSInt Result;
2089 // If we don't have enough arguments, continue so we can issue better
2090 // diagnostic in checkArgCount(...)
2091 if (ArgNo < TheCall->getNumArgs() &&
2092 BuiltinConstantArg(TheCall, ArgNo, Result))
2093 return true;
2094 ICEArguments &= ~(1 << ArgNo);
2095 }
2096
2097 FPOptions FPO;
2098 switch (BuiltinID) {
2099 case Builtin::BI__builtin_cpu_supports:
2100 case Builtin::BI__builtin_cpu_is:
2101 if (BuiltinCpu(*this, Context.getTargetInfo(), TheCall,
2102 Context.getAuxTargetInfo(), BuiltinID))
2103 return ExprError();
2104 break;
2105 case Builtin::BI__builtin_cpu_init:
2107 Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
2108 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
2109 return ExprError();
2110 }
2111 break;
2112 case Builtin::BI__builtin___CFStringMakeConstantString:
2113 // CFStringMakeConstantString is currently not implemented for GOFF (i.e.,
2114 // on z/OS) and for XCOFF (i.e., on AIX). Emit unsupported
2116 *this, BuiltinID, TheCall,
2117 {llvm::Triple::GOFF, llvm::Triple::XCOFF}))
2118 return ExprError();
2119 assert(TheCall->getNumArgs() == 1 &&
2120 "Wrong # arguments to builtin CFStringMakeConstantString");
2121 if (ObjC().CheckObjCString(TheCall->getArg(0)))
2122 return ExprError();
2123 break;
2124 case Builtin::BI__builtin_ms_va_start:
2125 case Builtin::BI__builtin_stdarg_start:
2126 case Builtin::BI__builtin_va_start:
2127 if (BuiltinVAStart(BuiltinID, TheCall))
2128 return ExprError();
2129 break;
2130 case Builtin::BI__va_start: {
2131 switch (Context.getTargetInfo().getTriple().getArch()) {
2132 case llvm::Triple::aarch64:
2133 case llvm::Triple::arm:
2134 case llvm::Triple::thumb:
2135 if (BuiltinVAStartARMMicrosoft(TheCall))
2136 return ExprError();
2137 break;
2138 default:
2139 if (BuiltinVAStart(BuiltinID, TheCall))
2140 return ExprError();
2141 break;
2142 }
2143 break;
2144 }
2145
2146 // The acquire, release, and no fence variants are ARM and AArch64 only.
2147 case Builtin::BI_interlockedbittestandset_acq:
2148 case Builtin::BI_interlockedbittestandset_rel:
2149 case Builtin::BI_interlockedbittestandset_nf:
2150 case Builtin::BI_interlockedbittestandreset_acq:
2151 case Builtin::BI_interlockedbittestandreset_rel:
2152 case Builtin::BI_interlockedbittestandreset_nf:
2154 *this, TheCall,
2155 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
2156 return ExprError();
2157 break;
2158
2159 // The 64-bit bittest variants are x64, ARM, and AArch64 only.
2160 case Builtin::BI_bittest64:
2161 case Builtin::BI_bittestandcomplement64:
2162 case Builtin::BI_bittestandreset64:
2163 case Builtin::BI_bittestandset64:
2164 case Builtin::BI_interlockedbittestandreset64:
2165 case Builtin::BI_interlockedbittestandset64:
2167 *this, TheCall,
2168 {llvm::Triple::x86_64, llvm::Triple::arm, llvm::Triple::thumb,
2169 llvm::Triple::aarch64, llvm::Triple::amdgcn}))
2170 return ExprError();
2171 break;
2172
2173 case Builtin::BI__builtin_set_flt_rounds:
2175 *this, TheCall,
2176 {llvm::Triple::x86, llvm::Triple::x86_64, llvm::Triple::arm,
2177 llvm::Triple::thumb, llvm::Triple::aarch64, llvm::Triple::amdgcn}))
2178 return ExprError();
2179 break;
2180
2181 case Builtin::BI__builtin_isgreater:
2182 case Builtin::BI__builtin_isgreaterequal:
2183 case Builtin::BI__builtin_isless:
2184 case Builtin::BI__builtin_islessequal:
2185 case Builtin::BI__builtin_islessgreater:
2186 case Builtin::BI__builtin_isunordered:
2187 if (BuiltinUnorderedCompare(TheCall, BuiltinID))
2188 return ExprError();
2189 break;
2190 case Builtin::BI__builtin_fpclassify:
2191 if (BuiltinFPClassification(TheCall, 6, BuiltinID))
2192 return ExprError();
2193 break;
2194 case Builtin::BI__builtin_isfpclass:
2195 if (BuiltinFPClassification(TheCall, 2, BuiltinID))
2196 return ExprError();
2197 break;
2198 case Builtin::BI__builtin_isfinite:
2199 case Builtin::BI__builtin_isinf:
2200 case Builtin::BI__builtin_isinf_sign:
2201 case Builtin::BI__builtin_isnan:
2202 case Builtin::BI__builtin_issignaling:
2203 case Builtin::BI__builtin_isnormal:
2204 case Builtin::BI__builtin_issubnormal:
2205 case Builtin::BI__builtin_iszero:
2206 case Builtin::BI__builtin_signbit:
2207 case Builtin::BI__builtin_signbitf:
2208 case Builtin::BI__builtin_signbitl:
2209 if (BuiltinFPClassification(TheCall, 1, BuiltinID))
2210 return ExprError();
2211 break;
2212 case Builtin::BI__builtin_shufflevector:
2213 return BuiltinShuffleVector(TheCall);
2214 // TheCall will be freed by the smart pointer here, but that's fine, since
2215 // BuiltinShuffleVector guts it, but then doesn't release it.
2216 case Builtin::BI__builtin_prefetch:
2217 if (BuiltinPrefetch(TheCall))
2218 return ExprError();
2219 break;
2220 case Builtin::BI__builtin_alloca_with_align:
2221 case Builtin::BI__builtin_alloca_with_align_uninitialized:
2222 if (BuiltinAllocaWithAlign(TheCall))
2223 return ExprError();
2224 [[fallthrough]];
2225 case Builtin::BI__builtin_alloca:
2226 case Builtin::BI__builtin_alloca_uninitialized:
2227 Diag(TheCall->getBeginLoc(), diag::warn_alloca)
2228 << TheCall->getDirectCallee();
2229 if (getLangOpts().OpenCL) {
2230 builtinAllocaAddrSpace(*this, TheCall);
2231 }
2232 break;
2233 case Builtin::BI__arithmetic_fence:
2234 if (BuiltinArithmeticFence(TheCall))
2235 return ExprError();
2236 break;
2237 case Builtin::BI__assume:
2238 case Builtin::BI__builtin_assume:
2239 if (BuiltinAssume(TheCall))
2240 return ExprError();
2241 break;
2242 case Builtin::BI__builtin_assume_aligned:
2243 if (BuiltinAssumeAligned(TheCall))
2244 return ExprError();
2245 break;
2246 case Builtin::BI__builtin_dynamic_object_size:
2247 case Builtin::BI__builtin_object_size:
2248 if (BuiltinConstantArgRange(TheCall, 1, 0, 3))
2249 return ExprError();
2250 break;
2251 case Builtin::BI__builtin_longjmp:
2252 if (BuiltinLongjmp(TheCall))
2253 return ExprError();
2254 break;
2255 case Builtin::BI__builtin_setjmp:
2256 if (BuiltinSetjmp(TheCall))
2257 return ExprError();
2258 break;
2259 case Builtin::BI__builtin_classify_type:
2260 if (checkArgCount(TheCall, 1))
2261 return true;
2262 TheCall->setType(Context.IntTy);
2263 break;
2264 case Builtin::BI__builtin_complex:
2265 if (BuiltinComplex(TheCall))
2266 return ExprError();
2267 break;
2268 case Builtin::BI__builtin_constant_p: {
2269 if (checkArgCount(TheCall, 1))
2270 return true;
2272 if (Arg.isInvalid()) return true;
2273 TheCall->setArg(0, Arg.get());
2274 TheCall->setType(Context.IntTy);
2275 break;
2276 }
2277 case Builtin::BI__builtin_launder:
2278 return BuiltinLaunder(*this, TheCall);
2279 case Builtin::BI__sync_fetch_and_add:
2280 case Builtin::BI__sync_fetch_and_add_1:
2281 case Builtin::BI__sync_fetch_and_add_2:
2282 case Builtin::BI__sync_fetch_and_add_4:
2283 case Builtin::BI__sync_fetch_and_add_8:
2284 case Builtin::BI__sync_fetch_and_add_16:
2285 case Builtin::BI__sync_fetch_and_sub:
2286 case Builtin::BI__sync_fetch_and_sub_1:
2287 case Builtin::BI__sync_fetch_and_sub_2:
2288 case Builtin::BI__sync_fetch_and_sub_4:
2289 case Builtin::BI__sync_fetch_and_sub_8:
2290 case Builtin::BI__sync_fetch_and_sub_16:
2291 case Builtin::BI__sync_fetch_and_or:
2292 case Builtin::BI__sync_fetch_and_or_1:
2293 case Builtin::BI__sync_fetch_and_or_2:
2294 case Builtin::BI__sync_fetch_and_or_4:
2295 case Builtin::BI__sync_fetch_and_or_8:
2296 case Builtin::BI__sync_fetch_and_or_16:
2297 case Builtin::BI__sync_fetch_and_and:
2298 case Builtin::BI__sync_fetch_and_and_1:
2299 case Builtin::BI__sync_fetch_and_and_2:
2300 case Builtin::BI__sync_fetch_and_and_4:
2301 case Builtin::BI__sync_fetch_and_and_8:
2302 case Builtin::BI__sync_fetch_and_and_16:
2303 case Builtin::BI__sync_fetch_and_xor:
2304 case Builtin::BI__sync_fetch_and_xor_1:
2305 case Builtin::BI__sync_fetch_and_xor_2:
2306 case Builtin::BI__sync_fetch_and_xor_4:
2307 case Builtin::BI__sync_fetch_and_xor_8:
2308 case Builtin::BI__sync_fetch_and_xor_16:
2309 case Builtin::BI__sync_fetch_and_nand:
2310 case Builtin::BI__sync_fetch_and_nand_1:
2311 case Builtin::BI__sync_fetch_and_nand_2:
2312 case Builtin::BI__sync_fetch_and_nand_4:
2313 case Builtin::BI__sync_fetch_and_nand_8:
2314 case Builtin::BI__sync_fetch_and_nand_16:
2315 case Builtin::BI__sync_add_and_fetch:
2316 case Builtin::BI__sync_add_and_fetch_1:
2317 case Builtin::BI__sync_add_and_fetch_2:
2318 case Builtin::BI__sync_add_and_fetch_4:
2319 case Builtin::BI__sync_add_and_fetch_8:
2320 case Builtin::BI__sync_add_and_fetch_16:
2321 case Builtin::BI__sync_sub_and_fetch:
2322 case Builtin::BI__sync_sub_and_fetch_1:
2323 case Builtin::BI__sync_sub_and_fetch_2:
2324 case Builtin::BI__sync_sub_and_fetch_4:
2325 case Builtin::BI__sync_sub_and_fetch_8:
2326 case Builtin::BI__sync_sub_and_fetch_16:
2327 case Builtin::BI__sync_and_and_fetch:
2328 case Builtin::BI__sync_and_and_fetch_1:
2329 case Builtin::BI__sync_and_and_fetch_2:
2330 case Builtin::BI__sync_and_and_fetch_4:
2331 case Builtin::BI__sync_and_and_fetch_8:
2332 case Builtin::BI__sync_and_and_fetch_16:
2333 case Builtin::BI__sync_or_and_fetch:
2334 case Builtin::BI__sync_or_and_fetch_1:
2335 case Builtin::BI__sync_or_and_fetch_2:
2336 case Builtin::BI__sync_or_and_fetch_4:
2337 case Builtin::BI__sync_or_and_fetch_8:
2338 case Builtin::BI__sync_or_and_fetch_16:
2339 case Builtin::BI__sync_xor_and_fetch:
2340 case Builtin::BI__sync_xor_and_fetch_1:
2341 case Builtin::BI__sync_xor_and_fetch_2:
2342 case Builtin::BI__sync_xor_and_fetch_4:
2343 case Builtin::BI__sync_xor_and_fetch_8:
2344 case Builtin::BI__sync_xor_and_fetch_16:
2345 case Builtin::BI__sync_nand_and_fetch:
2346 case Builtin::BI__sync_nand_and_fetch_1:
2347 case Builtin::BI__sync_nand_and_fetch_2:
2348 case Builtin::BI__sync_nand_and_fetch_4:
2349 case Builtin::BI__sync_nand_and_fetch_8:
2350 case Builtin::BI__sync_nand_and_fetch_16:
2351 case Builtin::BI__sync_val_compare_and_swap:
2352 case Builtin::BI__sync_val_compare_and_swap_1:
2353 case Builtin::BI__sync_val_compare_and_swap_2:
2354 case Builtin::BI__sync_val_compare_and_swap_4:
2355 case Builtin::BI__sync_val_compare_and_swap_8:
2356 case Builtin::BI__sync_val_compare_and_swap_16:
2357 case Builtin::BI__sync_bool_compare_and_swap:
2358 case Builtin::BI__sync_bool_compare_and_swap_1:
2359 case Builtin::BI__sync_bool_compare_and_swap_2:
2360 case Builtin::BI__sync_bool_compare_and_swap_4:
2361 case Builtin::BI__sync_bool_compare_and_swap_8:
2362 case Builtin::BI__sync_bool_compare_and_swap_16:
2363 case Builtin::BI__sync_lock_test_and_set:
2364 case Builtin::BI__sync_lock_test_and_set_1:
2365 case Builtin::BI__sync_lock_test_and_set_2:
2366 case Builtin::BI__sync_lock_test_and_set_4:
2367 case Builtin::BI__sync_lock_test_and_set_8:
2368 case Builtin::BI__sync_lock_test_and_set_16:
2369 case Builtin::BI__sync_lock_release:
2370 case Builtin::BI__sync_lock_release_1:
2371 case Builtin::BI__sync_lock_release_2:
2372 case Builtin::BI__sync_lock_release_4:
2373 case Builtin::BI__sync_lock_release_8:
2374 case Builtin::BI__sync_lock_release_16:
2375 case Builtin::BI__sync_swap:
2376 case Builtin::BI__sync_swap_1:
2377 case Builtin::BI__sync_swap_2:
2378 case Builtin::BI__sync_swap_4:
2379 case Builtin::BI__sync_swap_8:
2380 case Builtin::BI__sync_swap_16:
2381 return BuiltinAtomicOverloaded(TheCallResult);
2382 case Builtin::BI__sync_synchronize:
2383 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
2384 << TheCall->getCallee()->getSourceRange();
2385 break;
2386 case Builtin::BI__builtin_nontemporal_load:
2387 case Builtin::BI__builtin_nontemporal_store:
2388 return BuiltinNontemporalOverloaded(TheCallResult);
2389 case Builtin::BI__builtin_memcpy_inline: {
2390 clang::Expr *SizeOp = TheCall->getArg(2);
2391 // We warn about copying to or from `nullptr` pointers when `size` is
2392 // greater than 0. When `size` is value dependent we cannot evaluate its
2393 // value so we bail out.
2394 if (SizeOp->isValueDependent())
2395 break;
2396 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) {
2397 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
2398 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc());
2399 }
2400 break;
2401 }
2402 case Builtin::BI__builtin_memset_inline: {
2403 clang::Expr *SizeOp = TheCall->getArg(2);
2404 // We warn about filling to `nullptr` pointers when `size` is greater than
2405 // 0. When `size` is value dependent we cannot evaluate its value so we bail
2406 // out.
2407 if (SizeOp->isValueDependent())
2408 break;
2409 if (!SizeOp->EvaluateKnownConstInt(Context).isZero())
2410 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
2411 break;
2412 }
2413#define BUILTIN(ID, TYPE, ATTRS)
2414#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
2415 case Builtin::BI##ID: \
2416 return AtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
2417#include "clang/Basic/Builtins.inc"
2418 case Builtin::BI__annotation:
2419 if (BuiltinMSVCAnnotation(*this, TheCall))
2420 return ExprError();
2421 break;
2422 case Builtin::BI__builtin_annotation:
2423 if (BuiltinAnnotation(*this, TheCall))
2424 return ExprError();
2425 break;
2426 case Builtin::BI__builtin_addressof:
2427 if (BuiltinAddressof(*this, TheCall))
2428 return ExprError();
2429 break;
2430 case Builtin::BI__builtin_function_start:
2431 if (BuiltinFunctionStart(*this, TheCall))
2432 return ExprError();
2433 break;
2434 case Builtin::BI__builtin_is_aligned:
2435 case Builtin::BI__builtin_align_up:
2436 case Builtin::BI__builtin_align_down:
2437 if (BuiltinAlignment(*this, TheCall, BuiltinID))
2438 return ExprError();
2439 break;
2440 case Builtin::BI__builtin_add_overflow:
2441 case Builtin::BI__builtin_sub_overflow:
2442 case Builtin::BI__builtin_mul_overflow:
2443 if (BuiltinOverflow(*this, TheCall, BuiltinID))
2444 return ExprError();
2445 break;
2446 case Builtin::BI__builtin_operator_new:
2447 case Builtin::BI__builtin_operator_delete: {
2448 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
2449 ExprResult Res =
2450 BuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
2451 if (Res.isInvalid())
2452 CorrectDelayedTyposInExpr(TheCallResult.get());
2453 return Res;
2454 }
2455 case Builtin::BI__builtin_dump_struct:
2456 return BuiltinDumpStruct(*this, TheCall);
2457 case Builtin::BI__builtin_expect_with_probability: {
2458 // We first want to ensure we are called with 3 arguments
2459 if (checkArgCount(TheCall, 3))
2460 return ExprError();
2461 // then check probability is constant float in range [0.0, 1.0]
2462 const Expr *ProbArg = TheCall->getArg(2);
2464 Expr::EvalResult Eval;
2465 Eval.Diag = &Notes;
2466 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) ||
2467 !Eval.Val.isFloat()) {
2468 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float)
2469 << ProbArg->getSourceRange();
2470 for (const PartialDiagnosticAt &PDiag : Notes)
2471 Diag(PDiag.first, PDiag.second);
2472 return ExprError();
2473 }
2474 llvm::APFloat Probability = Eval.Val.getFloat();
2475 bool LoseInfo = false;
2476 Probability.convert(llvm::APFloat::IEEEdouble(),
2477 llvm::RoundingMode::Dynamic, &LoseInfo);
2478 if (!(Probability >= llvm::APFloat(0.0) &&
2479 Probability <= llvm::APFloat(1.0))) {
2480 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range)
2481 << ProbArg->getSourceRange();
2482 return ExprError();
2483 }
2484 break;
2485 }
2486 case Builtin::BI__builtin_preserve_access_index:
2487 if (BuiltinPreserveAI(*this, TheCall))
2488 return ExprError();
2489 break;
2490 case Builtin::BI__builtin_call_with_static_chain:
2491 if (BuiltinCallWithStaticChain(*this, TheCall))
2492 return ExprError();
2493 break;
2494 case Builtin::BI__exception_code:
2495 case Builtin::BI_exception_code:
2496 if (BuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
2497 diag::err_seh___except_block))
2498 return ExprError();
2499 break;
2500 case Builtin::BI__exception_info:
2501 case Builtin::BI_exception_info:
2502 if (BuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
2503 diag::err_seh___except_filter))
2504 return ExprError();
2505 break;
2506 case Builtin::BI__GetExceptionInfo:
2507 if (checkArgCount(TheCall, 1))
2508 return ExprError();
2509
2511 TheCall->getBeginLoc(),
2513 TheCall))
2514 return ExprError();
2515
2516 TheCall->setType(Context.VoidPtrTy);
2517 break;
2518 case Builtin::BIaddressof:
2519 case Builtin::BI__addressof:
2520 case Builtin::BIforward:
2521 case Builtin::BIforward_like:
2522 case Builtin::BImove:
2523 case Builtin::BImove_if_noexcept:
2524 case Builtin::BIas_const: {
2525 // These are all expected to be of the form
2526 // T &/&&/* f(U &/&&)
2527 // where T and U only differ in qualification.
2528 if (checkArgCount(TheCall, 1))
2529 return ExprError();
2530 QualType Param = FDecl->getParamDecl(0)->getType();
2531 QualType Result = FDecl->getReturnType();
2532 bool ReturnsPointer = BuiltinID == Builtin::BIaddressof ||
2533 BuiltinID == Builtin::BI__addressof;
2534 if (!(Param->isReferenceType() &&
2535 (ReturnsPointer ? Result->isAnyPointerType()
2536 : Result->isReferenceType()) &&
2538 Result->getPointeeType()))) {
2539 Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported)
2540 << FDecl;
2541 return ExprError();
2542 }
2543 break;
2544 }
2545 case Builtin::BI__builtin_ptrauth_strip:
2546 return PointerAuthStrip(*this, TheCall);
2547 case Builtin::BI__builtin_ptrauth_blend_discriminator:
2548 return PointerAuthBlendDiscriminator(*this, TheCall);
2549 case Builtin::BI__builtin_ptrauth_sign_constant:
2550 return PointerAuthSignOrAuth(*this, TheCall, PAO_Sign,
2551 /*RequireConstant=*/true);
2552 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
2553 return PointerAuthSignOrAuth(*this, TheCall, PAO_Sign,
2554 /*RequireConstant=*/false);
2555 case Builtin::BI__builtin_ptrauth_auth:
2556 return PointerAuthSignOrAuth(*this, TheCall, PAO_Auth,
2557 /*RequireConstant=*/false);
2558 case Builtin::BI__builtin_ptrauth_sign_generic_data:
2559 return PointerAuthSignGenericData(*this, TheCall);
2560 case Builtin::BI__builtin_ptrauth_auth_and_resign:
2561 return PointerAuthAuthAndResign(*this, TheCall);
2562 case Builtin::BI__builtin_ptrauth_string_discriminator:
2563 return PointerAuthStringDiscriminator(*this, TheCall);
2564 // OpenCL v2.0, s6.13.16 - Pipe functions
2565 case Builtin::BIread_pipe:
2566 case Builtin::BIwrite_pipe:
2567 // Since those two functions are declared with var args, we need a semantic
2568 // check for the argument.
2569 if (OpenCL().checkBuiltinRWPipe(TheCall))
2570 return ExprError();
2571 break;
2572 case Builtin::BIreserve_read_pipe:
2573 case Builtin::BIreserve_write_pipe:
2574 case Builtin::BIwork_group_reserve_read_pipe:
2575 case Builtin::BIwork_group_reserve_write_pipe:
2576 if (OpenCL().checkBuiltinReserveRWPipe(TheCall))
2577 return ExprError();
2578 break;
2579 case Builtin::BIsub_group_reserve_read_pipe:
2580 case Builtin::BIsub_group_reserve_write_pipe:
2581 if (OpenCL().checkSubgroupExt(TheCall) ||
2582 OpenCL().checkBuiltinReserveRWPipe(TheCall))
2583 return ExprError();
2584 break;
2585 case Builtin::BIcommit_read_pipe:
2586 case Builtin::BIcommit_write_pipe:
2587 case Builtin::BIwork_group_commit_read_pipe:
2588 case Builtin::BIwork_group_commit_write_pipe:
2589 if (OpenCL().checkBuiltinCommitRWPipe(TheCall))
2590 return ExprError();
2591 break;
2592 case Builtin::BIsub_group_commit_read_pipe:
2593 case Builtin::BIsub_group_commit_write_pipe:
2594 if (OpenCL().checkSubgroupExt(TheCall) ||
2595 OpenCL().checkBuiltinCommitRWPipe(TheCall))
2596 return ExprError();
2597 break;
2598 case Builtin::BIget_pipe_num_packets:
2599 case Builtin::BIget_pipe_max_packets:
2600 if (OpenCL().checkBuiltinPipePackets(TheCall))
2601 return ExprError();
2602 break;
2603 case Builtin::BIto_global:
2604 case Builtin::BIto_local:
2605 case Builtin::BIto_private:
2606 if (OpenCL().checkBuiltinToAddr(BuiltinID, TheCall))
2607 return ExprError();
2608 break;
2609 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
2610 case Builtin::BIenqueue_kernel:
2611 if (OpenCL().checkBuiltinEnqueueKernel(TheCall))
2612 return ExprError();
2613 break;
2614 case Builtin::BIget_kernel_work_group_size:
2615 case Builtin::BIget_kernel_preferred_work_group_size_multiple:
2616 if (OpenCL().checkBuiltinKernelWorkGroupSize(TheCall))
2617 return ExprError();
2618 break;
2619 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
2620 case Builtin::BIget_kernel_sub_group_count_for_ndrange:
2621 if (OpenCL().checkBuiltinNDRangeAndBlock(TheCall))
2622 return ExprError();
2623 break;
2624 case Builtin::BI__builtin_os_log_format:
2626 [[fallthrough]];
2627 case Builtin::BI__builtin_os_log_format_buffer_size:
2628 if (BuiltinOSLogFormat(TheCall))
2629 return ExprError();
2630 break;
2631 case Builtin::BI__builtin_frame_address:
2632 case Builtin::BI__builtin_return_address: {
2633 if (BuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF))
2634 return ExprError();
2635
2636 // -Wframe-address warning if non-zero passed to builtin
2637 // return/frame address.
2639 if (!TheCall->getArg(0)->isValueDependent() &&
2640 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
2641 Result.Val.getInt() != 0)
2642 Diag(TheCall->getBeginLoc(), diag::warn_frame_address)
2643 << ((BuiltinID == Builtin::BI__builtin_return_address)
2644 ? "__builtin_return_address"
2645 : "__builtin_frame_address")
2646 << TheCall->getSourceRange();
2647 break;
2648 }
2649
2650 case Builtin::BI__builtin_nondeterministic_value: {
2651 if (BuiltinNonDeterministicValue(TheCall))
2652 return ExprError();
2653 break;
2654 }
2655
2656 // __builtin_elementwise_abs restricts the element type to signed integers or
2657 // floating point types only.
2658 case Builtin::BI__builtin_elementwise_abs: {
2660 return ExprError();
2661
2662 QualType ArgTy = TheCall->getArg(0)->getType();
2663 QualType EltTy = ArgTy;
2664
2665 if (auto *VecTy = EltTy->getAs<VectorType>())
2666 EltTy = VecTy->getElementType();
2667 if (EltTy->isUnsignedIntegerType()) {
2668 Diag(TheCall->getArg(0)->getBeginLoc(),
2669 diag::err_builtin_invalid_arg_type)
2670 << 1 << /* signed integer or float ty*/ 3 << ArgTy;
2671 return ExprError();
2672 }
2673 break;
2674 }
2675
2676 // These builtins restrict the element type to floating point
2677 // types only.
2678 case Builtin::BI__builtin_elementwise_acos:
2679 case Builtin::BI__builtin_elementwise_asin:
2680 case Builtin::BI__builtin_elementwise_atan:
2681 case Builtin::BI__builtin_elementwise_ceil:
2682 case Builtin::BI__builtin_elementwise_cos:
2683 case Builtin::BI__builtin_elementwise_cosh:
2684 case Builtin::BI__builtin_elementwise_exp:
2685 case Builtin::BI__builtin_elementwise_exp2:
2686 case Builtin::BI__builtin_elementwise_floor:
2687 case Builtin::BI__builtin_elementwise_log:
2688 case Builtin::BI__builtin_elementwise_log2:
2689 case Builtin::BI__builtin_elementwise_log10:
2690 case Builtin::BI__builtin_elementwise_roundeven:
2691 case Builtin::BI__builtin_elementwise_round:
2692 case Builtin::BI__builtin_elementwise_rint:
2693 case Builtin::BI__builtin_elementwise_nearbyint:
2694 case Builtin::BI__builtin_elementwise_sin:
2695 case Builtin::BI__builtin_elementwise_sinh:
2696 case Builtin::BI__builtin_elementwise_sqrt:
2697 case Builtin::BI__builtin_elementwise_tan:
2698 case Builtin::BI__builtin_elementwise_tanh:
2699 case Builtin::BI__builtin_elementwise_trunc:
2700 case Builtin::BI__builtin_elementwise_canonicalize: {
2702 return ExprError();
2703
2704 QualType ArgTy = TheCall->getArg(0)->getType();
2705 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
2706 ArgTy, 1))
2707 return ExprError();
2708 break;
2709 }
2710 case Builtin::BI__builtin_elementwise_fma: {
2711 if (BuiltinElementwiseTernaryMath(TheCall))
2712 return ExprError();
2713 break;
2714 }
2715
2716 // These builtins restrict the element type to floating point
2717 // types only, and take in two arguments.
2718 case Builtin::BI__builtin_elementwise_pow: {
2719 if (BuiltinElementwiseMath(TheCall))
2720 return ExprError();
2721
2722 QualType ArgTy = TheCall->getArg(0)->getType();
2723 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
2724 ArgTy, 1) ||
2725 checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(),
2726 ArgTy, 2))
2727 return ExprError();
2728 break;
2729 }
2730
2731 // These builtins restrict the element type to integer
2732 // types only.
2733 case Builtin::BI__builtin_elementwise_add_sat:
2734 case Builtin::BI__builtin_elementwise_sub_sat: {
2735 if (BuiltinElementwiseMath(TheCall))
2736 return ExprError();
2737
2738 const Expr *Arg = TheCall->getArg(0);
2739 QualType ArgTy = Arg->getType();
2740 QualType EltTy = ArgTy;
2741
2742 if (auto *VecTy = EltTy->getAs<VectorType>())
2743 EltTy = VecTy->getElementType();
2744
2745 if (!EltTy->isIntegerType()) {
2746 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2747 << 1 << /* integer ty */ 6 << ArgTy;
2748 return ExprError();
2749 }
2750 break;
2751 }
2752
2753 case Builtin::BI__builtin_elementwise_min:
2754 case Builtin::BI__builtin_elementwise_max:
2755 if (BuiltinElementwiseMath(TheCall))
2756 return ExprError();
2757 break;
2758
2759 case Builtin::BI__builtin_elementwise_bitreverse: {
2761 return ExprError();
2762
2763 const Expr *Arg = TheCall->getArg(0);
2764 QualType ArgTy = Arg->getType();
2765 QualType EltTy = ArgTy;
2766
2767 if (auto *VecTy = EltTy->getAs<VectorType>())
2768 EltTy = VecTy->getElementType();
2769
2770 if (!EltTy->isIntegerType()) {
2771 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2772 << 1 << /* integer ty */ 6 << ArgTy;
2773 return ExprError();
2774 }
2775 break;
2776 }
2777
2778 case Builtin::BI__builtin_elementwise_copysign: {
2779 if (checkArgCount(TheCall, 2))
2780 return ExprError();
2781
2782 ExprResult Magnitude = UsualUnaryConversions(TheCall->getArg(0));
2783 ExprResult Sign = UsualUnaryConversions(TheCall->getArg(1));
2784 if (Magnitude.isInvalid() || Sign.isInvalid())
2785 return ExprError();
2786
2787 QualType MagnitudeTy = Magnitude.get()->getType();
2788 QualType SignTy = Sign.get()->getType();
2789 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
2790 MagnitudeTy, 1) ||
2791 checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(),
2792 SignTy, 2)) {
2793 return ExprError();
2794 }
2795
2796 if (MagnitudeTy.getCanonicalType() != SignTy.getCanonicalType()) {
2797 return Diag(Sign.get()->getBeginLoc(),
2798 diag::err_typecheck_call_different_arg_types)
2799 << MagnitudeTy << SignTy;
2800 }
2801
2802 TheCall->setArg(0, Magnitude.get());
2803 TheCall->setArg(1, Sign.get());
2804 TheCall->setType(Magnitude.get()->getType());
2805 break;
2806 }
2807 case Builtin::BI__builtin_reduce_max:
2808 case Builtin::BI__builtin_reduce_min: {
2809 if (PrepareBuiltinReduceMathOneArgCall(TheCall))
2810 return ExprError();
2811
2812 const Expr *Arg = TheCall->getArg(0);
2813 const auto *TyA = Arg->getType()->getAs<VectorType>();
2814
2815 QualType ElTy;
2816 if (TyA)
2817 ElTy = TyA->getElementType();
2818 else if (Arg->getType()->isSizelessVectorType())
2820
2821 if (ElTy.isNull()) {
2822 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2823 << 1 << /* vector ty*/ 4 << Arg->getType();
2824 return ExprError();
2825 }
2826
2827 TheCall->setType(ElTy);
2828 break;
2829 }
2830
2831 // These builtins support vectors of integers only.
2832 // TODO: ADD/MUL should support floating-point types.
2833 case Builtin::BI__builtin_reduce_add:
2834 case Builtin::BI__builtin_reduce_mul:
2835 case Builtin::BI__builtin_reduce_xor:
2836 case Builtin::BI__builtin_reduce_or:
2837 case Builtin::BI__builtin_reduce_and: {
2838 if (PrepareBuiltinReduceMathOneArgCall(TheCall))
2839 return ExprError();
2840
2841 const Expr *Arg = TheCall->getArg(0);
2842 const auto *TyA = Arg->getType()->getAs<VectorType>();
2843
2844 QualType ElTy;
2845 if (TyA)
2846 ElTy = TyA->getElementType();
2847 else if (Arg->getType()->isSizelessVectorType())
2849
2850 if (ElTy.isNull() || !ElTy->isIntegerType()) {
2851 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2852 << 1 << /* vector of integers */ 6 << Arg->getType();
2853 return ExprError();
2854 }
2855
2856 TheCall->setType(ElTy);
2857 break;
2858 }
2859
2860 case Builtin::BI__builtin_matrix_transpose:
2861 return BuiltinMatrixTranspose(TheCall, TheCallResult);
2862
2863 case Builtin::BI__builtin_matrix_column_major_load:
2864 return BuiltinMatrixColumnMajorLoad(TheCall, TheCallResult);
2865
2866 case Builtin::BI__builtin_matrix_column_major_store:
2867 return BuiltinMatrixColumnMajorStore(TheCall, TheCallResult);
2868
2869 case Builtin::BI__builtin_verbose_trap:
2870 if (!checkBuiltinVerboseTrap(TheCall, *this))
2871 return ExprError();
2872 break;
2873
2874 case Builtin::BI__builtin_get_device_side_mangled_name: {
2875 auto Check = [](CallExpr *TheCall) {
2876 if (TheCall->getNumArgs() != 1)
2877 return false;
2878 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts());
2879 if (!DRE)
2880 return false;
2881 auto *D = DRE->getDecl();
2882 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D))
2883 return false;
2884 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() ||
2885 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>();
2886 };
2887 if (!Check(TheCall)) {
2888 Diag(TheCall->getBeginLoc(),
2889 diag::err_hip_invalid_args_builtin_mangled_name);
2890 return ExprError();
2891 }
2892 break;
2893 }
2894 case Builtin::BI__builtin_popcountg:
2895 if (BuiltinPopcountg(*this, TheCall))
2896 return ExprError();
2897 break;
2898 case Builtin::BI__builtin_clzg:
2899 case Builtin::BI__builtin_ctzg:
2900 if (BuiltinCountZeroBitsGeneric(*this, TheCall))
2901 return ExprError();
2902 break;
2903
2904 case Builtin::BI__builtin_allow_runtime_check: {
2905 Expr *Arg = TheCall->getArg(0);
2906 // Check if the argument is a string literal.
2907 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) {
2908 Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
2909 << Arg->getSourceRange();
2910 return ExprError();
2911 }
2912 break;
2913 }
2914 }
2915
2916 if (getLangOpts().HLSL && HLSL().CheckBuiltinFunctionCall(BuiltinID, TheCall))
2917 return ExprError();
2918
2919 // Since the target specific builtins for each arch overlap, only check those
2920 // of the arch we are compiling for.
2921 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
2922 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
2923 assert(Context.getAuxTargetInfo() &&
2924 "Aux Target Builtin, but not an aux target?");
2925
2926 if (CheckTSBuiltinFunctionCall(
2928 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
2929 return ExprError();
2930 } else {
2931 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
2932 TheCall))
2933 return ExprError();
2934 }
2935 }
2936
2937 return TheCallResult;
2938}
2939
2940bool Sema::ValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
2941 llvm::APSInt Result;
2942 // We can't check the value of a dependent argument.
2943 Expr *Arg = TheCall->getArg(ArgNum);
2944 if (Arg->isTypeDependent() || Arg->isValueDependent())
2945 return false;
2946
2947 // Check constant-ness first.
2948 if (BuiltinConstantArg(TheCall, ArgNum, Result))
2949 return true;
2950
2951 // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s.
2952 if (Result.isShiftedMask() || (~Result).isShiftedMask())
2953 return false;
2954
2955 return Diag(TheCall->getBeginLoc(),
2956 diag::err_argument_not_contiguous_bit_field)
2957 << ArgNum << Arg->getSourceRange();
2958}
2959
/// Extract format-string checking parameters from a FormatAttr into FSI:
/// which argument is the format string (FormatIdx), where the data
/// arguments start (FirstDataArg), and how arguments are passed.
///
/// Returns false when the attribute cannot apply here (the format index
/// would refer to the implicit 'this' of a C++ member), true otherwise.
///
/// NOTE(review): this extracted view is missing the statement bodies of
/// the three branches below — presumably the assignments of
/// FSI->ArgPassingKind (FAPK_VAList / FAPK_Variadic / FAPK_Fixed).
/// Confirm against the original source.
2960bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
2961                               bool IsVariadic, FormatStringInfo *FSI) {
2962  if (Format->getFirstArg() == 0)
2964  else if (IsVariadic)
2966  else
  // Attribute indices are 1-based; convert to 0-based AST argument indices.
  // A FirstDataArg of 0 marks the va_list case (no data arguments to scan).
2968  FSI->FormatIdx = Format->getFormatIdx() - 1;
2969  FSI->FirstDataArg =
2970      FSI->ArgPassingKind == FAPK_VAList ? 0 : Format->getFirstArg() - 1;
2971
2972  // The way the format attribute works in GCC, the implicit this argument
2973  // of member functions is counted. However, it doesn't appear in our own
2974  // lists, so decrement format_idx in that case.
2975  if (IsCXXMember) {
2976    if(FSI->FormatIdx == 0)
2977      return false;
2978    --FSI->FormatIdx;
2979    if (FSI->FirstDataArg != 0)
2980      --FSI->FirstDataArg;
2981  }
2982  return true;
2983}
2984
2985/// Checks if the given expression evaluates to null.
2986///
2987/// Returns true if the value evaluates to null.
/// NOTE(review): two spans are not visible in this extracted view: the
/// argument of the isa<CXXNullPtrLiteralExpr> check (presumably the
/// expression with implicit conversions/constructors stripped) and the
/// constant-evaluation call that sets Result near the end (presumably
/// Expr->EvaluateAsBooleanCondition(...)). Confirm against the original.
2988static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
2989  // Treat (smart) pointers constructed from nullptr as null, whether we can
2990  // const-evaluate them or not.
2991  // This must happen first: the smart pointer expr might have _Nonnull type!
2992  if (isa<CXXNullPtrLiteralExpr>(
2995    return true;
2996
2997  // If the expression has non-null type, it doesn't evaluate to null.
2998  if (auto nullability = Expr->IgnoreImplicit()->getType()->getNullability()) {
2999    if (*nullability == NullabilityKind::NonNull)
3000      return false;
3001  }
3002
3003  // As a special case, transparent unions initialized with zero are
3004  // considered null for the purposes of the nonnull attribute.
3005  if (const RecordType *UT = Expr->getType()->getAsUnionType();
3006      UT && UT->getDecl()->hasAttr<TransparentUnionAttr>()) {
3007    if (const auto *CLE = dyn_cast<CompoundLiteralExpr>(Expr))
3008      if (const auto *ILE = dyn_cast<InitListExpr>(CLE->getInitializer()))
3009        Expr = ILE->getInit(0);
3010  }
3011
  // Non-dependent expressions that constant-evaluate to false (null) are
  // reported as null; anything we cannot evaluate is assumed non-null.
3012  bool Result;
3013  return (!Expr->isValueDependent() &&
3015          !Result);
3016}
3017
/// Warn if a known-null expression is passed where a nonnull argument is
/// expected: emits warn_null_arg as runtime-reachable behavior at the
/// call site.
/// NOTE(review): the first line of the signature (presumably
/// 'static void CheckNonNullArgument(Sema &S,') is not visible in this
/// extracted view.
3019                                 const Expr *ArgExpr,
3020                                 SourceLocation CallSiteLoc) {
3021  if (CheckNonNullExpr(S, ArgExpr))
3022    S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
3023                          S.PDiag(diag::warn_null_arg)
3024                              << ArgExpr->getSourceRange());
3025}
3026
3027/// Determine whether the given type has a non-null nullability annotation.
/// NOTE(review): the declarator line itself (presumably
/// 'static bool isNonNullType(QualType type) {') is missing from this
/// extracted view; the body reads a parameter named 'type'.
3029  if (auto nullability = type->getNullability())
3030    return *nullability == NullabilityKind::NonNull;
3031
3032  return false;
3033}
3034
/// Walk a call's arguments and warn (warn_null_arg) for each argument
/// that is a known-null expression flowing into a parameter that must be
/// nonnull — via the function-level nonnull attribute, per-parameter
/// nonnull attributes, or _Nonnull parameter types from the prototype.
/// NOTE(review): this extracted view is missing the start of the
/// signature (presumably 'static void CheckNonNullArguments(Sema &S,'
/// plus an 'ArrayRef<const Expr *> Args' parameter line), the condition
/// of the early return below, and the declaration of 'parms'
/// (presumably 'ArrayRef<ParmVarDecl *> parms;'). Confirm against the
/// original source.
3036                                  const NamedDecl *FDecl,
3037                                  const FunctionProtoType *Proto,
3039                                  SourceLocation CallSiteLoc) {
3040  assert((FDecl || Proto) && "Need a function declaration or prototype");
3041
3042  // Already checked by constant evaluator.
3044    return;
3045  // Check the attributes attached to the method/function itself.
  // NonNullArgs is lazily sized: a bit is set for each argument index
  // that must be checked for null.
3046  llvm::SmallBitVector NonNullArgs;
3047  if (FDecl) {
3048    // Handle the nonnull attribute on the function/method declaration itself.
3049    for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
3050      if (!NonNull->args_size()) {
3051        // Easy case: all pointer arguments are nonnull.
3052        for (const auto *Arg : Args)
3053          if (S.isValidPointerAttrType(Arg->getType()))
3054            CheckNonNullArgument(S, Arg, CallSiteLoc);
3055        return;
3056      }
3057
      // Otherwise record only the explicitly listed argument indices,
      // skipping indices beyond the actual argument count.
3058      for (const ParamIdx &Idx : NonNull->args()) {
3059        unsigned IdxAST = Idx.getASTIndex();
3060        if (IdxAST >= Args.size())
3061          continue;
3062        if (NonNullArgs.empty())
3063          NonNullArgs.resize(Args.size());
3064        NonNullArgs.set(IdxAST);
3065      }
3066    }
3067  }
3068
3069  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
3070    // Handle the nonnull attribute on the parameters of the
3071    // function/method.
3073    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
3074      parms = FD->parameters();
3075    else
3076      parms = cast<ObjCMethodDecl>(FDecl)->parameters();
3077
3078    unsigned ParamIndex = 0;
3079    for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
3080         I != E; ++I, ++ParamIndex) {
3081      const ParmVarDecl *PVD = *I;
3082      if (PVD->hasAttr<NonNullAttr>() || isNonNullType(PVD->getType())) {
3083        if (NonNullArgs.empty())
3084          NonNullArgs.resize(Args.size());
3085
3086        NonNullArgs.set(ParamIndex);
3087      }
3088    }
3089  } else {
3090    // If we have a non-function, non-method declaration but no
3091    // function prototype, try to dig out the function prototype.
3092    if (!Proto) {
3093      if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
3094        QualType type = VD->getType().getNonReferenceType();
3095        if (auto pointerType = type->getAs<PointerType>())
3096          type = pointerType->getPointeeType();
3097        else if (auto blockType = type->getAs<BlockPointerType>())
3098          type = blockType->getPointeeType();
3099        // FIXME: data member pointers?
3100
3101        // Dig out the function prototype, if there is one.
3102        Proto = type->getAs<FunctionProtoType>();
3103      }
3104    }
3105
3106    // Fill in non-null argument information from the nullability
3107    // information on the parameter types (if we have them).
3108    if (Proto) {
3109      unsigned Index = 0;
3110      for (auto paramType : Proto->getParamTypes()) {
3111        if (isNonNullType(paramType)) {
3112          if (NonNullArgs.empty())
3113            NonNullArgs.resize(Args.size());
3114
3115          NonNullArgs.set(Index);
3116        }
3117
3118        ++Index;
3119      }
3120    }
3121  }
3122
3123  // Check for non-null arguments.
3124  for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
3125       ArgIndex != ArgIndexEnd; ++ArgIndex) {
3126    if (NonNullArgs[ArgIndex])
3127      CheckNonNullArgument(S, Args[ArgIndex], Args[ArgIndex]->getExprLoc());
3128  }
3129}
3130
3131void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
3132 StringRef ParamName, QualType ArgTy,
3133 QualType ParamTy) {
3134
3135 // If a function accepts a pointer or reference type
3136 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType())
3137 return;
3138
3139 // If the parameter is a pointer type, get the pointee type for the
3140 // argument too. If the parameter is a reference type, don't try to get
3141 // the pointee type for the argument.
3142 if (ParamTy->isPointerType())
3143 ArgTy = ArgTy->getPointeeType();
3144
3145 // Remove reference or pointer
3146 ParamTy = ParamTy->getPointeeType();
3147
3148 // Find expected alignment, and the actual alignment of the passed object.
3149 // getTypeAlignInChars requires complete types
3150 if (ArgTy.isNull() || ParamTy->isDependentType() ||
3151 ParamTy->isIncompleteType() || ArgTy->isIncompleteType() ||
3152 ParamTy->isUndeducedType() || ArgTy->isUndeducedType())
3153 return;
3154
3155 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy);
3156 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy);
3157
3158 // If the argument is less aligned than the parameter, there is a
3159 // potential alignment issue.
3160 if (ArgAlign < ParamAlign)
3161 Diag(Loc, diag::warn_param_mismatched_alignment)
3162 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity()
3163 << ParamName << (FDecl != nullptr) << FDecl;
3164}
3165
/// Shared call-site checking used by every call form (direct calls,
/// member calls, calls through pointers, constructor calls): runs
/// format-string checks, variadic-argument checks, nonnull checks,
/// type-tag checks, argument-alignment checks, and AArch64 SME
/// streaming/ZA/ZT0-state diagnostics, then diagnose_if evaluation.
/// NOTE(review): several lines are missing from this extracted view: the
/// start of the signature (presumably 'void Sema::checkCall(const
/// NamedDecl *FDecl, const FunctionProtoType *Proto,' plus the trailing
/// 'SourceRange Range, VariadicCallType CallType' parameters), the
/// condition guarding the early return below (presumably a
/// dependent-context check), and the right-hand sides of the SME
/// attribute queries (ExtInfo / getArmZAState / getArmZT0State). Confirm
/// against the original source.
3167                     const Expr *ThisArg, ArrayRef<const Expr *> Args,
3168                     bool IsMemberFunction, SourceLocation Loc,
3170  // FIXME: We should check as much as we can in the template definition.
3172    return;
3173
3174  // Printf and scanf checking.
3175  llvm::SmallBitVector CheckedVarArgs;
3176  if (FDecl) {
3177    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
3178      // Only create vector if there are format attributes.
3179      CheckedVarArgs.resize(Args.size());
3180
3181      CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
3182                           CheckedVarArgs);
3183    }
3184  }
3185
3186  // Refuse POD arguments that weren't caught by the format string
3187  // checks above.
3188  auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
3189  if (CallType != VariadicDoesNotApply &&
3190      (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
3191    unsigned NumParams = Proto ? Proto->getNumParams()
3192                       : isa_and_nonnull<FunctionDecl>(FDecl)
3193                           ? cast<FunctionDecl>(FDecl)->getNumParams()
3194                       : isa_and_nonnull<ObjCMethodDecl>(FDecl)
3195                           ? cast<ObjCMethodDecl>(FDecl)->param_size()
3196                           : 0;
3197
    // Only the arguments beyond the declared parameters are variadic;
    // skip any already validated against a format string.
3198    for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
3199      // Args[ArgIdx] can be null in malformed code.
3200      if (const Expr *Arg = Args[ArgIdx]) {
3201        if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
3202          checkVariadicArgument(Arg, CallType);
3203      }
3204    }
3205  }
3206
3207  if (FDecl || Proto) {
3208    CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);
3209
3210    // Type safety checking.
3211    if (FDecl) {
3212      for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
3213        CheckArgumentWithTypeTag(I, Args, Loc);
3214    }
3215  }
3216
3217  // Check that passed arguments match the alignment of original arguments.
3218  // Try to get the missing prototype from the declaration.
3219  if (!Proto && FDecl) {
3220    const auto *FT = FDecl->getFunctionType();
3221    if (isa_and_nonnull<FunctionProtoType>(FT))
3222      Proto = cast<FunctionProtoType>(FDecl->getFunctionType());
3223  }
3224  if (Proto) {
3225    // For variadic functions, we may have more args than parameters.
3226    // For some K&R functions, we may have less args than parameters.
3227    const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size());
3228    bool IsScalableRet = Proto->getReturnType()->isSizelessVectorType();
3229    bool IsScalableArg = false;
3230    for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) {
3231      // Args[ArgIdx] can be null in malformed code.
3232      if (const Expr *Arg = Args[ArgIdx]) {
3233        if (Arg->containsErrors())
3234          continue;
3235
3236        if (Context.getTargetInfo().getTriple().isOSAIX() && FDecl && Arg &&
3237            FDecl->hasLinkage() &&
3238            FDecl->getFormalLinkage() != Linkage::Internal &&
3239            CallType == VariadicDoesNotApply)
3240          PPC().checkAIXMemberAlignment((Arg->getExprLoc()), Arg);
3241
3242        QualType ParamTy = Proto->getParamType(ArgIdx);
3243        if (ParamTy->isSizelessVectorType())
3244          IsScalableArg = true;
3245        QualType ArgTy = Arg->getType();
        // Diagnostic uses 1-based parameter numbering.
3246        CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1),
3247                          ArgTy, ParamTy);
3248      }
3249    }
3250
3251    // If the callee has an AArch64 SME attribute to indicate that it is an
3252    // __arm_streaming function, then the caller requires SME to be available.
3255      if (auto *CallerFD = dyn_cast<FunctionDecl>(CurContext)) {
3256        llvm::StringMap<bool> CallerFeatureMap;
3257        Context.getFunctionFeatureMap(CallerFeatureMap, CallerFD);
3258        if (!CallerFeatureMap.contains("sme"))
3259          Diag(Loc, diag::err_sme_call_in_non_sme_target);
3260      } else if (!Context.getTargetInfo().hasFeature("sme")) {
3261        Diag(Loc, diag::err_sme_call_in_non_sme_target);
3262      }
3263    }
3264
3265    // If the call requires a streaming-mode change and has scalable vector
3266    // arguments or return values, then warn the user that the streaming and
3267    // non-streaming vector lengths may be different.
3268    const auto *CallerFD = dyn_cast<FunctionDecl>(CurContext);
3269    if (CallerFD && (!FD || !FD->getBuiltinID()) &&
3270        (IsScalableArg || IsScalableRet)) {
3271      bool IsCalleeStreaming =
3273      bool IsCalleeStreamingCompatible =
3274          ExtInfo.AArch64SMEAttributes &
3276      SemaARM::ArmStreamingType CallerFnType = getArmStreamingFnType(CallerFD);
3277      if (!IsCalleeStreamingCompatible &&
3278          (CallerFnType == SemaARM::ArmStreamingCompatible ||
3279           ((CallerFnType == SemaARM::ArmStreaming) ^ IsCalleeStreaming))) {
3280        if (IsScalableArg)
3281          Diag(Loc, diag::warn_sme_streaming_pass_return_vl_to_non_streaming)
3282              << /*IsArg=*/true;
3283        if (IsScalableRet)
3284          Diag(Loc, diag::warn_sme_streaming_pass_return_vl_to_non_streaming)
3285              << /*IsArg=*/false;
3286      }
3287    }
3288
    // Callee ZA/ZT0 state requirements — the initializers are not visible
    // in this extracted view (presumably derived from ExtInfo's
    // AArch64SMEAttributes); the caller must provide matching state.
3289    FunctionType::ArmStateValue CalleeArmZAState =
3291    FunctionType::ArmStateValue CalleeArmZT0State =
3293    if (CalleeArmZAState != FunctionType::ARM_None ||
3294        CalleeArmZT0State != FunctionType::ARM_None) {
3295      bool CallerHasZAState = false;
3296      bool CallerHasZT0State = false;
3297      if (CallerFD) {
3298        auto *Attr = CallerFD->getAttr<ArmNewAttr>();
3299        if (Attr && Attr->isNewZA())
3300          CallerHasZAState = true;
3301        if (Attr && Attr->isNewZT0())
3302          CallerHasZT0State = true;
3303        if (const auto *FPT = CallerFD->getType()->getAs<FunctionProtoType>()) {
3304          CallerHasZAState |=
3306                  FPT->getExtProtoInfo().AArch64SMEAttributes) !=
3308          CallerHasZT0State |=
3310                  FPT->getExtProtoInfo().AArch64SMEAttributes) !=
3312        }
3313      }
3314
3315      if (CalleeArmZAState != FunctionType::ARM_None && !CallerHasZAState)
3316        Diag(Loc, diag::err_sme_za_call_no_za_state);
3317
3318      if (CalleeArmZT0State != FunctionType::ARM_None && !CallerHasZT0State)
3319        Diag(Loc, diag::err_sme_zt0_call_no_zt0_state);
3320
3321      if (CallerHasZAState && CalleeArmZAState == FunctionType::ARM_None &&
3322          CalleeArmZT0State != FunctionType::ARM_None) {
3323        Diag(Loc, diag::err_sme_unimplemented_za_save_restore);
3324        Diag(Loc, diag::note_sme_use_preserves_za);
3325      }
3326    }
3327  }
3328
  // alloc_align attribute: the designated argument must be a power of two
  // and no greater than the implementation's maximum alignment.
3329  if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
3330    auto *AA = FDecl->getAttr<AllocAlignAttr>();
3331    const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
3332    if (!Arg->isValueDependent()) {
3333      Expr::EvalResult Align;
3334      if (Arg->EvaluateAsInt(Align, Context)) {
3335        const llvm::APSInt &I = Align.Val.getInt();
3336        if (!I.isPowerOf2())
3337          Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two)
3338              << Arg->getSourceRange();
3339
3340        if (I > Sema::MaximumAlignment)
3341          Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great)
3342              << Arg->getSourceRange() << Sema::MaximumAlignment;
3343      }
3344    }
3345  }
3346
3347  if (FD)
3348    diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
3349}
3350
/// NOTE(review): the declarator line and the body statement of this
/// function are missing from this extracted view. Judging from the
/// visible fragment it takes an AutoType (AutoT) and, when the type has
/// an associated type-constraint concept, acts on that ConceptDecl
/// (presumably Sema::CheckConstrainedAuto diagnosing use of the concept
/// declaration) — confirm against the original source.
3352  if (ConceptDecl *Decl = AutoT->getTypeConstraintConcept()) {
3354  }
3355}
3356
/// Check a constructor call: verifies the alignment of the 'this' object
/// against the constructor's object parameter, then runs the shared
/// checkCall logic on the constructor arguments.
/// NOTE(review): missing from this extracted view: an
/// 'ArrayRef<const Expr *> Args' parameter line, a trailing
/// 'SourceLocation Loc' parameter, and the initializer of CallType
/// (presumably Proto->isVariadic() ? VariadicConstructor :
/// VariadicDoesNotApply). Confirm against the original source.
3357void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
3359                                const FunctionProtoType *Proto,
3361  VariadicCallType CallType =
3363
3364  auto *Ctor = cast<CXXConstructorDecl>(FDecl);
  // Compare as pointers so CheckArgAlignment strips one indirection level
  // from each side.
3365  CheckArgAlignment(
3366      Loc, FDecl, "'this'", Context.getPointerType(ThisType),
3367      Context.getPointerType(Ctor->getFunctionObjectParameterType()));
3368
3369  checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
3370            Loc, SourceRange(), CallType);
3371}
3372
/// Per-call checking for calls with a known function declaration:
/// computes the implicit object argument for member/operator calls,
/// checks 'this' alignment, runs the shared checkCall logic, then applies
/// function-specific checks (TCB enforcement, absolute-value misuse,
/// max-unsigned-zero, inf/nan, ObjC CF-format, and memory/string builtin
/// argument checks). Always returns false — diagnostics are emitted but
/// the call itself is not rejected here.
/// NOTE(review): the first signature line (presumably 'bool
/// Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,') and
/// the continuation of the 'else if' condition below are not visible in
/// this extracted view.
3374                             const FunctionProtoType *Proto) {
3375  bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
3376                              isa<CXXMethodDecl>(FDecl);
3377  bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
3378                          IsMemberOperatorCall;
3379  VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
3380                                                  TheCall->getCallee());
3381  Expr** Args = TheCall->getArgs();
3382  unsigned NumArgs = TheCall->getNumArgs();
3383
3384  Expr *ImplicitThis = nullptr;
3385  if (IsMemberOperatorCall && !FDecl->hasCXXExplicitFunctionObjectParameter()) {
3386    // If this is a call to a member operator, hide the first
3387    // argument from checkCall.
3388    // FIXME: Our choice of AST representation here is less than ideal.
3389    ImplicitThis = Args[0];
3390    ++Args;
3391    --NumArgs;
3392  } else if (IsMemberFunction && !FDecl->isStatic() &&
3394    ImplicitThis =
3395        cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();
3396
3397  if (ImplicitThis) {
3398    // ImplicitThis may or may not be a pointer, depending on whether . or -> is
3399    // used.
3400    QualType ThisType = ImplicitThis->getType();
3401    if (!ThisType->isPointerType()) {
3402      assert(!ThisType->isReferenceType());
3403      ThisType = Context.getPointerType(ThisType);
3404    }
3405
3406    QualType ThisTypeFromDecl = Context.getPointerType(
3407        cast<CXXMethodDecl>(FDecl)->getFunctionObjectParameterType());
3408
3409    CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType,
3410                      ThisTypeFromDecl);
3411  }
3412
3413  checkCall(FDecl, Proto, ImplicitThis, llvm::ArrayRef(Args, NumArgs),
3414            IsMemberFunction, TheCall->getRParenLoc(),
3415            TheCall->getCallee()->getSourceRange(), CallType);
3416
3417  IdentifierInfo *FnInfo = FDecl->getIdentifier();
3418  // None of the checks below are needed for functions that don't have
3419  // simple names (e.g., C++ conversion functions).
3420  if (!FnInfo)
3421    return false;
3422
3423  // Enforce TCB except for builtin calls, which are always allowed.
3424  if (FDecl->getBuiltinID() == 0)
3425    CheckTCBEnforcement(TheCall->getExprLoc(), FDecl);
3426
3427  CheckAbsoluteValueFunction(TheCall, FDecl);
3428  CheckMaxUnsignedZero(TheCall, FDecl);
3429  CheckInfNaNFunction(TheCall, FDecl);
3430
3431  if (getLangOpts().ObjC)
3432    ObjC().DiagnoseCStringFormatDirectiveInCFAPI(FDecl, Args, NumArgs);
3433
  // Dispatch memory/string functions to their dedicated argument checkers;
  // a kind of 0 means "not a memory function".
3434  unsigned CMId = FDecl->getMemoryFunctionKind();
3435
3436  // Handle memory setting and copying functions.
3437  switch (CMId) {
3438  case 0:
3439    return false;
3440  case Builtin::BIstrlcpy: // fallthrough
3441  case Builtin::BIstrlcat:
3442    CheckStrlcpycatArguments(TheCall, FnInfo);
3443    break;
3444  case Builtin::BIstrncat:
3445    CheckStrncatArguments(TheCall, FnInfo);
3446    break;
3447  case Builtin::BIfree:
3448    CheckFreeArguments(TheCall);
3449    break;
3450  default:
3451    CheckMemaccessArguments(TheCall, CMId, FnInfo);
3452  }
3453
3454  return false;
3455}
3456
3457bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
3458 const FunctionProtoType *Proto) {
3459 QualType Ty;
3460 if (const auto *V = dyn_cast<VarDecl>(NDecl))
3461 Ty = V->getType().getNonReferenceType();
3462 else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
3463 Ty = F->getType().getNonReferenceType();
3464 else
3465 return false;
3466
3467 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
3468 !Ty->isFunctionProtoType())
3469 return false;
3470
3471 VariadicCallType CallType;
3472 if (!Proto || !Proto->isVariadic()) {
3473 CallType = VariadicDoesNotApply;
3474 } else if (Ty->isBlockPointerType()) {
3475 CallType = VariadicBlock;
3476 } else { // Ty->isFunctionPointerType()
3477 CallType = VariadicFunction;
3478 }
3479
3480 checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
3481 llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
3482 /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
3483 TheCall->getCallee()->getSourceRange(), CallType);
3484
3485 return false;
3486}
3487
3488bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
3489 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
3490 TheCall->getCallee());
3491 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
3492 llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
3493 /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
3494 TheCall->getCallee()->getSourceRange(), CallType);
3495
3496 return false;
3497}
3498
3499static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
3500 if (!llvm::isValidAtomicOrderingCABI(Ordering))
3501 return false;
3502
3503 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
3504 switch (Op) {
3505 case AtomicExpr::AO__c11_atomic_init:
3506 case AtomicExpr::AO__opencl_atomic_init:
3507 llvm_unreachable("There is no ordering argument for an init");
3508
3509 case AtomicExpr::AO__c11_atomic_load:
3510 case AtomicExpr::AO__opencl_atomic_load:
3511 case AtomicExpr::AO__hip_atomic_load:
3512 case AtomicExpr::AO__atomic_load_n:
3513 case AtomicExpr::AO__atomic_load:
3514 case AtomicExpr::AO__scoped_atomic_load_n:
3515 case AtomicExpr::AO__scoped_atomic_load:
3516 return OrderingCABI != llvm::AtomicOrderingCABI::release &&
3517 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
3518
3519 case AtomicExpr::AO__c11_atomic_store:
3520 case AtomicExpr::AO__opencl_atomic_store:
3521 case AtomicExpr::AO__hip_atomic_store:
3522 case AtomicExpr::AO__atomic_store:
3523 case AtomicExpr::AO__atomic_store_n:
3524 case AtomicExpr::AO__scoped_atomic_store:
3525 case AtomicExpr::AO__scoped_atomic_store_n:
3526 return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
3527 OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
3528 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
3529
3530 default:
3531 return true;
3532 }
3533}
3534
/// Entry point for the overloaded atomic builtins once overload
/// resolution has selected an operation: unwraps the CallExpr and
/// forwards its arguments and source locations to BuildAtomicExpr.
/// NOTE(review): the second signature line (presumably
/// 'AtomicExpr::AtomicOp Op) {') is missing from this extracted view.
3535ExprResult Sema::AtomicOpsOverloaded(ExprResult TheCallResult,
3537  CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
3538  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
3539  MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()};
3540  return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()},
3541                         DRE->getSourceRange(), TheCall->getRParenLoc(), Args,
3542                         Op);
3543}
3544
3546 SourceLocation RParenLoc, MultiExprArg Args,
3548 AtomicArgumentOrder ArgOrder) {
3549 // All the non-OpenCL operations take one of the following forms.
3550 // The OpenCL operations take the __c11 forms with one extra argument for
3551 // synchronization scope.
3552 enum {
3553 // C __c11_atomic_init(A *, C)
3554 Init,
3555
3556 // C __c11_atomic_load(A *, int)
3557 Load,
3558
3559 // void __atomic_load(A *, CP, int)
3560 LoadCopy,
3561
3562 // void __atomic_store(A *, CP, int)
3563 Copy,
3564
3565 // C __c11_atomic_add(A *, M, int)
3566 Arithmetic,
3567
3568 // C __atomic_exchange_n(A *, CP, int)
3569 Xchg,
3570
3571 // void __atomic_exchange(A *, C *, CP, int)
3572 GNUXchg,
3573
3574 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
3575 C11CmpXchg,
3576
3577 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
3578 GNUCmpXchg
3579 } Form = Init;
3580
3581 const unsigned NumForm = GNUCmpXchg + 1;
3582 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
3583 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
3584 // where:
3585 // C is an appropriate type,
3586 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
3587 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
3588 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and
3589 // the int parameters are for orderings.
3590
3591 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm
3592 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
3593 "need to update code for modified forms");
3594 static_assert(AtomicExpr::AO__atomic_add_fetch == 0 &&
3595 AtomicExpr::AO__atomic_xor_fetch + 1 ==
3596 AtomicExpr::AO__c11_atomic_compare_exchange_strong,
3597 "need to update code for modified C11 atomics");
3598 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_compare_exchange_strong &&
3599 Op <= AtomicExpr::AO__opencl_atomic_store;
3600 bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_compare_exchange_strong &&
3601 Op <= AtomicExpr::AO__hip_atomic_store;
3602 bool IsScoped = Op >= AtomicExpr::AO__scoped_atomic_add_fetch &&
3603 Op <= AtomicExpr::AO__scoped_atomic_xor_fetch;
3604 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_compare_exchange_strong &&
3605 Op <= AtomicExpr::AO__c11_atomic_store) ||
3606 IsOpenCL;
3607 bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
3608 Op == AtomicExpr::AO__atomic_store_n ||
3609 Op == AtomicExpr::AO__atomic_exchange_n ||
3610 Op == AtomicExpr::AO__atomic_compare_exchange_n ||
3611 Op == AtomicExpr::AO__scoped_atomic_load_n ||
3612 Op == AtomicExpr::AO__scoped_atomic_store_n ||
3613 Op == AtomicExpr::AO__scoped_atomic_exchange_n ||
3614 Op == AtomicExpr::AO__scoped_atomic_compare_exchange_n;
3615 // Bit mask for extra allowed value types other than integers for atomic
3616 // arithmetic operations. Add/sub allow pointer and floating point. Min/max
3617 // allow floating point.
3618 enum ArithOpExtraValueType {
3619 AOEVT_None = 0,
3620 AOEVT_Pointer = 1,
3621 AOEVT_FP = 2,
3622 };
3623 unsigned ArithAllows = AOEVT_None;
3624
3625 switch (Op) {
3626 case AtomicExpr::AO__c11_atomic_init:
3627 case AtomicExpr::AO__opencl_atomic_init:
3628 Form = Init;
3629 break;
3630
3631 case AtomicExpr::AO__c11_atomic_load:
3632 case AtomicExpr::AO__opencl_atomic_load:
3633 case AtomicExpr::AO__hip_atomic_load:
3634 case AtomicExpr::AO__atomic_load_n:
3635 case AtomicExpr::AO__scoped_atomic_load_n:
3636 Form = Load;
3637 break;
3638
3639 case AtomicExpr::AO__atomic_load:
3640 case AtomicExpr::AO__scoped_atomic_load:
3641 Form = LoadCopy;
3642 break;
3643
3644 case AtomicExpr::AO__c11_atomic_store:
3645 case AtomicExpr::AO__opencl_atomic_store:
3646 case AtomicExpr::AO__hip_atomic_store:
3647 case AtomicExpr::AO__atomic_store:
3648 case AtomicExpr::AO__atomic_store_n:
3649 case AtomicExpr::AO__scoped_atomic_store:
3650 case AtomicExpr::AO__scoped_atomic_store_n:
3651 Form = Copy;
3652 break;
3653 case AtomicExpr::AO__atomic_fetch_add:
3654 case AtomicExpr::AO__atomic_fetch_sub:
3655 case AtomicExpr::AO__atomic_add_fetch:
3656 case AtomicExpr::AO__atomic_sub_fetch:
3657 case AtomicExpr::AO__scoped_atomic_fetch_add:
3658 case AtomicExpr::AO__scoped_atomic_fetch_sub:
3659 case AtomicExpr::AO__scoped_atomic_add_fetch:
3660 case AtomicExpr::AO__scoped_atomic_sub_fetch:
3661 case AtomicExpr::AO__c11_atomic_fetch_add:
3662 case AtomicExpr::AO__c11_atomic_fetch_sub:
3663 case AtomicExpr::AO__opencl_atomic_fetch_add:
3664 case AtomicExpr::AO__opencl_atomic_fetch_sub:
3665 case AtomicExpr::AO__hip_atomic_fetch_add:
3666 case AtomicExpr::AO__hip_atomic_fetch_sub:
3667 ArithAllows = AOEVT_Pointer | AOEVT_FP;
3668 Form = Arithmetic;
3669 break;
3670 case AtomicExpr::AO__atomic_fetch_max:
3671 case AtomicExpr::AO__atomic_fetch_min:
3672 case AtomicExpr::AO__atomic_max_fetch:
3673 case AtomicExpr::AO__atomic_min_fetch:
3674 case AtomicExpr::AO__scoped_atomic_fetch_max:
3675 case AtomicExpr::AO__scoped_atomic_fetch_min:
3676 case AtomicExpr::AO__scoped_atomic_max_fetch:
3677 case AtomicExpr::AO__scoped_atomic_min_fetch:
3678 case AtomicExpr::AO__c11_atomic_fetch_max:
3679 case AtomicExpr::AO__c11_atomic_fetch_min:
3680 case AtomicExpr::AO__opencl_atomic_fetch_max:
3681 case AtomicExpr::AO__opencl_atomic_fetch_min:
3682 case AtomicExpr::AO__hip_atomic_fetch_max:
3683 case AtomicExpr::AO__hip_atomic_fetch_min:
3684 ArithAllows = AOEVT_FP;
3685 Form = Arithmetic;
3686 break;
3687 case AtomicExpr::AO__c11_atomic_fetch_and:
3688 case AtomicExpr::AO__c11_atomic_fetch_or:
3689 case AtomicExpr::AO__c11_atomic_fetch_xor:
3690 case AtomicExpr::AO__hip_atomic_fetch_and:
3691 case AtomicExpr::AO__hip_atomic_fetch_or:
3692 case AtomicExpr::AO__hip_atomic_fetch_xor:
3693 case AtomicExpr::AO__c11_atomic_fetch_nand:
3694 case AtomicExpr::AO__opencl_atomic_fetch_and:
3695 case AtomicExpr::AO__opencl_atomic_fetch_or:
3696 case AtomicExpr::AO__opencl_atomic_fetch_xor:
3697 case AtomicExpr::AO__atomic_fetch_and:
3698 case AtomicExpr::AO__atomic_fetch_or:
3699 case AtomicExpr::AO__atomic_fetch_xor:
3700 case AtomicExpr::AO__atomic_fetch_nand:
3701 case AtomicExpr::AO__atomic_and_fetch:
3702 case AtomicExpr::AO__atomic_or_fetch:
3703 case AtomicExpr::AO__atomic_xor_fetch:
3704 case AtomicExpr::AO__atomic_nand_fetch:
3705 case AtomicExpr::AO__scoped_atomic_fetch_and:
3706 case AtomicExpr::AO__scoped_atomic_fetch_or:
3707 case AtomicExpr::AO__scoped_atomic_fetch_xor:
3708 case AtomicExpr::AO__scoped_atomic_fetch_nand:
3709 case AtomicExpr::AO__scoped_atomic_and_fetch:
3710 case AtomicExpr::AO__scoped_atomic_or_fetch:
3711 case AtomicExpr::AO__scoped_atomic_xor_fetch:
3712 case AtomicExpr::AO__scoped_atomic_nand_fetch:
3713 Form = Arithmetic;
3714 break;
3715
3716 case AtomicExpr::AO__c11_atomic_exchange:
3717 case AtomicExpr::AO__hip_atomic_exchange:
3718 case AtomicExpr::AO__opencl_atomic_exchange:
3719 case AtomicExpr::AO__atomic_exchange_n:
3720 case AtomicExpr::AO__scoped_atomic_exchange_n:
3721 Form = Xchg;
3722 break;
3723
3724 case AtomicExpr::AO__atomic_exchange:
3725 case AtomicExpr::AO__scoped_atomic_exchange:
3726 Form = GNUXchg;
3727 break;
3728
3729 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
3730 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
3731 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
3732 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
3733 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
3734 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
3735 Form = C11CmpXchg;
3736 break;
3737
3738 case AtomicExpr::AO__atomic_compare_exchange:
3739 case AtomicExpr::AO__atomic_compare_exchange_n:
3740 case AtomicExpr::AO__scoped_atomic_compare_exchange:
3741 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
3742 Form = GNUCmpXchg;
3743 break;
3744 }
3745
3746 unsigned AdjustedNumArgs = NumArgs[Form];
3747 if ((IsOpenCL || IsHIP || IsScoped) &&
3748 Op != AtomicExpr::AO__opencl_atomic_init)
3749 ++AdjustedNumArgs;
3750 // Check we have the right number of arguments.
3751 if (Args.size() < AdjustedNumArgs) {
3752 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args)
3753 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
3754 << /*is non object*/ 0 << ExprRange;
3755 return ExprError();
3756 } else if (Args.size() > AdjustedNumArgs) {
3757 Diag(Args[AdjustedNumArgs]->getBeginLoc(),
3758 diag::err_typecheck_call_too_many_args)
3759 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
3760 << /*is non object*/ 0 << ExprRange;
3761 return ExprError();
3762 }
3763
3764 // Inspect the first argument of the atomic operation.
3765 Expr *Ptr = Args[0];
3767 if (ConvertedPtr.isInvalid())
3768 return ExprError();
3769
3770 Ptr = ConvertedPtr.get();
3771 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
3772 if (!pointerType) {
3773 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
3774 << Ptr->getType() << 0 << Ptr->getSourceRange();
3775 return ExprError();
3776 }
3777
3778 // For a __c11 builtin, this should be a pointer to an _Atomic type.
3779 QualType AtomTy = pointerType->getPointeeType(); // 'A'
3780 QualType ValType = AtomTy; // 'C'
3781 if (IsC11) {
3782 if (!AtomTy->isAtomicType()) {
3783 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic)
3784 << Ptr->getType() << Ptr->getSourceRange();
3785 return ExprError();
3786 }
3787 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) ||
3789 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic)
3790 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType()
3791 << Ptr->getSourceRange();
3792 return ExprError();
3793 }
3794 ValType = AtomTy->castAs<AtomicType>()->getValueType();
3795 } else if (Form != Load && Form != LoadCopy) {
3796 if (ValType.isConstQualified()) {
3797 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer)
3798 << Ptr->getType() << Ptr->getSourceRange();
3799 return ExprError();
3800 }
3801 }
3802
3803 // Pointer to object of size zero is not allowed.
3804 if (RequireCompleteType(Ptr->getBeginLoc(), AtomTy,
3805 diag::err_incomplete_type))
3806 return ExprError();
3807 if (Context.getTypeInfoInChars(AtomTy).Width.isZero()) {
3808 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
3809 << Ptr->getType() << 1 << Ptr->getSourceRange();
3810 return ExprError();
3811 }
3812
3813 // For an arithmetic operation, the implied arithmetic must be well-formed.
3814 if (Form == Arithmetic) {
3815 // GCC does not enforce these rules for GNU atomics, but we do to help catch
3816 // trivial type errors.
3817 auto IsAllowedValueType = [&](QualType ValType,
3818 unsigned AllowedType) -> bool {
3819 if (ValType->isIntegerType())
3820 return true;
3821 if (ValType->isPointerType())
3822 return AllowedType & AOEVT_Pointer;
3823 if (!(ValType->isFloatingType() && (AllowedType & AOEVT_FP)))
3824 return false;
3825 // LLVM Parser does not allow atomicrmw with x86_fp80 type.
3826 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) &&
3828 &llvm::APFloat::x87DoubleExtended())
3829 return false;
3830 return true;
3831 };
3832 if (!IsAllowedValueType(ValType, ArithAllows)) {
3833 auto DID = ArithAllows & AOEVT_FP
3834 ? (ArithAllows & AOEVT_Pointer
3835 ? diag::err_atomic_op_needs_atomic_int_ptr_or_fp
3836 : diag::err_atomic_op_needs_atomic_int_or_fp)
3837 : diag::err_atomic_op_needs_atomic_int;
3838 Diag(ExprRange.getBegin(), DID)
3839 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
3840 return ExprError();
3841 }
3842 if (IsC11 && ValType->isPointerType() &&
3844 diag::err_incomplete_type)) {
3845 return ExprError();
3846 }
3847 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
3848 // For __atomic_*_n operations, the value type must be a scalar integral or
3849 // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
3850 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr)
3851 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
3852 return ExprError();
3853 }
3854
3855 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
3856 !AtomTy->isScalarType()) {
3857 // For GNU atomics, require a trivially-copyable type. This is not part of
3858 // the GNU atomics specification but we enforce it for consistency with
3859 // other atomics which generally all require a trivially-copyable type. This
3860 // is because atomics just copy bits.
3861 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy)
3862 << Ptr->getType() << Ptr->getSourceRange();
3863 return ExprError();
3864 }
3865
3866 switch (ValType.getObjCLifetime()) {
3869 // okay
3870 break;
3871
3875 // FIXME: Can this happen? By this point, ValType should be known
3876 // to be trivially copyable.
3877 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership)
3878 << ValType << Ptr->getSourceRange();
3879 return ExprError();
3880 }
3881
3882 // All atomic operations have an overload which takes a pointer to a volatile
3883 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself
3884 // into the result or the other operands. Similarly atomic_load takes a
3885 // pointer to a const 'A'.
3886 ValType.removeLocalVolatile();
3887 ValType.removeLocalConst();
3888 QualType ResultType = ValType;
3889 if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
3890 Form == Init)
3891 ResultType = Context.VoidTy;
3892 else if (Form == C11CmpXchg || Form == GNUCmpXchg)
3893 ResultType = Context.BoolTy;
3894
3895 // The type of a parameter passed 'by value'. In the GNU atomics, such
3896 // arguments are actually passed as pointers.
3897 QualType ByValType = ValType; // 'CP'
3898 bool IsPassedByAddress = false;
3899 if (!IsC11 && !IsHIP && !IsN) {
3900 ByValType = Ptr->getType();
3901 IsPassedByAddress = true;
3902 }
3903
3904 SmallVector<Expr *, 5> APIOrderedArgs;
3905 if (ArgOrder == Sema::AtomicArgumentOrder::AST) {
3906 APIOrderedArgs.push_back(Args[0]);
3907 switch (Form) {
3908 case Init:
3909 case Load:
3910 APIOrderedArgs.push_back(Args[1]); // Val1/Order
3911 break;
3912 case LoadCopy:
3913 case Copy:
3914 case Arithmetic:
3915 case Xchg:
3916 APIOrderedArgs.push_back(Args[2]); // Val1
3917 APIOrderedArgs.push_back(Args[1]); // Order
3918 break;
3919 case GNUXchg:
3920 APIOrderedArgs.push_back(Args[2]); // Val1
3921 APIOrderedArgs.push_back(Args[3]); // Val2
3922 APIOrderedArgs.push_back(Args[1]); // Order
3923 break;
3924 case C11CmpXchg:
3925 APIOrderedArgs.push_back(Args[2]); // Val1
3926 APIOrderedArgs.push_back(Args[4]); // Val2
3927 APIOrderedArgs.push_back(Args[1]); // Order
3928 APIOrderedArgs.push_back(Args[3]); // OrderFail
3929 break;
3930 case GNUCmpXchg:
3931 APIOrderedArgs.push_back(Args[2]); // Val1
3932 APIOrderedArgs.push_back(Args[4]); // Val2
3933 APIOrderedArgs.push_back(Args[5]); // Weak
3934 APIOrderedArgs.push_back(Args[1]); // Order
3935 APIOrderedArgs.push_back(Args[3]); // OrderFail
3936 break;
3937 }
3938 } else
3939 APIOrderedArgs.append(Args.begin(), Args.end());
3940
3941 // The first argument's non-CV pointer type is used to deduce the type of
3942 // subsequent arguments, except for:
3943 // - weak flag (always converted to bool)
3944 // - memory order (always converted to int)
3945 // - scope (always converted to int)
3946 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) {
3947 QualType Ty;
3948 if (i < NumVals[Form] + 1) {
3949 switch (i) {
3950 case 0:
3951 // The first argument is always a pointer. It has a fixed type.
3952 // It is always dereferenced, a nullptr is undefined.
3953 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
3954 // Nothing else to do: we already know all we want about this pointer.
3955 continue;
3956 case 1:
3957 // The second argument is the non-atomic operand. For arithmetic, this
3958 // is always passed by value, and for a compare_exchange it is always
3959 // passed by address. For the rest, GNU uses by-address and C11 uses
3960 // by-value.
3961 assert(Form != Load);
3962 if (Form == Arithmetic && ValType->isPointerType())
3964 else if (Form == Init || Form == Arithmetic)
3965 Ty = ValType;
3966 else if (Form == Copy || Form == Xchg) {
3967 if (IsPassedByAddress) {
3968 // The value pointer is always dereferenced, a nullptr is undefined.
3969 CheckNonNullArgument(*this, APIOrderedArgs[i],
3970 ExprRange.getBegin());
3971 }
3972 Ty = ByValType;
3973 } else {
3974 Expr *ValArg = APIOrderedArgs[i];
3975 // The value pointer is always dereferenced, a nullptr is undefined.
3976 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin());
3978 // Keep address space of non-atomic pointer type.
3979 if (const PointerType *PtrTy =
3980 ValArg->getType()->getAs<PointerType>()) {
3981 AS = PtrTy->getPointeeType().getAddressSpace();
3982 }
3985 }
3986 break;
3987 case 2:
3988 // The third argument to compare_exchange / GNU exchange is the desired
3989 // value, either by-value (for the C11 and *_n variant) or as a pointer.
3990 if (IsPassedByAddress)
3991 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
3992 Ty = ByValType;
3993 break;
3994 case 3:
3995 // The fourth argument to GNU compare_exchange is a 'weak' flag.
3996 Ty = Context.BoolTy;
3997 break;
3998 }
3999 } else {
4000 // The order(s) and scope are always converted to int.
4001 Ty = Context.IntTy;
4002 }
4003
4004 InitializedEntity Entity =
4006 ExprResult Arg = APIOrderedArgs[i];
4007 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
4008 if (Arg.isInvalid())
4009 return true;
4010 APIOrderedArgs[i] = Arg.get();
4011 }
4012
4013 // Permute the arguments into a 'consistent' order.
4014 SmallVector<Expr*, 5> SubExprs;
4015 SubExprs.push_back(Ptr);
4016 switch (Form) {
4017 case Init:
4018 // Note, AtomicExpr::getVal1() has a special case for this atomic.
4019 SubExprs.push_back(APIOrderedArgs[1]); // Val1
4020 break;
4021 case Load:
4022 SubExprs.push_back(APIOrderedArgs[1]); // Order
4023 break;
4024 case LoadCopy:
4025 case Copy:
4026 case Arithmetic:
4027 case Xchg:
4028 SubExprs.push_back(APIOrderedArgs[2]); // Order
4029 SubExprs.push_back(APIOrderedArgs[1]); // Val1
4030 break;
4031 case GNUXchg:
4032 // Note, AtomicExpr::getVal2() has a special case for this atomic.
4033 SubExprs.push_back(APIOrderedArgs[3]); // Order
4034 SubExprs.push_back(APIOrderedArgs[1]); // Val1
4035 SubExprs.push_back(APIOrderedArgs[2]); // Val2
4036 break;
4037 case C11CmpXchg:
4038 SubExprs.push_back(APIOrderedArgs[3]); // Order
4039 SubExprs.push_back(APIOrderedArgs[1]); // Val1
4040 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail
4041 SubExprs.push_back(APIOrderedArgs[2]); // Val2
4042 break;
4043 case GNUCmpXchg:
4044 SubExprs.push_back(APIOrderedArgs[4]); // Order
4045 SubExprs.push_back(APIOrderedArgs[1]); // Val1
4046 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail
4047 SubExprs.push_back(APIOrderedArgs[2]); // Val2
4048 SubExprs.push_back(APIOrderedArgs[3]); // Weak
4049 break;
4050 }
4051
4052 // If the memory orders are constants, check they are valid.
4053 if (SubExprs.size() >= 2 && Form != Init) {
4054 std::optional<llvm::APSInt> Success =
4055 SubExprs[1]->getIntegerConstantExpr(Context);
4056 if (Success && !isValidOrderingForOp(Success->getSExtValue(), Op)) {
4057 Diag(SubExprs[1]->getBeginLoc(),
4058 diag::warn_atomic_op_has_invalid_memory_order)
4059 << /*success=*/(Form == C11CmpXchg || Form == GNUCmpXchg)
4060 << SubExprs[1]->getSourceRange();
4061 }
4062 if (SubExprs.size() >= 5) {
4063 if (std::optional<llvm::APSInt> Failure =
4064 SubExprs[3]->getIntegerConstantExpr(Context)) {
4065 if (!llvm::is_contained(
4066 {llvm::AtomicOrderingCABI::relaxed,
4067 llvm::AtomicOrderingCABI::consume,
4068 llvm::AtomicOrderingCABI::acquire,
4069 llvm::AtomicOrderingCABI::seq_cst},
4070 (llvm::AtomicOrderingCABI)Failure->getSExtValue())) {
4071 Diag(SubExprs[3]->getBeginLoc(),
4072 diag::warn_atomic_op_has_invalid_memory_order)
4073 << /*failure=*/2 << SubExprs[3]->getSourceRange();
4074 }
4075 }
4076 }
4077 }
4078
4079 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
4080 auto *Scope = Args[Args.size() - 1];
4081 if (std::optional<llvm::APSInt> Result =
4082 Scope->getIntegerConstantExpr(Context)) {
4083 if (!ScopeModel->isValid(Result->getZExtValue()))
4084 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
4085 << Scope->getSourceRange();
4086 }
4087 SubExprs.push_back(Scope);
4088 }
4089
4090 AtomicExpr *AE = new (Context)
4091 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc);
4092
4093 if ((Op == AtomicExpr::AO__c11_atomic_load ||
4094 Op == AtomicExpr::AO__c11_atomic_store ||
4095 Op == AtomicExpr::AO__opencl_atomic_load ||
4096 Op == AtomicExpr::AO__hip_atomic_load ||
4097 Op == AtomicExpr::AO__opencl_atomic_store ||
4098 Op == AtomicExpr::AO__hip_atomic_store) &&
4100 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib)
4101 << ((Op == AtomicExpr::AO__c11_atomic_load ||
4102 Op == AtomicExpr::AO__opencl_atomic_load ||
4103 Op == AtomicExpr::AO__hip_atomic_load)
4104 ? 0
4105 : 1);
4106
4107 if (ValType->isBitIntType()) {
4108 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit);
4109 return ExprError();
4110 }
4111
4112 return AE;
4113}
4114
4115/// checkBuiltinArgument - Given a call to a builtin function, perform
4116/// normal type-checking on the given argument, updating the call in
4117/// place. This is useful when a builtin function requires custom
4118/// type-checking for some of its arguments but not necessarily all of
4119/// them.
4120///
4121/// Returns true on error.
4122static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
4123 FunctionDecl *Fn = E->getDirectCallee();
4124 assert(Fn && "builtin call without direct callee!");
4125
// Build an initialization entity for the builtin's formal parameter so the
// argument undergoes ordinary copy-initialization against its declared type.
4126 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex);
4127 InitializedEntity Entity =
4129
// NOTE(review): the initializer of 'Entity' was lost in extraction here
// (gutter numbers jump 4127 -> 4129); presumably it is
// InitializedEntity::InitializeParameter(S.Context, Param) — confirm against
// the upstream file.
4130 ExprResult Arg = E->getArg(ArgIndex);
4131 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
4132 if (Arg.isInvalid())
4133 return true;
4134
// Write the converted argument back into the call in place.
4135 E->setArg(ArgIndex, Arg.get());
4136 return false;
4137}
4138
/// BuiltinAtomicOverloaded - Perform semantic analysis for the overloaded
/// __sync_* atomic builtins (e.g. __sync_fetch_and_add). The value type is
/// deduced from the first argument, which must be a pointer to an integer,
/// pointer, or block-pointer type; the generic builtin is then mapped onto
/// its size-specific _1/_2/_4/_8/_16 variant, the fixed value arguments are
/// converted to the deduced type, and the call's callee and result type are
/// rewritten in place. Returns the updated call, or ExprError() on failure.
4139ExprResult Sema::BuiltinAtomicOverloaded(ExprResult TheCallResult) {
4140 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
4141 Expr *Callee = TheCall->getCallee();
4142 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts());
4143 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
4144
4145 // Ensure that we have at least one argument to do type inference from.
4146 if (TheCall->getNumArgs() < 1) {
4147 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
4148 << 0 << 1 << TheCall->getNumArgs() << /*is non object*/ 0
4149 << Callee->getSourceRange();
4150 return ExprError();
4151 }
4152
4153 // Inspect the first argument of the atomic builtin. This should always be
4154 // a pointer type, whose element is an integral scalar or pointer type.
4155 // Because it is a pointer type, we don't have to worry about any implicit
4156 // casts here.
4157 // FIXME: We don't allow floating point scalars as input.
4158 Expr *FirstArg = TheCall->getArg(0);
4159 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
4160 if (FirstArgResult.isInvalid())
4161 return ExprError();
4162 FirstArg = FirstArgResult.get();
4163 TheCall->setArg(0, FirstArg);
4164
4165 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
4166 if (!pointerType) {
4167 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
4168 << FirstArg->getType() << 0 << FirstArg->getSourceRange();
4169 return ExprError();
4170 }
4171
// Only integer, (block-)pointer pointees are accepted; the pointee supplies
// the value type for all subsequent argument conversions.
4172 QualType ValType = pointerType->getPointeeType();
4173 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
4174 !ValType->isBlockPointerType()) {
4175 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
4176 << FirstArg->getType() << 0 << FirstArg->getSourceRange();
4177 return ExprError();
4178 }
4179
4180 if (ValType.isConstQualified()) {
4181 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const)
4182 << FirstArg->getType() << FirstArg->getSourceRange();
4183 return ExprError();
4184 }
4185
// Reject pointees with ARC ownership qualifiers: atomics copy raw bits and
// would bypass ObjC ownership semantics.
// NOTE(review): the case labels of this switch were lost in extraction
// (gutter numbers jump 4186 -> 4189 and 4191 -> 4195) — confirm against the
// upstream file.
4186 switch (ValType.getObjCLifetime()) {
4189 // okay
4190 break;
4191
4195 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
4196 << ValType << FirstArg->getSourceRange();
4197 return ExprError();
4198 }
4199
4200 // Strip any qualifiers off ValType.
4201 ValType = ValType.getUnqualifiedType();
4202
4203 // The majority of builtins return a value, but a few have special return
4204 // types, so allow them to override appropriately below.
4205 QualType ResultType = ValType;
4206
4207 // We need to figure out which concrete builtin this maps onto. For example,
4208 // __sync_fetch_and_add with a 2 byte object turns into
4209 // __sync_fetch_and_add_2.
4210#define BUILTIN_ROW(x) \
4211 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
4212 Builtin::BI##x##_8, Builtin::BI##x##_16 }
4213
// Row = operation, column = operand size (1/2/4/8/16 bytes).
4214 static const unsigned BuiltinIndices[][5] = {
4215 BUILTIN_ROW(__sync_fetch_and_add),
4216 BUILTIN_ROW(__sync_fetch_and_sub),
4217 BUILTIN_ROW(__sync_fetch_and_or),
4218 BUILTIN_ROW(__sync_fetch_and_and),
4219 BUILTIN_ROW(__sync_fetch_and_xor),
4220 BUILTIN_ROW(__sync_fetch_and_nand),
4221
4222 BUILTIN_ROW(__sync_add_and_fetch),
4223 BUILTIN_ROW(__sync_sub_and_fetch),
4224 BUILTIN_ROW(__sync_and_and_fetch),
4225 BUILTIN_ROW(__sync_or_and_fetch),
4226 BUILTIN_ROW(__sync_xor_and_fetch),
4227 BUILTIN_ROW(__sync_nand_and_fetch),
4228
4229 BUILTIN_ROW(__sync_val_compare_and_swap),
4230 BUILTIN_ROW(__sync_bool_compare_and_swap),
4231 BUILTIN_ROW(__sync_lock_test_and_set),
4232 BUILTIN_ROW(__sync_lock_release),
4233 BUILTIN_ROW(__sync_swap)
4234 };
4235#undef BUILTIN_ROW
4236
4237 // Determine the index of the size.
4238 unsigned SizeIndex;
4239 switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
4240 case 1: SizeIndex = 0; break;
4241 case 2: SizeIndex = 1; break;
4242 case 4: SizeIndex = 2; break;
4243 case 8: SizeIndex = 3; break;
4244 case 16: SizeIndex = 4; break;
4245 default:
4246 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
4247 << FirstArg->getType() << FirstArg->getSourceRange();
4248 return ExprError();
4249 }
4250
4251 // Each of these builtins has one pointer argument, followed by some number of
4252 // values (0, 1 or 2) followed by a potentially empty varargs list of stuff
4253 // that we ignore. Find out which row of BuiltinIndices to read from as well
4254 // as the number of fixed args.
4255 unsigned BuiltinID = FDecl->getBuiltinID();
4256 unsigned BuiltinIndex, NumFixed = 1;
4257 bool WarnAboutSemanticsChange = false;
4258 switch (BuiltinID) {
4259 default: llvm_unreachable("Unknown overloaded atomic builtin!");
4260 case Builtin::BI__sync_fetch_and_add:
4261 case Builtin::BI__sync_fetch_and_add_1:
4262 case Builtin::BI__sync_fetch_and_add_2:
4263 case Builtin::BI__sync_fetch_and_add_4:
4264 case Builtin::BI__sync_fetch_and_add_8:
4265 case Builtin::BI__sync_fetch_and_add_16:
4266 BuiltinIndex = 0;
4267 break;
4268
4269 case Builtin::BI__sync_fetch_and_sub:
4270 case Builtin::BI__sync_fetch_and_sub_1:
4271 case Builtin::BI__sync_fetch_and_sub_2:
4272 case Builtin::BI__sync_fetch_and_sub_4:
4273 case Builtin::BI__sync_fetch_and_sub_8:
4274 case Builtin::BI__sync_fetch_and_sub_16:
4275 BuiltinIndex = 1;
4276 break;
4277
4278 case Builtin::BI__sync_fetch_and_or:
4279 case Builtin::BI__sync_fetch_and_or_1:
4280 case Builtin::BI__sync_fetch_and_or_2:
4281 case Builtin::BI__sync_fetch_and_or_4:
4282 case Builtin::BI__sync_fetch_and_or_8:
4283 case Builtin::BI__sync_fetch_and_or_16:
4284 BuiltinIndex = 2;
4285 break;
4286
4287 case Builtin::BI__sync_fetch_and_and:
4288 case Builtin::BI__sync_fetch_and_and_1:
4289 case Builtin::BI__sync_fetch_and_and_2:
4290 case Builtin::BI__sync_fetch_and_and_4:
4291 case Builtin::BI__sync_fetch_and_and_8:
4292 case Builtin::BI__sync_fetch_and_and_16:
4293 BuiltinIndex = 3;
4294 break;
4295
4296 case Builtin::BI__sync_fetch_and_xor:
4297 case Builtin::BI__sync_fetch_and_xor_1:
4298 case Builtin::BI__sync_fetch_and_xor_2:
4299 case Builtin::BI__sync_fetch_and_xor_4:
4300 case Builtin::BI__sync_fetch_and_xor_8:
4301 case Builtin::BI__sync_fetch_and_xor_16:
4302 BuiltinIndex = 4;
4303 break;
4304
// fetch_and_nand triggers an extra warning because GCC changed its semantics
// (see warn_sync_fetch_and_nand_semantics_change below).
4305 case Builtin::BI__sync_fetch_and_nand:
4306 case Builtin::BI__sync_fetch_and_nand_1:
4307 case Builtin::BI__sync_fetch_and_nand_2:
4308 case Builtin::BI__sync_fetch_and_nand_4:
4309 case Builtin::BI__sync_fetch_and_nand_8:
4310 case Builtin::BI__sync_fetch_and_nand_16:
4311 BuiltinIndex = 5;
4312 WarnAboutSemanticsChange = true;
4313 break;
4314
4315 case Builtin::BI__sync_add_and_fetch:
4316 case Builtin::BI__sync_add_and_fetch_1:
4317 case Builtin::BI__sync_add_and_fetch_2:
4318 case Builtin::BI__sync_add_and_fetch_4:
4319 case Builtin::BI__sync_add_and_fetch_8:
4320 case Builtin::BI__sync_add_and_fetch_16:
4321 BuiltinIndex = 6;
4322 break;
4323
4324 case Builtin::BI__sync_sub_and_fetch:
4325 case Builtin::BI__sync_sub_and_fetch_1:
4326 case Builtin::BI__sync_sub_and_fetch_2:
4327 case Builtin::BI__sync_sub_and_fetch_4:
4328 case Builtin::BI__sync_sub_and_fetch_8:
4329 case Builtin::BI__sync_sub_and_fetch_16:
4330 BuiltinIndex = 7;
4331 break;
4332
4333 case Builtin::BI__sync_and_and_fetch:
4334 case Builtin::BI__sync_and_and_fetch_1:
4335 case Builtin::BI__sync_and_and_fetch_2:
4336 case Builtin::BI__sync_and_and_fetch_4:
4337 case Builtin::BI__sync_and_and_fetch_8:
4338 case Builtin::BI__sync_and_and_fetch_16:
4339 BuiltinIndex = 8;
4340 break;
4341
4342 case Builtin::BI__sync_or_and_fetch:
4343 case Builtin::BI__sync_or_and_fetch_1:
4344 case Builtin::BI__sync_or_and_fetch_2:
4345 case Builtin::BI__sync_or_and_fetch_4:
4346 case Builtin::BI__sync_or_and_fetch_8:
4347 case Builtin::BI__sync_or_and_fetch_16:
4348 BuiltinIndex = 9;
4349 break;
4350
4351 case Builtin::BI__sync_xor_and_fetch:
4352 case Builtin::BI__sync_xor_and_fetch_1:
4353 case Builtin::BI__sync_xor_and_fetch_2:
4354 case Builtin::BI__sync_xor_and_fetch_4:
4355 case Builtin::BI__sync_xor_and_fetch_8:
4356 case Builtin::BI__sync_xor_and_fetch_16:
4357 BuiltinIndex = 10;
4358 break;
4359
4360 case Builtin::BI__sync_nand_and_fetch:
4361 case Builtin::BI__sync_nand_and_fetch_1:
4362 case Builtin::BI__sync_nand_and_fetch_2:
4363 case Builtin::BI__sync_nand_and_fetch_4:
4364 case Builtin::BI__sync_nand_and_fetch_8:
4365 case Builtin::BI__sync_nand_and_fetch_16:
4366 BuiltinIndex = 11;
4367 WarnAboutSemanticsChange = true;
4368 break;
4369
// The compare-and-swap forms take two fixed value arguments.
4370 case Builtin::BI__sync_val_compare_and_swap:
4371 case Builtin::BI__sync_val_compare_and_swap_1:
4372 case Builtin::BI__sync_val_compare_and_swap_2:
4373 case Builtin::BI__sync_val_compare_and_swap_4:
4374 case Builtin::BI__sync_val_compare_and_swap_8:
4375 case Builtin::BI__sync_val_compare_and_swap_16:
4376 BuiltinIndex = 12;
4377 NumFixed = 2;
4378 break;
4379
4380 case Builtin::BI__sync_bool_compare_and_swap:
4381 case Builtin::BI__sync_bool_compare_and_swap_1:
4382 case Builtin::BI__sync_bool_compare_and_swap_2:
4383 case Builtin::BI__sync_bool_compare_and_swap_4:
4384 case Builtin::BI__sync_bool_compare_and_swap_8:
4385 case Builtin::BI__sync_bool_compare_and_swap_16:
4386 BuiltinIndex = 13;
4387 NumFixed = 2;
4388 ResultType = Context.BoolTy;
4389 break;
4390
4391 case Builtin::BI__sync_lock_test_and_set:
4392 case Builtin::BI__sync_lock_test_and_set_1:
4393 case Builtin::BI__sync_lock_test_and_set_2:
4394 case Builtin::BI__sync_lock_test_and_set_4:
4395 case Builtin::BI__sync_lock_test_and_set_8:
4396 case Builtin::BI__sync_lock_test_and_set_16:
4397 BuiltinIndex = 14;
4398 break;
4399
// lock_release takes no value arguments and returns void.
4400 case Builtin::BI__sync_lock_release:
4401 case Builtin::BI__sync_lock_release_1:
4402 case Builtin::BI__sync_lock_release_2:
4403 case Builtin::BI__sync_lock_release_4:
4404 case Builtin::BI__sync_lock_release_8:
4405 case Builtin::BI__sync_lock_release_16:
4406 BuiltinIndex = 15;
4407 NumFixed = 0;
4408 ResultType = Context.VoidTy;
4409 break;
4410
4411 case Builtin::BI__sync_swap:
4412 case Builtin::BI__sync_swap_1:
4413 case Builtin::BI__sync_swap_2:
4414 case Builtin::BI__sync_swap_4:
4415 case Builtin::BI__sync_swap_8:
4416 case Builtin::BI__sync_swap_16:
4417 BuiltinIndex = 16;
4418 break;
4419 }
4420
4421 // Now that we know how many fixed arguments we expect, first check that we
4422 // have at least that many.
4423 if (TheCall->getNumArgs() < 1+NumFixed) {
4424 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
4425 << 0 << 1 + NumFixed << TheCall->getNumArgs() << /*is non object*/ 0
4426 << Callee->getSourceRange();
4427 return ExprError();
4428 }
4429
// __sync_* builtins carry implicit sequentially-consistent ordering; warn so
// callers can migrate to the __atomic_* forms with an explicit order.
4430 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst)
4431 << Callee->getSourceRange();
4432
4433 if (WarnAboutSemanticsChange) {
4434 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change)
4435 << Callee->getSourceRange();
4436 }
4437
4438 // Get the decl for the concrete builtin from this, we can tell what the
4439 // concrete integer type we should convert to is.
4440 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
4441 StringRef NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID);
4442 FunctionDecl *NewBuiltinDecl;
4443 if (NewBuiltinID == BuiltinID)
4444 NewBuiltinDecl = FDecl;
4445 else {
4446 // Perform builtin lookup to avoid redeclaring it.
4447 DeclarationName DN(&Context.Idents.get(NewBuiltinName));
4448 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
4449 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
4450 assert(Res.getFoundDecl());
4451 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
4452 if (!NewBuiltinDecl)
4453 return ExprError();
4454 }
4455
4456 // The first argument --- the pointer --- has a fixed type; we
4457 // deduce the types of the rest of the arguments accordingly. Walk
4458 // the remaining arguments, converting them to the deduced value type.
4459 for (unsigned i = 0; i != NumFixed; ++i) {
4460 ExprResult Arg = TheCall->getArg(i+1);
4461
4462 // GCC does an implicit conversion to the pointer or integer ValType. This
4463 // can fail in some cases (1i -> int**), check for this error case now.
4464 // Initialize the argument.
// NOTE(review): the line opening the InitializedEntity declaration was lost
// in extraction here (gutter numbers jump 4464 -> 4466); presumably it is
// InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
// ...) — confirm against the upstream file.
4466 ValType, /*consume*/ false);
4467 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
4468 if (Arg.isInvalid())
4469 return ExprError();
4470
4471 // Okay, we have something that *can* be converted to the right type. Check
4472 // to see if there is a potentially weird extension going on here. This can
4473 // happen when you do an atomic operation on something like a char* and
4474 // pass in 42. The 42 gets converted to char. This is even more strange
4475 // for things like 45.123 -> char, etc.
4476 // FIXME: Do this check.
4477 TheCall->setArg(i+1, Arg.get());
4478 }
4479
4480 // Create a new DeclRefExpr to refer to the new decl.
// NOTE(review): the line declaring the new DeclRefExpr was lost in extraction
// (gutter numbers jump 4480 -> 4482); presumably it is
// DeclRefExpr *NewDRE = DeclRefExpr::Create( — confirm against upstream.
4482 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
4483 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
4484 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());
4485
4486 // Set the callee in the CallExpr.
4487 // FIXME: This loses syntactic information.
4488 QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
4489 ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
4490 CK_BuiltinFnToFnPtr);
4491 TheCall->setCallee(PromotedCall.get());
4492
4493 // Change the result type of the call to match the original value type. This
4494 // is arbitrary, but the codegen for these builtins is designed to handle it
4495 // gracefully.
4496 TheCall->setType(ResultType);
4497
4498 // Prohibit problematic uses of bit-precise integer types with atomic
4499 // builtins. The arguments would have already been converted to the first
4500 // argument's type, so only need to check the first argument.
4501 const auto *BitIntValType = ValType->getAs<BitIntType>();
4502 if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) {
4503 Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
4504 return ExprError();
4505 }
4506
4507 return TheCallResult;
4508}
4509
/// BuiltinNontemporalOverloaded - Perform semantic analysis for the
/// overloaded __builtin_nontemporal_load / __builtin_nontemporal_store
/// builtins: the access type is deduced from the pointer argument (the last
/// argument), which must point to an integer, pointer, floating-point, or
/// vector type. For a load the call's type becomes the pointee type; for a
/// store the value argument is converted to the pointee type and the call
/// returns void.
4510ExprResult Sema::BuiltinNontemporalOverloaded(ExprResult TheCallResult) {
4511 CallExpr *TheCall = (CallExpr *)TheCallResult.get();
4512 DeclRefExpr *DRE =
4513 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
4514 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
4515 unsigned BuiltinID = FDecl->getBuiltinID();
4516 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
4517 BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
4518 "Unexpected nontemporal load/store builtin!");
4519 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
4520 unsigned numArgs = isStore ? 2 : 1;
4521
4522 // Ensure that we have the proper number of arguments.
4523 if (checkArgCount(TheCall, numArgs))
4524 return ExprError();
4525
4526 // Inspect the last argument of the nontemporal builtin. This should always
4527 // be a pointer type, from which we imply the type of the memory access.
4528 // Because it is a pointer type, we don't have to worry about any implicit
4529 // casts here.
4530 Expr *PointerArg = TheCall->getArg(numArgs - 1);
4531 ExprResult PointerArgResult =
4533
// NOTE(review): the initializer of 'PointerArgResult' was lost in extraction
// (gutter numbers jump 4531 -> 4533); presumably it is
// DefaultFunctionArrayLvalueConversion(PointerArg); — confirm upstream.
4534 if (PointerArgResult.isInvalid())
4535 return ExprError();
4536 PointerArg = PointerArgResult.get();
4537 TheCall->setArg(numArgs - 1, PointerArg);
4538
4539 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
4540 if (!pointerType) {
4541 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
4542 << PointerArg->getType() << PointerArg->getSourceRange();
4543 return ExprError();
4544 }
4545
4546 QualType ValType = pointerType->getPointeeType();
4547
4548 // Strip any qualifiers off ValType.
4549 ValType = ValType.getUnqualifiedType();
4550 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
4551 !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
4552 !ValType->isVectorType()) {
4553 Diag(DRE->getBeginLoc(),
4554 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
4555 << PointerArg->getType() << PointerArg->getSourceRange();
4556 return ExprError();
4557 }
4558
// Loads are done: the result type is simply the pointee type.
4559 if (!isStore) {
4560 TheCall->setType(ValType);
4561 return TheCallResult;
4562 }
4563
// For a store, copy-initialize the value operand against the pointee type.
4564 ExprResult ValArg = TheCall->getArg(0);
// NOTE(review): the line opening the InitializedEntity declaration was lost
// in extraction (gutter numbers jump 4564 -> 4566); presumably it is
// InitializedEntity Entity = InitializedEntity::InitializeParameter( —
// confirm against the upstream file.
4566 Context, ValType, /*consume*/ false);
4567 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
4568 if (ValArg.isInvalid())
4569 return ExprError();
4570
4571 TheCall->setArg(0, ValArg.get());
4572 TheCall->setType(Context.VoidTy);
4573 return TheCallResult;
4574}
4575
4576/// CheckOSLogFormatStringArg - Checks that the format string argument to the
4577/// os_log() and os_trace() functions is correct, and converts it to const char *.
4578ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
// Accept either a plain string literal or an ObjC @"..." literal (unwrapped
// to its underlying StringLiteral).
4579 Arg = Arg->IgnoreParenCasts();
4580 auto *Literal = dyn_cast<StringLiteral>(Arg);
4581 if (!Literal) {
4582 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
4583 Literal = ObjcLiteral->getString();
4584 }
4585 }
4586
// Only ordinary or UTF-8 string literals are valid os_log/os_trace formats.
4587 if (!Literal || (!Literal->isOrdinary() && !Literal->isUTF8())) {
4588 return ExprError(
4589 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
4590 << Arg->getSourceRange());
4591 }
4592
4593 ExprResult Result(Literal);
4595 InitializedEntity Entity =
// NOTE(review): several lines were lost in extraction here (gutter numbers
// skip 4594 and 4596-4597); presumably they form the 'const char *'
// parameter entity and the PerformCopyInitialization of Result that the
// surrounding code implies — confirm against the upstream file.
4598 return Result;
4599}
4600
4601/// Check that the user is calling the appropriate va_start builtin for the
4602/// target and calling convention.
4603static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
4604 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
4605 bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
4606 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 ||
4607 TT.getArch() == llvm::Triple::aarch64_32);
4608 bool IsWindows = TT.isOSWindows();
4609 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
4610 if (IsX64 || IsAArch64) {
4611 CallingConv CC = CC_C;
4612 if (const FunctionDecl *FD = S.getCurFunctionDecl())
4613 CC = FD->getType()->castAs<FunctionType>()->getCallConv();
4614 if (IsMSVAStart) {
4615 // Don't allow this in System V ABI functions.
4616 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
4617 return S.Diag(Fn->getBeginLoc(),
4618 diag::err_ms_va_start_used_in_sysv_function);
4619 } else {
4620 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
4621 // On x64 Windows, don't allow this in System V ABI functions.
4622 // (Yes, that means there's no corresponding way to support variadic
4623 // System V ABI functions on Windows.)
4624 if ((IsWindows && CC == CC_X86_64SysV) ||
4625 (!IsWindows && CC == CC_Win64))
4626 return S.Diag(Fn->getBeginLoc(),
4627 diag::err_va_start_used_in_wrong_abi_function)
4628 << !IsWindows;
4629 }
4630 return false;
4631 }
4632
4633 if (IsMSVAStart)
4634 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only);
4635 return false;
4636}
4637
// NOTE(review): the opening signature line of this helper (gutter number
// 4638, presumably "static bool checkVAStartIsInVariadicFunction(Sema &S,
// Expr *Fn,") was lost in extraction — confirm against the upstream file.
// The helper verifies that va_start is used inside a variadic function,
// block, or ObjC method, emitting a diagnostic and returning true on error;
// on success it optionally reports the last named parameter via LastParam.
4639 ParmVarDecl **LastParam = nullptr) {
4640 // Determine whether the current function, block, or obj-c method is variadic
4641 // and get its parameter list.
4642 bool IsVariadic = false;
// NOTE(review): the declaration of 'Params' was lost in extraction (gutter
// numbers jump 4642 -> 4644); presumably an ArrayRef<ParmVarDecl *> —
// confirm upstream.
4644 DeclContext *Caller = S.CurContext;
4645 if (auto *Block = dyn_cast<BlockDecl>(Caller)) {
4646 IsVariadic = Block->isVariadic();
4647 Params = Block->parameters();
4648 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) {
4649 IsVariadic = FD->isVariadic();
4650 Params = FD->parameters();
4651 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) {
4652 IsVariadic = MD->isVariadic();
4653 // FIXME: This isn't correct for methods (results in bogus warning).
4654 Params = MD->parameters();
4655 } else if (isa<CapturedDecl>(Caller)) {
4656 // We don't support va_start in a CapturedDecl.
4657 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt);
4658 return true;
4659 } else {
4660 // This must be some other declcontext that parses exprs.
4661 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function);
4662 return true;
4663 }
4664
4665 if (!IsVariadic) {
4666 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function);
4667 return true;
4668 }
4669
// Report the last named parameter (nullptr if there are none) so the caller
// can validate va_start's second argument against it.
4670 if (LastParam)
4671 *LastParam = Params.empty() ? nullptr : Params.back();
4672
4673 return false;
4674}
4675
// Semantic checking for __builtin_va_start / __builtin_ms_va_start: verifies
// the target ABI is appropriate, the argument count and first-argument type,
// that the enclosing caller is variadic, and that the second argument is the
// caller's last named parameter with a type for which va_start is
// well-defined. Returns true on a hard error.
4676bool Sema::BuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
4677 Expr *Fn = TheCall->getCallee();
4678
4679 if (checkVAStartABI(*this, BuiltinID, Fn))
4680 return true;
4681
4682 // In C23 mode, va_start only needs one argument. However, the builtin still
4683 // requires two arguments (which matches the behavior of the GCC builtin),
4684 // <stdarg.h> passes `0` as the second argument in C23 mode.
4685 if (checkArgCount(TheCall, 2))
4686 return true;
4687
4688 // Type-check the first argument normally.
4689 if (checkBuiltinArgument(*this, TheCall, 0))
4690 return true;
4691
4692 // Check that the current function is variadic, and get its last parameter.
4693 ParmVarDecl *LastParam;
4694 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam))
4695 return true;
4696
4697 // Verify that the second argument to the builtin is the last argument of the
4698 // current function or method. In C23 mode, if the second argument is an
4699 // integer constant expression with value 0, then we don't bother with this
4700 // check.
4701 bool SecondArgIsLastNamedArgument = false;
4702 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();
// NOTE(review): the initializer call on the elided line presumably evaluates
// Arg as an integer constant expression — confirm against upstream.
4703 if (std::optional<llvm::APSInt> Val =
4705 Val && LangOpts.C23 && *Val == 0)
4706 return false;
4707
4708 // These are valid if SecondArgIsLastNamedArgument is false after the next
4709 // block.
4710 QualType Type;
4711 SourceLocation ParamLoc;
4712 bool IsCRegister = false;
4713
4714 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
4715 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
4716 SecondArgIsLastNamedArgument = PV == LastParam;
4717
4718 Type = PV->getType();
4719 ParamLoc = PV->getLocation();
4720 IsCRegister =
4721 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus;
4722 }
4723 }
4724
4725 if (!SecondArgIsLastNamedArgument)
4726 Diag(TheCall->getArg(1)->getBeginLoc(),
4727 diag::warn_second_arg_of_va_start_not_last_named_param);
4728 else if (IsCRegister || Type->isReferenceType() ||
4729 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] {
4730 // Promotable integers are UB, but enumerations need a bit of
4731 // extra checking to see what their promotable type actually is.
4732 if (!Context.isPromotableIntegerType(Type))
4733 return false;
4734 if (!Type->isEnumeralType())
4735 return true;
4736 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl();
4737 return !(ED &&
4738 Context.typesAreCompatible(ED->getPromotionType(), Type));
4739 }()) {
// Reason selects the warning's wording: 1 = reference type, 2 = 'register'
// storage class in C, 0 = otherwise (type subject to default promotion, or
// float).
4740 unsigned Reason = 0;
4741 if (Type->isReferenceType()) Reason = 1;
4742 else if (IsCRegister) Reason = 2;
4743 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason;
4744 Diag(ParamLoc, diag::note_parameter_type) << Type;
4745 }
4746
4747 return false;
4748}
4749
// Semantic checking for the Windows ARM/AArch64 __va_start builtin, which
// takes the va_list plus the address and slot size of the last named
// parameter (see the prototype comment below). The builtin is declared
// variadic, so the argument count and argument types are validated by hand
// here. Returns true on a hard error.
4750bool Sema::BuiltinVAStartARMMicrosoft(CallExpr *Call) {
// Returns whether Arg is acceptable as the "named_addr" pointer argument.
4751 auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool {
4752 const LangOptions &LO = getLangOpts();
4753
4754 if (LO.CPlusPlus)
4755 return Arg->getType()
4757 .getTypePtr()
4758 ->getPointeeType()
4760
4761 // In C, allow aliasing through `char *`, this is required for AArch64 at
4762 // least.
4763 return true;
4764 };
4765
4766 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
4767 // const char *named_addr);
4768
4769 Expr *Func = Call->getCallee();
4770
4771 if (Call->getNumArgs() < 3)
4772 return Diag(Call->getEndLoc(),
4773 diag::err_typecheck_call_too_few_args_at_least)
4774 << 0 /*function call*/ << 3 << Call->getNumArgs()
4775 << /*is non object*/ 0;
4776
4777 // Type-check the first argument normally.
4778 if (checkBuiltinArgument(*this, Call, 0))
4779 return true;
4780
4781 // Check that the current function is variadic.
4783 return true;
4784
4785 // __va_start on Windows does not validate the parameter qualifiers
4786
4787 const Expr *Arg1 = Call->getArg(1)->IgnoreParens();
4788 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();
4789
4790 const Expr *Arg2 = Call->getArg(2)->IgnoreParens();
4791 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();
4792
// Second argument must be a pointer compatible with `const char *`.
4793 const QualType &ConstCharPtrTy =
4795 if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1))
4796 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
4797 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */
4798 << 0 /* qualifier difference */
4799 << 3 /* parameter mismatch */
4800 << 2 << Arg1->getType() << ConstCharPtrTy;
4801
// Third argument (the slot size) must be exactly size_t.
4802 const QualType SizeTy = Context.getSizeType();
4803 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
4804 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible)
4805 << Arg2->getType() << SizeTy << 1 /* different class */
4806 << 0 /* qualifier difference */
4807 << 3 /* parameter mismatch */
4808 << 3 << Arg2->getType() << SizeTy;
4809
4810 return false;
4811}
4812
// Semantic checking for the unordered-comparison builtins
// (__builtin_isgreater and friends, __builtin_isunordered): both operands
// must promote to a common real floating-point type. Returns true on error.
4813bool Sema::BuiltinUnorderedCompare(CallExpr *TheCall, unsigned BuiltinID) {
4814 if (checkArgCount(TheCall, 2))
4815 return true;
4816
// Warn: isunordered tests for NaNs, which is meaningless when the FP
// environment is assumed NaN-free.
4817 if (BuiltinID == Builtin::BI__builtin_isunordered &&
4818 TheCall->getFPFeaturesInEffect(getLangOpts()).getNoHonorNaNs())
4819 Diag(TheCall->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
4820 << 1 << 0 << TheCall->getSourceRange();
4821
4822 ExprResult OrigArg0 = TheCall->getArg(0);
4823 ExprResult OrigArg1 = TheCall->getArg(1);
4824
4825 // Do standard promotions between the two arguments, returning their common
4826 // type.
4828 OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
4829 if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
4830 return true;
4831
4832 // Make sure any conversions are pushed back into the call; this is
4833 // type safe since unordered compare builtins are declared as "_Bool
4834 // foo(...)".
4835 TheCall->setArg(0, OrigArg0.get());
4836 TheCall->setArg(1, OrigArg1.get());
4837
// Defer checking of dependent operands to instantiation time.
4838 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
4839 return false;
4840
4841 // If the common type isn't a real floating type, then the arguments were
4842 // invalid for this operation.
4843 if (Res.isNull() || !Res->isRealFloatingType())
4844 return Diag(OrigArg0.get()->getBeginLoc(),
4845 diag::err_typecheck_call_invalid_ordered_compare)
4846 << OrigArg0.get()->getType() << OrigArg1.get()->getType()
4847 << SourceRange(OrigArg0.get()->getBeginLoc(),
4848 OrigArg1.get()->getEndLoc());
4849
4850 return false;
4851}
4852
// Semantic checking shared by the floating-point classification builtins
// (isnan, isinf, isfinite, isinf_sign, fpclassify, __builtin_isfpclass,
// ...). NumArgs is the expected arity; a two-argument call is
// __builtin_isfpclass(value, mask), whose second argument must be an
// integer constant in [0, llvm::fcAllFlags]. Returns true on error.
4853bool Sema::BuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs,
4854 unsigned BuiltinID) {
4855 if (checkArgCount(TheCall, NumArgs))
4856 return true;
4857
// Warn when the builtin can never fire because the FP environment is
// assumed free of infinities / NaNs.
4859 if (FPO.getNoHonorInfs() && (BuiltinID == Builtin::BI__builtin_isfinite ||
4860 BuiltinID == Builtin::BI__builtin_isinf ||
4861 BuiltinID == Builtin::BI__builtin_isinf_sign))
4862 Diag(TheCall->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
4863 << 0 << 0 << TheCall->getSourceRange();
4864
4865 if (FPO.getNoHonorNaNs() && (BuiltinID == Builtin::BI__builtin_isnan ||
4866 BuiltinID == Builtin::BI__builtin_isunordered))
4867 Diag(TheCall->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
4868 << 1 << 0 << TheCall->getSourceRange();
4869
4870 bool IsFPClass = NumArgs == 2;
4871
4872 // Find out position of floating-point argument.
4873 unsigned FPArgNo = IsFPClass ? 0 : NumArgs - 1;
4874
4875 // We can count on all parameters preceding the floating-point just being int.
4876 // Try all of those.
4877 for (unsigned i = 0; i < FPArgNo; ++i) {
4878 Expr *Arg = TheCall->getArg(i);
4879
4880 if (Arg->isTypeDependent())
4881 return false;
4882
4884
4885 if (Res.isInvalid())
4886 return true;
4887 TheCall->setArg(i, Res.get());
4888 }
4889
4890 Expr *OrigArg = TheCall->getArg(FPArgNo);
4891
4892 if (OrigArg->isTypeDependent())
4893 return false;
4894
4895 // Usual Unary Conversions will convert half to float, which we want for
4896 // machines that use fp16 conversion intrinsics. Else, we want to leave the
4897 // type how it is, but do normal L->Rvalue conversions.
4899 OrigArg = UsualUnaryConversions(OrigArg).get();
4900 else
4901 OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get();
4902 TheCall->setArg(FPArgNo, OrigArg);
4903
4904 QualType VectorResultTy;
4905 QualType ElementTy = OrigArg->getType();
4906 // TODO: When all classification functions are implemented with is_fpclass,
4907 // vector argument can be supported in all of them.
4908 if (ElementTy->isVectorType() && IsFPClass) {
4909 VectorResultTy = GetSignedVectorType(ElementTy);
4910 ElementTy = ElementTy->castAs<VectorType>()->getElementType();
4911 }
4912
4913 // This operation requires a non-_Complex floating-point number.
4914 if (!ElementTy->isRealFloatingType())
4915 return Diag(OrigArg->getBeginLoc(),
4916 diag::err_typecheck_call_invalid_unary_fp)
4917 << OrigArg->getType() << OrigArg->getSourceRange();
4918
4919 // __builtin_isfpclass has integer parameter that specify test mask. It is
4920 // passed in (...), so it should be analyzed completely here.
4921 if (IsFPClass)
4922 if (BuiltinConstantArgRange(TheCall, 1, 0, llvm::fcAllFlags))
4923 return true;
4924
4925 // TODO: enable this code to all classification functions.
4926 if (IsFPClass) {
// is_fpclass on a vector argument yields a signed vector result; otherwise
// the result is int.
4927 QualType ResultTy;
4928 if (!VectorResultTy.isNull())
4929 ResultTy = VectorResultTy;
4930 else
4931 ResultTy = Context.IntTy;
4932 TheCall->setType(ResultTy);
4933 }
4934
4935 return false;
4936}
4937
4938bool Sema::BuiltinComplex(CallExpr *TheCall) {
4939 if (checkArgCount(TheCall, 2))
4940 return true;
4941
4942 bool Dependent = false;
4943 for (unsigned I = 0; I != 2; ++I) {
4944 Expr *Arg = TheCall->getArg(I);
4945 QualType T = Arg->getType();
4946 if (T->isDependentType()) {
4947 Dependent = true;
4948 continue;
4949 }
4950
4951 // Despite supporting _Complex int, GCC requires a real floating point type
4952 // for the operands of __builtin_complex.
4953 if (!T->isRealFloatingType()) {
4954 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp)
4955 << Arg->getType() << Arg->getSourceRange();
4956 }
4957
4958 ExprResult Converted = DefaultLvalueConversion(Arg);
4959 if (Converted.isInvalid())
4960 return true;
4961 TheCall->setArg(I, Converted.get());
4962 }
4963
4964 if (Dependent) {
4965 TheCall->setType(Context.DependentTy);
4966 return false;
4967 }
4968
4969 Expr *Real = TheCall->getArg(0);
4970 Expr *Imag = TheCall->getArg(1);
4971 if (!Context.hasSameType(Real->getType(), Imag->getType())) {
4972 return Diag(Real->getBeginLoc(),
4973 diag::err_typecheck_call_different_arg_types)
4974 << Real->getType() << Imag->getType()
4975 << Real->getSourceRange() << Imag->getSourceRange();
4976 }
4977
4978 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers;
4979 // don't allow this builtin to form those types either.
4980 // FIXME: Should we allow these types?
4981 if (Real->getType()->isFloat16Type())
4982 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
4983 << "_Float16";
4984 if (Real->getType()->isHalfType())
4985 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
4986 << "half";
4987
4988 TheCall->setType(Context.getComplexType(Real->getType()));
4989 return false;
4990}
4991
4992/// BuiltinShuffleVector - Handle __builtin_shufflevector.
4993// This is declared to take (...), so we have to check everything.
4995 if (TheCall->getNumArgs() < 2)
4996 return ExprError(Diag(TheCall->getEndLoc(),
4997 diag::err_typecheck_call_too_few_args_at_least)
4998 << 0 /*function call*/ << 2 << TheCall->getNumArgs()
4999 << /*is non object*/ 0 << TheCall->getSourceRange());
5000
5001 // Determine which of the following types of shufflevector we're checking:
5002 // 1) unary, vector mask: (lhs, mask)
5003 // 2) binary, scalar mask: (lhs, rhs, index, ..., index)
5004 QualType resType = TheCall->getArg(0)->getType();
5005 unsigned numElements = 0;
5006
5007 if (!TheCall->getArg(0)->isTypeDependent() &&
5008 !TheCall->getArg(1)->isTypeDependent()) {
5009 QualType LHSType = TheCall->getArg(0)->getType();
5010 QualType RHSType = TheCall->getArg(1)->getType();
5011
5012 if (!LHSType->isVectorType() || !RHSType->isVectorType())
5013 return ExprError(
5014 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector)
5015 << TheCall->getDirectCallee() << /*isMorethantwoArgs*/ false
5016 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
5017 TheCall->getArg(1)->getEndLoc()));
5018
5019 numElements = LHSType->castAs<VectorType>()->getNumElements();
5020 unsigned numResElements = TheCall->getNumArgs() - 2;
5021
5022 // Check to see if we have a call with 2 vector arguments, the unary shuffle
5023 // with mask. If so, verify that RHS is an integer vector type with the
5024 // same number of elts as lhs.
5025 if (TheCall->getNumArgs() == 2) {
5026 if (!RHSType->hasIntegerRepresentation() ||
5027 RHSType->castAs<VectorType>()->getNumElements() != numElements)
5028 return ExprError(Diag(TheCall->getBeginLoc(),
5029 diag::err_vec_builtin_incompatible_vector)
5030 << TheCall->getDirectCallee()
5031 << /*isMorethantwoArgs*/ false
5032 << SourceRange(TheCall->getArg(1)->getBeginLoc(),
5033 TheCall->getArg(1)->getEndLoc()));
5034 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
5035 return ExprError(Diag(TheCall->getBeginLoc(),
5036 diag::err_vec_builtin_incompatible_vector)
5037 << TheCall->getDirectCallee()
5038 << /*isMorethantwoArgs*/ false
5039 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
5040 TheCall->getArg(1)->getEndLoc()));
5041 } else if (numElements != numResElements) {
5042 QualType eltType = LHSType->castAs<VectorType>()->getElementType();
5043 resType =
5044 Context.getVectorType(eltType, numResElements, VectorKind::Generic);
5045 }
5046 }
5047
5048 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
5049 if (TheCall->getArg(i)->isTypeDependent() ||
5050 TheCall->getArg(i)->isValueDependent())
5051 continue;
5052
5053 std::optional<llvm::APSInt> Result;
5054 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context)))
5055 return ExprError(Diag(TheCall->getBeginLoc(),
5056 diag::err_shufflevector_nonconstant_argument)
5057 << TheCall->getArg(i)->getSourceRange());
5058
5059 // Allow -1 which will be translated to undef in the IR.
5060 if (Result->isSigned() && Result->isAllOnes())
5061 continue;
5062
5063 if (Result->getActiveBits() > 64 ||
5064 Result->getZExtValue() >= numElements * 2)
5065 return ExprError(Diag(TheCall->getBeginLoc(),
5066 diag::err_shufflevector_argument_too_large)
5067 << TheCall->getArg(i)->getSourceRange());
5068 }
5069
5071
5072 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
5073 exprs.push_back(TheCall->getArg(i));
5074 TheCall->setArg(i, nullptr);
5075 }