clang 19.0.0git
SemaChecking.cpp
Go to the documentation of this file.
1//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements extra semantic analysis beyond what is enforced
10// by the C type system.
11//
12//===----------------------------------------------------------------------===//
13
14#include "clang/AST/APValue.h"
16#include "clang/AST/Attr.h"
18#include "clang/AST/CharUnits.h"
19#include "clang/AST/Decl.h"
20#include "clang/AST/DeclBase.h"
21#include "clang/AST/DeclCXX.h"
22#include "clang/AST/DeclObjC.h"
25#include "clang/AST/Expr.h"
26#include "clang/AST/ExprCXX.h"
27#include "clang/AST/ExprObjC.h"
31#include "clang/AST/NSAPI.h"
35#include "clang/AST/Stmt.h"
37#include "clang/AST/Type.h"
38#include "clang/AST/TypeLoc.h"
44#include "clang/Basic/LLVM.h"
57#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
59#include "clang/Sema/Lookup.h"
61#include "clang/Sema/Scope.h"
63#include "clang/Sema/Sema.h"
65#include "clang/Sema/SemaARM.h"
66#include "clang/Sema/SemaBPF.h"
70#include "clang/Sema/SemaMIPS.h"
72#include "clang/Sema/SemaObjC.h"
73#include "clang/Sema/SemaPPC.h"
76#include "clang/Sema/SemaWasm.h"
77#include "clang/Sema/SemaX86.h"
78#include "llvm/ADT/APFloat.h"
79#include "llvm/ADT/APInt.h"
80#include "llvm/ADT/APSInt.h"
81#include "llvm/ADT/ArrayRef.h"
82#include "llvm/ADT/DenseMap.h"
83#include "llvm/ADT/FoldingSet.h"
84#include "llvm/ADT/STLExtras.h"
85#include "llvm/ADT/SmallBitVector.h"
86#include "llvm/ADT/SmallPtrSet.h"
87#include "llvm/ADT/SmallString.h"
88#include "llvm/ADT/SmallVector.h"
89#include "llvm/ADT/StringExtras.h"
90#include "llvm/ADT/StringRef.h"
91#include "llvm/ADT/StringSet.h"
92#include "llvm/ADT/StringSwitch.h"
93#include "llvm/Support/AtomicOrdering.h"
94#include "llvm/Support/Casting.h"
95#include "llvm/Support/Compiler.h"
96#include "llvm/Support/ConvertUTF.h"
97#include "llvm/Support/ErrorHandling.h"
98#include "llvm/Support/Format.h"
99#include "llvm/Support/Locale.h"
100#include "llvm/Support/MathExtras.h"
101#include "llvm/Support/SaveAndRestore.h"
102#include "llvm/Support/raw_ostream.h"
103#include "llvm/TargetParser/RISCVTargetParser.h"
104#include "llvm/TargetParser/Triple.h"
105#include <algorithm>
106#include <bitset>
107#include <cassert>
108#include <cctype>
109#include <cstddef>
110#include <cstdint>
111#include <functional>
112#include <limits>
113#include <optional>
114#include <string>
115#include <tuple>
116#include <utility>
117
118using namespace clang;
119using namespace sema;
120
// Maps a byte offset within a string literal back to a SourceLocation by
// delegating to StringLiteral::getLocationOfByte.
// NOTE(review): the signature line (original line 121) and the final call
// argument line (original line 124) are elided in this rendering; consult the
// full upstream file before editing.
122 unsigned ByteNo) const {
123 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
125}
126
// Packs two Sema::FormatArgumentPassingKind values into a single unsigned
// short: the first kind in the high byte, the second in the low byte.
// NOTE(review): the declaration of parameter B (original line 128) is elided
// in this rendering.
127static constexpr unsigned short combineFAPK(Sema::FormatArgumentPassingKind A,
129 return (A << 8) | B;
130}
131
132/// Checks that a call expression's argument count is at least the desired
133/// number. This is useful when doing custom type-checking on a variadic
134/// function. Returns true on error.
135bool Sema::checkArgCountAtLeast(CallExpr *Call, unsigned MinArgCount) {
136 unsigned ArgCount = Call->getNumArgs();
137 if (ArgCount >= MinArgCount)
138 return false;
139
140 return Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
141 << 0 /*function call*/ << MinArgCount << ArgCount
142 << /*is non object*/ 0 << Call->getSourceRange();
143}
144
145/// Checks that a call expression's argument count is at most the desired
146/// number. This is useful when doing custom type-checking on a variadic
147/// function. Returns true on error.
148bool Sema::checkArgCountAtMost(CallExpr *Call, unsigned MaxArgCount) {
149 unsigned ArgCount = Call->getNumArgs();
150 if (ArgCount <= MaxArgCount)
151 return false;
152 return Diag(Call->getEndLoc(), diag::err_typecheck_call_too_many_args_at_most)
153 << 0 /*function call*/ << MaxArgCount << ArgCount
154 << /*is non object*/ 0 << Call->getSourceRange();
155}
156
157/// Checks that a call expression's argument count is in the desired range. This
158/// is useful when doing custom type-checking on a variadic function. Returns
159/// true on error.
160bool Sema::checkArgCountRange(CallExpr *Call, unsigned MinArgCount,
161 unsigned MaxArgCount) {
162 return checkArgCountAtLeast(Call, MinArgCount) ||
163 checkArgCountAtMost(Call, MaxArgCount);
164}
165
166/// Checks that a call expression's argument count is the desired number.
167/// This is useful when doing custom type-checking. Returns true on error.
168bool Sema::checkArgCount(CallExpr *Call, unsigned DesiredArgCount) {
169 unsigned ArgCount = Call->getNumArgs();
170 if (ArgCount == DesiredArgCount)
171 return false;
172
173 if (checkArgCountAtLeast(Call, DesiredArgCount))
174 return true;
175 assert(ArgCount > DesiredArgCount && "should have diagnosed this");
176
177 // Highlight all the excess arguments.
178 SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(),
179 Call->getArg(ArgCount - 1)->getEndLoc());
180
181 return Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
182 << 0 /*function call*/ << DesiredArgCount << ArgCount
183 << /*is non object*/ 0 << Call->getArg(1)->getSourceRange();
184}
185
// Coerces *Value to a destination type via the standard initialization
// machinery; returns true on error, false on success (updating Value).
// NOTE(review): the function signature and the initialization-sequence lines
// (original lines 186 and 191-193) are elided in this rendering.
// Type-dependent expressions are left alone; they are checked again at
// template instantiation time.
187 if (Value->isTypeDependent())
188 return false;
189
190 InitializedEntity Entity =
194 if (Result.isInvalid())
195 return true;
196 Value = Result.get();
197 return false;
198}
199
200/// Check that the first argument to __builtin_annotation is an integer
201/// and the second argument is a non-wide string literal.
202static bool BuiltinAnnotation(Sema &S, CallExpr *TheCall) {
203 if (S.checkArgCount(TheCall, 2))
204 return true;
205
206 // First argument should be an integer.
207 Expr *ValArg = TheCall->getArg(0);
208 QualType Ty = ValArg->getType();
209 if (!Ty->isIntegerType()) {
210 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
211 << ValArg->getSourceRange();
212 return true;
213 }
214
215 // Second argument should be a constant string.
216 Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
217 StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
218 if (!Literal || !Literal->isOrdinary()) {
219 S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
220 << StrArg->getSourceRange();
221 return true;
222 }
223
224 TheCall->setType(Ty);
225 return false;
226}
227
228static bool BuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
229 // We need at least one argument.
230 if (TheCall->getNumArgs() < 1) {
231 S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
232 << 0 << 1 << TheCall->getNumArgs() << /*is non object*/ 0
233 << TheCall->getCallee()->getSourceRange();
234 return true;
235 }
236
237 // All arguments should be wide string literals.
238 for (Expr *Arg : TheCall->arguments()) {
239 auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
240 if (!Literal || !Literal->isWide()) {
241 S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
242 << Arg->getSourceRange();
243 return true;
244 }
245 }
246
247 return false;
248}
249
250/// Check that the argument to __builtin_addressof is a glvalue, and set the
251/// result type to the corresponding pointer type.
252static bool BuiltinAddressof(Sema &S, CallExpr *TheCall) {
253 if (S.checkArgCount(TheCall, 1))
254 return true;
255
256 ExprResult Arg(TheCall->getArg(0));
257 QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
258 if (ResultType.isNull())
259 return true;
260
261 TheCall->setArg(0, Arg.get());
262 TheCall->setType(ResultType);
263 return false;
264}
265
266/// Check that the argument to __builtin_function_start is a function.
267static bool BuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
268 if (S.checkArgCount(TheCall, 1))
269 return true;
270
// NOTE(review): original line 271, which initializes Arg (presumably a
// conversion of TheCall->getArg(0)), is elided in this rendering.
272 if (Arg.isInvalid())
273 return true;
274
275 TheCall->setArg(0, Arg.get())
// NOTE(review): the continuation of the dyn_cast_or_null operand (original
// line 277, the callee-decl lookup) is elided in this rendering.
276 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
278
279 if (!FD) {
280 S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
281 << TheCall->getSourceRange();
282 return true;
283 }
284
// Also require that taking the function's address is permitted, diagnosing
// (Complain=true) if it is not.
285 return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
286 TheCall->getBeginLoc());
287}
288
289/// Check the number of arguments and set the result type to
290/// the argument type.
291static bool BuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
292 if (S.checkArgCount(TheCall, 1))
293 return true;
294
295 TheCall->setType(TheCall->getArg(0)->getType());
296 return false;
297}
298
299/// Check that the value argument for __builtin_is_aligned(value, alignment) and
300/// __builtin_aligned_{up,down}(value, alignment) is an integer or a pointer
301/// type (but not a function pointer) and that the alignment is a power-of-two.
302static bool BuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
303 if (S.checkArgCount(TheCall, 2))
304 return true;
305
306 clang::Expr *Source = TheCall->getArg(0);
307 bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;
308
// Valid integer operands exclude enums and bool.
309 auto IsValidIntegerType = [](QualType Ty) {
310 return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
311 };
312 QualType SrcTy = Source->getType();
313 // We should also be able to use it with arrays (but not functions!).
314 if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
315 SrcTy = S.Context.getDecayedType(SrcTy);
316 }
317 if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
318 SrcTy->isFunctionPointerType()) {
319 // FIXME: this is not quite the right error message since we don't allow
320 // floating point types, or member pointers.
321 S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
322 << SrcTy;
323 return true;
324 }
325
326 clang::Expr *AlignOp = TheCall->getArg(1);
327 if (!IsValidIntegerType(AlignOp->getType())) {
328 S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
329 << AlignOp->getType();
330 return true;
331 }
332 Expr::EvalResult AlignResult;
// Maximum representable alignment is one bit below the source type's width.
333 unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
334 // We can't check validity of alignment if it is value dependent.
// NOTE(review): the EvaluateAsInt trailing-arguments line (original line
// 337) is elided in this rendering.
335 if (!AlignOp->isValueDependent() &&
336 AlignOp->EvaluateAsInt(AlignResult, S.Context,
338 llvm::APSInt AlignValue = AlignResult.Val.getInt();
339 llvm::APSInt MaxValue(
340 llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
// Alignment must be >= 1, <= 2^MaxAlignmentBits, and a power of two.
341 if (AlignValue < 1) {
342 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
343 return true;
344 }
345 if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
346 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
347 << toString(MaxValue, 10);
348 return true;
349 }
350 if (!AlignValue.isPowerOf2()) {
351 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
352 return true;
353 }
// Alignment of 1 is a no-op (or trivially true for is_aligned): warn.
354 if (AlignValue == 1) {
355 S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
356 << IsBooleanAlignBuiltin;
357 }
358 }
359
// NOTE(review): the argument-conversion setup lines (original lines 360-361
// and 367) are elided in this rendering.
362 SourceLocation(), Source);
363 if (SrcArg.isInvalid())
364 return true;
365 TheCall->setArg(0, SrcArg.get());
366 ExprResult AlignArg =
368 S.Context, AlignOp->getType(), false),
369 SourceLocation(), AlignOp);
370 if (AlignArg.isInvalid())
371 return true;
372 TheCall->setArg(1, AlignArg.get());
373 // For align_up/align_down, the return type is the same as the (potentially
374 // decayed) argument type including qualifiers. For is_aligned(), the result
375 // is always bool.
376 TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
377 return false;
378}
379
// Semantic checking for __builtin_{add,sub,mul}_overflow (and the C23
// ckd_add/ckd_sub/ckd_mul macros that expand to them): first two arguments
// must be integers, the third a pointer to a non-const integer. Returns true
// on error.
380static bool BuiltinOverflow(Sema &S, CallExpr *TheCall, unsigned BuiltinID) {
381 if (S.checkArgCount(TheCall, 3))
382 return true;
383
// Mapping from overflow builtins to the stdckdint.h macro names that may
// have produced them.
384 std::pair<unsigned, const char *> Builtins[] = {
385 { Builtin::BI__builtin_add_overflow, "ckd_add" },
386 { Builtin::BI__builtin_sub_overflow, "ckd_sub" },
387 { Builtin::BI__builtin_mul_overflow, "ckd_mul" },
388 };
389
// Detect whether the call came from a ckd_* macro expansion; ckd_* imposes
// stricter operand-type rules below.
// NOTE(review): the macro-name-retrieval line (original line 393) is elided
// in this rendering.
390 bool CkdOperation = llvm::any_of(Builtins, [&](const std::pair<unsigned,
391 const char *> &P) {
392 return BuiltinID == P.first && TheCall->getExprLoc().isMacroID() &&
394 S.getSourceManager(), S.getLangOpts()) == P.second;
395 });
396
397 auto ValidCkdIntType = [](QualType QT) {
398 // A valid checked integer type is an integer type other than a plain char,
399 // bool, a bit-precise type, or an enumeration type.
400 if (const auto *BT = QT.getCanonicalType()->getAs<BuiltinType>())
401 return (BT->getKind() >= BuiltinType::Short &&
402 BT->getKind() <= BuiltinType::Int128) || (
403 BT->getKind() >= BuiltinType::UShort &&
404 BT->getKind() <= BuiltinType::UInt128) ||
405 BT->getKind() == BuiltinType::UChar ||
406 BT->getKind() == BuiltinType::SChar;
407 return false;
408 };
409
410 // First two arguments should be integers.
// NOTE(review): the Arg initialization line (original line 412) is elided in
// this rendering.
411 for (unsigned I = 0; I < 2; ++I) {
413 if (Arg.isInvalid()) return true;
414 TheCall->setArg(I, Arg.get());
415
416 QualType Ty = Arg.get()->getType();
417 bool IsValid = CkdOperation ? ValidCkdIntType(Ty) : Ty->isIntegerType();
418 if (!IsValid) {
419 S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
420 << CkdOperation << Ty << Arg.get()->getSourceRange();
421 return true;
422 }
423 }
424
425 // Third argument should be a pointer to a non-const integer.
426 // IRGen correctly handles volatile, restrict, and address spaces, and
427 // the other qualifiers aren't possible.
// NOTE(review): the Arg initialization line (original line 429) is elided in
// this rendering.
428 {
430 if (Arg.isInvalid()) return true;
431 TheCall->setArg(2, Arg.get());
432
433 QualType Ty = Arg.get()->getType();
434 const auto *PtrTy = Ty->getAs<PointerType>();
435 if (!PtrTy ||
436 !PtrTy->getPointeeType()->isIntegerType() ||
437 (!ValidCkdIntType(PtrTy->getPointeeType()) && CkdOperation) ||
438 PtrTy->getPointeeType().isConstQualified()) {
439 S.Diag(Arg.get()->getBeginLoc(),
440 diag::err_overflow_builtin_must_be_ptr_int)
441 << CkdOperation << Ty << Arg.get()->getSourceRange();
442 return true;
443 }
444 }
445
446 // Disallow signed bit-precise integer args larger than 128 bits to mul
447 // function until we improve backend support.
448 if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
449 for (unsigned I = 0; I < 3; ++I) {
450 const auto Arg = TheCall->getArg(I);
451 // Third argument will be a pointer.
452 auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
453 if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
454 S.getASTContext().getIntWidth(Ty) > 128)
455 return S.Diag(Arg->getBeginLoc(),
456 diag::err_overflow_builtin_bit_int_max_size)
457 << 128;
458 }
459 }
460
461 return false;
462}
463
464namespace {
// Helper that expands __builtin_dump_struct into a sequence of calls to the
// user-supplied print callback (argument 1 of the builtin), one per field,
// collected into `Actions` and finally packaged by buildWrapper().
// NOTE(review): several declaration lines (e.g. the Actions vector, the
// string-literal construction, the Args vector in callPrintFunction) are
// elided in this rendering of the file.
465struct BuiltinDumpStructGenerator {
466 Sema &S;
467 CallExpr *TheCall;
468 SourceLocation Loc = TheCall->getBeginLoc();
470 DiagnosticErrorTrap ErrorTracker;
471 PrintingPolicy Policy;
472
473 BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall)
474 : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()),
475 Policy(S.Context.getPrintingPolicy()) {
// Print anonymous tags without source locations for cleaner output.
476 Policy.AnonymousTagLocations = false;
477 }
478
// Wraps Inner in an OpaqueValueExpr so it can be referenced several times
// without re-evaluation; the OVE is recorded in Actions.
479 Expr *makeOpaqueValueExpr(Expr *Inner) {
480 auto *OVE = new (S.Context)
481 OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(),
482 Inner->getObjectKind(), Inner);
483 Actions.push_back(OVE);
484 return OVE;
485 }
486
// Builds a string-literal expression for Str; the literal construction
// (original line 488) is elided in this rendering.
487 Expr *getStringLiteral(llvm::StringRef Str) {
489 // Wrap the literal in parentheses to attach a source location.
490 return new (S.Context) ParenExpr(Loc, Loc, Lit);
491 }
492
// Emits one call to the print callback, forwarding the builtin's trailing
// arguments, then Format, then Exprs. Returns true if the call could not be
// built or any error was diagnosed while building it.
493 bool callPrintFunction(llvm::StringRef Format,
494 llvm::ArrayRef<Expr *> Exprs = {}) {
496 assert(TheCall->getNumArgs() >= 2);
497 Args.reserve((TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size());
498 Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end());
499 Args.push_back(getStringLiteral(Format));
500 Args.insert(Args.end(), Exprs.begin(), Exprs.end());
501
502 // Register a note to explain why we're performing the call.
506 Ctx.CallArgs = Args.data();
507 Ctx.NumCallArgs = Args.size();
509
510 ExprResult RealCall =
511 S.BuildCallExpr(/*Scope=*/nullptr, TheCall->getArg(1),
512 TheCall->getBeginLoc(), Args, TheCall->getRParenLoc());
513
515 if (!RealCall.isInvalid())
516 Actions.push_back(RealCall.get());
517 // Bail out if we've hit any errors, even if we managed to build the
518 // call. We don't want to produce more than one error.
519 return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred();
520 }
521
// Returns a literal of Depth * Policy.Indentation spaces, or null at depth 0.
522 Expr *getIndentString(unsigned Depth) {
523 if (!Depth)
524 return nullptr;
525
527 Indent.resize(Depth * Policy.Indentation, ' ');
528 return getStringLiteral(Indent);
529 }
530
// Pretty-prints a type using the configured printing policy (the signature
// line, original 531, is elided in this rendering).
532 return getStringLiteral(T.getAsString(Policy));
533 }
534
// Appends a printf conversion specifier suitable for a value of type T to
// Str; returns false when no safe specifier is known for T.
535 bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) {
536 llvm::raw_svector_ostream OS(Str);
537
538 // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather
539 // than trying to print a single character.
540 if (auto *BT = T->getAs<BuiltinType>()) {
541 switch (BT->getKind()) {
542 case BuiltinType::Bool:
543 OS << "%d";
544 return true;
545 case BuiltinType::Char_U:
546 case BuiltinType::UChar:
547 OS << "%hhu";
548 return true;
549 case BuiltinType::Char_S:
550 case BuiltinType::SChar:
551 OS << "%hhd";
552 return true;
553 default:
554 break;
555 }
556 }
557
// Otherwise let the printf-specifier machinery pick a conversion for T
// (the Specifier declaration, original line 558, is elided here).
559 if (Specifier.fixType(T, S.getLangOpts(), S.Context, /*IsObjCLiteral=*/false)) {
560 // We were able to guess how to format this.
561 if (Specifier.getConversionSpecifier().getKind() ==
562 analyze_printf::PrintfConversionSpecifier::sArg) {
563 // Wrap double-quotes around a '%s' specifier and limit its maximum
564 // length. Ideally we'd also somehow escape special characters in the
565 // contents but printf doesn't support that.
566 // FIXME: '%s' formatting is not safe in general.
567 OS << '"';
568 Specifier.setPrecision(analyze_printf::OptionalAmount(32u));
569 Specifier.toString(OS);
570 OS << '"';
571 // FIXME: It would be nice to include a '...' if the string doesn't fit
572 // in the length limit.
573 } else {
574 Specifier.toString(OS);
575 }
576 return true;
577 }
578
579 if (T->isPointerType()) {
580 // Format all pointers with '%p'.
581 OS << "%p";
582 return true;
583 }
584
585 return false;
586 }
587
// Prints "<indent><record type name>" then delegates to dumpRecordValue for
// the brace-enclosed field dump.
588 bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) {
589 Expr *IndentLit = getIndentString(Depth);
590 Expr *TypeLit = getTypeString(S.Context.getRecordType(RD));
591 if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit})
592 : callPrintFunction("%s", {TypeLit}))
593 return true;
594
595 return dumpRecordValue(RD, E, IndentLit, Depth);
596 }
597
598 // Dump a record value. E should be a pointer or lvalue referring to an RD.
599 bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent,
600 unsigned Depth) {
601 // FIXME: Decide what to do if RD is a union. At least we should probably
602 // turn off printing `const char*` members with `%s`, because that is very
603 // likely to crash if that's not the active member. Whatever we decide, we
604 // should document it.
605
606 // Build an OpaqueValueExpr so we can refer to E more than once without
607 // triggering re-evaluation.
608 Expr *RecordArg = makeOpaqueValueExpr(E);
609 bool RecordArgIsPtr = RecordArg->getType()->isPointerType();
610
611 if (callPrintFunction(" {\n"))
612 return true;
613
614 // Dump each base class, regardless of whether they're aggregates.
// NOTE(review): the base-pointer cast construction (original lines 620-621)
// is elided in this rendering.
615 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
616 for (const auto &Base : CXXRD->bases()) {
617 QualType BaseType =
618 RecordArgIsPtr ? S.Context.getPointerType(Base.getType())
619 : S.Context.getLValueReferenceType(Base.getType());
622 RecordArg);
623 if (BasePtr.isInvalid() ||
624 dumpUnnamedRecord(Base.getType()->getAsRecordDecl(), BasePtr.get(),
625 Depth + 1))
626 return true;
627 }
628 }
629
630 Expr *FieldIndentArg = getIndentString(Depth + 1);
631
632 // Dump each field.
633 for (auto *D : RD->decls()) {
// Indirect fields (from anonymous structs/unions) dump their anon field.
634 auto *IFD = dyn_cast<IndirectFieldDecl>(D);
635 auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D);
636 if (!FD || FD->isUnnamedBitField() || FD->isAnonymousStructOrUnion())
637 continue;
638
639 llvm::SmallString<20> Format = llvm::StringRef("%s%s %s ");
640 llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg,
641 getTypeString(FD->getType()),
642 getStringLiteral(FD->getName())};
643
// Bit-fields additionally print their width (as a size_t literal).
644 if (FD->isBitField()) {
645 Format += ": %zu ";
647 llvm::APInt BitWidth(S.Context.getIntWidth(SizeT),
648 FD->getBitWidthValue(S.Context))
649 Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc));
650 }
651
652 Format += "=";
653
// Build the member access (original lines 654-655, 658 and 660 of the
// BuildAnonymousStructUnionMemberReference / BuildFieldReferenceExpr call
// are elided in this rendering).
656 CXXScopeSpec(), Loc, IFD,
657 DeclAccessPair::make(IFD, AS_public), RecordArg, Loc)
659 RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD,
661 DeclarationNameInfo(FD->getDeclName(), Loc));
662 if (Field.isInvalid())
663 return true;
664
665 auto *InnerRD = FD->getType()->getAsRecordDecl();
666 auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD);
667 if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) {
668 // Recursively print the values of members of aggregate record type.
669 if (callPrintFunction(Format, Args) ||
670 dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1))
671 return true;
672 } else {
673 Format += " ";
674 if (appendFormatSpecifier(FD->getType(), Format)) {
675 // We know how to print this field.
676 Args.push_back(Field.get());
677 } else {
678 // We don't know how to print this field. Print out its address
679 // with a format specifier that a smart tool will be able to
680 // recognize and treat specially.
681 Format += "*%p";
682 ExprResult FieldAddr =
683 S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get());
684 if (FieldAddr.isInvalid())
685 return true;
686 Args.push_back(FieldAddr.get());
687 }
688 Format += "\n";
689 if (callPrintFunction(Format, Args))
690 return true;
691 }
692 }
693
// Close the brace, indented to match the record's opening line.
694 return RecordIndent ? callPrintFunction("%s}\n", RecordIndent)
695 : callPrintFunction("}\n");
696 }
697
// Packages TheCall plus all generated print calls into a PseudoObjectExpr
// and propagates the wrapper's type/value kind back onto TheCall.
698 Expr *buildWrapper() {
699 auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions,
701 TheCall->setType(Wrapper->getType());
702 TheCall->setValueKind(Wrapper->getValueKind());
703 return Wrapper;
704 }
705};
706} // namespace
707
// Checking/expansion for __builtin_dump_struct: argument 0 must be a pointer
// to a complete record, argument 1 a callable used as the print function.
// NOTE(review): the function signature line (original line 708) is elided in
// this rendering.
709 if (S.checkArgCountAtLeast(TheCall, 2))
710 return ExprError();
711
712 ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0));
713 if (PtrArgResult.isInvalid())
714 return ExprError();
715 TheCall->setArg(0, PtrArgResult.get());
716
717 // First argument should be a pointer to a struct.
718 QualType PtrArgType = PtrArgResult.get()->getType();
719 if (!PtrArgType->isPointerType() ||
720 !PtrArgType->getPointeeType()->isRecordType()) {
721 S.Diag(PtrArgResult.get()->getBeginLoc(),
722 diag::err_expected_struct_pointer_argument)
723 << 1 << TheCall->getDirectCallee() << PtrArgType;
724 return ExprError();
725 }
726 QualType Pointee = PtrArgType->getPointeeType();
727 const RecordDecl *RD = Pointee->getAsRecordDecl();
728 // Try to instantiate the class template as appropriate; otherwise, access to
729 // its data() may lead to a crash.
730 if (S.RequireCompleteType(PtrArgResult.get()->getBeginLoc(), Pointee,
731 diag::err_incomplete_type))
732 return ExprError();
733 // Second argument is a callable, but we can't fully validate it until we try
734 // calling it.
735 QualType FnArgType = TheCall->getArg(1)->getType();
736 if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() &&
737 !FnArgType->isBlockPointerType() &&
738 !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) {
// Placeholder/overload builtin types may still resolve to a callable;
// anything else is rejected immediately.
739 auto *BT = FnArgType->getAs<BuiltinType>();
740 switch (BT ? BT->getKind() : BuiltinType::Void) {
741 case BuiltinType::Dependent:
742 case BuiltinType::Overload:
743 case BuiltinType::BoundMember:
744 case BuiltinType::PseudoObject:
745 case BuiltinType::UnknownAny:
746 case BuiltinType::BuiltinFn:
747 // This might be a callable.
748 break;
749
750 default:
751 S.Diag(TheCall->getArg(1)->getBeginLoc(),
752 diag::err_expected_callable_argument)
753 << 2 << TheCall->getDirectCallee() << FnArgType;
754 return ExprError();
755 }
756 }
757
758 BuiltinDumpStructGenerator Generator(S, TheCall);
759
760 // Wrap parentheses around the given pointer. This is not necessary for
761 // correct code generation, but it means that when we pretty-print the call
762 // arguments in our diagnostics we will produce '(&s)->n' instead of the
763 // incorrect '&s->n'.
764 Expr *PtrArg = PtrArgResult.get();
765 PtrArg = new (S.Context)
766 ParenExpr(PtrArg->getBeginLoc(),
767 S.getLocForEndOfToken(PtrArg->getEndLoc()), PtrArg);
768 if (Generator.dumpUnnamedRecord(RD, PtrArg, 0))
769 return ExprError();
770
771 return Generator.buildWrapper();
772}
773
774static bool BuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
775 if (S.checkArgCount(BuiltinCall, 2))
776 return true;
777
778 SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
779 Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
780 Expr *Call = BuiltinCall->getArg(0);
781 Expr *Chain = BuiltinCall->getArg(1);
782
783 if (Call->getStmtClass() != Stmt::CallExprClass) {
784 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
785 << Call->getSourceRange();
786 return true;
787 }
788
789 auto CE = cast<CallExpr>(Call);
790 if (CE->getCallee()->getType()->isBlockPointerType()) {
791 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
792 << Call->getSourceRange();
793 return true;
794 }
795
796 const Decl *TargetDecl = CE->getCalleeDecl();
797 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
798 if (FD->getBuiltinID()) {
799 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
800 << Call->getSourceRange();
801 return true;
802 }
803
804 if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
805 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
806 << Call->getSourceRange();
807 return true;
808 }
809
810 ExprResult ChainResult = S.UsualUnaryConversions(Chain);
811 if (ChainResult.isInvalid())
812 return true;
813 if (!ChainResult.get()->getType()->isPointerType()) {
814 S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
815 << Chain->getSourceRange();
816 return true;
817 }
818
819 QualType ReturnTy = CE->getCallReturnType(S.Context);
820 QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
821 QualType BuiltinTy = S.Context.getFunctionType(
822 ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
823 QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);
824
825 Builtin =
826 S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();
827
828 BuiltinCall->setType(CE->getType());
829 BuiltinCall->setValueKind(CE->getValueKind());
830 BuiltinCall->setObjectKind(CE->getObjectKind());
831 BuiltinCall->setCallee(Builtin);
832 BuiltinCall->setArg(1, ChainResult.get());
833
834 return false;
835}
836
837namespace {
838
// Scanf format-string handler that flags destination buffers smaller than the
// constant field width requested by a scanf conversion specifier.
// NOTE(review): the base-class clause (original line 840) and several case
// labels (original lines 869-870 and 873) are elided in this rendering.
839class ScanfDiagnosticFormatHandler
841 // Accepts the argument index (relative to the first destination index) of the
842 // argument whose size we want.
843 using ComputeSizeFunction =
844 llvm::function_ref<std::optional<llvm::APSInt>(unsigned)>;
845
846 // Accepts the argument index (relative to the first destination index), the
847 // destination size, and the source size).
848 using DiagnoseFunction =
849 llvm::function_ref<void(unsigned, unsigned, unsigned)>;
850
851 ComputeSizeFunction ComputeSizeArgument;
852 DiagnoseFunction Diagnose;
853
854public:
855 ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
856 DiagnoseFunction Diagnose)
857 : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}
858
859 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
860 const char *StartSpecifier,
861 unsigned specifierLen) override {
// Specifiers that consume no argument (e.g. suppressed ones) are skipped.
862 if (!FS.consumesDataArgument())
863 return true;
864
// Conversions that append a NUL terminator need one extra destination byte
// (the specific case labels are elided in this rendering).
865 unsigned NulByte = 0;
866 switch ((FS.getConversionSpecifier().getKind())) {
867 default:
868 return true;
871 NulByte = 1;
872 break;
874 break;
875 }
876
// Only a constant field width can be checked statically.
877 analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
878 if (FW.getHowSpecified() !=
879 analyze_format_string::OptionalAmount::HowSpecified::Constant)
880 return true;
881
882 unsigned SourceSize = FW.getConstantAmount() + NulByte;
883
// Ask the caller for the destination buffer's size; bail if unknown.
884 std::optional<llvm::APSInt> DestSizeAPS =
885 ComputeSizeArgument(FS.getArgIndex());
886 if (!DestSizeAPS)
887 return true;
888
889 unsigned DestSize = DestSizeAPS->getZExtValue();
890
891 if (DestSize < SourceSize)
892 Diagnose(FS.getArgIndex(), DestSize, SourceSize);
893
894 return true;
895 }
896};
897
898class EstimateSizeFormatHandler
900 size_t Size;
901 /// Whether the format string contains Linux kernel's format specifier
902 /// extension.
903 bool IsKernelCompatible = true;
904
905public:
906 EstimateSizeFormatHandler(StringRef Format)
907 : Size(std::min(Format.find(0), Format.size()) +
908 1 /* null byte always written by sprintf */) {}
909
910 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
911 const char *, unsigned SpecifierLen,
912 const TargetInfo &) override {
913
914 const size_t FieldWidth = computeFieldWidth(FS);
915 const size_t Precision = computePrecision(FS);
916
917 // The actual format.
918 switch (FS.getConversionSpecifier().getKind()) {
919 // Just a char.
922 Size += std::max(FieldWidth, (size_t)1);
923 break;
924 // Just an integer.
934 Size += std::max(FieldWidth, Precision);
935 break;
936
937 // %g style conversion switches between %f or %e style dynamically.
938 // %g removes trailing zeros, and does not print decimal point if there are
939 // no digits that follow it. Thus %g can print a single digit.
940 // FIXME: If it is alternative form:
941 // For g and G conversions, trailing zeros are not removed from the result.
944 Size += 1;
945 break;
946
947 // Floating point number in the form '[+]ddd.ddd'.
950 Size += std::max(FieldWidth, 1 /* integer part */ +
951 (Precision ? 1 + Precision
952 : 0) /* period + decimal */);
953 break;
954
955 // Floating point number in the form '[-]d.ddde[+-]dd'.
958 Size +=
959 std::max(FieldWidth,
960 1 /* integer part */ +
961 (Precision ? 1 + Precision : 0) /* period + decimal */ +
962 1 /* e or E letter */ + 2 /* exponent */);
963 break;
964
965 // Floating point number in the form '[-]0xh.hhhhp±dd'.
968 Size +=
969 std::max(FieldWidth,
970 2 /* 0x */ + 1 /* integer part */ +
971 (Precision ? 1 + Precision : 0) /* period + decimal */ +
972 1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
973 break;
974
975 // Just a string.
978 Size += FieldWidth;
979 break;
980
981 // Just a pointer in the form '0xddd'.
983 // Linux kernel has its own extesion for `%p` specifier.
984 // Kernel Document:
985 // https://docs.kernel.org/core-api/printk-formats.html#pointer-types
986 IsKernelCompatible = false;
987 Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
988 break;
989
990 // A plain percent.
992 Size += 1;
993 break;
994
995 default:
996 break;
997 }
998
999 Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();
1000
1001 if (FS.hasAlternativeForm()) {
1002 switch (FS.getConversionSpecifier().getKind()) {
1003 // For o conversion, it increases the precision, if and only if necessary,
1004 // to force the first digit of the result to be a zero
1005 // (if the value and precision are both 0, a single 0 is printed)
1007 // For b conversion, a nonzero result has 0b prefixed to it.
1009 // For x (or X) conversion, a nonzero result has 0x (or 0X) prefixed to
1010 // it.
1013 // Note: even when the prefix is added, if
1014 // (prefix_width <= FieldWidth - formatted_length) holds,
1015 // the prefix does not increase the format
1016 // size. e.g.(("%#3x", 0xf) is "0xf")
1017
1018 // If the result is zero, o, b, x, X adds nothing.
1019 break;
1020 // For a, A, e, E, f, F, g, and G conversions,
1021 // the result of converting a floating-point number always contains a
1022 // decimal-point
1031 Size += (Precision ? 0 : 1);
1032 break;
1033 // For other conversions, the behavior is undefined.
1034 default:
1035 break;
1036 }
1037 }
1038 assert(SpecifierLen <= Size && "no underflow");
1039 Size -= SpecifierLen;
1040 return true;
1041 }
1042
// Lower bound (in bytes) on the formatted output accumulated so far.
1043 size_t getSizeLowerBound() const { return Size; }
// False once a specifier with Linux-kernel-specific semantics (e.g. %p,
// see the pointer case above) has been seen; the size estimate then only
// holds for userspace printf, not kernel printk.
1044 bool isKernelCompatible() const { return IsKernelCompatible; }
1045
1046private:
// Returns the specifier's field width when it is a compile-time constant,
// otherwise 0.
1047 static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
1048 const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
1049 size_t FieldWidth = 0;
// NOTE(review): the guard on FW.getHowSpecified() (original line 1050) is
// missing from this extraction; the assignment below is conditional on it.
1051 FieldWidth = FW.getConstantAmount();
1052 return FieldWidth;
1053 }
1054
// Returns the precision to assume for the specifier: the explicit constant
// precision if one was written, otherwise the per-conversion default from
// man 3 printf (e.g. 6 for the f/e/g family, 1 for integer conversions).
// NOTE(review): the case labels of both switches (original lines 1061,
// 1065-1067, 1070-1075, 1078-1083, 1086, 1091) are missing from this
// extraction; only the assignments survive.
1055 static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
1056 const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
1057 size_t Precision = 0;
1058
1059 // See man 3 printf for default precision value based on the specifier.
1060 switch (FW.getHowSpecified()) {
1062 switch (FS.getConversionSpecifier().getKind()) {
1063 default:
1064 break;
1068 Precision = 1;
1069 break;
1076 Precision = 1;
1077 break;
1084 Precision = 6;
1085 break;
1087 Precision = 1;
1088 break;
1089 }
1090 break;
1092 Precision = FW.getConstantAmount();
1093 break;
1094 default:
1095 break;
1096 }
1097 return Precision;
1098 }
1099};
1100
1101} // namespace
1102
1103static bool ProcessFormatStringLiteral(const Expr *FormatExpr,
1104 StringRef &FormatStrRef, size_t &StrLen,
1105 ASTContext &Context) {
1106 if (const auto *Format = dyn_cast<StringLiteral>(FormatExpr);
1107 Format && (Format->isOrdinary() || Format->isUTF8())) {
1108 FormatStrRef = Format->getString();
1109 const ConstantArrayType *T =
1110 Context.getAsConstantArrayType(Format->getType());
1111 assert(T && "String literal not of constant array type!");
1112 size_t TypeSize = T->getZExtSize();
1113 // In case there's a null byte somewhere.
1114 StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
1115 return true;
1116 }
1117 return false;
1118}
1119
// Emits _FORTIFY_SOURCE-style "always overflows" / size-mismatch warnings for
// calls to builtin memory and string functions: for each recognized builtin it
// computes a source size (strlen, explicit size argument, or estimated printf
// output) and a destination object size, and diagnoses when source exceeds
// destination.
// NOTE(review): several original lines are absent from this extraction
// (1123, 1166, 1197, 1213, 1291, 1293, 1400), including the other half of the
// early-exit condition and the Expr::EvalResult declarations the lambdas use.
1120void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
1121 CallExpr *TheCall) {
1122 if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
1124 return;
1125
1126 bool UseDABAttr = false;
1127 const FunctionDecl *UseDecl = FD;
1128
// A diagnose_as_builtin attribute redirects checking to another function's
// builtin semantics; remember that argument indices must then be remapped.
1129 const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
1130 if (DABAttr) {
1131 UseDecl = DABAttr->getFunction();
1132 assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
1133 UseDABAttr = true;
1134 }
1135
1136 unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true);
1137
1138 if (!BuiltinID)
1139 return;
1140
// All computed sizes are normalized to the target's size_t width below.
1141 const TargetInfo &TI = getASTContext().getTargetInfo();
1142 unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
1143
1144 auto TranslateIndex = [&](unsigned Index) -> std::optional<unsigned> {
1145 // If we refer to a diagnose_as_builtin attribute, we need to change the
1146 // argument index to refer to the arguments of the called function. Unless
1147 // the index is out of bounds, which presumably means it's a variadic
1148 // function.
1149 if (!UseDABAttr)
1150 return Index;
1151 unsigned DABIndices = DABAttr->argIndices_size();
1152 unsigned NewIndex = Index < DABIndices
1153 ? DABAttr->argIndices_begin()[Index]
1154 : Index - DABIndices + FD->getNumParams();
1155 if (NewIndex >= TheCall->getNumArgs())
1156 return std::nullopt;
1157 return NewIndex;
1158 };
1159
// Constant-evaluates argument Index (an explicit size operand, e.g. the
// trailing object-size argument of a __*_chk builtin) as an unsigned int.
1160 auto ComputeExplicitObjectSizeArgument =
1161 [&](unsigned Index) -> std::optional<llvm::APSInt> {
1162 std::optional<unsigned> IndexOptional = TranslateIndex(Index);
1163 if (!IndexOptional)
1164 return std::nullopt;
1165 unsigned NewIndex = *IndexOptional;
1167 Expr *SizeArg = TheCall->getArg(NewIndex);
1168 if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
1169 return std::nullopt;
1170 llvm::APSInt Integer = Result.Val.getInt();
1171 Integer.setIsUnsigned(true);
1172 return Integer;
1173 };
1174
// Computes __builtin_object_size of pointer argument Index.
1175 auto ComputeSizeArgument =
1176 [&](unsigned Index) -> std::optional<llvm::APSInt> {
1177 // If the parameter has a pass_object_size attribute, then we should use its
1178 // (potentially) more strict checking mode. Otherwise, conservatively assume
1179 // type 0.
1180 int BOSType = 0;
1181 // This check can fail for variadic functions.
1182 if (Index < FD->getNumParams()) {
1183 if (const auto *POS =
1184 FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
1185 BOSType = POS->getType();
1186 }
1187
1188 std::optional<unsigned> IndexOptional = TranslateIndex(Index);
1189 if (!IndexOptional)
1190 return std::nullopt;
1191 unsigned NewIndex = *IndexOptional;
1192
1193 if (NewIndex >= TheCall->getNumArgs())
1194 return std::nullopt;
1195
1196 const Expr *ObjArg = TheCall->getArg(NewIndex);
1198 if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
1199 return std::nullopt;
1200
1201 // Get the object size in the target's size_t width.
1202 return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
1203 };
1204
// Computes the compile-time strlen of string argument Index, plus the NUL.
1205 auto ComputeStrLenArgument =
1206 [&](unsigned Index) -> std::optional<llvm::APSInt> {
1207 std::optional<unsigned> IndexOptional = TranslateIndex(Index);
1208 if (!IndexOptional)
1209 return std::nullopt;
1210 unsigned NewIndex = *IndexOptional;
1211
1212 const Expr *ObjArg = TheCall->getArg(NewIndex);
1214 if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
1215 return std::nullopt;
1216 // Add 1 for null byte.
1217 return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
1218 };
1219
1220 std::optional<llvm::APSInt> SourceSize;
1221 std::optional<llvm::APSInt> DestinationSize;
1222 unsigned DiagID = 0;
1223 bool IsChkVariant = false;
1224
1225 auto GetFunctionName = [&]() {
1226 StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
1227 // Skim off the details of whichever builtin was called to produce a better
1228 // diagnostic, as it's unlikely that the user wrote the __builtin
1229 // explicitly.
1230 if (IsChkVariant) {
1231 FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
1232 FunctionName = FunctionName.drop_back(std::strlen("_chk"));
1233 } else {
1234 FunctionName.consume_front("__builtin_");
1235 }
1236 return FunctionName;
1237 };
1238
// Per-builtin: pick the diagnostic and how source/destination sizes are
// computed. Cases that diagnose inline (scanf family) return directly.
1239 switch (BuiltinID) {
1240 default:
1241 return;
1242 case Builtin::BI__builtin_strcpy:
1243 case Builtin::BIstrcpy: {
1244 DiagID = diag::warn_fortify_strlen_overflow;
1245 SourceSize = ComputeStrLenArgument(1);
1246 DestinationSize = ComputeSizeArgument(0);
1247 break;
1248 }
1249
1250 case Builtin::BI__builtin___strcpy_chk: {
1251 DiagID = diag::warn_fortify_strlen_overflow;
1252 SourceSize = ComputeStrLenArgument(1);
1253 DestinationSize = ComputeExplicitObjectSizeArgument(2);
1254 IsChkVariant = true;
1255 break;
1256 }
1257
1258 case Builtin::BIscanf:
1259 case Builtin::BIfscanf:
1260 case Builtin::BIsscanf: {
1261 unsigned FormatIndex = 1;
1262 unsigned DataIndex = 2;
1263 if (BuiltinID == Builtin::BIscanf) {
1264 FormatIndex = 0;
1265 DataIndex = 1;
1266 }
1267
1268 const auto *FormatExpr =
1269 TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();
1270
1271 StringRef FormatStrRef;
1272 size_t StrLen;
1273 if (!ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context))
1274 return;
1275
1276 auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
1277 unsigned SourceSize) {
1278 DiagID = diag::warn_fortify_scanf_overflow;
1279 unsigned Index = ArgIndex + DataIndex;
1280 StringRef FunctionName = GetFunctionName();
1281 DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall,
1282 PDiag(DiagID) << FunctionName << (Index + 1)
1283 << DestSize << SourceSize);
1284 };
1285
1286 auto ShiftedComputeSizeArgument = [&](unsigned Index) {
1287 return ComputeSizeArgument(Index + DataIndex);
1288 };
1289 ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose);
1290 const char *FormatBytes = FormatStrRef.data();
// NOTE(review): the ParseScanfString call wrapping the next two fragments
// (original lines 1291 and 1293) is missing from this extraction.
1292 FormatBytes + StrLen, getLangOpts(),
1294
1295 // Unlike the other cases, in this one we have already issued the diagnostic
1296 // here, so no need to continue (because unlike the other cases, here the
1297 // diagnostic refers to the argument number).
1298 return;
1299 }
1300
1301 case Builtin::BIsprintf:
1302 case Builtin::BI__builtin___sprintf_chk: {
1303 size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
1304 auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();
1305
1306 StringRef FormatStrRef;
1307 size_t StrLen;
1308 if (ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context)) {
1309 EstimateSizeFormatHandler H(FormatStrRef);
1310 const char *FormatBytes = FormatStrRef.data();
// NOTE(review): the ParsePrintfString call opening this condition (original
// line 1311) is missing from this extraction.
1312 H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
1313 Context.getTargetInfo(), false)) {
1314 DiagID = H.isKernelCompatible()
1315 ? diag::warn_format_overflow
1316 : diag::warn_format_overflow_non_kprintf;
1317 SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
1318 .extOrTrunc(SizeTypeWidth);
1319 if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
1320 DestinationSize = ComputeExplicitObjectSizeArgument(2);
1321 IsChkVariant = true;
1322 } else {
1323 DestinationSize = ComputeSizeArgument(0);
1324 }
1325 break;
1326 }
1327 }
1328 return;
1329 }
1330 case Builtin::BI__builtin___memcpy_chk:
1331 case Builtin::BI__builtin___memmove_chk:
1332 case Builtin::BI__builtin___memset_chk:
1333 case Builtin::BI__builtin___strlcat_chk:
1334 case Builtin::BI__builtin___strlcpy_chk:
1335 case Builtin::BI__builtin___strncat_chk:
1336 case Builtin::BI__builtin___strncpy_chk:
1337 case Builtin::BI__builtin___stpncpy_chk:
1338 case Builtin::BI__builtin___memccpy_chk:
1339 case Builtin::BI__builtin___mempcpy_chk: {
1340 DiagID = diag::warn_builtin_chk_overflow;
1341 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
1342 DestinationSize =
1343 ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
1344 IsChkVariant = true;
1345 break;
1346 }
1347
1348 case Builtin::BI__builtin___snprintf_chk:
1349 case Builtin::BI__builtin___vsnprintf_chk: {
1350 DiagID = diag::warn_builtin_chk_overflow;
1351 SourceSize = ComputeExplicitObjectSizeArgument(1);
1352 DestinationSize = ComputeExplicitObjectSizeArgument(3);
1353 IsChkVariant = true;
1354 break;
1355 }
1356
1357 case Builtin::BIstrncat:
1358 case Builtin::BI__builtin_strncat:
1359 case Builtin::BIstrncpy:
1360 case Builtin::BI__builtin_strncpy:
1361 case Builtin::BIstpncpy:
1362 case Builtin::BI__builtin_stpncpy: {
1363 // Whether these functions overflow depends on the runtime strlen of the
1364 // string, not just the buffer size, so emitting the "always overflow"
1365 // diagnostic isn't quite right. We should still diagnose passing a buffer
1366 // size larger than the destination buffer though; this is a runtime abort
1367 // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
1368 DiagID = diag::warn_fortify_source_size_mismatch;
1369 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
1370 DestinationSize = ComputeSizeArgument(0);
1371 break;
1372 }
1373
1374 case Builtin::BImemcpy:
1375 case Builtin::BI__builtin_memcpy:
1376 case Builtin::BImemmove:
1377 case Builtin::BI__builtin_memmove:
1378 case Builtin::BImemset:
1379 case Builtin::BI__builtin_memset:
1380 case Builtin::BImempcpy:
1381 case Builtin::BI__builtin_mempcpy: {
1382 DiagID = diag::warn_fortify_source_overflow;
1383 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
1384 DestinationSize = ComputeSizeArgument(0);
1385 break;
1386 }
1387 case Builtin::BIsnprintf:
1388 case Builtin::BI__builtin_snprintf:
1389 case Builtin::BIvsnprintf:
1390 case Builtin::BI__builtin_vsnprintf: {
1391 DiagID = diag::warn_fortify_source_size_mismatch;
1392 SourceSize = ComputeExplicitObjectSizeArgument(1);
1393 const auto *FormatExpr = TheCall->getArg(2)->IgnoreParenImpCasts();
1394 StringRef FormatStrRef;
1395 size_t StrLen;
1396 if (SourceSize &&
1397 ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context)) {
1398 EstimateSizeFormatHandler H(FormatStrRef);
1399 const char *FormatBytes = FormatStrRef.data();
// NOTE(review): the ParsePrintfString call opening this condition (original
// line 1400) is missing from this extraction.
1401 H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
1402 Context.getTargetInfo(), /*isFreeBSDKPrintf=*/false)) {
1403 llvm::APSInt FormatSize =
1404 llvm::APSInt::getUnsigned(H.getSizeLowerBound())
1405 .extOrTrunc(SizeTypeWidth);
// Estimated output exceeds the explicit buffer-size argument: warn about
// truncation (snprintf truncates rather than overflows).
1406 if (FormatSize > *SourceSize && *SourceSize != 0) {
1407 unsigned TruncationDiagID =
1408 H.isKernelCompatible() ? diag::warn_format_truncation
1409 : diag::warn_format_truncation_non_kprintf;
1410 SmallString<16> SpecifiedSizeStr;
1411 SmallString<16> FormatSizeStr;
1412 SourceSize->toString(SpecifiedSizeStr, /*Radix=*/10);
1413 FormatSize.toString(FormatSizeStr, /*Radix=*/10);
1414 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
1415 PDiag(TruncationDiagID)
1416 << GetFunctionName() << SpecifiedSizeStr
1417 << FormatSizeStr);
1418 }
1419 }
1420 }
1421 DestinationSize = ComputeSizeArgument(0);
1422 }
1423 }
1424
// Only warn when both sizes are known and the source strictly exceeds the
// destination.
1425 if (!SourceSize || !DestinationSize ||
1426 llvm::APSInt::compareValues(*SourceSize, *DestinationSize) <= 0)
1427 return;
1428
1429 StringRef FunctionName = GetFunctionName();
1430
1431 SmallString<16> DestinationStr;
1432 SmallString<16> SourceStr;
1433 DestinationSize->toString(DestinationStr, /*Radix=*/10);
1434 SourceSize->toString(SourceStr, /*Radix=*/10);
1435 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
1436 PDiag(DiagID)
1437 << FunctionName << DestinationStr << SourceStr);
1438}
1439
1440static bool BuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
1441 Scope::ScopeFlags NeededScopeFlags,
1442 unsigned DiagID) {
1443 // Scopes aren't available during instantiation. Fortunately, builtin
1444 // functions cannot be template args so they cannot be formed through template
1445 // instantiation. Therefore checking once during the parse is sufficient.
1446 if (SemaRef.inTemplateInstantiation())
1447 return false;
1448
1449 Scope *S = SemaRef.getCurScope();
1450 while (S && !S->isSEHExceptScope())
1451 S = S->getParent();
1452 if (!S || !(S->getFlags() & NeededScopeFlags)) {
1453 auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
1454 SemaRef.Diag(TheCall->getExprLoc(), DiagID)
1455 << DRE->getDecl()->getIdentifier();
1456 return true;
1457 }
1458
1459 return false;
1460}
1461
1462static inline bool isBlockPointer(Expr *Arg) {
1463 return Arg->getType()->isBlockPointerType();
1464}
1465
1466/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
1467/// void*, which is a requirement of device side enqueue.
/// Emits a diagnostic per offending parameter and returns true if any
/// parameter is not a 'local void *'.
1468static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
1469 const BlockPointerType *BPT =
1470 cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
1471 ArrayRef<QualType> Params =
1472 BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
1473 unsigned ArgCounter = 0;
1474 bool IllegalParams = false;
1475 // Iterate through the block parameters until either one is found that is not
1476 // a local void*, or the block is valid.
1477 for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
1478 I != E; ++I, ++ArgCounter) {
1479 if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
1480 (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
// NOTE(review): the address-space operand of this comparison (original line
// 1481, presumably LangAS::opencl_local per the doc comment) is missing
// from this extraction.
1482 // Get the location of the error. If a block literal has been passed
1483 // (BlockExpr) then we can point straight to the offending argument,
1484 // else we just point to the variable reference.
1485 SourceLocation ErrorLoc;
1486 if (isa<BlockExpr>(BlockArg)) {
1487 BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
1488 ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
1489 } else if (isa<DeclRefExpr>(BlockArg)) {
1490 ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
1491 }
1492 S.Diag(ErrorLoc,
1493 diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
1494 IllegalParams = true;
1495 }
1496 }
1497
1498 return IllegalParams;
1499}
1500
// Diagnoses use of a subgroup builtin when neither the cl_khr_subgroups
// extension nor the __opencl_c_subgroups feature is supported; returns true
// when a diagnostic was emitted.
// NOTE(review): the function signature (original line 1501, a static
// bool taking Sema & and CallExpr * named checkOpenCLSubgroupExt per the
// call sites below) is missing from this extraction.
1502 // OpenCL device can support extension but not the feature as extension
1503 // requires subgroup independent forward progress, but subgroup independent
1504 // forward progress is optional in OpenCL C 3.0 __opencl_c_subgroups feature.
1505 if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts()) &&
1506 !S.getOpenCLOptions().isSupported("__opencl_c_subgroups",
1507 S.getLangOpts())) {
1508 S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
1509 << 1 << Call->getDirectCallee()
1510 << "cl_khr_subgroups or __opencl_c_subgroups";
1511 return true;
1512 }
1513 return false;
1514}
1515
1516static bool OpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
1517 if (S.checkArgCount(TheCall, 2))
1518 return true;
1519
1520 if (checkOpenCLSubgroupExt(S, TheCall))
1521 return true;
1522
1523 // First argument is an ndrange_t type.
1524 Expr *NDRangeArg = TheCall->getArg(0);
1525 if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
1526 S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1527 << TheCall->getDirectCallee() << "'ndrange_t'";
1528 return true;
1529 }
1530
1531 Expr *BlockArg = TheCall->getArg(1);
1532 if (!isBlockPointer(BlockArg)) {
1533 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1534 << TheCall->getDirectCallee() << "block";
1535 return true;
1536 }
1537 return checkOpenCLBlockArgs(S, BlockArg);
1538}
1539
1540/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
1541/// get_kernel_work_group_size
1542/// and get_kernel_preferred_work_group_size_multiple builtin functions.
/// Returns true when a diagnostic was emitted.
// NOTE(review): the function signature (original line 1543) is missing from
// this extraction; the body takes Sema &S and CallExpr *TheCall.
1544 if (S.checkArgCount(TheCall, 1))
1545 return true;
1546
// The single argument must be a block with only 'local void *' parameters.
1547 Expr *BlockArg = TheCall->getArg(0);
1548 if (!isBlockPointer(BlockArg)) {
1549 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1550 << TheCall->getDirectCallee() << "block";
1551 return true;
1552 }
1553 return checkOpenCLBlockArgs(S, BlockArg);
1554}
1555
1556/// Diagnose integer type and any valid implicit conversion to it.
1557static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
1558 const QualType &IntType);
1559
// Checks that arguments [Start, End] (inclusive) are all convertible to
// size_t — these are the local-memory size arguments that accompany the
// block's 'local void *' parameters. Returns true if any is invalid.
// NOTE(review): the first line of the signature (original line 1560, naming
// this function checkOpenCLEnqueueLocalSizeArgs per the callers below) is
// missing from this extraction.
1561 unsigned Start, unsigned End) {
1562 bool IllegalParams = false;
// Note the inclusive upper bound: End is the last size argument's index.
1563 for (unsigned I = Start; I <= End; ++I)
1564 IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
1565 S.Context.getSizeType());
1566 return IllegalParams;
1567}
1568
1569/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
1570/// 'local void*' parameter of passed block.
/// Returns true when the argument count does not match or a size argument is
/// not an integer.
// NOTE(review): the first line of the signature (original line 1571, naming
// this function checkOpenCLEnqueueVariadicArgs per its callers) is missing
// from this extraction.
1572 Expr *BlockArg,
1573 unsigned NumNonVarArgs) {
1574 const BlockPointerType *BPT =
1575 cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
1576 unsigned NumBlockParams =
1577 BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
1578 unsigned TotalNumArgs = TheCall->getNumArgs();
1579
1580 // For each argument passed to the block, a corresponding uint needs to
1581 // be passed to describe the size of the local memory.
1582 if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
1583 S.Diag(TheCall->getBeginLoc(),
1584 diag::err_opencl_enqueue_kernel_local_size_args);
1585 return true;
1586 }
1587
1588 // Check that the sizes of the local memory are specified by integers.
1589 return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
1590 TotalNumArgs - 1);
1591}
1592
1593/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
1594/// overload formats specified in Table 6.13.17.1.
1595/// int enqueue_kernel(queue_t queue,
1596/// kernel_enqueue_flags_t flags,
1597/// const ndrange_t ndrange,
1598/// void (^block)(void))
1599/// int enqueue_kernel(queue_t queue,
1600/// kernel_enqueue_flags_t flags,
1601/// const ndrange_t ndrange,
1602/// uint num_events_in_wait_list,
1603/// clk_event_t *event_wait_list,
1604/// clk_event_t *event_ret,
1605/// void (^block)(void))
1606/// int enqueue_kernel(queue_t queue,
1607/// kernel_enqueue_flags_t flags,
1608/// const ndrange_t ndrange,
1609/// void (^block)(local void*, ...),
1610/// uint size0, ...)
1611/// int enqueue_kernel(queue_t queue,
1612/// kernel_enqueue_flags_t flags,
1613/// const ndrange_t ndrange,
1614/// uint num_events_in_wait_list,
1615/// clk_event_t *event_wait_list,
1616/// clk_event_t *event_ret,
1617/// void (^block)(local void*, ...),
1618/// uint size0, ...)
// Validates one of the four enqueue_kernel overloads documented above:
// common checks on (queue_t, flags, ndrange_t), then dispatch on argument
// count/shape for the block-only, block+varargs, and event-list forms.
// Returns true when a diagnostic was emitted.
// NOTE(review): several original lines are missing from this extraction
// (1706-1707, 1711, 1717, 1723) — the pointer-to-clk_event_t halves of the
// Arg4/Arg5 conditions and parts of the associated diagnostics.
1619static bool OpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
1620 unsigned NumArgs = TheCall->getNumArgs();
1621
1622 if (NumArgs < 4) {
1623 S.Diag(TheCall->getBeginLoc(),
1624 diag::err_typecheck_call_too_few_args_at_least)
1625 << 0 << 4 << NumArgs << /*is non object*/ 0;
1626 return true;
1627 }
1628
1629 Expr *Arg0 = TheCall->getArg(0);
1630 Expr *Arg1 = TheCall->getArg(1);
1631 Expr *Arg2 = TheCall->getArg(2);
1632 Expr *Arg3 = TheCall->getArg(3);
1633
1634 // First argument always needs to be a queue_t type.
1635 if (!Arg0->getType()->isQueueT()) {
1636 S.Diag(TheCall->getArg(0)->getBeginLoc(),
1637 diag::err_opencl_builtin_expected_type)
1638 << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
1639 return true;
1640 }
1641
1642 // Second argument always needs to be a kernel_enqueue_flags_t enum value.
1643 if (!Arg1->getType()->isIntegerType()) {
1644 S.Diag(TheCall->getArg(1)->getBeginLoc(),
1645 diag::err_opencl_builtin_expected_type)
1646 << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
1647 return true;
1648 }
1649
1650 // Third argument is always an ndrange_t type.
1651 if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
1652 S.Diag(TheCall->getArg(2)->getBeginLoc(),
1653 diag::err_opencl_builtin_expected_type)
1654 << TheCall->getDirectCallee() << "'ndrange_t'";
1655 return true;
1656 }
1657
1658 // With four arguments, there is only one form that the function could be
1659 // called in: no events and no variable arguments.
1660 if (NumArgs == 4) {
1661 // check that the last argument is the right block type.
1662 if (!isBlockPointer(Arg3)) {
1663 S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1664 << TheCall->getDirectCallee() << "block";
1665 return true;
1666 }
1667 // we have a block type, check the prototype
1668 const BlockPointerType *BPT =
1669 cast<BlockPointerType>(Arg3->getType().getCanonicalType());
1670 if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
1671 S.Diag(Arg3->getBeginLoc(),
1672 diag::err_opencl_enqueue_kernel_blocks_no_args);
1673 return true;
1674 }
1675 return false;
1676 }
1677 // we can have block + varargs.
1678 if (isBlockPointer(Arg3))
1679 return (checkOpenCLBlockArgs(S, Arg3) ||
1680 checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
1681 // last two cases with either exactly 7 args or 7 args and varargs.
1682 if (NumArgs >= 7) {
1683 // check common block argument.
1684 Expr *Arg6 = TheCall->getArg(6);
1685 if (!isBlockPointer(Arg6)) {
1686 S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1687 << TheCall->getDirectCallee() << "block";
1688 return true;
1689 }
1690 if (checkOpenCLBlockArgs(S, Arg6))
1691 return true;
1692
1693 // Forth argument has to be any integer type.
1694 if (!Arg3->getType()->isIntegerType()) {
1695 S.Diag(TheCall->getArg(3)->getBeginLoc(),
1696 diag::err_opencl_builtin_expected_type)
1697 << TheCall->getDirectCallee() << "integer";
1698 return true;
1699 }
1700 // check remaining common arguments.
1701 Expr *Arg4 = TheCall->getArg(4);
1702 Expr *Arg5 = TheCall->getArg(5);
1703
1704 // Fifth argument is always passed as a pointer to clk_event_t.
1705 if (!Arg4->isNullPointerConstant(S.Context,
1708 S.Diag(TheCall->getArg(4)->getBeginLoc(),
1709 diag::err_opencl_builtin_expected_type)
1710 << TheCall->getDirectCallee()
1712 return true;
1713 }
1714
1715 // Sixth argument is always passed as a pointer to clk_event_t.
1716 if (!Arg5->isNullPointerConstant(S.Context,
1718 !(Arg5->getType()->isPointerType() &&
1719 Arg5->getType()->getPointeeType()->isClkEventT())) {
1720 S.Diag(TheCall->getArg(5)->getBeginLoc(),
1721 diag::err_opencl_builtin_expected_type)
1722 << TheCall->getDirectCallee()
1724 return true;
1725 }
1726
1727 if (NumArgs == 7)
1728 return false;
1729
1730 return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
1731 }
1732
1733 // None of the specific case has been detected, give generic error
1734 S.Diag(TheCall->getBeginLoc(),
1735 diag::err_opencl_enqueue_kernel_incorrect_args);
1736 return true;
1737}
1738
1739/// Returns OpenCL access qual.
1740static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
1741 return D->getAttr<OpenCLAccessAttr>();
1742}
1743
1744/// Validates the pipe argument of a pipe builtin call: the first argument
/// must have pipe type, and its access qualifier (read_only when absent)
/// must match the read/write direction of the called builtin.
/// Returns true when a diagnostic was emitted.
// NOTE(review): the function signature (original line 1745, naming this
// function checkOpenCLPipeArg per its callers) is missing from this
// extraction.
1746 const Expr *Arg0 = Call->getArg(0);
1747 // First argument type should always be pipe.
1748 if (!Arg0->getType()->isPipeType()) {
1749 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1750 << Call->getDirectCallee() << Arg0->getSourceRange();
1751 return true;
1752 }
1753 OpenCLAccessAttr *AccessQual =
1754 getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
1755 // Validates the access qualifier is compatible with the call.
1756 // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
1757 // read_only and write_only, and assumed to be read_only if no qualifier is
1758 // specified.
1759 switch (Call->getDirectCallee()->getBuiltinID()) {
1760 case Builtin::BIread_pipe:
1761 case Builtin::BIreserve_read_pipe:
1762 case Builtin::BIcommit_read_pipe:
1763 case Builtin::BIwork_group_reserve_read_pipe:
1764 case Builtin::BIsub_group_reserve_read_pipe:
1765 case Builtin::BIwork_group_commit_read_pipe:
1766 case Builtin::BIsub_group_commit_read_pipe:
// Read builtins: allowed when no qualifier (defaults to read_only) or an
// explicit read_only qualifier is present.
1767 if (!(!AccessQual || AccessQual->isReadOnly())) {
1768 S.Diag(Arg0->getBeginLoc(),
1769 diag::err_opencl_builtin_pipe_invalid_access_modifier)
1770 << "read_only" << Arg0->getSourceRange();
1771 return true;
1772 }
1773 break;
1774 case Builtin::BIwrite_pipe:
1775 case Builtin::BIreserve_write_pipe:
1776 case Builtin::BIcommit_write_pipe:
1777 case Builtin::BIwork_group_reserve_write_pipe:
1778 case Builtin::BIsub_group_reserve_write_pipe:
1779 case Builtin::BIwork_group_commit_write_pipe:
1780 case Builtin::BIsub_group_commit_write_pipe:
// Write builtins: require an explicit write_only qualifier.
1781 if (!(AccessQual && AccessQual->isWriteOnly())) {
1782 S.Diag(Arg0->getBeginLoc(),
1783 diag::err_opencl_builtin_pipe_invalid_access_modifier)
1784 << "write_only" << Arg0->getSourceRange();
1785 return true;
1786 }
1787 break;
1788 default:
1789 break;
1790 }
1791 return false;
1792}
1793
1794/// Returns true if pipe element type is different from the pointer.
/// Checks that argument \p Idx is a pointer whose pointee matches the pipe's
/// element (packet) type; emits a diagnostic and returns true on mismatch.
1795static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
1796 const Expr *Arg0 = Call->getArg(0);
1797 const Expr *ArgIdx = Call->getArg(Idx);
1798 const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
1799 const QualType EltTy = PipeTy->getElementType();
1800 const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
1801 // The Idx argument should be a pointer and the type of the pointer and
1802 // the type of pipe element should also be the same.
1803 if (!ArgTy ||
// NOTE(review): the second half of this condition (original line 1804,
// presumably the type-equality check that takes the two operands below) is
// missing from this extraction.
1805 EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
1806 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1807 << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
1808 << ArgIdx->getType() << ArgIdx->getSourceRange();
1809 return true;
1810 }
1811 return false;
1812}
1813
1814// Performs semantic analysis for the read/write_pipe call.
1815// \param S Reference to the semantic analyzer.
1816// \param Call A pointer to the builtin call.
1817// \return True if a semantic error has been found, false otherwise.
// NOTE(review): the packet-type check conditions (original lines 1828 and
// 1856, guarding the two bare `return true;` statements flagged below) are
// missing from this extraction.
1818static bool BuiltinRWPipe(Sema &S, CallExpr *Call) {
1819 // OpenCL v2.0 s6.13.16.2 - The built-in read/write
1820 // functions have two forms.
1821 switch (Call->getNumArgs()) {
1822 case 2:
1823 if (checkOpenCLPipeArg(S, Call))
1824 return true;
1825 // The call with 2 arguments should be
1826 // read/write_pipe(pipe T, T*).
1827 // Check packet type T.
// (guarded by the missing condition from original line 1828)
1829 return true;
1830 break;
1831
1832 case 4: {
1833 if (checkOpenCLPipeArg(S, Call))
1834 return true;
1835 // The call with 4 arguments should be
1836 // read/write_pipe(pipe T, reserve_id_t, uint, T*).
1837 // Check reserve_id_t.
1838 if (!Call->getArg(1)->getType()->isReserveIDT()) {
1839 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1840 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1841 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1842 return true;
1843 }
1844
1845 // Check the index.
1846 const Expr *Arg2 = Call->getArg(2);
1847 if (!Arg2->getType()->isIntegerType() &&
1848 !Arg2->getType()->isUnsignedIntegerType()) {
1849 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1850 << Call->getDirectCallee() << S.Context.UnsignedIntTy
1851 << Arg2->getType() << Arg2->getSourceRange();
1852 return true;
1853 }
1854
1855 // Check packet type T.
// (guarded by the missing condition from original line 1856)
1857 return true;
1858 } break;
1859 default:
1860 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
1861 << Call->getDirectCallee() << Call->getSourceRange();
1862 return true;
1863 }
1864
1865 return false;
1866}
1867
1868// Performs a semantic analysis on the {work_group_/sub_group_
1869// /_}reserve_{read/write}_pipe
1870// \param S Reference to the semantic analyzer.
1871// \param Call The call to the builtin function to be analyzed.
1872// \return True if a semantic error was found, false otherwise.
// NOTE(review): the function signature (original line 1873, naming this
// function BuiltinReserveRWPipe) is missing from this extraction.
1874 if (S.checkArgCount(Call, 2))
1875 return true;
1876
1877 if (checkOpenCLPipeArg(S, Call))
1878 return true;
1879
1880 // Check the reserve size.
1881 if (!Call->getArg(1)->getType()->isIntegerType() &&
1882 !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
1883 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1884 << Call->getDirectCallee() << S.Context.UnsignedIntTy
1885 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1886 return true;
1887 }
1888
1889 // Since return type of reserve_read/write_pipe built-in function is
1890 // reserve_id_t, which is not defined in the builtin def file , we used int
1891 // as return type and need to override the return type of these functions.
1892 Call->setType(S.Context.OCLReserveIDTy);
1893
1894 return false;
1895}
1896
1897// Performs a semantic analysis on {work_group_/sub_group_
1898// /_}commit_{read/write}_pipe
1899// \param S Reference to the semantic analyzer.
1900// \param Call The call to the builtin function to be analyzed.
1901// \return True if a semantic error was found, false otherwise.
// NOTE(review): the function signature (original line 1902, naming this
// function BuiltinCommitRWPipe) is missing from this extraction.
1903 if (S.checkArgCount(Call, 2))
1904 return true;
1905
1906 if (checkOpenCLPipeArg(S, Call))
1907 return true;
1908
1909 // Check reserve_id_t.
1910 if (!Call->getArg(1)->getType()->isReserveIDT()) {
1911 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1912 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1913 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1914 return true;
1915 }
1916
1917 return false;
1918}
1919
1920// Performs a semantic analysis on the call to built-in Pipe
1921// Query Functions.
1922// \param S Reference to the semantic analyzer.
1923// \param Call The call to the builtin function to be analyzed.
1924// \return True if a semantic error was found, false otherwise.
// NOTE(review): the function signature (original line 1925, naming this
// function BuiltinPipePackets) is missing from this extraction.
1926 if (S.checkArgCount(Call, 1))
1927 return true;
1928
// The single argument must have pipe type.
1929 if (!Call->getArg(0)->getType()->isPipeType()) {
1930 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1931 << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
1932 return true;
1933 }
1934
1935 return false;
1936}
1937
1938// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
1939// Performs semantic analysis for the to_global/local/private call.
1940// \param S Reference to the semantic analyzer.
1941// \param BuiltinID ID of the builtin function.
1942// \param Call A pointer to the builtin call.
1943// \return True if a semantic error has been found, false otherwise.
1944static bool OpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID, CallExpr *Call) {
1945 if (S.checkArgCount(Call, 1))
1946 return true;
1947
// The argument must be a pointer, and never to the constant address space.
1948 auto RT = Call->getArg(0)->getType();
1949 if (!RT->isPointerType() || RT->getPointeeType()
1950 .getAddressSpace() == LangAS::opencl_constant) {
1951 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
1952 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
1953 return true;
1954 }
1955
// Non-generic pointees are only worth a warning, not an error.
1956 if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
1957 S.Diag(Call->getArg(0)->getBeginLoc(),
1958 diag::warn_opencl_generic_address_space_arg)
1959 << Call->getDirectCallee()->getNameInfo().getAsString()
1960 << Call->getArg(0)->getSourceRange();
1961 }
1962
// Rewrite the call's type to a pointer into the target address space.
1963 RT = RT->getPointeeType();
1964 auto Qual = RT.getQualifiers();
1965 switch (BuiltinID) {
1966 case Builtin::BIto_global:
1967 Qual.setAddressSpace(LangAS::opencl_global);
1968 break;
1969 case Builtin::BIto_local:
1970 Qual.setAddressSpace(LangAS::opencl_local);
1971 break;
1972 case Builtin::BIto_private:
1973 Qual.setAddressSpace(LangAS::opencl_private);
1974 break;
1975 default:
1976 llvm_unreachable("Invalid builtin function");
1977 }
// NOTE(review): the opening of the Call->setType(...) statement (original
// line 1978) is missing from this extraction; the fragment below is its
// continuation.
1979 RT.getUnqualifiedType(), Qual)));
1980
1981 return false;
1982}
1983
namespace {
// Classifies which pointer-authentication operand is being checked; used by
// checkPointerAuthValue below to decide whether a pointer and/or an integer
// operand is acceptable.
enum PointerAuthOpKind {
  PAO_Strip,         // value operand of a strip operation
  PAO_Sign,          // value operand of a sign operation
  PAO_Auth,          // value operand of an auth operation
  PAO_SignGeneric,   // data operand of generic-data signing
  PAO_Discriminator, // a discriminator operand
  PAO_BlendPointer,  // pointer operand of blend_discriminator
  PAO_BlendInteger   // integer operand of blend_discriminator
};
} // namespace
1995
1996static bool checkPointerAuthEnabled(Sema &S, Expr *E) {
1997 if (S.getLangOpts().PointerAuthIntrinsics)
1998 return false;
1999
2000 S.Diag(E->getExprLoc(), diag::err_ptrauth_disabled) << E->getSourceRange();
2001 return true;
2002}
2003
2004static bool checkPointerAuthKey(Sema &S, Expr *&Arg) {
2005 // Convert it to type 'int'.
2006 if (convertArgumentToType(S, Arg, S.Context.IntTy))
2007 return true;
2008
2009 // Value-dependent expressions are okay; wait for template instantiation.
2010 if (Arg->isValueDependent())
2011 return false;
2012
2013 unsigned KeyValue;
2014 return S.checkConstantPointerAuthKey(Arg, KeyValue);
2015}
2016
  // Attempt to constant-evaluate the expression; pointer-auth keys must be
  // integer constant expressions.
  std::optional<llvm::APSInt> KeyValue = Arg->getIntegerConstantExpr(Context);
  if (!KeyValue) {
    Diag(Arg->getExprLoc(), diag::err_expr_not_ice)
        << 0 << Arg->getSourceRange();
    return true;
  }

  // Ask the target to validate the key parameter.
  if (!Context.getTargetInfo().validatePointerAuthKey(*KeyValue)) {
    // Render the rejected key value as text for the diagnostic.
    {
      llvm::raw_svector_ostream Str(Value);
      Str << *KeyValue;
    }

    Diag(Arg->getExprLoc(), diag::err_ptrauth_invalid_key)
        << Value << Arg->getSourceRange();
    return true;
  }

  // Hand the validated key value back to the caller.
  Result = KeyValue->getZExtValue();
  return false;
}
2042
// Constant-evaluates the expression and, if it is an lvalue rooted in a
// declaration, returns that base declaration together with the constant
// offset from it; returns {nullptr, CharUnits()} otherwise.
static std::pair<const ValueDecl *, CharUnits>
  // Must evaluate as a pointer.
  if (!E->EvaluateAsRValue(Result, S.Context) || !Result.Val.isLValue())
    return {nullptr, CharUnits()};

  // The lvalue base must be a declaration (not e.g. a string literal).
  const auto *BaseDecl =
      Result.Val.getLValueBase().dyn_cast<const ValueDecl *>();
  if (!BaseDecl)
    return {nullptr, CharUnits()};

  return {BaseDecl, Result.Val.getLValueOffset()};
}
2057
// Type-checks a value operand of a pointer-authentication builtin, and for
// ptrauth_sign_constant additionally enforces the required constant form.
// \param Arg the operand; may be rewritten in place by placeholder
//        resolution and implicit conversion.
// \param OpKind which ptrauth operand this is; controls whether pointer
//        and/or integer operands are acceptable.
// \param RequireConstant when true, perform the extra constant-form checks
//        used by ptrauth_sign_constant.
// \return true on error (a diagnostic has been emitted).
static bool checkPointerAuthValue(Sema &S, Expr *&Arg, PointerAuthOpKind OpKind,
                                  bool RequireConstant = false) {
  // Resolve placeholder types before classifying the operand.
  if (Arg->hasPlaceholderType()) {
    if (R.isInvalid())
      return true;
    Arg = R.get();
  }

  // Which operand categories does each operation accept?
  auto AllowsPointer = [](PointerAuthOpKind OpKind) {
    return OpKind != PAO_BlendInteger;
  };
  auto AllowsInteger = [](PointerAuthOpKind OpKind) {
    return OpKind == PAO_Discriminator || OpKind == PAO_BlendInteger ||
           OpKind == PAO_SignGeneric;
  };

  // Require the value to have the right range of type.
  QualType ExpectedTy;
  if (AllowsPointer(OpKind) && Arg->getType()->isPointerType()) {
    ExpectedTy = Arg->getType().getUnqualifiedType();
  } else if (AllowsPointer(OpKind) && Arg->getType()->isNullPtrType()) {
    ExpectedTy = S.Context.VoidPtrTy;
  } else if (AllowsInteger(OpKind) &&
    ExpectedTy = S.Context.getUIntPtrType();

  } else {
    // Diagnose the failures. The first selector names the operand kind, the
    // second names the set of acceptable types.
    S.Diag(Arg->getExprLoc(), diag::err_ptrauth_value_bad_type)
        << unsigned(OpKind == PAO_Discriminator ? 1
                    : OpKind == PAO_BlendPointer ? 2
                    : OpKind == PAO_BlendInteger ? 3
                                                 : 0)
        << unsigned(AllowsInteger(OpKind) ? (AllowsPointer(OpKind) ? 2 : 1) : 0)
        << Arg->getType() << Arg->getSourceRange();
    return true;
  }

  // Convert to that type. This should just be an lvalue-to-rvalue
  // conversion.
  if (convertArgumentToType(S, Arg, ExpectedTy))
    return true;

  if (!RequireConstant) {
    // Warn about null pointers for non-generic sign and auth operations.
    if ((OpKind == PAO_Sign || OpKind == PAO_Auth) &&
      S.Diag(Arg->getExprLoc(), OpKind == PAO_Sign
                                    ? diag::warn_ptrauth_sign_null_pointer
                                    : diag::warn_ptrauth_auth_null_pointer)
          << Arg->getSourceRange();
    }

    return false;
  }

  // Perform special checking on the arguments to ptrauth_sign_constant.

  // The main argument.
  if (OpKind == PAO_Sign) {
    // Require the value we're signing to have a special form.
    auto [BaseDecl, Offset] = findConstantBaseAndOffset(S, Arg);
    bool Invalid;

    // Must be rooted in a declaration reference.
    if (!BaseDecl)
      Invalid = true;

    // If it's a function declaration, we can't have an offset.
    else if (isa<FunctionDecl>(BaseDecl))
      Invalid = !Offset.isZero();

    // Otherwise we're fine.
    else
      Invalid = false;

    if (Invalid)
      S.Diag(Arg->getExprLoc(), diag::err_ptrauth_bad_constant_pointer);
    return Invalid;
  }

  // The discriminator argument.
  assert(OpKind == PAO_Discriminator);

  // Must be a pointer or integer or blend thereof. A blend call contributes
  // both a pointer and an integer component to check.
  Expr *Pointer = nullptr;
  Expr *Integer = nullptr;
  if (auto *Call = dyn_cast<CallExpr>(Arg->IgnoreParens())) {
    if (Call->getBuiltinCallee() ==
        Builtin::BI__builtin_ptrauth_blend_discriminator) {
      Pointer = Call->getArg(0);
      Integer = Call->getArg(1);
    }
  }
  if (!Pointer && !Integer) {
    if (Arg->getType()->isPointerType())
      Pointer = Arg;
    else
      Integer = Arg;
  }

  // Check the pointer: it must be a constant address rooted in a variable.
  bool Invalid = false;
  if (Pointer) {
    assert(Pointer->getType()->isPointerType());

    // TODO: if we're initializing a global, check that the address is
    // somehow related to what we're initializing. This probably will
    // never really be feasible and we'll have to catch it at link-time.
    auto [BaseDecl, Offset] = findConstantBaseAndOffset(S, Pointer);
    if (!BaseDecl || !isa<VarDecl>(BaseDecl))
      Invalid = true;
  }

  // Check the integer: it must be constant-evaluatable.
  if (Integer) {
    assert(Integer->getType()->isIntegerType());
    if (!Integer->isEvaluatable(S.Context))
      Invalid = true;
  }

  if (Invalid)
    S.Diag(Arg->getExprLoc(), diag::err_ptrauth_bad_constant_discriminator);
  return Invalid;
}
2184
  // Strip builtin: (value, key).
  if (S.checkArgCount(Call, 2))
    return ExprError();
    return ExprError();
  if (checkPointerAuthValue(S, Call->getArgs()[0], PAO_Strip) ||
      checkPointerAuthKey(S, Call->getArgs()[1]))
    return ExprError();

  // The result has the same type as the value being stripped.
  Call->setType(Call->getArgs()[0]->getType());
  return Call;
}
2197
  // Blend-discriminator builtin: (pointer, integer).
  if (S.checkArgCount(Call, 2))
    return ExprError();
    return ExprError();
  if (checkPointerAuthValue(S, Call->getArgs()[0], PAO_BlendPointer) ||
      checkPointerAuthValue(S, Call->getArgs()[1], PAO_BlendInteger))
    return ExprError();

  // A blended discriminator has type uintptr_t.
  Call->setType(S.Context.getUIntPtrType());
  return Call;
}
2210
  // Generic-data signing builtin: (value, discriminator).
  if (S.checkArgCount(Call, 2))
    return ExprError();
    return ExprError();
  if (checkPointerAuthValue(S, Call->getArgs()[0], PAO_SignGeneric) ||
      checkPointerAuthValue(S, Call->getArgs()[1], PAO_Discriminator))
    return ExprError();

  // The resulting signature has type uintptr_t.
  Call->setType(S.Context.getUIntPtrType());
  return Call;
}
2223
                                      PointerAuthOpKind OpKind,
                                      bool RequireConstant) {
  // Sign/auth builtins: (value, key, discriminator). RequireConstant selects
  // the extra ptrauth_sign_constant operand checks.
  if (S.checkArgCount(Call, 3))
    return ExprError();
    return ExprError();
  if (checkPointerAuthValue(S, Call->getArgs()[0], OpKind, RequireConstant) ||
      checkPointerAuthKey(S, Call->getArgs()[1]) ||
      checkPointerAuthValue(S, Call->getArgs()[2], PAO_Discriminator,
                            RequireConstant))
    return ExprError();

  // The result keeps the type of the signed/authenticated value.
  Call->setType(Call->getArgs()[0]->getType());
  return Call;
}
2240
  // Auth-and-resign builtin:
  // (value, old key, old discriminator, new key, new discriminator).
  if (S.checkArgCount(Call, 5))
    return ExprError();
    return ExprError();
  if (checkPointerAuthValue(S, Call->getArgs()[0], PAO_Auth) ||
      checkPointerAuthKey(S, Call->getArgs()[1]) ||
      checkPointerAuthValue(S, Call->getArgs()[2], PAO_Discriminator) ||
      checkPointerAuthKey(S, Call->getArgs()[3]) ||
      checkPointerAuthValue(S, Call->getArgs()[4], PAO_Discriminator))
    return ExprError();

  // The result keeps the type of the re-signed value.
  Call->setType(Call->getArgs()[0]->getType());
  return Call;
}
2256
    return ExprError();

  // We've already performed normal call type-checking.
  const Expr *Arg = Call->getArg(0)->IgnoreParenImpCasts();

  // Operand must be an ordinary or UTF-8 string literal, i.e. one whose
  // code units are single bytes.
  const auto *Literal = dyn_cast<StringLiteral>(Arg);
  if (!Literal || Literal->getCharByteWidth() != 1) {
    S.Diag(Arg->getExprLoc(), diag::err_ptrauth_string_not_literal)
        << (Literal ? 1 : 0) << Arg->getSourceRange();
    return ExprError();
  }

  return Call;
}
2274
  // __builtin_launder takes exactly one argument.
  if (S.checkArgCount(TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  // Classify invalid parameter types for the diagnostic:
  // 0 = not a pointer, 1 = function pointer, 2 = void pointer.
  auto DiagSelect = [&]() -> std::optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return std::optional<unsigned>{};
  }();
  if (DiagSelect) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << *DiagSelect << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  // Coerce the argument to the computed parameter type.
  InitializedEntity Entity =
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}
2334
// Emit an error and return true if the current object format type is in the
// list of unsupported types.
    Sema &S, unsigned BuiltinID, CallExpr *TheCall,
    ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) {
  // Object format of the triple we are compiling for.
  llvm::Triple::ObjectFormatType CurObjFormat =
      S.getASTContext().getTargetInfo().getTriple().getObjectFormat();
  if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
        << TheCall->getSourceRange();
    return true;
  }
  return false;
}
2349
2350// Emit an error and return true if the current architecture is not in the list
2351// of supported architectures.
2352static bool
2353CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
2354 ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
2355 llvm::Triple::ArchType CurArch =
2356 S.getASTContext().getTargetInfo().getTriple().getArch();
2357 if (llvm::is_contained(SupportedArchs, CurArch))
2358 return false;
2359 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
2360 << TheCall->getSourceRange();
2361 return true;
2362}
2363
2364static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
2365 SourceLocation CallSiteLoc);
2366
2367bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
2368 CallExpr *TheCall) {
2369 switch (TI.getTriple().getArch()) {
2370 default:
2371 // Some builtins don't require additional checking, so just consider these
2372 // acceptable.
2373 return false;
2374 case llvm::Triple::arm:
2375 case llvm::Triple::armeb:
2376 case llvm::Triple::thumb:
2377 case llvm::Triple::thumbeb:
2378 return ARM().CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
2379 case llvm::Triple::aarch64:
2380 case llvm::Triple::aarch64_32:
2381 case llvm::Triple::aarch64_be:
2382 return ARM().CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
2383 case llvm::Triple::bpfeb:
2384 case llvm::Triple::bpfel:
2385 return BPF().CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
2386 case llvm::Triple::hexagon:
2387 return Hexagon().CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
2388 case llvm::Triple::mips:
2389 case llvm::Triple::mipsel:
2390 case llvm::Triple::mips64:
2391 case llvm::Triple::mips64el:
2392 return MIPS().CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
2393 case llvm::Triple::systemz:
2394 return SystemZ().CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
2395 case llvm::Triple::x86:
2396 case llvm::Triple::x86_64:
2397 return X86().CheckBuiltinFunctionCall(TI, BuiltinID, TheCall);
2398 case llvm::Triple::ppc:
2399 case llvm::Triple::ppcle:
2400 case llvm::Triple::ppc64:
2401 case llvm::Triple::ppc64le:
2402 return PPC().CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
2403 case llvm::Triple::amdgcn:
2404 return AMDGPU().CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
2405 case llvm::Triple::riscv32:
2406 case llvm::Triple::riscv64:
2407 return RISCV().CheckBuiltinFunctionCall(TI, BuiltinID, TheCall);
2408 case llvm::Triple::loongarch32:
2409 case llvm::Triple::loongarch64:
2410 return LoongArch().CheckLoongArchBuiltinFunctionCall(TI, BuiltinID,
2411 TheCall);
2412 case llvm::Triple::wasm32:
2413 case llvm::Triple::wasm64:
2414 return Wasm().CheckWebAssemblyBuiltinFunctionCall(TI, BuiltinID, TheCall);
2415 case llvm::Triple::nvptx:
2416 case llvm::Triple::nvptx64:
2417 return NVPTX().CheckNVPTXBuiltinFunctionCall(TI, BuiltinID, TheCall);
2418 }
2419}
2420
// Check if \p ArgTy is a valid type for the elementwise math builtins. If it
// is not a valid type, emit an error message and return true. Otherwise
// return false.
                                       QualType ArgTy, int ArgIndex) {
  // Accept vectors; per the diagnostic below, scalars must be integer or
  // floating-point.
  if (!ArgTy->getAs<VectorType>() &&
    return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
           << ArgIndex << /* vector, integer or float ty*/ 0 << ArgTy;
  }

  return false;
}
2434
                                         QualType ArgTy, int ArgIndex) {
  // Peel off a vector wrapper, if any, to get the element type.
  QualType EltTy = ArgTy;
  if (auto *VecTy = EltTy->getAs<VectorType>())
    EltTy = VecTy->getElementType();

  // The (element) type must be a real floating-point type.
  if (!EltTy->isRealFloatingType()) {
    return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
           << ArgIndex << /* vector or float ty*/ 5 << ArgTy;
  }

  return false;
}
2448
2449/// BuiltinCpu{Supports|Is} - Handle __builtin_cpu_{supports|is}(char *).
2450/// This checks that the target supports the builtin and that the string
2451/// argument is constant and valid.
2452static bool BuiltinCpu(Sema &S, const TargetInfo &TI, CallExpr *TheCall,
2453 const TargetInfo *AuxTI, unsigned BuiltinID) {
2454 assert((BuiltinID == Builtin::BI__builtin_cpu_supports ||
2455 BuiltinID == Builtin::BI__builtin_cpu_is) &&
2456 "Expecting __builtin_cpu_...");
2457
2458 bool IsCPUSupports = BuiltinID == Builtin::BI__builtin_cpu_supports;
2459 const TargetInfo *TheTI = &TI;
2460 auto SupportsBI = [=](const TargetInfo *TInfo) {
2461 return TInfo && ((IsCPUSupports && TInfo->supportsCpuSupports()) ||
2462 (!IsCPUSupports && TInfo->supportsCpuIs()));
2463 };
2464 if (!SupportsBI(&TI) && SupportsBI(AuxTI))
2465 TheTI = AuxTI;
2466
2467 if ((!IsCPUSupports && !TheTI->supportsCpuIs()) ||
2468 (IsCPUSupports && !TheTI->supportsCpuSupports()))
2469 return S.Diag(TheCall->getBeginLoc(),
2470 TI.getTriple().isOSAIX()
2471 ? diag::err_builtin_aix_os_unsupported
2472 : diag::err_builtin_target_unsupported)
2473 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
2474
2475 Expr *Arg = TheCall->getArg(0)->IgnoreParenImpCasts();
2476 // Check if the argument is a string literal.
2477 if (!isa<StringLiteral>(Arg))
2478 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
2479 << Arg->getSourceRange();
2480
2481 // Check the contents of the string.
2482 StringRef Feature = cast<StringLiteral>(Arg)->getString();
2483 if (IsCPUSupports && !TheTI->validateCpuSupports(Feature)) {
2484 S.Diag(TheCall->getBeginLoc(), diag::warn_invalid_cpu_supports)
2485 << Arg->getSourceRange();
2486 return false;
2487 }
2488 if (!IsCPUSupports && !TheTI->validateCpuIs(Feature))
2489 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
2490 << Arg->getSourceRange();
2491 return false;
2492}
2493
2494/// Checks that __builtin_popcountg was called with a single argument, which is
2495/// an unsigned integer.
2496static bool BuiltinPopcountg(Sema &S, CallExpr *TheCall) {
2497 if (S.checkArgCount(TheCall, 1))
2498 return true;
2499
2500 ExprResult ArgRes = S.DefaultLvalueConversion(TheCall->getArg(0));
2501 if (ArgRes.isInvalid())
2502 return true;
2503
2504 Expr *Arg = ArgRes.get();
2505 TheCall->setArg(0, Arg);
2506
2507 QualType ArgTy = Arg->getType();
2508
2509 if (!ArgTy->isUnsignedIntegerType()) {
2510 S.Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2511 << 1 << /*unsigned integer ty*/ 7 << ArgTy;
2512 return true;
2513 }
2514 return false;
2515}
2516
2517/// Checks that __builtin_{clzg,ctzg} was called with a first argument, which is
2518/// an unsigned integer, and an optional second argument, which is promoted to
2519/// an 'int'.
2520static bool BuiltinCountZeroBitsGeneric(Sema &S, CallExpr *TheCall) {
2521 if (S.checkArgCountRange(TheCall, 1, 2))
2522 return true;
2523
2524 ExprResult Arg0Res = S.DefaultLvalueConversion(TheCall->getArg(0));
2525 if (Arg0Res.isInvalid())
2526 return true;
2527
2528 Expr *Arg0 = Arg0Res.get();
2529 TheCall->setArg(0, Arg0);
2530
2531 QualType Arg0Ty = Arg0->getType();
2532
2533 if (!Arg0Ty->isUnsignedIntegerType()) {
2534 S.Diag(Arg0->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2535 << 1 << /*unsigned integer ty*/ 7 << Arg0Ty;
2536 return true;
2537 }
2538
2539 if (TheCall->getNumArgs() > 1) {
2540 ExprResult Arg1Res = S.UsualUnaryConversions(TheCall->getArg(1));
2541 if (Arg1Res.isInvalid())
2542 return true;
2543
2544 Expr *Arg1 = Arg1Res.get();
2545 TheCall->setArg(1, Arg1);
2546
2547 QualType Arg1Ty = Arg1->getType();
2548
2549 if (!Arg1Ty->isSpecificBuiltinType(BuiltinType::Int)) {
2550 S.Diag(Arg1->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2551 << 2 << /*'int' ty*/ 8 << Arg1Ty;
2552 return true;
2553 }
2554 }
2555
2556 return false;
2557}
2558
2560Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
2561 CallExpr *TheCall) {
2562 ExprResult TheCallResult(TheCall);
2563
2564 // Find out if any arguments are required to be integer constant expressions.
2565 unsigned ICEArguments = 0;
2567 Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
2568 if (Error != ASTContext::GE_None)
2569 ICEArguments = 0; // Don't diagnose previously diagnosed errors.
2570
2571 // If any arguments are required to be ICE's, check and diagnose.
2572 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
2573 // Skip arguments not required to be ICE's.
2574 if ((ICEArguments & (1 << ArgNo)) == 0) continue;
2575
2576 llvm::APSInt Result;
2577 // If we don't have enough arguments, continue so we can issue better
2578 // diagnostic in checkArgCount(...)
2579 if (ArgNo < TheCall->getNumArgs() &&
2580 BuiltinConstantArg(TheCall, ArgNo, Result))
2581 return true;
2582 ICEArguments &= ~(1 << ArgNo);
2583 }
2584
2585 FPOptions FPO;
2586 switch (BuiltinID) {
2587 case Builtin::BI__builtin_cpu_supports:
2588 case Builtin::BI__builtin_cpu_is:
2589 if (BuiltinCpu(*this, Context.getTargetInfo(), TheCall,
2590 Context.getAuxTargetInfo(), BuiltinID))
2591 return ExprError();
2592 break;
2593 case Builtin::BI__builtin_cpu_init:
2595 Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
2596 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
2597 return ExprError();
2598 }
2599 break;
2600 case Builtin::BI__builtin___CFStringMakeConstantString:
2601 // CFStringMakeConstantString is currently not implemented for GOFF (i.e.,
2602 // on z/OS) and for XCOFF (i.e., on AIX). Emit unsupported
2604 *this, BuiltinID, TheCall,
2605 {llvm::Triple::GOFF, llvm::Triple::XCOFF}))
2606 return ExprError();
2607 assert(TheCall->getNumArgs() == 1 &&
2608 "Wrong # arguments to builtin CFStringMakeConstantString");
2609 if (ObjC().CheckObjCString(TheCall->getArg(0)))
2610 return ExprError();
2611 break;
2612 case Builtin::BI__builtin_ms_va_start:
2613 case Builtin::BI__builtin_stdarg_start:
2614 case Builtin::BI__builtin_va_start:
2615 if (BuiltinVAStart(BuiltinID, TheCall))
2616 return ExprError();
2617 break;
2618 case Builtin::BI__va_start: {
2619 switch (Context.getTargetInfo().getTriple().getArch()) {
2620 case llvm::Triple::aarch64:
2621 case llvm::Triple::arm:
2622 case llvm::Triple::thumb:
2623 if (BuiltinVAStartARMMicrosoft(TheCall))
2624 return ExprError();
2625 break;
2626 default:
2627 if (BuiltinVAStart(BuiltinID, TheCall))
2628 return ExprError();
2629 break;
2630 }
2631 break;
2632 }
2633
2634 // The acquire, release, and no fence variants are ARM and AArch64 only.
2635 case Builtin::BI_interlockedbittestandset_acq:
2636 case Builtin::BI_interlockedbittestandset_rel:
2637 case Builtin::BI_interlockedbittestandset_nf:
2638 case Builtin::BI_interlockedbittestandreset_acq:
2639 case Builtin::BI_interlockedbittestandreset_rel:
2640 case Builtin::BI_interlockedbittestandreset_nf:
2642 *this, BuiltinID, TheCall,
2643 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
2644 return ExprError();
2645 break;
2646
2647 // The 64-bit bittest variants are x64, ARM, and AArch64 only.
2648 case Builtin::BI_bittest64:
2649 case Builtin::BI_bittestandcomplement64:
2650 case Builtin::BI_bittestandreset64:
2651 case Builtin::BI_bittestandset64:
2652 case Builtin::BI_interlockedbittestandreset64:
2653 case Builtin::BI_interlockedbittestandset64:
2655 *this, BuiltinID, TheCall,
2656 {llvm::Triple::x86_64, llvm::Triple::arm, llvm::Triple::thumb,
2657 llvm::Triple::aarch64, llvm::Triple::amdgcn}))
2658 return ExprError();
2659 break;
2660
2661 case Builtin::BI__builtin_set_flt_rounds:
2663 *this, BuiltinID, TheCall,
2664 {llvm::Triple::x86, llvm::Triple::x86_64, llvm::Triple::arm,
2665 llvm::Triple::thumb, llvm::Triple::aarch64, llvm::Triple::amdgcn}))
2666 return ExprError();
2667 break;
2668
2669 case Builtin::BI__builtin_isgreater:
2670 case Builtin::BI__builtin_isgreaterequal:
2671 case Builtin::BI__builtin_isless:
2672 case Builtin::BI__builtin_islessequal:
2673 case Builtin::BI__builtin_islessgreater:
2674 case Builtin::BI__builtin_isunordered:
2675 if (BuiltinUnorderedCompare(TheCall, BuiltinID))
2676 return ExprError();
2677 break;
2678 case Builtin::BI__builtin_fpclassify:
2679 if (BuiltinFPClassification(TheCall, 6, BuiltinID))
2680 return ExprError();
2681 break;
2682 case Builtin::BI__builtin_isfpclass:
2683 if (BuiltinFPClassification(TheCall, 2, BuiltinID))
2684 return ExprError();
2685 break;
2686 case Builtin::BI__builtin_isfinite:
2687 case Builtin::BI__builtin_isinf:
2688 case Builtin::BI__builtin_isinf_sign:
2689 case Builtin::BI__builtin_isnan:
2690 case Builtin::BI__builtin_issignaling:
2691 case Builtin::BI__builtin_isnormal:
2692 case Builtin::BI__builtin_issubnormal:
2693 case Builtin::BI__builtin_iszero:
2694 case Builtin::BI__builtin_signbit:
2695 case Builtin::BI__builtin_signbitf:
2696 case Builtin::BI__builtin_signbitl:
2697 if (BuiltinFPClassification(TheCall, 1, BuiltinID))
2698 return ExprError();
2699 break;
2700 case Builtin::BI__builtin_shufflevector:
2701 return BuiltinShuffleVector(TheCall);
2702 // TheCall will be freed by the smart pointer here, but that's fine, since
2703 // BuiltinShuffleVector guts it, but then doesn't release it.
2704 case Builtin::BI__builtin_prefetch:
2705 if (BuiltinPrefetch(TheCall))
2706 return ExprError();
2707 break;
2708 case Builtin::BI__builtin_alloca_with_align:
2709 case Builtin::BI__builtin_alloca_with_align_uninitialized:
2710 if (BuiltinAllocaWithAlign(TheCall))
2711 return ExprError();
2712 [[fallthrough]];
2713 case Builtin::BI__builtin_alloca:
2714 case Builtin::BI__builtin_alloca_uninitialized:
2715 Diag(TheCall->getBeginLoc(), diag::warn_alloca)
2716 << TheCall->getDirectCallee();
2717 break;
2718 case Builtin::BI__arithmetic_fence:
2719 if (BuiltinArithmeticFence(TheCall))
2720 return ExprError();
2721 break;
2722 case Builtin::BI__assume:
2723 case Builtin::BI__builtin_assume:
2724 if (BuiltinAssume(TheCall))
2725 return ExprError();
2726 break;
2727 case Builtin::BI__builtin_assume_aligned:
2728 if (BuiltinAssumeAligned(TheCall))
2729 return ExprError();
2730 break;
2731 case Builtin::BI__builtin_dynamic_object_size:
2732 case Builtin::BI__builtin_object_size:
2733 if (BuiltinConstantArgRange(TheCall, 1, 0, 3))
2734 return ExprError();
2735 break;
2736 case Builtin::BI__builtin_longjmp:
2737 if (BuiltinLongjmp(TheCall))
2738 return ExprError();
2739 break;
2740 case Builtin::BI__builtin_setjmp:
2741 if (BuiltinSetjmp(TheCall))
2742 return ExprError();
2743 break;
2744 case Builtin::BI__builtin_classify_type:
2745 if (checkArgCount(TheCall, 1))
2746 return true;
2747 TheCall->setType(Context.IntTy);
2748 break;
2749 case Builtin::BI__builtin_complex:
2750 if (BuiltinComplex(TheCall))
2751 return ExprError();
2752 break;
2753 case Builtin::BI__builtin_constant_p: {
2754 if (checkArgCount(TheCall, 1))
2755 return true;
2757 if (Arg.isInvalid()) return true;
2758 TheCall->setArg(0, Arg.get());
2759 TheCall->setType(Context.IntTy);
2760 break;
2761 }
2762 case Builtin::BI__builtin_launder:
2763 return BuiltinLaunder(*this, TheCall);
2764 case Builtin::BI__sync_fetch_and_add:
2765 case Builtin::BI__sync_fetch_and_add_1:
2766 case Builtin::BI__sync_fetch_and_add_2:
2767 case Builtin::BI__sync_fetch_and_add_4:
2768 case Builtin::BI__sync_fetch_and_add_8:
2769 case Builtin::BI__sync_fetch_and_add_16:
2770 case Builtin::BI__sync_fetch_and_sub:
2771 case Builtin::BI__sync_fetch_and_sub_1:
2772 case Builtin::BI__sync_fetch_and_sub_2:
2773 case Builtin::BI__sync_fetch_and_sub_4:
2774 case Builtin::BI__sync_fetch_and_sub_8:
2775 case Builtin::BI__sync_fetch_and_sub_16:
2776 case Builtin::BI__sync_fetch_and_or:
2777 case Builtin::BI__sync_fetch_and_or_1:
2778 case Builtin::BI__sync_fetch_and_or_2:
2779 case Builtin::BI__sync_fetch_and_or_4:
2780 case Builtin::BI__sync_fetch_and_or_8:
2781 case Builtin::BI__sync_fetch_and_or_16:
2782 case Builtin::BI__sync_fetch_and_and:
2783 case Builtin::BI__sync_fetch_and_and_1:
2784 case Builtin::BI__sync_fetch_and_and_2:
2785 case Builtin::BI__sync_fetch_and_and_4:
2786 case Builtin::BI__sync_fetch_and_and_8:
2787 case Builtin::BI__sync_fetch_and_and_16:
2788 case Builtin::BI__sync_fetch_and_xor:
2789 case Builtin::BI__sync_fetch_and_xor_1:
2790 case Builtin::BI__sync_fetch_and_xor_2:
2791 case Builtin::BI__sync_fetch_and_xor_4:
2792 case Builtin::BI__sync_fetch_and_xor_8:
2793 case Builtin::BI__sync_fetch_and_xor_16:
2794 case Builtin::BI__sync_fetch_and_nand:
2795 case Builtin::BI__sync_fetch_and_nand_1:
2796 case Builtin::BI__sync_fetch_and_nand_2:
2797 case Builtin::BI__sync_fetch_and_nand_4:
2798 case Builtin::BI__sync_fetch_and_nand_8:
2799 case Builtin::BI__sync_fetch_and_nand_16:
2800 case Builtin::BI__sync_add_and_fetch:
2801 case Builtin::BI__sync_add_and_fetch_1:
2802 case Builtin::BI__sync_add_and_fetch_2:
2803 case Builtin::BI__sync_add_and_fetch_4:
2804 case Builtin::BI__sync_add_and_fetch_8:
2805 case Builtin::BI__sync_add_and_fetch_16:
2806 case Builtin::BI__sync_sub_and_fetch:
2807 case Builtin::BI__sync_sub_and_fetch_1:
2808 case Builtin::BI__sync_sub_and_fetch_2:
2809 case Builtin::BI__sync_sub_and_fetch_4:
2810 case Builtin::BI__sync_sub_and_fetch_8:
2811 case Builtin::BI__sync_sub_and_fetch_16:
2812 case Builtin::BI__sync_and_and_fetch:
2813 case Builtin::BI__sync_and_and_fetch_1:
2814 case Builtin::BI__sync_and_and_fetch_2:
2815 case Builtin::BI__sync_and_and_fetch_4:
2816 case Builtin::BI__sync_and_and_fetch_8:
2817 case Builtin::BI__sync_and_and_fetch_16:
2818 case Builtin::BI__sync_or_and_fetch:
2819 case Builtin::BI__sync_or_and_fetch_1:
2820 case Builtin::BI__sync_or_and_fetch_2:
2821 case Builtin::BI__sync_or_and_fetch_4:
2822 case Builtin::BI__sync_or_and_fetch_8:
2823 case Builtin::BI__sync_or_and_fetch_16:
2824 case Builtin::BI__sync_xor_and_fetch:
2825 case Builtin::BI__sync_xor_and_fetch_1:
2826 case Builtin::BI__sync_xor_and_fetch_2:
2827 case Builtin::BI__sync_xor_and_fetch_4:
2828 case Builtin::BI__sync_xor_and_fetch_8:
2829 case Builtin::BI__sync_xor_and_fetch_16:
2830 case Builtin::BI__sync_nand_and_fetch:
2831 case Builtin::BI__sync_nand_and_fetch_1:
2832 case Builtin::BI__sync_nand_and_fetch_2:
2833 case Builtin::BI__sync_nand_and_fetch_4:
2834 case Builtin::BI__sync_nand_and_fetch_8:
2835 case Builtin::BI__sync_nand_and_fetch_16:
2836 case Builtin::BI__sync_val_compare_and_swap:
2837 case Builtin::BI__sync_val_compare_and_swap_1:
2838 case Builtin::BI__sync_val_compare_and_swap_2:
2839 case Builtin::BI__sync_val_compare_and_swap_4:
2840 case Builtin::BI__sync_val_compare_and_swap_8:
2841 case Builtin::BI__sync_val_compare_and_swap_16:
2842 case Builtin::BI__sync_bool_compare_and_swap:
2843 case Builtin::BI__sync_bool_compare_and_swap_1:
2844 case Builtin::BI__sync_bool_compare_and_swap_2:
2845 case Builtin::BI__sync_bool_compare_and_swap_4:
2846 case Builtin::BI__sync_bool_compare_and_swap_8:
2847 case Builtin::BI__sync_bool_compare_and_swap_16:
2848 case Builtin::BI__sync_lock_test_and_set:
2849 case Builtin::BI__sync_lock_test_and_set_1:
2850 case Builtin::BI__sync_lock_test_and_set_2:
2851 case Builtin::BI__sync_lock_test_and_set_4:
2852 case Builtin::BI__sync_lock_test_and_set_8:
2853 case Builtin::BI__sync_lock_test_and_set_16:
2854 case Builtin::BI__sync_lock_release:
2855 case Builtin::BI__sync_lock_release_1:
2856 case Builtin::BI__sync_lock_release_2:
2857 case Builtin::BI__sync_lock_release_4:
2858 case Builtin::BI__sync_lock_release_8:
2859 case Builtin::BI__sync_lock_release_16:
2860 case Builtin::BI__sync_swap:
2861 case Builtin::BI__sync_swap_1:
2862 case Builtin::BI__sync_swap_2:
2863 case Builtin::BI__sync_swap_4:
2864 case Builtin::BI__sync_swap_8:
2865 case Builtin::BI__sync_swap_16:
2866 return BuiltinAtomicOverloaded(TheCallResult);
2867 case Builtin::BI__sync_synchronize:
2868 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
2869 << TheCall->getCallee()->getSourceRange();
2870 break;
2871 case Builtin::BI__builtin_nontemporal_load:
2872 case Builtin::BI__builtin_nontemporal_store:
2873 return BuiltinNontemporalOverloaded(TheCallResult);
2874 case Builtin::BI__builtin_memcpy_inline: {
2875 clang::Expr *SizeOp = TheCall->getArg(2);
2876 // We warn about copying to or from `nullptr` pointers when `size` is
2877 // greater than 0. When `size` is value dependent we cannot evaluate its
2878 // value so we bail out.
2879 if (SizeOp->isValueDependent())
2880 break;
2881 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) {
2882 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
2883 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc());
2884 }
2885 break;
2886 }
2887 case Builtin::BI__builtin_memset_inline: {
2888 clang::Expr *SizeOp = TheCall->getArg(2);
2889 // We warn about filling to `nullptr` pointers when `size` is greater than
2890 // 0. When `size` is value dependent we cannot evaluate its value so we bail
2891 // out.
2892 if (SizeOp->isValueDependent())
2893 break;
2894 if (!SizeOp->EvaluateKnownConstInt(Context).isZero())
2895 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
2896 break;
2897 }
2898#define BUILTIN(ID, TYPE, ATTRS)
2899#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
2900 case Builtin::BI##ID: \
2901 return AtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
2902#include "clang/Basic/Builtins.inc"
2903 case Builtin::BI__annotation:
2904 if (BuiltinMSVCAnnotation(*this, TheCall))
2905 return ExprError();
2906 break;
2907 case Builtin::BI__builtin_annotation:
2908 if (BuiltinAnnotation(*this, TheCall))
2909 return ExprError();
2910 break;
2911 case Builtin::BI__builtin_addressof:
2912 if (BuiltinAddressof(*this, TheCall))
2913 return ExprError();
2914 break;
2915 case Builtin::BI__builtin_function_start:
2916 if (BuiltinFunctionStart(*this, TheCall))
2917 return ExprError();
2918 break;
2919 case Builtin::BI__builtin_is_aligned:
2920 case Builtin::BI__builtin_align_up:
2921 case Builtin::BI__builtin_align_down:
2922 if (BuiltinAlignment(*this, TheCall, BuiltinID))
2923 return ExprError();
2924 break;
2925 case Builtin::BI__builtin_add_overflow:
2926 case Builtin::BI__builtin_sub_overflow:
2927 case Builtin::BI__builtin_mul_overflow:
2928 if (BuiltinOverflow(*this, TheCall, BuiltinID))
2929 return ExprError();
2930 break;
2931 case Builtin::BI__builtin_operator_new:
2932 case Builtin::BI__builtin_operator_delete: {
2933 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
2934 ExprResult Res =
2935 BuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
2936 if (Res.isInvalid())
2937 CorrectDelayedTyposInExpr(TheCallResult.get());
2938 return Res;
2939 }
2940 case Builtin::BI__builtin_dump_struct:
2941 return BuiltinDumpStruct(*this, TheCall);
2942 case Builtin::BI__builtin_expect_with_probability: {
2943 // We first want to ensure we are called with 3 arguments
2944 if (checkArgCount(TheCall, 3))
2945 return ExprError();
2946 // then check probability is constant float in range [0.0, 1.0]
2947 const Expr *ProbArg = TheCall->getArg(2);
2949 Expr::EvalResult Eval;
2950 Eval.Diag = &Notes;
2951 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) ||
2952 !Eval.Val.isFloat()) {
2953 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float)
2954 << ProbArg->getSourceRange();
2955 for (const PartialDiagnosticAt &PDiag : Notes)
2956 Diag(PDiag.first, PDiag.second);
2957 return ExprError();
2958 }
2959 llvm::APFloat Probability = Eval.Val.getFloat();
2960 bool LoseInfo = false;
2961 Probability.convert(llvm::APFloat::IEEEdouble(),
2962 llvm::RoundingMode::Dynamic, &LoseInfo);
2963 if (!(Probability >= llvm::APFloat(0.0) &&
2964 Probability <= llvm::APFloat(1.0))) {
2965 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range)
2966 << ProbArg->getSourceRange();
2967 return ExprError();
2968 }
2969 break;
2970 }
2971 case Builtin::BI__builtin_preserve_access_index:
2972 if (BuiltinPreserveAI(*this, TheCall))
2973 return ExprError();
2974 break;
2975 case Builtin::BI__builtin_call_with_static_chain:
2976 if (BuiltinCallWithStaticChain(*this, TheCall))
2977 return ExprError();
2978 break;
2979 case Builtin::BI__exception_code:
2980 case Builtin::BI_exception_code:
2981 if (BuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
2982 diag::err_seh___except_block))
2983 return ExprError();
2984 break;
2985 case Builtin::BI__exception_info:
2986 case Builtin::BI_exception_info:
2987 if (BuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
2988 diag::err_seh___except_filter))
2989 return ExprError();
2990 break;
2991 case Builtin::BI__GetExceptionInfo:
2992 if (checkArgCount(TheCall, 1))
2993 return ExprError();
2994
2996 TheCall->getBeginLoc(),
2998 TheCall))
2999 return ExprError();
3000
3001 TheCall->setType(Context.VoidPtrTy);
3002 break;
3003 case Builtin::BIaddressof:
3004 case Builtin::BI__addressof:
3005 case Builtin::BIforward:
3006 case Builtin::BIforward_like:
3007 case Builtin::BImove:
3008 case Builtin::BImove_if_noexcept:
3009 case Builtin::BIas_const: {
3010 // These are all expected to be of the form
3011 // T &/&&/* f(U &/&&)
3012 // where T and U only differ in qualification.
3013 if (checkArgCount(TheCall, 1))
3014 return ExprError();
3015 QualType Param = FDecl->getParamDecl(0)->getType();
3016 QualType Result = FDecl->getReturnType();
3017 bool ReturnsPointer = BuiltinID == Builtin::BIaddressof ||
3018 BuiltinID == Builtin::BI__addressof;
3019 if (!(Param->isReferenceType() &&
3020 (ReturnsPointer ? Result->isAnyPointerType()
3021 : Result->isReferenceType()) &&
3023 Result->getPointeeType()))) {
3024 Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported)
3025 << FDecl;
3026 return ExprError();
3027 }
3028 break;
3029 }
3030 case Builtin::BI__builtin_ptrauth_strip:
3031 return PointerAuthStrip(*this, TheCall);
3032 case Builtin::BI__builtin_ptrauth_blend_discriminator:
3033 return PointerAuthBlendDiscriminator(*this, TheCall);
3034 case Builtin::BI__builtin_ptrauth_sign_constant:
3035 return PointerAuthSignOrAuth(*this, TheCall, PAO_Sign,
3036 /*RequireConstant=*/true);
3037 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
3038 return PointerAuthSignOrAuth(*this, TheCall, PAO_Sign,
3039 /*RequireConstant=*/false);
3040 case Builtin::BI__builtin_ptrauth_auth:
3041 return PointerAuthSignOrAuth(*this, TheCall, PAO_Auth,
3042 /*RequireConstant=*/false);
3043 case Builtin::BI__builtin_ptrauth_sign_generic_data:
3044 return PointerAuthSignGenericData(*this, TheCall);
3045 case Builtin::BI__builtin_ptrauth_auth_and_resign:
3046 return PointerAuthAuthAndResign(*this, TheCall);
3047 case Builtin::BI__builtin_ptrauth_string_discriminator:
3048 return PointerAuthStringDiscriminator(*this, TheCall);
3049 // OpenCL v2.0, s6.13.16 - Pipe functions
3050 case Builtin::BIread_pipe:
3051 case Builtin::BIwrite_pipe:
3052 // Since those two functions are declared with var args, we need a semantic
3053 // check for the argument.
3054 if (BuiltinRWPipe(*this, TheCall))
3055 return ExprError();
3056 break;
3057 case Builtin::BIreserve_read_pipe:
3058 case Builtin::BIreserve_write_pipe:
3059 case Builtin::BIwork_group_reserve_read_pipe:
3060 case Builtin::BIwork_group_reserve_write_pipe:
3061 if (BuiltinReserveRWPipe(*this, TheCall))
3062 return ExprError();
3063 break;
3064 case Builtin::BIsub_group_reserve_read_pipe:
3065 case Builtin::BIsub_group_reserve_write_pipe:
3066 if (checkOpenCLSubgroupExt(*this, TheCall) ||
3067 BuiltinReserveRWPipe(*this, TheCall))
3068 return ExprError();
3069 break;
3070 case Builtin::BIcommit_read_pipe:
3071 case Builtin::BIcommit_write_pipe:
3072 case Builtin::BIwork_group_commit_read_pipe:
3073 case Builtin::BIwork_group_commit_write_pipe:
3074 if (BuiltinCommitRWPipe(*this, TheCall))
3075 return ExprError();
3076 break;
3077 case Builtin::BIsub_group_commit_read_pipe:
3078 case Builtin::BIsub_group_commit_write_pipe:
3079 if (checkOpenCLSubgroupExt(*this, TheCall) ||
3080 BuiltinCommitRWPipe(*this, TheCall))
3081 return ExprError();
3082 break;
3083 case Builtin::BIget_pipe_num_packets:
3084 case Builtin::BIget_pipe_max_packets:
3085 if (BuiltinPipePackets(*this, TheCall))
3086 return ExprError();
3087 break;
3088 case Builtin::BIto_global:
3089 case Builtin::BIto_local:
3090 case Builtin::BIto_private:
3091 if (OpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
3092 return ExprError();
3093 break;
3094 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
3095 case Builtin::BIenqueue_kernel:
3096 if (OpenCLBuiltinEnqueueKernel(*this, TheCall))
3097 return ExprError();
3098 break;
3099 case Builtin::BIget_kernel_work_group_size:
3100 case Builtin::BIget_kernel_preferred_work_group_size_multiple:
3101 if (OpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
3102 return ExprError();
3103 break;
3104 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
3105 case Builtin::BIget_kernel_sub_group_count_for_ndrange:
3106 if (OpenCLBuiltinNDRangeAndBlock(*this, TheCall))
3107 return ExprError();
3108 break;
3109 case Builtin::BI__builtin_os_log_format:
3111 [[fallthrough]];
3112 case Builtin::BI__builtin_os_log_format_buffer_size:
3113 if (BuiltinOSLogFormat(TheCall))
3114 return ExprError();
3115 break;
3116 case Builtin::BI__builtin_frame_address:
3117 case Builtin::BI__builtin_return_address: {
3118 if (BuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF))
3119 return ExprError();
3120
3121 // -Wframe-address warning if non-zero passed to builtin
3122 // return/frame address.
3124 if (!TheCall->getArg(0)->isValueDependent() &&
3125 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
3126 Result.Val.getInt() != 0)
3127 Diag(TheCall->getBeginLoc(), diag::warn_frame_address)
3128 << ((BuiltinID == Builtin::BI__builtin_return_address)
3129 ? "__builtin_return_address"
3130 : "__builtin_frame_address")
3131 << TheCall->getSourceRange();
3132 break;
3133 }
3134
3135 case Builtin::BI__builtin_nondeterministic_value: {
3136 if (BuiltinNonDeterministicValue(TheCall))
3137 return ExprError();
3138 break;
3139 }
3140
3141 // __builtin_elementwise_abs restricts the element type to signed integers or
3142 // floating point types only.
3143 case Builtin::BI__builtin_elementwise_abs: {
3144 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
3145 return ExprError();
3146
3147 QualType ArgTy = TheCall->getArg(0)->getType();
3148 QualType EltTy = ArgTy;
3149
3150 if (auto *VecTy = EltTy->getAs<VectorType>())
3151 EltTy = VecTy->getElementType();
3152 if (EltTy->isUnsignedIntegerType()) {
3153 Diag(TheCall->getArg(0)->getBeginLoc(),
3154 diag::err_builtin_invalid_arg_type)
3155 << 1 << /* signed integer or float ty*/ 3 << ArgTy;
3156 return ExprError();
3157 }
3158 break;
3159 }
3160
3161 // These builtins restrict the element type to floating point
3162 // types only.
3163 case Builtin::BI__builtin_elementwise_ceil:
3164 case Builtin::BI__builtin_elementwise_cos:
3165 case Builtin::BI__builtin_elementwise_exp:
3166 case Builtin::BI__builtin_elementwise_exp2:
3167 case Builtin::BI__builtin_elementwise_floor:
3168 case Builtin::BI__builtin_elementwise_log:
3169 case Builtin::BI__builtin_elementwise_log2:
3170 case Builtin::BI__builtin_elementwise_log10:
3171 case Builtin::BI__builtin_elementwise_roundeven:
3172 case Builtin::BI__builtin_elementwise_round:
3173 case Builtin::BI__builtin_elementwise_rint:
3174 case Builtin::BI__builtin_elementwise_nearbyint:
3175 case Builtin::BI__builtin_elementwise_sin:
3176 case Builtin::BI__builtin_elementwise_sqrt:
3177 case Builtin::BI__builtin_elementwise_tan:
3178 case Builtin::BI__builtin_elementwise_trunc:
3179 case Builtin::BI__builtin_elementwise_canonicalize: {
3180 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
3181 return ExprError();
3182
3183 QualType ArgTy = TheCall->getArg(0)->getType();
3184 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
3185 ArgTy, 1))
3186 return ExprError();
3187 break;
3188 }
3189 case Builtin::BI__builtin_elementwise_fma: {
3190 if (BuiltinElementwiseTernaryMath(TheCall))
3191 return ExprError();
3192 break;
3193 }
3194
3195 // These builtins restrict the element type to floating point
3196 // types only, and take in two arguments.
3197 case Builtin::BI__builtin_elementwise_pow: {
3198 if (BuiltinElementwiseMath(TheCall))
3199 return ExprError();
3200
3201 QualType ArgTy = TheCall->getArg(0)->getType();
3202 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
3203 ArgTy, 1) ||
3204 checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(),
3205 ArgTy, 2))
3206 return ExprError();
3207 break;
3208 }
3209
3210 // These builtins restrict the element type to integer
3211 // types only.
3212 case Builtin::BI__builtin_elementwise_add_sat:
3213 case Builtin::BI__builtin_elementwise_sub_sat: {
3214 if (BuiltinElementwiseMath(TheCall))
3215 return ExprError();
3216
3217 const Expr *Arg = TheCall->getArg(0);
3218 QualType ArgTy = Arg->getType();
3219 QualType EltTy = ArgTy;
3220
3221 if (auto *VecTy = EltTy->getAs<VectorType>())
3222 EltTy = VecTy->getElementType();
3223
3224 if (!EltTy->isIntegerType()) {
3225 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
3226 << 1 << /* integer ty */ 6 << ArgTy;
3227 return ExprError();
3228 }
3229 break;
3230 }
3231
3232 case Builtin::BI__builtin_elementwise_min:
3233 case Builtin::BI__builtin_elementwise_max:
3234 if (BuiltinElementwiseMath(TheCall))
3235 return ExprError();
3236 break;
3237
3238 case Builtin::BI__builtin_elementwise_bitreverse: {
3239 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
3240 return ExprError();
3241
3242 const Expr *Arg = TheCall->getArg(0);
3243 QualType ArgTy = Arg->getType();
3244 QualType EltTy = ArgTy;
3245
3246 if (auto *VecTy = EltTy->getAs<VectorType>())
3247 EltTy = VecTy->getElementType();
3248
3249 if (!EltTy->isIntegerType()) {
3250 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
3251 << 1 << /* integer ty */ 6 << ArgTy;
3252 return ExprError();
3253 }
3254 break;
3255 }
3256
3257 case Builtin::BI__builtin_elementwise_copysign: {
3258 if (checkArgCount(TheCall, 2))
3259 return ExprError();
3260
3261 ExprResult Magnitude = UsualUnaryConversions(TheCall->getArg(0));
3262 ExprResult Sign = UsualUnaryConversions(TheCall->getArg(1));
3263 if (Magnitude.isInvalid() || Sign.isInvalid())
3264 return ExprError();
3265
3266 QualType MagnitudeTy = Magnitude.get()->getType();
3267 QualType SignTy = Sign.get()->getType();
3268 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
3269 MagnitudeTy, 1) ||
3270 checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(),
3271 SignTy, 2)) {
3272 return ExprError();
3273 }
3274
3275 if (MagnitudeTy.getCanonicalType() != SignTy.getCanonicalType()) {
3276 return Diag(Sign.get()->getBeginLoc(),
3277 diag::err_typecheck_call_different_arg_types)
3278 << MagnitudeTy << SignTy;
3279 }
3280
3281 TheCall->setArg(0, Magnitude.get());
3282 TheCall->setArg(1, Sign.get());
3283 TheCall->setType(Magnitude.get()->getType());
3284 break;
3285 }
3286 case Builtin::BI__builtin_reduce_max:
3287 case Builtin::BI__builtin_reduce_min: {
3288 if (PrepareBuiltinReduceMathOneArgCall(TheCall))
3289 return ExprError();
3290
3291 const Expr *Arg = TheCall->getArg(0);
3292 const auto *TyA = Arg->getType()->getAs<VectorType>();
3293
3294 QualType ElTy;
3295 if (TyA)
3296 ElTy = TyA->getElementType();
3297 else if (Arg->getType()->isSizelessVectorType())
3299
3300 if (ElTy.isNull()) {
3301 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
3302 << 1 << /* vector ty*/ 4 << Arg->getType();
3303 return ExprError();
3304 }
3305
3306 TheCall->setType(ElTy);
3307 break;
3308 }
3309
3310 // These builtins support vectors of integers only.
3311 // TODO: ADD/MUL should support floating-point types.
3312 case Builtin::BI__builtin_reduce_add:
3313 case Builtin::BI__builtin_reduce_mul:
3314 case Builtin::BI__builtin_reduce_xor:
3315 case Builtin::BI__builtin_reduce_or:
3316 case Builtin::BI__builtin_reduce_and: {
3317 if (PrepareBuiltinReduceMathOneArgCall(TheCall))
3318 return ExprError();
3319
3320 const Expr *Arg = TheCall->getArg(0);
3321 const auto *TyA = Arg->getType()->getAs<VectorType>();
3322
3323 QualType ElTy;
3324 if (TyA)
3325 ElTy = TyA->getElementType();
3326 else if (Arg->getType()->isSizelessVectorType())
3328
3329 if (ElTy.isNull() || !ElTy->isIntegerType()) {
3330 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
3331 << 1 << /* vector of integers */ 6 << Arg->getType();
3332 return ExprError();
3333 }
3334
3335 TheCall->setType(ElTy);
3336 break;
3337 }
3338
3339 case Builtin::BI__builtin_matrix_transpose:
3340 return BuiltinMatrixTranspose(TheCall, TheCallResult);
3341
3342 case Builtin::BI__builtin_matrix_column_major_load:
3343 return BuiltinMatrixColumnMajorLoad(TheCall, TheCallResult);
3344
3345 case Builtin::BI__builtin_matrix_column_major_store:
3346 return BuiltinMatrixColumnMajorStore(TheCall, TheCallResult);
3347
3348 case Builtin::BI__builtin_get_device_side_mangled_name: {
3349 auto Check = [](CallExpr *TheCall) {
3350 if (TheCall->getNumArgs() != 1)
3351 return false;
3352 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts());
3353 if (!DRE)
3354 return false;
3355 auto *D = DRE->getDecl();
3356 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D))
3357 return false;
3358 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() ||
3359 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>();
3360 };
3361 if (!Check(TheCall)) {
3362 Diag(TheCall->getBeginLoc(),
3363 diag::err_hip_invalid_args_builtin_mangled_name);
3364 return ExprError();
3365 }
3366 break;
3367 }
3368 case Builtin::BI__builtin_popcountg:
3369 if (BuiltinPopcountg(*this, TheCall))
3370 return ExprError();
3371 break;
3372 case Builtin::BI__builtin_clzg:
3373 case Builtin::BI__builtin_ctzg:
3374 if (BuiltinCountZeroBitsGeneric(*this, TheCall))
3375 return ExprError();
3376 break;
3377
3378 case Builtin::BI__builtin_allow_runtime_check: {
3379 Expr *Arg = TheCall->getArg(0);
3380 // Check if the argument is a string literal.
3381 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) {
3382 Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
3383 << Arg->getSourceRange();
3384 return ExprError();
3385 }
3386 break;
3387 }
3388 }
3389
3390 if (getLangOpts().HLSL && CheckHLSLBuiltinFunctionCall(BuiltinID, TheCall))
3391 return ExprError();
3392
3393 // Since the target specific builtins for each arch overlap, only check those
3394 // of the arch we are compiling for.
3395 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
3396 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
3397 assert(Context.getAuxTargetInfo() &&
3398 "Aux Target Builtin, but not an aux target?");
3399
3400 if (CheckTSBuiltinFunctionCall(
3402 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
3403 return ExprError();
3404 } else {
3405 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
3406 TheCall))
3407 return ExprError();
3408 }
3409 }
3410
3411 return TheCallResult;
3412}
3413
3414/// Returns true if the argument consists of one contiguous run of 1s with any
3415/// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so
3416/// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not,
3417/// since all 1s are not contiguous.
3418bool Sema::ValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
3419 llvm::APSInt Result;
3420 // We can't check the value of a dependent argument.
3421 Expr *Arg = TheCall->getArg(ArgNum);
3422 if (Arg->isTypeDependent() || Arg->isValueDependent())
3423 return false;
3424
3425 // Check constant-ness first.
3426 if (BuiltinConstantArg(TheCall, ArgNum, Result))
3427 return true;
3428
3429 // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s.
3430 if (Result.isShiftedMask() || (~Result).isShiftedMask())
3431 return false;
3432
3433 return Diag(TheCall->getBeginLoc(),
3434 diag::err_argument_not_contiguous_bit_field)
3435 << ArgNum << Arg->getSourceRange();
3436}
3437
3438// Helper function for CheckHLSLBuiltinFunctionCall
3440 assert(TheCall->getNumArgs() > 1);
3441 ExprResult A = TheCall->getArg(0);
3442
3443 QualType ArgTyA = A.get()->getType();
3444
3445 auto *VecTyA = ArgTyA->getAs<VectorType>();
3446 SourceLocation BuiltinLoc = TheCall->getBeginLoc();
3447
3448 for (unsigned i = 1; i < TheCall->getNumArgs(); ++i) {
3449 ExprResult B = TheCall->getArg(i);
3450 QualType ArgTyB = B.get()->getType();
3451 auto *VecTyB = ArgTyB->getAs<VectorType>();
3452 if (VecTyA == nullptr && VecTyB == nullptr)
3453 return false;
3454
3455 if (VecTyA && VecTyB) {
3456 bool retValue = false;
3457 if (VecTyA->getElementType() != VecTyB->getElementType()) {
3458 // Note: type promotion is intended to be handeled via the intrinsics
3459 // and not the builtin itself.
3460 S->Diag(TheCall->getBeginLoc(),
3461 diag::err_vec_builtin_incompatible_vector)
3462 << TheCall->getDirectCallee() << /*useAllTerminology*/ true
3463 << SourceRange(A.get()->getBeginLoc(), B.get()->getEndLoc());
3464 retValue = true;
3465 }
3466 if (VecTyA->getNumElements() != VecTyB->getNumElements()) {
3467 // You should only be hitting this case if you are calling the builtin
3468 // directly. HLSL intrinsics should avoid this case via a
3469 // HLSLVectorTruncation.
3470 S->Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
3471 << TheCall->getDirectCallee() << /*useAllTerminology*/ true
3472 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
3473 TheCall->getArg(1)->getEndLoc());
3474 retValue = true;
3475 }
3476 return retValue;
3477 }
3478 }
3479
3480 // Note: if we get here one of the args is a scalar which
3481 // requires a VectorSplat on Arg0 or Arg1
3482 S->Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
3483 << TheCall->getDirectCallee() << /*useAllTerminology*/ true
3484 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
3485 TheCall->getArg(1)->getEndLoc());
3486 return true;
3487}
3488
3490 Sema *S, CallExpr *TheCall, QualType ExpectedType,
3491 llvm::function_ref<bool(clang::QualType PassedType)> Check) {
3492 for (unsigned i = 0; i < TheCall->getNumArgs(); ++i) {
3493 QualType PassedType = TheCall->getArg(i)->getType();
3494 if (Check(PassedType)) {
3495 if (auto *VecTyA = PassedType->getAs<VectorType>())
3497 ExpectedType, VecTyA->getNumElements(), VecTyA->getVectorKind());
3498 S->Diag(TheCall->getArg(0)->getBeginLoc(),
3499 diag::err_typecheck_convert_incompatible)
3500 << PassedType << ExpectedType << 1 << 0 << 0;
3501 return true;
3502 }
3503 }
3504 return false;
3505}
3506
3508 auto checkAllFloatTypes = [](clang::QualType PassedType) -> bool {
3509 return !PassedType->hasFloatingRepresentation();
3510 };
3511 return CheckArgsTypesAreCorrect(S, TheCall, S->Context.FloatTy,
3512 checkAllFloatTypes);
3513}
3514
3516 auto checkFloatorHalf = [](clang::QualType PassedType) -> bool {
3517 clang::QualType BaseType =
3518 PassedType->isVectorType()
3519 ? PassedType->getAs<clang::VectorType>()->getElementType()
3520 : PassedType;
3521 return !BaseType->isHalfType() && !BaseType->isFloat32Type();
3522 };
3523 return CheckArgsTypesAreCorrect(S, TheCall, S->Context.FloatTy,
3524 checkFloatorHalf);
3525}
3526
3528 auto checkDoubleVector = [](clang::QualType PassedType) -> bool {
3529 if (const auto *VecTy = PassedType->getAs<VectorType>())
3530 return VecTy->getElementType()->isDoubleType();
3531 return false;
3532 };
3533 return CheckArgsTypesAreCorrect(S, TheCall, S->Context.FloatTy,
3534 checkDoubleVector);
3535}
3536
3538 auto checkAllUnsignedTypes = [](clang::QualType PassedType) -> bool {
3539 return !PassedType->hasUnsignedIntegerRepresentation();
3540 };
3541 return CheckArgsTypesAreCorrect(S, TheCall, S->Context.UnsignedIntTy,
3542 checkAllUnsignedTypes);
3543}
3544
3546 QualType ReturnType) {
3547 auto *VecTyA = TheCall->getArg(0)->getType()->getAs<VectorType>();
3548 if (VecTyA)
3549 ReturnType = S->Context.getVectorType(ReturnType, VecTyA->getNumElements(),
3551 TheCall->setType(ReturnType);
3552}
3553
// Note: returning true in this case results in CheckBuiltinFunctionCall
// returning an ExprError
// Performs the HLSL-specific semantic checks for a builtin call: argument
// counts, vector element/width compatibility, and element-type restrictions.
bool Sema::CheckHLSLBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  // all/any: only the argument count is validated here.
  case Builtin::BI__builtin_hlsl_elementwise_all:
  case Builtin::BI__builtin_hlsl_elementwise_any: {
    if (checkArgCount(TheCall, 1))
      return true;
    break;
  }
  // clamp(x, min, max): three matching arguments; float checking inside the
  // ternary-math helper is only enabled when the first argument is floating.
  case Builtin::BI__builtin_hlsl_elementwise_clamp: {
    if (checkArgCount(TheCall, 3))
      return true;
    if (CheckVectorElementCallArgs(this, TheCall))
      return true;
    if (BuiltinElementwiseTernaryMath(
            TheCall, /*CheckForFloatArgs*/
            TheCall->getArg(0)->getType()->hasFloatingRepresentation()))
      return true;
    break;
  }
  // dot(a, b): two compatible vector args reduced to a scalar; vectors of
  // double are rejected.
  case Builtin::BI__builtin_hlsl_dot: {
    if (checkArgCount(TheCall, 2))
      return true;
    if (CheckVectorElementCallArgs(this, TheCall))
      return true;
    if (BuiltinVectorToScalarMath(TheCall))
      return true;
    if (CheckNoDoubleVectors(this, TheCall))
      return true;
    break;
  }
  // rcp: any floating representation is accepted.
  case Builtin::BI__builtin_hlsl_elementwise_rcp: {
    if (CheckAllArgsHaveFloatRepresentation(this, TheCall))
      return true;
    if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
      return true;
    break;
  }
  // rsqrt/frac: element type must be half or float.
  case Builtin::BI__builtin_hlsl_elementwise_rsqrt:
  case Builtin::BI__builtin_hlsl_elementwise_frac: {
    if (CheckFloatOrHalfRepresentations(this, TheCall))
      return true;
    if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
      return true;
    break;
  }
  // isinf: float/half input, but the result is bool (vectorized to match the
  // argument's width).
  case Builtin::BI__builtin_hlsl_elementwise_isinf: {
    if (CheckFloatOrHalfRepresentations(this, TheCall))
      return true;
    if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
      return true;
    SetElementTypeAsReturnType(this, TheCall, this->Context.BoolTy);
    break;
  }
  // lerp(x, y, s): three compatible float/half arguments.
  case Builtin::BI__builtin_hlsl_lerp: {
    if (checkArgCount(TheCall, 3))
      return true;
    if (CheckVectorElementCallArgs(this, TheCall))
      return true;
    if (BuiltinElementwiseTernaryMath(TheCall))
      return true;
    if (CheckFloatOrHalfRepresentations(this, TheCall))
      return true;
    break;
  }
  // mad(m, a, b): like clamp, float checking depends on the first argument.
  case Builtin::BI__builtin_hlsl_mad: {
    if (checkArgCount(TheCall, 3))
      return true;
    if (CheckVectorElementCallArgs(this, TheCall))
      return true;
    if (BuiltinElementwiseTernaryMath(
            TheCall, /*CheckForFloatArgs*/
            TheCall->getArg(0)->getType()->hasFloatingRepresentation()))
      return true;
    break;
  }
  // Note these are llvm builtins that we want to catch invalid intrinsic
  // generation. Normal handling of these builitns will occur elsewhere.
  case Builtin::BI__builtin_elementwise_bitreverse: {
    if (CheckUnsignedIntRepresentation(this, TheCall))
      return true;
    break;
  }
  // Generic elementwise math builtins: in HLSL their element type is
  // restricted to half or float.
  case Builtin::BI__builtin_elementwise_ceil:
  case Builtin::BI__builtin_elementwise_cos:
  case Builtin::BI__builtin_elementwise_exp:
  case Builtin::BI__builtin_elementwise_exp2:
  case Builtin::BI__builtin_elementwise_floor:
  case Builtin::BI__builtin_elementwise_log:
  case Builtin::BI__builtin_elementwise_log2:
  case Builtin::BI__builtin_elementwise_log10:
  case Builtin::BI__builtin_elementwise_pow:
  case Builtin::BI__builtin_elementwise_roundeven:
  case Builtin::BI__builtin_elementwise_sin:
  case Builtin::BI__builtin_elementwise_sqrt:
  case Builtin::BI__builtin_elementwise_tan:
  case Builtin::BI__builtin_elementwise_trunc: {
    if (CheckFloatOrHalfRepresentations(this, TheCall))
      return true;
    break;
  }
  }
  // No HLSL-specific problem found (unknown builtins fall through here too).
  return false;
}
3659
3660/// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo
3661/// parameter with the FormatAttr's correct format_idx and firstDataArg.
3662/// Returns true when the format fits the function and the FormatStringInfo has
3663/// been populated.
3664bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
3665 bool IsVariadic, FormatStringInfo *FSI) {
3666 if (Format->getFirstArg() == 0)
3668 else if (IsVariadic)
3670 else
3672 FSI->FormatIdx = Format->getFormatIdx() - 1;
3673 FSI->FirstDataArg =
3674 FSI->ArgPassingKind == FAPK_VAList ? 0 : Format->getFirstArg() - 1;
3675
3676 // The way the format attribute works in GCC, the implicit this argument
3677 // of member functions is counted. However, it doesn't appear in our own
3678 // lists, so decrement format_idx in that case.
3679 if (IsCXXMember) {
3680 if(FSI->FormatIdx == 0)
3681 return false;
3682 --FSI->FormatIdx;
3683 if (FSI->FirstDataArg != 0)
3684 --FSI->FirstDataArg;
3685 }
3686 return true;
3687}
3688
3689/// Checks if a the given expression evaluates to null.
3690///
3691/// Returns true if the value evaluates to null.
3692static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
3693 // Treat (smart) pointers constructed from nullptr as null, whether we can
3694 // const-evaluate them or not.
3695 // This must happen first: the smart pointer expr might have _Nonnull type!
3696 if (isa<CXXNullPtrLiteralExpr>(
3699 return true;
3700
3701 // If the expression has non-null type, it doesn't evaluate to null.
3702 if (auto nullability = Expr->IgnoreImplicit()->getType()->getNullability()) {
3703 if (*nullability == NullabilityKind::NonNull)
3704 return false;
3705 }
3706
3707 // As a special case, transparent unions initialized with zero are
3708 // considered null for the purposes of the nonnull attribute.
3709 if (const RecordType *UT = Expr->getType()->getAsUnionType();
3710 UT && UT->getDecl()->hasAttr<TransparentUnionAttr>()) {
3711 if (const auto *CLE = dyn_cast<CompoundLiteralExpr>(Expr))
3712 if (const auto *ILE = dyn_cast<InitListExpr>(CLE->getInitializer()))
3713 Expr = ILE->getInit(0);
3714 }
3715
3716 bool Result;
3717 return (!Expr->isValueDependent() &&
3719 !Result);
3720}
3721
3723 const Expr *ArgExpr,
3724 SourceLocation CallSiteLoc) {
3725 if (CheckNonNullExpr(S, ArgExpr))
3726 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
3727 S.PDiag(diag::warn_null_arg)
3728 << ArgExpr->getSourceRange());
3729}
3730
3731bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
3732 FormatStringInfo FSI;
3733 if ((GetFormatStringType(Format) == FST_NSString) &&
3734 getFormatStringInfo(Format, false, true, &FSI)) {
3735 Idx = FSI.FormatIdx;
3736 return true;
3737 }
3738 return false;
3739}
3740
3741/// Diagnose use of %s directive in an NSString which is being passed
3742/// as formatting string to formatting method.
3743static void
3745 const NamedDecl *FDecl,
3746 Expr **Args,
3747 unsigned NumArgs) {
3748 unsigned Idx = 0;
3749 bool Format = false;
3751 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
3752 Idx = 2;
3753 Format = true;
3754 }
3755 else
3756 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
3757 if (S.GetFormatNSStringIdx(I, Idx)) {
3758 Format = true;
3759 break;
3760 }
3761 }
3762 if (!Format || NumArgs <= Idx)
3763 return;
3764 const Expr *FormatExpr = Args[Idx];
3765 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
3766 FormatExpr = CSCE->getSubExpr();
3767 const StringLiteral *FormatString;
3768 if (const ObjCStringLiteral *OSL =
3769 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
3770 FormatString = OSL->getString();
3771 else
3772 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
3773 if (!FormatString)
3774 return;
3775 if (S.FormatStringHasSArg(FormatString)) {
3776 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
3777 << "%s" << 1 << 1;
3778 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
3779 << FDecl->getDeclName();
3780 }
3781}
3782
3783/// Determine whether the given type has a non-null nullability annotation.
3785 if (auto nullability = type->getNullability())
3786 return *nullability == NullabilityKind::NonNull;
3787
3788 return false;
3789}
3790
                                  const NamedDecl *FDecl,
                                  const FunctionProtoType *Proto,
                                  SourceLocation CallSiteLoc) {
  assert((FDecl || Proto) && "Need a function declaration or prototype");

  // Already checked by constant evaluator.
    return;
  // Check the attributes attached to the method/function itself.
  llvm::SmallBitVector NonNullArgs;
  if (FDecl) {
    // Handle the nonnull attribute on the function/method declaration itself.
    for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
      if (!NonNull->args_size()) {
        // Easy case: all pointer arguments are nonnull.
        for (const auto *Arg : Args)
          if (S.isValidPointerAttrType(Arg->getType()))
            CheckNonNullArgument(S, Arg, CallSiteLoc);
        return;
      }

      // Otherwise record only the explicitly listed argument indices,
      // skipping any index that is out of range for this call site.
      for (const ParamIdx &Idx : NonNull->args()) {
        unsigned IdxAST = Idx.getASTIndex();
        if (IdxAST >= Args.size())
          continue;
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());
        NonNullArgs.set(IdxAST);
      }
    }
  }

  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
    // Handle the nonnull attribute on the parameters of the
    // function/method.
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
      parms = FD->parameters();
    else
      parms = cast<ObjCMethodDecl>(FDecl)->parameters();

    // A parameter is non-null either via an explicit nonnull attribute on
    // the parameter or via a _Nonnull annotation on its type.
    unsigned ParamIndex = 0;
    for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
         I != E; ++I, ++ParamIndex) {
      const ParmVarDecl *PVD = *I;
      if (PVD->hasAttr<NonNullAttr>() || isNonNullType(PVD->getType())) {
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());

        NonNullArgs.set(ParamIndex);
      }
    }
  } else {
    // If we have a non-function, non-method declaration but no
    // function prototype, try to dig out the function prototype.
    if (!Proto) {
      if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
        QualType type = VD->getType().getNonReferenceType();
        if (auto pointerType = type->getAs<PointerType>())
          type = pointerType->getPointeeType();
        else if (auto blockType = type->getAs<BlockPointerType>())
          type = blockType->getPointeeType();
        // FIXME: data member pointers?

        // Dig out the function prototype, if there is one.
        Proto = type->getAs<FunctionProtoType>();
      }
    }

    // Fill in non-null argument information from the nullability
    // information on the parameter types (if we have them).
    if (Proto) {
      unsigned Index = 0;
      for (auto paramType : Proto->getParamTypes()) {
        if (isNonNullType(paramType)) {
          if (NonNullArgs.empty())
            NonNullArgs.resize(Args.size());

          NonNullArgs.set(Index);
        }

        ++Index;
      }
    }
  }

  // Check for non-null arguments.
  for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
       ArgIndex != ArgIndexEnd; ++ArgIndex) {
    if (NonNullArgs[ArgIndex])
      CheckNonNullArgument(S, Args[ArgIndex], Args[ArgIndex]->getExprLoc());
  }
}
3886
3887/// Warn if a pointer or reference argument passed to a function points to an
3888/// object that is less aligned than the parameter. This can happen when
3889/// creating a typedef with a lower alignment than the original type and then
3890/// calling functions defined in terms of the original type.
3891void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
3892 StringRef ParamName, QualType ArgTy,
3893 QualType ParamTy) {
3894
3895 // If a function accepts a pointer or reference type
3896 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType())
3897 return;
3898
3899 // If the parameter is a pointer type, get the pointee type for the
3900 // argument too. If the parameter is a reference type, don't try to get
3901 // the pointee type for the argument.
3902 if (ParamTy->isPointerType())
3903 ArgTy = ArgTy->getPointeeType();
3904
3905 // Remove reference or pointer
3906 ParamTy = ParamTy->getPointeeType();
3907
3908 // Find expected alignment, and the actual alignment of the passed object.
3909 // getTypeAlignInChars requires complete types
3910 if (ArgTy.isNull() || ParamTy->isDependentType() ||
3911 ParamTy->isIncompleteType() || ArgTy->isIncompleteType() ||
3912 ParamTy->isUndeducedType() || ArgTy->isUndeducedType())
3913 return;
3914
3915 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy);
3916 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy);
3917
3918 // If the argument is less aligned than the parameter, there is a
3919 // potential alignment issue.
3920 if (ArgAlign < ParamAlign)
3921 Diag(Loc, diag::warn_param_mismatched_alignment)
3922 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity()
3923 << ParamName << (FDecl != nullptr) << FDecl;
3924}
3925
/// Handles the checks for format strings, non-POD arguments to vararg
/// functions, NULL arguments passed to non-NULL parameters, diagnose_if
/// attributes and AArch64 SME attributes.
                     const Expr *ThisArg, ArrayRef<const Expr *> Args,
                     bool IsMemberFunction, SourceLocation Loc,
  // FIXME: We should check as much as we can in the template definition.
    return;

  // Printf and scanf checking.
  llvm::SmallBitVector CheckedVarArgs;
  if (FDecl) {
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      // Only create vector if there are format attributes.
      CheckedVarArgs.resize(Args.size());

      CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
                           CheckedVarArgs);
    }
  }

  // Refuse POD arguments that weren't caught by the format string
  // checks above.
  auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
  if (CallType != VariadicDoesNotApply &&
      (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
    // Number of fixed parameters; everything past this index is a variadic
    // argument.
    unsigned NumParams = Proto ? Proto->getNumParams()
                       : isa_and_nonnull<FunctionDecl>(FDecl)
                           ? cast<FunctionDecl>(FDecl)->getNumParams()
                       : isa_and_nonnull<ObjCMethodDecl>(FDecl)
                           ? cast<ObjCMethodDecl>(FDecl)->param_size()
                       : 0;

    for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        // Skip arguments already vetted by the format-string checks above.
        if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
          checkVariadicArgument(Arg, CallType);
      }
    }
  }

  if (FDecl || Proto) {
    CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);

    // Type safety checking.
    if (FDecl) {
      for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
        CheckArgumentWithTypeTag(I, Args, Loc);
    }
  }

  // Check that passed arguments match the alignment of original arguments.
  // Try to get the missing prototype from the declaration.
  if (!Proto && FDecl) {
    const auto *FT = FDecl->getFunctionType();
    if (isa_and_nonnull<FunctionProtoType>(FT))
      Proto = cast<FunctionProtoType>(FDecl->getFunctionType());
  }
  if (Proto) {
    // For variadic functions, we may have more args than parameters.
    // For some K&R functions, we may have less args than parameters.
    const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size());
    bool IsScalableRet = Proto->getReturnType()->isSizelessVectorType();
    bool IsScalableArg = false;
    for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        if (Arg->containsErrors())
          continue;

        // On AIX, non-variadic calls to externally visible callees get an
        // additional member-alignment check.
        if (Context.getTargetInfo().getTriple().isOSAIX() && FDecl && Arg &&
            FDecl->hasLinkage() &&
            FDecl->getFormalLinkage() != Linkage::Internal &&
            CallType == VariadicDoesNotApply)
          PPC().checkAIXMemberAlignment((Arg->getExprLoc()), Arg);

        QualType ParamTy = Proto->getParamType(ArgIdx);
        if (ParamTy->isSizelessVectorType())
          IsScalableArg = true;
        QualType ArgTy = Arg->getType();
        // Parameter names in the diagnostic are 1-based.
        CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1),
                          ArgTy, ParamTy);
      }
    }

    // If the callee has an AArch64 SME attribute to indicate that it is an
    // __arm_streaming function, then the caller requires SME to be available.
      if (auto *CallerFD = dyn_cast<FunctionDecl>(CurContext)) {
        llvm::StringMap<bool> CallerFeatureMap;
        Context.getFunctionFeatureMap(CallerFeatureMap, CallerFD);
        if (!CallerFeatureMap.contains("sme"))
          Diag(Loc, diag::err_sme_call_in_non_sme_target);
      } else if (!Context.getTargetInfo().hasFeature("sme")) {
        Diag(Loc, diag::err_sme_call_in_non_sme_target);
      }
    }

    // If the call requires a streaming-mode change and has scalable vector
    // arguments or return values, then warn the user that the streaming and
    // non-streaming vector lengths may be different.
    const auto *CallerFD = dyn_cast<FunctionDecl>(CurContext);
    if (CallerFD && (!FD || !FD->getBuiltinID()) &&
        (IsScalableArg || IsScalableRet)) {
      bool IsCalleeStreaming =
      bool IsCalleeStreamingCompatible =
          ExtInfo.AArch64SMEAttributes &
      SemaARM::ArmStreamingType CallerFnType = getArmStreamingFnType(CallerFD);
      // A streaming-compatible callee never forces a mode change; otherwise
      // warn when caller and callee disagree on streaming mode.
      if (!IsCalleeStreamingCompatible &&
          (CallerFnType == SemaARM::ArmStreamingCompatible ||
           ((CallerFnType == SemaARM::ArmStreaming) ^ IsCalleeStreaming))) {
        if (IsScalableArg)
          Diag(Loc, diag::warn_sme_streaming_pass_return_vl_to_non_streaming)
              << /*IsArg=*/true;
        if (IsScalableRet)
          Diag(Loc, diag::warn_sme_streaming_pass_return_vl_to_non_streaming)
              << /*IsArg=*/false;
      }
    }

    // Callees that use ZA or ZT0 state require the caller to provide it,
    // either via __arm_new or via a sharing attribute on its own type.
    FunctionType::ArmStateValue CalleeArmZAState =
    FunctionType::ArmStateValue CalleeArmZT0State =
    if (CalleeArmZAState != FunctionType::ARM_None ||
        CalleeArmZT0State != FunctionType::ARM_None) {
      bool CallerHasZAState = false;
      bool CallerHasZT0State = false;
      if (CallerFD) {
        auto *Attr = CallerFD->getAttr<ArmNewAttr>();
        if (Attr && Attr->isNewZA())
          CallerHasZAState = true;
        if (Attr && Attr->isNewZT0())
          CallerHasZT0State = true;
        if (const auto *FPT = CallerFD->getType()->getAs<FunctionProtoType>()) {
          CallerHasZAState |=
              FPT->getExtProtoInfo().AArch64SMEAttributes) !=
          CallerHasZT0State |=
              FPT->getExtProtoInfo().AArch64SMEAttributes) !=
        }
      }

      if (CalleeArmZAState != FunctionType::ARM_None && !CallerHasZAState)
        Diag(Loc, diag::err_sme_za_call_no_za_state);

      if (CalleeArmZT0State != FunctionType::ARM_None && !CallerHasZT0State)
        Diag(Loc, diag::err_sme_zt0_call_no_zt0_state);

      if (CallerHasZAState && CalleeArmZAState == FunctionType::ARM_None &&
          CalleeArmZT0State != FunctionType::ARM_None) {
        Diag(Loc, diag::err_sme_unimplemented_za_save_restore);
        Diag(Loc, diag::note_sme_use_preserves_za);
      }
    }
  }

  // alloc_align: the designated argument, when constant, must be a power of
  // two and must not exceed the maximum supported alignment.
  if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
    auto *AA = FDecl->getAttr<AllocAlignAttr>();
    const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
    if (!Arg->isValueDependent()) {
      Expr::EvalResult Align;
      if (Arg->EvaluateAsInt(Align, Context)) {
        const llvm::APSInt &I = Align.Val.getInt();
        if (!I.isPowerOf2())
          Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two)
              << Arg->getSourceRange();

        if (I > Sema::MaximumAlignment)
          Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great)
              << Arg->getSourceRange() << Sema::MaximumAlignment;
      }
    }
  }

  if (FD)
    diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
}
4113
  // Handle the type-constraint concept of a constrained 'auto', if present.
  // NOTE(review): the body of this 'if' is not visible in this chunk —
  // presumably it references 'Decl'; confirm against the full file.
  if (ConceptDecl *Decl = AutoT->getTypeConstraintConcept()) {
  }
}
4119
/// CheckConstructorCall - Check a constructor call for correctness and safety
/// properties not enforced by the C type system.
void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
                                const FunctionProtoType *Proto,
  VariadicCallType CallType =

  auto *Ctor = cast<CXXConstructorDecl>(FDecl);
  // Compare the alignment of the object being constructed ('this') against
  // the alignment expected by the constructor's object parameter type.
  CheckArgAlignment(
      Loc, FDecl, "'this'", Context.getPointerType(ThisType),
      Context.getPointerType(Ctor->getFunctionObjectParameterType()));

  // Constructors have no explicit 'this' argument in the AST, so pass null.
  checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
            Loc, SourceRange(), CallType);
}
4137
/// CheckFunctionCall - Check a direct function call for various correctness
/// and safety properties not strictly enforced by the C type system.
                             const FunctionProtoType *Proto) {
  bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
                              isa<CXXMethodDecl>(FDecl);
  bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
                          IsMemberOperatorCall;
  VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
                                                  TheCall->getCallee());
  Expr** Args = TheCall->getArgs();
  unsigned NumArgs = TheCall->getNumArgs();

  Expr *ImplicitThis = nullptr;
  if (IsMemberOperatorCall && !FDecl->hasCXXExplicitFunctionObjectParameter()) {
    // If this is a call to a member operator, hide the first
    // argument from checkCall.
    // FIXME: Our choice of AST representation here is less than ideal.
    ImplicitThis = Args[0];
    ++Args;
    --NumArgs;
  } else if (IsMemberFunction && !FDecl->isStatic() &&
    ImplicitThis =
        cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();

  if (ImplicitThis) {
    // ImplicitThis may or may not be a pointer, depending on whether . or -> is
    // used.
    QualType ThisType = ImplicitThis->getType();
    if (!ThisType->isPointerType()) {
      assert(!ThisType->isReferenceType());
      ThisType = Context.getPointerType(ThisType);
    }

    QualType ThisTypeFromDecl = Context.getPointerType(
        cast<CXXMethodDecl>(FDecl)->getFunctionObjectParameterType());

    // Verify the object the method is invoked on is sufficiently aligned.
    CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType,
                      ThisTypeFromDecl);
  }

  checkCall(FDecl, Proto, ImplicitThis, llvm::ArrayRef(Args, NumArgs),
            IsMemberFunction, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  IdentifierInfo *FnInfo = FDecl->getIdentifier();
  // None of the checks below are needed for functions that don't have
  // simple names (e.g., C++ conversion functions).
  if (!FnInfo)
    return false;

  // Enforce TCB except for builtin calls, which are always allowed.
  if (FDecl->getBuiltinID() == 0)
    CheckTCBEnforcement(TheCall->getExprLoc(), FDecl);

  CheckAbsoluteValueFunction(TheCall, FDecl);
  CheckMaxUnsignedZero(TheCall, FDecl);
  CheckInfNaNFunction(TheCall, FDecl);

  // Under ObjC, additionally diagnose %s directives in CF/NS-style
  // formatting calls.
  if (getLangOpts().ObjC)
    DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);

  unsigned CMId = FDecl->getMemoryFunctionKind();

  // Handle memory setting and copying functions.
  switch (CMId) {
  case 0:
    // Not a recognized memory/string function; nothing further to check.
    return false;
  case Builtin::BIstrlcpy: // fallthrough
  case Builtin::BIstrlcat:
    CheckStrlcpycatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIstrncat:
    CheckStrncatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIfree:
    CheckFreeArguments(TheCall);
    break;
  default:
    CheckMemaccessArguments(TheCall, CMId, FnInfo);
  }

  return false;
}
4223
4224bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
4225 const FunctionProtoType *Proto) {
4226 QualType Ty;
4227 if (const auto *V = dyn_cast<VarDecl>(NDecl))
4228 Ty = V->getType().getNonReferenceType();
4229 else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
4230 Ty = F->getType().getNonReferenceType();
4231 else
4232 return false;
4233
4234 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
4235 !Ty->isFunctionProtoType())
4236 return false;
4237
4238 VariadicCallType CallType;
4239 if (!Proto || !Proto->isVariadic()) {
4240 CallType = VariadicDoesNotApply;
4241 } else if (Ty->isBlockPointerType()) {
4242 CallType = VariadicBlock;
4243 } else { // Ty->isFunctionPointerType()
4244 CallType = VariadicFunction;
4245 }
4246
4247 checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
4248 llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
4249 /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
4250 TheCall->getCallee()->getSourceRange(), CallType);
4251
4252 return false;
4253}
4254
4255/// Checks function calls when a FunctionDecl or a NamedDecl is not available,
4256/// such as function pointers returned from functions.
4257bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
4258 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
4259 TheCall->getCallee());
4260 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
4261 llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
4262 /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
4263 TheCall->getCallee()->getSourceRange(), CallType);
4264
4265 return false;
4266}
4267
4268static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
4269 if (!llvm::isValidAtomicOrderingCABI(Ordering))
4270 return false;
4271
4272 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
4273 switch (Op) {
4274 case AtomicExpr::AO__c11_atomic_init:
4275 case AtomicExpr::AO__opencl_atomic_init:
4276 llvm_unreachable("There is no ordering argument for an init");
4277
4278 case AtomicExpr::AO__c11_atomic_load:
4279 case AtomicExpr::AO__opencl_atomic_load:
4280 case AtomicExpr::AO__hip_atomic_load:
4281 case AtomicExpr::AO__atomic_load_n:
4282 case AtomicExpr::AO__atomic_load:
4283 case AtomicExpr::AO__scoped_atomic_load_n:
4284 case AtomicExpr::AO__scoped_atomic_load:
4285 return OrderingCABI != llvm::AtomicOrderingCABI::release &&
4286 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
4287
4288 case AtomicExpr::AO__c11_atomic_store:
4289 case AtomicExpr::AO__opencl_atomic_store:
4290 case AtomicExpr::AO__hip_atomic_store:
4291 case AtomicExpr::AO__atomic_store:
4292 case AtomicExpr::AO__atomic_store_n:
4293 case AtomicExpr::AO__scoped_atomic_store:
4294 case AtomicExpr::AO__scoped_atomic_store_n:
4295 return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
4296 OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
4297 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
4298
4299 default:
4300 return true;
4301 }
4302}
4303
ExprResult Sema::AtomicOpsOverloaded(ExprResult TheCallResult,
  // Unwrap the CallExpr and forward to BuildAtomicExpr.  The callee of an
  // atomic builtin call is always a direct DeclRefExpr once parens and
  // casts are stripped (the cast<> below asserts this).
  CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()};
  return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()},
                         DRE->getSourceRange(), TheCall->getRParenLoc(), Args,
                         Op);
}
4313
4315 SourceLocation RParenLoc, MultiExprArg Args,
4317 AtomicArgumentOrder ArgOrder) {
4318 // All the non-OpenCL operations take one of the following forms.
4319 // The OpenCL operations take the __c11 forms with one extra argument for
4320 // synchronization scope.
4321 enum {
4322 // C __c11_atomic_init(A *, C)
4323 Init,
4324
4325 // C __c11_atomic_load(A *, int)
4326 Load,
4327
4328 // void __atomic_load(A *, CP, int)
4329 LoadCopy,
4330
4331 // void __atomic_store(A *, CP, int)
4332 Copy,
4333
4334 // C __c11_atomic_add(A *, M, int)
4335 Arithmetic,
4336
4337 // C __atomic_exchange_n(A *, CP, int)
4338 Xchg,
4339
4340 // void __atomic_exchange(A *, C *, CP, int)
4341 GNUXchg,
4342
4343 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
4344 C11CmpXchg,
4345
4346 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
4347 GNUCmpXchg
4348 } Form = Init;
4349
4350 const unsigned NumForm = GNUCmpXchg + 1;
4351 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
4352 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
4353 // where:
4354 // C is an appropriate type,
4355 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
4356 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
4357 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and
4358 // the int parameters are for orderings.
4359
4360 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm
4361 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
4362 "need to update code for modified forms");
4363 static_assert(AtomicExpr::AO__atomic_add_fetch == 0 &&
4364 AtomicExpr::AO__atomic_xor_fetch + 1 ==
4365 AtomicExpr::AO__c11_atomic_compare_exchange_strong,
4366 "need to update code for modified C11 atomics");
4367 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_compare_exchange_strong &&
4368 Op <= AtomicExpr::AO__opencl_atomic_store;
4369 bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_compare_exchange_strong &&
4370 Op <= AtomicExpr::AO__hip_atomic_store;
4371 bool IsScoped = Op >= AtomicExpr::AO__scoped_atomic_add_fetch &&
4372 Op <= AtomicExpr::AO__scoped_atomic_xor_fetch;
4373 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_compare_exchange_strong &&
4374 Op <= AtomicExpr::AO__c11_atomic_store) ||
4375 IsOpenCL;
4376 bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
4377 Op == AtomicExpr::AO__atomic_store_n ||
4378 Op == AtomicExpr::AO__atomic_exchange_n ||
4379 Op == AtomicExpr::AO__atomic_compare_exchange_n ||
4380 Op == AtomicExpr::AO__scoped_atomic_load_n ||
4381 Op == AtomicExpr::AO__scoped_atomic_store_n ||
4382 Op == AtomicExpr::AO__scoped_atomic_exchange_n ||
4383 Op == AtomicExpr::AO__scoped_atomic_compare_exchange_n;
4384 // Bit mask for extra allowed value types other than integers for atomic
4385 // arithmetic operations. Add/sub allow pointer and floating point. Min/max
4386 // allow floating point.
4387 enum ArithOpExtraValueType {
4388 AOEVT_None = 0,
4389 AOEVT_Pointer = 1,
4390 AOEVT_FP = 2,
4391 };
4392 unsigned ArithAllows = AOEVT_None;
4393
4394 switch (Op) {
4395 case AtomicExpr::AO__c11_atomic_init:
4396 case AtomicExpr::AO__opencl_atomic_init:
4397 Form = Init;
4398 break;
4399
4400 case AtomicExpr::AO__c11_atomic_load:
4401 case AtomicExpr::AO__opencl_atomic_load:
4402 case AtomicExpr::AO__hip_atomic_load:
4403 case AtomicExpr::AO__atomic_load_n:
4404 case AtomicExpr::AO__scoped_atomic_load_n:
4405 Form = Load;
4406 break;
4407
4408 case AtomicExpr::AO__atomic_load:
4409 case AtomicExpr::AO__scoped_atomic_load:
4410 Form = LoadCopy;
4411 break;
4412
4413 case AtomicExpr::AO__c11_atomic_store:
4414 case AtomicExpr::AO__opencl_atomic_store:
4415 case AtomicExpr::AO__hip_atomic_store:
4416 case AtomicExpr::AO__atomic_store:
4417 case AtomicExpr::AO__atomic_store_n:
4418 case AtomicExpr::AO__scoped_atomic_store:
4419 case AtomicExpr::AO__scoped_atomic_store_n:
4420 Form = Copy;
4421 break;
4422 case AtomicExpr::AO__atomic_fetch_add:
4423 case AtomicExpr::AO__atomic_fetch_sub:
4424 case AtomicExpr::AO__atomic_add_fetch:
4425 case AtomicExpr::AO__atomic_sub_fetch:
4426 case AtomicExpr::AO__scoped_atomic_fetch_add:
4427 case AtomicExpr::AO__scoped_atomic_fetch_sub:
4428 case AtomicExpr::AO__scoped_atomic_add_fetch:
4429 case AtomicExpr::AO__scoped_atomic_sub_fetch:
4430 case AtomicExpr::AO__c11_atomic_fetch_add:
4431 case AtomicExpr::AO__c11_atomic_fetch_sub:
4432 case AtomicExpr::AO__opencl_atomic_fetch_add:
4433 case AtomicExpr::AO__opencl_atomic_fetch_sub:
4434 case AtomicExpr::AO__hip_atomic_fetch_add:
4435 case AtomicExpr::AO__hip_atomic_fetch_sub:
4436 ArithAllows = AOEVT_Pointer | AOEVT_FP;
4437 Form = Arithmetic;
4438 break;
4439 case AtomicExpr::AO__atomic_fetch_max:
4440 case AtomicExpr::AO__atomic_fetch_min:
4441 case AtomicExpr::AO__atomic_max_fetch:
4442 case AtomicExpr::AO__atomic_min_fetch:
4443 case AtomicExpr::AO__scoped_atomic_fetch_max:
4444 case AtomicExpr::AO__scoped_atomic_fetch_min:
4445 case AtomicExpr::AO__scoped_atomic_max_fetch:
4446 case AtomicExpr::AO__scoped_atomic_min_fetch:
4447 case AtomicExpr::AO__c11_atomic_fetch_max:
4448 case AtomicExpr::AO__c11_atomic_fetch_min:
4449 case AtomicExpr::AO__opencl_atomic_fetch_max:
4450 case AtomicExpr::AO__opencl_atomic_fetch_min:
4451 case AtomicExpr::AO__hip_atomic_fetch_max:
4452 case AtomicExpr::AO__hip_atomic_fetch_min:
4453 ArithAllows = AOEVT_FP;
4454 Form = Arithmetic;
4455 break;
4456 case AtomicExpr::AO__c11_atomic_fetch_and:
4457 case AtomicExpr::AO__c11_atomic_fetch_or:
4458 case AtomicExpr::AO__c11_atomic_fetch_xor:
4459 case AtomicExpr::AO__hip_atomic_fetch_and:
4460 case AtomicExpr::AO__hip_atomic_fetch_or:
4461 case AtomicExpr::AO__hip_atomic_fetch_xor:
4462 case AtomicExpr::AO__c11_atomic_fetch_nand:
4463 case AtomicExpr::AO__opencl_atomic_fetch_and:
4464 case AtomicExpr::AO__opencl_atomic_fetch_or:
4465 case AtomicExpr::AO__opencl_atomic_fetch_xor:
4466 case AtomicExpr::AO__atomic_fetch_and:
4467 case AtomicExpr::AO__atomic_fetch_or:
4468 case AtomicExpr::AO__atomic_fetch_xor:
4469 case AtomicExpr::AO__atomic_fetch_nand:
4470 case AtomicExpr::AO__atomic_and_fetch:
4471 case AtomicExpr::AO__atomic_or_fetch:
4472 case AtomicExpr::AO__atomic_xor_fetch:
4473 case AtomicExpr::AO__atomic_nand_fetch:
4474 case AtomicExpr::AO__scoped_atomic_fetch_and:
4475 case AtomicExpr::AO__scoped_atomic_fetch_or:
4476 case AtomicExpr::AO__scoped_atomic_fetch_xor:
4477 case AtomicExpr::AO__scoped_atomic_fetch_nand:
4478 case AtomicExpr::AO__scoped_atomic_and_fetch:
4479 case AtomicExpr::AO__scoped_atomic_or_fetch:
4480 case AtomicExpr::AO__scoped_atomic_xor_fetch:
4481 case AtomicExpr::AO__scoped_atomic_nand_fetch:
4482 Form = Arithmetic;
4483 break;
4484
4485 case AtomicExpr::AO__c11_atomic_exchange:
4486 case AtomicExpr::AO__hip_atomic_exchange:
4487 case AtomicExpr::AO__opencl_atomic_exchange:
4488 case AtomicExpr::AO__atomic_exchange_n:
4489 case AtomicExpr::AO__scoped_atomic_exchange_n:
4490 Form = Xchg;
4491 break;
4492
4493 case AtomicExpr::AO__atomic_exchange:
4494 case AtomicExpr::AO__scoped_atomic_exchange:
4495 Form = GNUXchg;
4496 break;
4497
4498 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
4499 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
4500 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
4501 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
4502 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
4503 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
4504 Form = C11CmpXchg;
4505 break;
4506
4507 case AtomicExpr::AO__atomic_compare_exchange:
4508 case AtomicExpr::AO__atomic_compare_exchange_n:
4509 case AtomicExpr::AO__scoped_atomic_compare_exchange:
4510 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
4511 Form = GNUCmpXchg;
4512 break;
4513 }
4514
4515 unsigned AdjustedNumArgs = NumArgs[Form];
4516 if ((IsOpenCL || IsHIP || IsScoped) &&
4517 Op != AtomicExpr::AO__opencl_atomic_init)
4518 ++AdjustedNumArgs;
4519 // Check we have the right number of arguments.
4520 if (Args.size() < AdjustedNumArgs) {
4521 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args)
4522 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
4523 << /*is non object*/ 0 << ExprRange;
4524 return ExprError();
4525 } else if (Args.size() > AdjustedNumArgs) {
4526 Diag(Args[AdjustedNumArgs]->getBeginLoc(),
4527 diag::err_typecheck_call_too_many_args)
4528 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
4529 << /*is non object*/ 0 << ExprRange;
4530 return ExprError();
4531 }
4532
4533 // Inspect the first argument of the atomic operation.
4534 Expr *Ptr = Args[0];
4536 if (ConvertedPtr.isInvalid())
4537 return ExprError();
4538
4539 Ptr = ConvertedPtr.get();
4540 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
4541 if (!pointerType) {
4542 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
4543 << Ptr->getType() << 0 << Ptr->getSourceRange();
4544 return ExprError();
4545 }
4546
4547 // For a __c11 builtin, this should be a pointer to an _Atomic type.
4548 QualType AtomTy = pointerType->getPointeeType(); // 'A'
4549 QualType ValType = AtomTy; // 'C'
4550 if (IsC11) {
4551 if (!AtomTy->isAtomicType()) {
4552 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic)
4553 << Ptr->getType() << Ptr->getSourceRange();
4554 return ExprError();
4555 }
4556 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) ||
4558 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic)
4559 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType()
4560 << Ptr->getSourceRange();
4561 return ExprError();
4562 }
4563 ValType = AtomTy->castAs<AtomicType>()->getValueType();
4564 } else if (Form != Load && Form != LoadCopy) {
4565 if (ValType.isConstQualified()) {
4566 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer)
4567 << Ptr->getType() << Ptr->getSourceRange();
4568 return ExprError();
4569 }
4570 }
4571
4572 // Pointer to object of size zero is not allowed.
4573 if (Context.getTypeInfoInChars(AtomTy).Width.isZero()) {
4574 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
4575 << Ptr->getType() << 1 << Ptr->getSourceRange();
4576 return ExprError();
4577 }
4578
4579 // For an arithmetic operation, the implied arithmetic must be well-formed.
4580 if (Form == Arithmetic) {
4581 // GCC does not enforce these rules for GNU atomics, but we do to help catch
4582 // trivial type errors.
4583 auto IsAllowedValueType = [&](QualType ValType,
4584 unsigned AllowedType) -> bool {
4585 if (ValType->isIntegerType())
4586 return true;
4587 if (ValType->isPointerType())
4588 return AllowedType & AOEVT_Pointer;
4589 if (!(ValType->isFloatingType() && (AllowedType & AOEVT_FP)))
4590 return false;
4591 // LLVM Parser does not allow atomicrmw with x86_fp80 type.
4592 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) &&
4594 &llvm::APFloat::x87DoubleExtended())
4595 return false;
4596 return true;
4597 };
4598 if (!IsAllowedValueType(ValType, ArithAllows)) {
4599 auto DID = ArithAllows & AOEVT_FP
4600 ? (ArithAllows & AOEVT_Pointer
4601 ? diag::err_atomic_op_needs_atomic_int_ptr_or_fp
4602 : diag::err_atomic_op_needs_atomic_int_or_fp)
4603 : diag::err_atomic_op_needs_atomic_int;
4604 Diag(ExprRange.getBegin(), DID)
4605 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
4606 return ExprError();
4607 }
4608 if (IsC11 && ValType->isPointerType() &&
4610 diag::err_incomplete_type)) {
4611 return ExprError();
4612 }
4613 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
4614 // For __atomic_*_n operations, the value type must be a scalar integral or
4615 // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
4616 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr)
4617 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
4618 return ExprError();
4619 }
4620
4621 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
4622 !AtomTy->isScalarType()) {
4623 // For GNU atomics, require a trivially-copyable type. This is not part of
4624 // the GNU atomics specification but we enforce it for consistency with
4625 // other atomics which generally all require a trivially-copyable type. This
4626 // is because atomics just copy bits.
4627 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy)
4628 << Ptr->getType() << Ptr->getSourceRange();
4629 return ExprError();
4630 }
4631
4632 switch (ValType.getObjCLifetime()) {
4635 // okay
4636 break;
4637
4641 // FIXME: Can this happen? By this point, ValType should be known
4642 // to be trivially copyable.
4643 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership)
4644 << ValType << Ptr->getSourceRange();
4645 return ExprError();
4646 }
4647
4648 // All atomic operations have an overload which takes a pointer to a volatile
4649 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself
4650 // into the result or the other operands. Similarly atomic_load takes a
4651 // pointer to a const 'A'.
4652 ValType.removeLocalVolatile();
4653 ValType.removeLocalConst();
4654 QualType ResultType = ValType;
4655 if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
4656 Form == Init)
4657 ResultType = Context.VoidTy;
4658 else if (Form == C11CmpXchg || Form == GNUCmpXchg)
4659 ResultType = Context.BoolTy;
4660
4661 // The type of a parameter passed 'by value'. In the GNU atomics, such
4662 // arguments are actually passed as pointers.
4663 QualType ByValType = ValType; // 'CP'
4664 bool IsPassedByAddress = false;
4665 if (!IsC11 && !IsHIP && !IsN) {
4666 ByValType = Ptr->getType();
4667 IsPassedByAddress = true;
4668 }
4669
4670 SmallVector<Expr *, 5> APIOrderedArgs;
4671 if (ArgOrder == Sema::AtomicArgumentOrder::AST) {
4672 APIOrderedArgs.push_back(Args[0]);
4673 switch (Form) {
4674 case Init:
4675 case Load:
4676 APIOrderedArgs.push_back(Args[1]); // Val1/Order
4677 break;
4678 case LoadCopy:
4679 case Copy:
4680 case Arithmetic:
4681 case Xchg:
4682 APIOrderedArgs.push_back(Args[2]); // Val1
4683 APIOrderedArgs.push_back(Args[1]); // Order
4684 break;
4685 case GNUXchg:
4686 APIOrderedArgs.push_back(Args[2]); // Val1
4687 APIOrderedArgs.push_back(Args[3]); // Val2
4688 APIOrderedArgs.push_back(Args[1]); // Order
4689 break;
4690 case C11CmpXchg:
4691 APIOrderedArgs.push_back(Args[2]); // Val1
4692 APIOrderedArgs.push_back(Args[4]); // Val2
4693 APIOrderedArgs.push_back(Args[1]); // Order
4694 APIOrderedArgs.push_back(Args[3]); // OrderFail
4695 break;
4696 case GNUCmpXchg:
4697 APIOrderedArgs.push_back(Args[2]); // Val1
4698 APIOrderedArgs.push_back(Args[4]); // Val2
4699 APIOrderedArgs.push_back(Args[5]); // Weak
4700 APIOrderedArgs.push_back(Args[1]); // Order
4701 APIOrderedArgs.push_back(Args[3]); // OrderFail
4702 break;
4703 }
4704 } else
4705 APIOrderedArgs.append(Args.begin(), Args.end());
4706
4707 // The first argument's non-CV pointer type is used to deduce the type of
4708 // subsequent arguments, except for:
4709 // - weak flag (always converted to bool)
4710 // - memory order (always converted to int)
4711 // - scope (always converted to int)
4712 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) {
4713 QualType Ty;
4714 if (i < NumVals[Form] + 1) {
4715 switch (i) {
4716 case 0:
4717 // The first argument is always a pointer. It has a fixed type.
4718 // It is always dereferenced, a nullptr is undefined.
4719 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
4720 // Nothing else to do: we already know all we want about this pointer.
4721 continue;
4722 case 1:
4723 // The second argument is the non-atomic operand. For arithmetic, this
4724 // is always passed by value, and for a compare_exchange it is always
4725 // passed by address. For the rest, GNU uses by-address and C11 uses
4726 // by-value.
4727 assert(Form != Load);
4728 if (Form == Arithmetic && ValType->isPointerType())
4730 else if (Form == Init || Form == Arithmetic)
4731 Ty = ValType;
4732 else if (Form == Copy || Form == Xchg) {
4733 if (IsPassedByAddress) {
4734 // The value pointer is always dereferenced, a nullptr is undefined.
4735 CheckNonNullArgument(*this, APIOrderedArgs[i],
4736 ExprRange.getBegin());
4737 }
4738 Ty = ByValType;
4739 } else {
4740 Expr *ValArg = APIOrderedArgs[i];
4741 // The value pointer is always dereferenced, a nullptr is undefined.
4742 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin());
4744 // Keep address space of non-atomic pointer type.
4745 if (const PointerType *PtrTy =
4746 ValArg->getType()->getAs<PointerType>()) {
4747 AS = PtrTy->getPointeeType().getAddressSpace();
4748 }
4751 }
4752 break;
4753 case 2:
4754 // The third argument to compare_exchange / GNU exchange is the desired
4755 // value, either by-value (for the C11 and *_n variant) or as a pointer.
4756 if (IsPassedByAddress)
4757 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
4758 Ty = ByValType;
4759 break;
4760 case 3:
4761 // The fourth argument to GNU compare_exchange is a 'weak' flag.
4762 Ty = Context.BoolTy;
4763 break;
4764 }
4765 } else {
4766 // The order(s) and scope are always converted to int.
4767 Ty = Context.IntTy;
4768 }
4769
4770 InitializedEntity Entity =
4772 ExprResult Arg = APIOrderedArgs[i];
4773 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
4774 if (Arg.isInvalid())
4775 return true;
4776 APIOrderedArgs[i] = Arg.get();
4777 }
4778
4779 // Permute the arguments into a 'consistent' order.
4780 SmallVector<Expr*, 5> SubExprs;
4781 SubExprs.push_back(Ptr);
4782 switch (Form) {
4783 case Init:
4784 // Note, AtomicExpr::getVal1() has a special case for this atomic.
4785 SubExprs.push_back(APIOrderedArgs[1]); // Val1
4786 break;
4787 case Load:
4788 SubExprs.push_back(APIOrderedArgs[1]); // Order
4789 break;
4790 case LoadCopy:
4791 case Copy:
4792 case Arithmetic:
4793 case Xchg:
4794 SubExprs.push_back(APIOrderedArgs[2]); // Order
4795 SubExprs.push_back(APIOrderedArgs[1]); // Val1
4796 break;
4797 case GNUXchg:
4798 // Note, AtomicExpr::getVal2() has a special case for this atomic.
4799 SubExprs.push_back(APIOrderedArgs[3]); // Order
4800 SubExprs.push_back(APIOrderedArgs[1]); // Val1
4801 SubExprs.push_back(APIOrderedArgs[2]); // Val2
4802 break;
4803 case C11CmpXchg:
4804 SubExprs.push_back(APIOrderedArgs[3]); // Order
4805 SubExprs.push_back(APIOrderedArgs[1]); // Val1
4806 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail
4807 SubExprs.push_back(APIOrderedArgs[2]); // Val2
4808 break;
4809 case GNUCmpXchg:
4810 SubExprs.push_back(APIOrderedArgs[4]); // Order
4811 SubExprs.push_back(APIOrderedArgs[1]); // Val1
4812 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail
4813 SubExprs.push_back(APIOrderedArgs[2]); // Val2
4814 SubExprs.push_back(APIOrderedArgs[3]); // Weak
4815 break;
4816 }
4817
4818 // If the memory orders are constants, check they are valid.
4819 if (SubExprs.size() >= 2 && Form != Init) {
4820 std::optional<llvm::APSInt> Success =
4821 SubExprs[1]->getIntegerConstantExpr(Context);
4822 if (Success && !isValidOrderingForOp(Success->getSExtValue(), Op)) {
4823 Diag(SubExprs[1]->getBeginLoc(),
4824 diag::warn_atomic_op_has_invalid_memory_order)
4825 << /*success=*/(Form == C11CmpXchg || Form == GNUCmpXchg)
4826 << SubExprs[1]->getSourceRange();
4827 }
4828 if (SubExprs.size() >= 5) {
4829 if (std::optional<llvm::APSInt> Failure =
4830 SubExprs[3]->getIntegerConstantExpr(Context)) {
4831 if (!llvm::is_contained(
4832 {llvm::AtomicOrderingCABI::relaxed,
4833 llvm::AtomicOrderingCABI::consume,
4834 llvm::AtomicOrderingCABI::acquire,
4835 llvm::AtomicOrderingCABI::seq_cst},
4836 (llvm::AtomicOrderingCABI)Failure->getSExtValue())) {
4837 Diag(SubExprs[3]->getBeginLoc(),
4838 diag::warn_atomic_op_has_invalid_memory_order)
4839 << /*failure=*/2 << SubExprs[3]->getSourceRange();
4840 }
4841 }
4842 }
4843 }
4844
4845 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
4846 auto *Scope = Args[Args.size() - 1];
4847 if (std::optional<llvm::APSInt> Result =
4848 Scope->getIntegerConstantExpr(Context)) {
4849 if (!ScopeModel->isValid(Result->getZExtValue()))
4850 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
4851 << Scope->getSourceRange();
4852 }
4853 SubExprs.push_back(Scope);
4854 }
4855
4856 AtomicExpr *AE = new (Context)
4857 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc);
4858
4859 if ((Op == AtomicExpr::AO__c11_atomic_load ||
4860 Op == AtomicExpr::AO__c11_atomic_store ||
4861 Op == AtomicExpr::AO__opencl_atomic_load ||
4862 Op == AtomicExpr::AO__hip_atomic_load ||
4863 Op == AtomicExpr::AO__opencl_atomic_store ||
4864 Op == AtomicExpr::AO__hip_atomic_store) &&
4866 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib)
4867 << ((Op == AtomicExpr::AO__c11_atomic_load ||
4868 Op == AtomicExpr::AO__opencl_atomic_load ||
4869 Op == AtomicExpr::AO__hip_atomic_load)
4870 ? 0
4871 : 1);
4872
4873 if (ValType->isBitIntType()) {
4874 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit);
4875 return ExprError();
4876 }
4877
4878 return AE;
4879}
4880
4881/// checkBuiltinArgument - Given a call to a builtin function, perform
4882/// normal type-checking on the given argument, updating the call in
4883/// place. This is useful when a builtin function requires custom
4884/// type-checking for some of its arguments but not necessarily all of
4885/// them.
4886///
4887/// Returns true on error.
4888static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
4889 FunctionDecl *Fn = E->getDirectCallee();
4890 assert(Fn && "builtin call without direct callee!");
4891
4892 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex);
4893 InitializedEntity Entity =
4895
4896 ExprResult Arg = E->getArg(ArgIndex);
4897 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
4898 if (Arg.isInvalid())
4899 return true;
4900
4901 E->setArg(ArgIndex, Arg.get());
4902 return false;
4903}
4904
4905/// We have a call to a function like __sync_fetch_and_add, which is an
4906/// overloaded function based on the pointer type of its first argument.
4907/// The main BuildCallExpr routines have already promoted the types of
4908/// arguments because all of these calls are prototyped as void(...).
4909///
4910/// This function goes through and does final semantic checking for these
4911/// builtins, as well as generating any warnings.
4912ExprResult Sema::BuiltinAtomicOverloaded(ExprResult TheCallResult) {
4913 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
4914 Expr *Callee = TheCall->getCallee();
4915 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts());
4916 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
4917
4918 // Ensure that we have at least one argument to do type inference from.
4919 if (TheCall->getNumArgs() < 1) {
4920 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
4921 << 0 << 1 << TheCall->getNumArgs() << /*is non object*/ 0
4922 << Callee->getSourceRange();
4923 return ExprError();
4924 }
4925
4926 // Inspect the first argument of the atomic builtin. This should always be
4927 // a pointer type, whose element is an integral scalar or pointer type.
4928 // Because it is a pointer type, we don't have to worry about any implicit
4929 // casts here.
4930 // FIXME: We don't allow floating point scalars as input.
4931 Expr *FirstArg = TheCall->getArg(0);
4932 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
4933 if (FirstArgResult.isInvalid())
4934 return ExprError();
4935 FirstArg = FirstArgResult.get();
4936 TheCall->setArg(0, FirstArg);
4937
4938 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
4939 if (!pointerType) {
4940 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
4941 << FirstArg->getType() << 0 << FirstArg->getSourceRange();
4942 return ExprError();
4943 }
4944
4945 QualType ValType = pointerType->getPointeeType();
4946 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
4947 !ValType->isBlockPointerType()) {
4948 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
4949 << FirstArg->getType() << 0 << FirstArg->getSourceRange();
4950 return ExprError();
4951 }
4952
4953 if (ValType.isConstQualified()) {
4954 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const)
4955 << FirstArg->getType() << FirstArg->getSourceRange();
4956 return ExprError();
4957 }
4958
4959 switch (ValType.getObjCLifetime()) {
4962 // okay
4963 break;
4964
4968 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
4969 << ValType << FirstArg->getSourceRange();
4970 return ExprError();
4971 }
4972
4973 // Strip any qualifiers off ValType.
4974 ValType = ValType.getUnqualifiedType();
4975
4976 // The majority of builtins return a value, but a few have special return
4977 // types, so allow them to override appropriately below.
4978 QualType ResultType = ValType;
4979
4980 // We need to figure out which concrete builtin this maps onto. For example,
4981 // __sync_fetch_and_add with a 2 byte object turns into
4982 // __sync_fetch_and_add_2.
4983#define BUILTIN_ROW(x) \
4984 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
4985 Builtin::BI##x##_8, Builtin::BI##x##_16 }
4986
4987 static const unsigned BuiltinIndices[][5] = {
4988 BUILTIN_ROW(__sync_fetch_and_add),
4989 BUILTIN_ROW(__sync_fetch_and_sub),
4990 BUILTIN_ROW(__sync_fetch_and_or),
4991 BUILTIN_ROW(__sync_fetch_and_and),
4992 BUILTIN_ROW(__sync_fetch_and_xor),
4993 BUILTIN_ROW(__sync_fetch_and_nand),
4994
4995 BUILTIN_ROW(__sync_add_and_fetch),
4996 BUILTIN_ROW(__sync_sub_and_fetch),
4997 BUILTIN_ROW(__sync_and_and_fetch),
4998 BUILTIN_ROW(__sync_or_and_fetch),
4999 BUILTIN_ROW(__sync_xor_and_fetch),
5000 BUILTIN_ROW(__sync_nand_and_fetch),
5001
5002 BUILTIN_ROW(__sync_val_compare_and_swap),
5003 BUILTIN_ROW(__sync_bool_compare_and_swap),
5004 BUILTIN_ROW(__sync_lock_test_and_set),
5005 BUILTIN_ROW(__sync_lock_release),
5006 BUILTIN_ROW(__sync_swap)
5007 };
5008#undef BUILTIN_ROW
5009
5010 // Determine the index of the size.
5011 unsigned SizeIndex;
5012 switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
5013 case 1: SizeIndex = 0; break;
5014 case 2: SizeIndex = 1; break;
5015 case 4: SizeIndex = 2; break;
5016 case 8: SizeIndex = 3; break;
5017 case 16: SizeIndex = 4; break;
5018 default:
5019 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
5020 << FirstArg->getType() << FirstArg->getSourceRange();
5021 return ExprError();
5022 }
5023
5024 // Each of these builtins has one pointer argument, followed by some number of
5025 // values (0, 1 or 2) followed by a potentially empty varags list of stuff
5026 // that we ignore. Find out which row of BuiltinIndices to read from as well
5027 // as the number of fixed args.
5028 unsigned BuiltinID = FDecl->getBuiltinID();
5029 unsigned BuiltinIndex, NumFixed = 1;
5030 bool WarnAboutSemanticsChange = false;
5031 switch (BuiltinID) {
5032 default: llvm_unreachable("Unknown overloaded atomic builtin!");
5033 case Builtin::BI__sync_fetch_and_add:
5034 case Builtin::BI__sync_fetch_and_add_1:
5035 case Builtin::BI__sync_fetch_and_add_2:
5036 case Builtin::BI__sync_fetch_and_add_4:
5037 case Builtin::BI__sync_fetch_and_add_8:
5038 case Builtin::BI__sync_fetch_and_add_16:
5039 BuiltinIndex = 0;
5040 break;
5041
5042 case Builtin::BI__sync_fetch_and_sub:
5043 case Builtin::BI__sync_fetch_and_sub_1:
5044 case Builtin::BI__sync_fetch_and_sub_2:
5045 case Builtin::BI__sync_fetch_and_sub_4:
5046 case Builtin::BI__sync_fetch_and_sub_8:
5047 case Builtin::BI__sync_fetch_and_sub_16:
5048 BuiltinIndex = 1;
5049 break;
5050
5051 case Builtin::BI__sync_fetch_and_or:
5052 case Builtin::BI__sync_fetch_and_or_1:
5053 case Builtin::BI__sync_fetch_and_or_2:
5054 case Builtin::BI__sync_fetch_and_or_4:
5055 case Builtin::BI__sync_fetch_and_or_8:
5056 case Builtin::BI__sync_fetch_and_or_16:
5057 BuiltinIndex = 2;
5058 break;
5059
5060 case Builtin::BI__sync_fetch_and_and:
5061 case Builtin::BI__sync_fetch_and_and_1:
5062 case Builtin::BI__sync_fetch_and_and_2:
5063 case Builtin::BI__sync_fetch_and_and_4:
5064 case Builtin::BI__sync_fetch_and_and_8:
5065 case Builtin::BI__sync_fetch_and_and_16:
5066 BuiltinIndex = 3;
5067 break;
5068