1 //===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements extra semantic analysis beyond what is enforced
10 // by the C type system.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "clang/AST/APValue.h"
15 #include "clang/AST/ASTContext.h"
16 #include "clang/AST/Attr.h"
17 #include "clang/AST/AttrIterator.h"
18 #include "clang/AST/CharUnits.h"
19 #include "clang/AST/Decl.h"
20 #include "clang/AST/DeclBase.h"
21 #include "clang/AST/DeclCXX.h"
22 #include "clang/AST/DeclObjC.h"
25 #include "clang/AST/Expr.h"
26 #include "clang/AST/ExprCXX.h"
27 #include "clang/AST/ExprObjC.h"
28 #include "clang/AST/ExprOpenMP.h"
29 #include "clang/AST/FormatString.h"
30 #include "clang/AST/NSAPI.h"
33 #include "clang/AST/RecordLayout.h"
34 #include "clang/AST/Stmt.h"
35 #include "clang/AST/TemplateBase.h"
36 #include "clang/AST/Type.h"
37 #include "clang/AST/TypeLoc.h"
40 #include "clang/Basic/CharInfo.h"
41 #include "clang/Basic/Diagnostic.h"
43 #include "clang/Basic/LLVM.h"
50 #include "clang/Basic/Specifiers.h"
51 #include "clang/Basic/SyncScope.h"
54 #include "clang/Basic/TargetInfo.h"
55 #include "clang/Basic/TypeTraits.h"
56 #include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
58 #include "clang/Sema/Lookup.h"
59 #include "clang/Sema/Ownership.h"
60 #include "clang/Sema/Scope.h"
61 #include "clang/Sema/ScopeInfo.h"
62 #include "clang/Sema/Sema.h"
64 #include "llvm/ADT/APFloat.h"
65 #include "llvm/ADT/APInt.h"
66 #include "llvm/ADT/APSInt.h"
67 #include "llvm/ADT/ArrayRef.h"
68 #include "llvm/ADT/DenseMap.h"
69 #include "llvm/ADT/FoldingSet.h"
70 #include "llvm/ADT/None.h"
71 #include "llvm/ADT/Optional.h"
72 #include "llvm/ADT/STLExtras.h"
73 #include "llvm/ADT/SmallBitVector.h"
74 #include "llvm/ADT/SmallPtrSet.h"
75 #include "llvm/ADT/SmallString.h"
76 #include "llvm/ADT/SmallVector.h"
77 #include "llvm/ADT/StringRef.h"
78 #include "llvm/ADT/StringSet.h"
79 #include "llvm/ADT/StringSwitch.h"
80 #include "llvm/ADT/Triple.h"
81 #include "llvm/Support/AtomicOrdering.h"
82 #include "llvm/Support/Casting.h"
83 #include "llvm/Support/Compiler.h"
84 #include "llvm/Support/ConvertUTF.h"
85 #include "llvm/Support/ErrorHandling.h"
86 #include "llvm/Support/Format.h"
87 #include "llvm/Support/Locale.h"
88 #include "llvm/Support/MathExtras.h"
89 #include "llvm/Support/SaveAndRestore.h"
90 #include "llvm/Support/raw_ostream.h"
91 #include <algorithm>
92 #include <bitset>
93 #include <cassert>
94 #include <cctype>
95 #include <cstddef>
96 #include <cstdint>
97 #include <functional>
98 #include <limits>
99 #include <string>
100 #include <tuple>
101 #include <utility>
102 
103 using namespace clang;
104 using namespace sema;
105 
106 SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
107  unsigned ByteNo) const {
108  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
109  Context.getTargetInfo());
110 }
111 
112 /// Checks that a call expression's argument count is at least the desired
113 /// number. This is useful when doing custom type-checking on a variadic
114 /// function. Returns true on error.
115 static bool checkArgCountAtLeast(Sema &S, CallExpr *Call,
116  unsigned MinArgCount) {
117  unsigned ArgCount = Call->getNumArgs();
118  if (ArgCount >= MinArgCount)
119  return false;
120 
121  return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
122  << 0 /*function call*/ << MinArgCount << ArgCount
123  << Call->getSourceRange();
124 }
125 
126 /// Checks that a call expression's argument count is the desired number.
127 /// This is useful when doing custom type-checking. Returns true on error.
128 static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) {
129  unsigned ArgCount = Call->getNumArgs();
130  if (ArgCount == DesiredArgCount)
131  return false;
132 
133  if (checkArgCountAtLeast(S, Call, DesiredArgCount))
134  return true;
135  assert(ArgCount > DesiredArgCount && "should have diagnosed this");
136 
137  // Highlight all the excess arguments.
138  SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(),
139  Call->getArg(ArgCount - 1)->getEndLoc());
140 
141  return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
142  << 0 /*function call*/ << DesiredArgCount << ArgCount
143  << Call->getArg(1)->getSourceRange();
144 }
145 
146 /// Check that the first argument to __builtin_annotation is an integer
147 /// and the second argument is a non-wide string literal.
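/// For example, a call accepted by this check looks like
///   int tagged = __builtin_annotation(value, "my.annotation");
/// where 'value' has integer type; the call's result type matches 'value'.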
148 static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
149  if (checkArgCount(S, TheCall, 2))
150  return true;
151 
152  // First argument should be an integer.
153  Expr *ValArg = TheCall->getArg(0);
154  QualType Ty = ValArg->getType();
155  if (!Ty->isIntegerType()) {
156  S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
157  << ValArg->getSourceRange();
158  return true;
159  }
160 
161  // Second argument should be a constant string.
162  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
163  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
164  if (!Literal || !Literal->isAscii()) {
165  S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
166  << StrArg->getSourceRange();
167  return true;
168  }
169 
170  TheCall->setType(Ty);
171  return false;
172 }
173 
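// __annotation takes one or more wide string literals, e.g.
//   __annotation(L"category", L"event text");
// A narrow literal or a non-literal argument is diagnosed below.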
174 static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
175  // We need at least one argument.
176  if (TheCall->getNumArgs() < 1) {
177  S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
178  << 0 << 1 << TheCall->getNumArgs()
179  << TheCall->getCallee()->getSourceRange();
180  return true;
181  }
182 
183  // All arguments should be wide string literals.
184  for (Expr *Arg : TheCall->arguments()) {
185  auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
186  if (!Literal || !Literal->isWide()) {
187  S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
188  << Arg->getSourceRange();
189  return true;
190  }
191  }
192 
193  return false;
194 }
195 
196 /// Check that the argument to __builtin_addressof is a glvalue, and set the
197 /// result type to the corresponding pointer type.
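/// For example, even if a class overloads its 'operator&',
///   T *P = __builtin_addressof(Obj);
/// yields the object's real address, and the call has type 'T *'.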
198 static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
199  if (checkArgCount(S, TheCall, 1))
200  return true;
201 
202  ExprResult Arg(TheCall->getArg(0));
203  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
204  if (ResultType.isNull())
205  return true;
206 
207  TheCall->setArg(0, Arg.get());
208  TheCall->setType(ResultType);
209  return false;
210 }
211 
212 /// Check that the argument to __builtin_function_start is a function.
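/// For example, given 'void f();', the call '__builtin_function_start(f)' is
/// accepted, whereas passing anything that does not name a function is
/// diagnosed.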
213 static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
214  if (checkArgCount(S, TheCall, 1))
215  return true;
216 
217  ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
218  if (Arg.isInvalid())
219  return true;
220 
221  TheCall->setArg(0, Arg.get());
222  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
223  Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext()));
224 
225  if (!FD) {
226  S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
227  << TheCall->getSourceRange();
228  return true;
229  }
230 
231  return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
232  TheCall->getBeginLoc());
233 }
234 
235 /// Check the number of arguments and set the result type to
236 /// the argument type.
237 static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
238  if (checkArgCount(S, TheCall, 1))
239  return true;
240 
241  TheCall->setType(TheCall->getArg(0)->getType());
242  return false;
243 }
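// e.g. '__builtin_preserve_access_index(&S->B[2].C)': the call keeps the type
// of its single argument.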
244 
245 /// Check that the value argument for __builtin_is_aligned(value, alignment) and
246 /// __builtin_align_{up,down}(value, alignment) is an integer or a pointer
247 /// type (but not a function pointer) and that the alignment is a power-of-two.
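/// For example, with 'P' of type 'char *':
///   bool Ok  = __builtin_is_aligned(P, 16);
///   char *Up = __builtin_align_up(P, 16);  // result keeps the argument type
///   __builtin_align_up(P, 3);              // rejected: not a power of two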
248 static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
249  if (checkArgCount(S, TheCall, 2))
250  return true;
251 
252  clang::Expr *Source = TheCall->getArg(0);
253  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;
254 
255  auto IsValidIntegerType = [](QualType Ty) {
256  return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
257  };
258  QualType SrcTy = Source->getType();
259  // We should also be able to use it with arrays (but not functions!).
260  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
261  SrcTy = S.Context.getDecayedType(SrcTy);
262  }
263  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
264  SrcTy->isFunctionPointerType()) {
265  // FIXME: this is not quite the right error message since we don't allow
266  // floating point types, or member pointers.
267  S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
268  << SrcTy;
269  return true;
270  }
271 
272  clang::Expr *AlignOp = TheCall->getArg(1);
273  if (!IsValidIntegerType(AlignOp->getType())) {
274  S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
275  << AlignOp->getType();
276  return true;
277  }
278  Expr::EvalResult AlignResult;
279  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
280  // We can't check validity of alignment if it is value dependent.
281  if (!AlignOp->isValueDependent() &&
282  AlignOp->EvaluateAsInt(AlignResult, S.Context,
283  Expr::SE_AllowSideEffects)) {
284  llvm::APSInt AlignValue = AlignResult.Val.getInt();
285  llvm::APSInt MaxValue(
286  llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
287  if (AlignValue < 1) {
288  S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
289  return true;
290  }
291  if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
292  S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
293  << toString(MaxValue, 10);
294  return true;
295  }
296  if (!AlignValue.isPowerOf2()) {
297  S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
298  return true;
299  }
300  if (AlignValue == 1) {
301  S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
302  << IsBooleanAlignBuiltin;
303  }
304  }
305 
306  ExprResult SrcArg = S.PerformCopyInitialization(
307  InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
308  SourceLocation(), Source);
309  if (SrcArg.isInvalid())
310  return true;
311  TheCall->setArg(0, SrcArg.get());
312  ExprResult AlignArg =
313  S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
314  S.Context, AlignOp->getType(), false),
315  SourceLocation(), AlignOp);
316  if (AlignArg.isInvalid())
317  return true;
318  TheCall->setArg(1, AlignArg.get());
319  // For align_up/align_down, the return type is the same as the (potentially
320  // decayed) argument type including qualifiers. For is_aligned(), the result
321  // is always bool.
322  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
323  return false;
324 }
325 
326 static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
327  unsigned BuiltinID) {
328  if (checkArgCount(S, TheCall, 3))
329  return true;
330 
331  // First two arguments should be integers.
332  for (unsigned I = 0; I < 2; ++I) {
333  ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
334  if (Arg.isInvalid()) return true;
335  TheCall->setArg(I, Arg.get());
336 
337  QualType Ty = Arg.get()->getType();
338  if (!Ty->isIntegerType()) {
339  S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
340  << Ty << Arg.get()->getSourceRange();
341  return true;
342  }
343  }
344 
345  // Third argument should be a pointer to a non-const integer.
346  // IRGen correctly handles volatile, restrict, and address spaces, and
347  // the other qualifiers aren't possible.
348  {
349  ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
350  if (Arg.isInvalid()) return true;
351  TheCall->setArg(2, Arg.get());
352 
353  QualType Ty = Arg.get()->getType();
354  const auto *PtrTy = Ty->getAs<PointerType>();
355  if (!PtrTy ||
356  !PtrTy->getPointeeType()->isIntegerType() ||
357  PtrTy->getPointeeType().isConstQualified()) {
358  S.Diag(Arg.get()->getBeginLoc(),
359  diag::err_overflow_builtin_must_be_ptr_int)
360  << Ty << Arg.get()->getSourceRange();
361  return true;
362  }
363  }
364 
365  // Disallow signed bit-precise integer arguments larger than 128 bits for the
366  // mul overflow builtin until we improve backend support.
367  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
368  for (unsigned I = 0; I < 3; ++I) {
369  const auto Arg = TheCall->getArg(I);
370  // Third argument will be a pointer.
371  auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
372  if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
373  S.getASTContext().getIntWidth(Ty) > 128)
374  return S.Diag(Arg->getBeginLoc(),
375  diag::err_overflow_builtin_bit_int_max_size)
376  << 128;
377  }
378  }
379 
380  return false;
381 }
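// A well-formed use, e.g. for __builtin_add_overflow:
//   int Result;
//   if (__builtin_add_overflow(A, B, &Result)) { /* handle overflow */ }
// The first two arguments must have integer type and the third must be a
// pointer to a non-const integer.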
382 
383 namespace {
384 struct BuiltinDumpStructGenerator {
385  Sema &S;
386  CallExpr *TheCall;
387  SourceLocation Loc = TheCall->getBeginLoc();
388  SmallVector<Expr *, 32> Actions;
389  DiagnosticErrorTrap ErrorTracker;
390  PrintingPolicy Policy;
391 
392  BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall)
393  : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()),
394  Policy(S.Context.getPrintingPolicy()) {
395  Policy.AnonymousTagLocations = false;
396  }
397 
398  Expr *makeOpaqueValueExpr(Expr *Inner) {
399  auto *OVE = new (S.Context)
400  OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(),
401  Inner->getObjectKind(), Inner);
402  Actions.push_back(OVE);
403  return OVE;
404  }
405 
406  Expr *getStringLiteral(llvm::StringRef Str) {
408  // Wrap the literal in parentheses to attach a source location.
409  return new (S.Context) ParenExpr(Loc, Loc, Lit);
410  }
411 
412  bool callPrintFunction(llvm::StringRef Format,
413  llvm::ArrayRef<Expr *> Exprs = {}) {
414  SmallVector<Expr *, 8> Args;
415  assert(TheCall->getNumArgs() >= 2);
416  Args.reserve((TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size());
417  Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end());
418  Args.push_back(getStringLiteral(Format));
419  Args.insert(Args.end(), Exprs.begin(), Exprs.end());
420 
421  // Register a note to explain why we're performing the call.
422  Sema::CodeSynthesisContext Ctx;
423  Ctx.Kind = Sema::CodeSynthesisContext::BuildingBuiltinDumpStructCall;
424  Ctx.PointOfInstantiation = Loc;
425  Ctx.CallArgs = Args.data();
426  Ctx.NumCallArgs = Args.size();
427  S.pushCodeSynthesisContext(Ctx);
428 
429  ExprResult RealCall =
430  S.BuildCallExpr(/*Scope=*/nullptr, TheCall->getArg(1),
431  TheCall->getBeginLoc(), Args, TheCall->getRParenLoc());
432 
433  S.popCodeSynthesisContext();
434  if (!RealCall.isInvalid())
435  Actions.push_back(RealCall.get());
436  // Bail out if we've hit any errors, even if we managed to build the
437  // call. We don't want to produce more than one error.
438  return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred();
439  }
440 
441  Expr *getIndentString(unsigned Depth) {
442  if (!Depth)
443  return nullptr;
444 
445  llvm::SmallString<32> Indent;
446  Indent.resize(Depth * Policy.Indentation, ' ');
447  return getStringLiteral(Indent);
448  }
449 
450  Expr *getTypeString(QualType T) {
451  return getStringLiteral(T.getAsString(Policy));
452  }
453 
454  bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) {
455  llvm::raw_svector_ostream OS(Str);
456 
457  // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather
458  // than trying to print a single character.
459  if (auto *BT = T->getAs<BuiltinType>()) {
460  switch (BT->getKind()) {
461  case BuiltinType::Bool:
462  OS << "%d";
463  return true;
464  case BuiltinType::Char_U:
465  case BuiltinType::UChar:
466  OS << "%hhu";
467  return true;
468  case BuiltinType::Char_S:
469  case BuiltinType::SChar:
470  OS << "%hhd";
471  return true;
472  default:
473  break;
474  }
475  }
476 
477  analyze_printf::PrintfSpecifier Specifier;
478  if (Specifier.fixType(T, S.getLangOpts(), S.Context, /*IsObjCLiteral=*/false)) {
479  // We were able to guess how to format this.
480  if (Specifier.getConversionSpecifier().getKind() ==
481  analyze_format_string::ConversionSpecifier::sArg) {
482  // Wrap double-quotes around a '%s' specifier and limit its maximum
483  // length. Ideally we'd also somehow escape special characters in the
484  // contents but printf doesn't support that.
485  // FIXME: '%s' formatting is not safe in general.
486  OS << '"';
487  Specifier.setPrecision(analyze_printf::OptionalAmount(32u));
488  Specifier.toString(OS);
489  OS << '"';
490  // FIXME: It would be nice to include a '...' if the string doesn't fit
491  // in the length limit.
492  } else {
493  Specifier.toString(OS);
494  }
495  return true;
496  }
497 
498  if (T->isPointerType()) {
499  // Format all pointers with '%p'.
500  OS << "%p";
501  return true;
502  }
503 
504  return false;
505  }
506 
507  bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) {
508  Expr *IndentLit = getIndentString(Depth);
509  Expr *TypeLit = getTypeString(S.Context.getRecordType(RD));
510  if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit})
511  : callPrintFunction("%s", {TypeLit}))
512  return true;
513 
514  return dumpRecordValue(RD, E, IndentLit, Depth);
515  }
516 
517  // Dump a record value. E should be a pointer or lvalue referring to an RD.
518  bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent,
519  unsigned Depth) {
520  // FIXME: Decide what to do if RD is a union. At least we should probably
521  // turn off printing `const char*` members with `%s`, because that is very
522  // likely to crash if that's not the active member. Whatever we decide, we
523  // should document it.
524 
525  // Build an OpaqueValueExpr so we can refer to E more than once without
526  // triggering re-evaluation.
527  Expr *RecordArg = makeOpaqueValueExpr(E);
528  bool RecordArgIsPtr = RecordArg->getType()->isPointerType();
529 
530  if (callPrintFunction(" {\n"))
531  return true;
532 
533  // Dump each base class, regardless of whether they're aggregates.
534  if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
535  for (const auto &Base : CXXRD->bases()) {
536  QualType BaseType =
537  RecordArgIsPtr ? S.Context.getPointerType(Base.getType())
538  : S.Context.getLValueReferenceType(Base.getType());
539  ExprResult BasePtr = S.BuildCStyleCastExpr(
540  Loc, S.Context.getTrivialTypeSourceInfo(BaseType, Loc), Loc,
541  RecordArg);
542  if (BasePtr.isInvalid() ||
543  dumpUnnamedRecord(Base.getType()->getAsRecordDecl(), BasePtr.get(),
544  Depth + 1))
545  return true;
546  }
547  }
548 
549  Expr *FieldIndentArg = getIndentString(Depth + 1);
550 
551  // Dump each field.
552  for (auto *D : RD->decls()) {
553  auto *IFD = dyn_cast<IndirectFieldDecl>(D);
554  auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D);
555  if (!FD || FD->isUnnamedBitfield() || FD->isAnonymousStructOrUnion())
556  continue;
557 
558  llvm::SmallString<20> Format = llvm::StringRef("%s%s %s ");
559  llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg,
560  getTypeString(FD->getType()),
561  getStringLiteral(FD->getName())};
562 
563  if (FD->isBitField()) {
564  Format += ": %zu ";
565  QualType SizeT = S.Context.getSizeType();
566  llvm::APInt BitWidth(S.Context.getIntWidth(SizeT),
567  FD->getBitWidthValue(S.Context));
568  Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc));
569  }
570 
571  Format += "=";
572 
573  ExprResult Field =
574  IFD ? S.BuildAnonymousStructUnionMemberReference(
575  CXXScopeSpec(), Loc, IFD,
576  DeclAccessPair::make(IFD, AS_public), RecordArg, Loc)
577  : S.BuildFieldReferenceExpr(
578  RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD,
579  DeclAccessPair::make(FD, AS_public),
580  DeclarationNameInfo(FD->getDeclName(), Loc));
581  if (Field.isInvalid())
582  return true;
583 
584  auto *InnerRD = FD->getType()->getAsRecordDecl();
585  auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD);
586  if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) {
587  // Recursively print the values of members of aggregate record type.
588  if (callPrintFunction(Format, Args) ||
589  dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1))
590  return true;
591  } else {
592  Format += " ";
593  if (appendFormatSpecifier(FD->getType(), Format)) {
594  // We know how to print this field.
595  Args.push_back(Field.get());
596  } else {
597  // We don't know how to print this field. Print out its address
598  // with a format specifier that a smart tool will be able to
599  // recognize and treat specially.
600  Format += "*%p";
601  ExprResult FieldAddr =
602  S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get());
603  if (FieldAddr.isInvalid())
604  return true;
605  Args.push_back(FieldAddr.get());
606  }
607  Format += "\n";
608  if (callPrintFunction(Format, Args))
609  return true;
610  }
611  }
612 
613  return RecordIndent ? callPrintFunction("%s}\n", RecordIndent)
614  : callPrintFunction("}\n");
615  }
616 
617  Expr *buildWrapper() {
618  auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions,
619  PseudoObjectExpr::NoResult);
620  TheCall->setType(Wrapper->getType());
621  TheCall->setValueKind(Wrapper->getValueKind());
622  return Wrapper;
623  }
624 };
625 } // namespace
626 
627 static ExprResult SemaBuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
628  if (checkArgCountAtLeast(S, TheCall, 2))
629  return ExprError();
630 
631  ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0));
632  if (PtrArgResult.isInvalid())
633  return ExprError();
634  TheCall->setArg(0, PtrArgResult.get());
635 
636  // First argument should be a pointer to a struct.
637  QualType PtrArgType = PtrArgResult.get()->getType();
638  if (!PtrArgType->isPointerType() ||
639  !PtrArgType->getPointeeType()->isRecordType()) {
640  S.Diag(PtrArgResult.get()->getBeginLoc(),
641  diag::err_expected_struct_pointer_argument)
642  << 1 << TheCall->getDirectCallee() << PtrArgType;
643  return ExprError();
644  }
645  const RecordDecl *RD = PtrArgType->getPointeeType()->getAsRecordDecl();
646 
647  // Second argument is a callable, but we can't fully validate it until we try
648  // calling it.
649  QualType FnArgType = TheCall->getArg(1)->getType();
650  if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() &&
651  !FnArgType->isBlockPointerType() &&
652  !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) {
653  auto *BT = FnArgType->getAs<BuiltinType>();
654  switch (BT ? BT->getKind() : BuiltinType::Void) {
655  case BuiltinType::Dependent:
656  case BuiltinType::Overload:
657  case BuiltinType::BoundMember:
658  case BuiltinType::PseudoObject:
659  case BuiltinType::UnknownAny:
660  case BuiltinType::BuiltinFn:
661  // This might be a callable.
662  break;
663 
664  default:
665  S.Diag(TheCall->getArg(1)->getBeginLoc(),
666  diag::err_expected_callable_argument)
667  << 2 << TheCall->getDirectCallee() << FnArgType;
668  return ExprError();
669  }
670  }
671 
672  BuiltinDumpStructGenerator Generator(S, TheCall);
673 
674  // Wrap parentheses around the given pointer. This is not necessary for
675  // correct code generation, but it means that when we pretty-print the call
676  // arguments in our diagnostics we will produce '(&s)->n' instead of the
677  // incorrect '&s->n'.
678  Expr *PtrArg = PtrArgResult.get();
679  PtrArg = new (S.Context)
680  ParenExpr(PtrArg->getBeginLoc(),
681  S.getLocForEndOfToken(PtrArg->getEndLoc()), PtrArg);
682  if (Generator.dumpUnnamedRecord(RD, PtrArg, 0))
683  return ExprError();
684 
685  return Generator.buildWrapper();
686 }
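// For instance, given 'struct S { int I; const char *Name; } Obj;', the call
//   __builtin_dump_struct(&Obj, printf);
// is lowered by the generator above into a sequence of printf calls that
// print the struct's type along with each field's type, name, and value.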
687 
688 static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
689  if (checkArgCount(S, BuiltinCall, 2))
690  return true;
691 
692  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
693  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
694  Expr *Call = BuiltinCall->getArg(0);
695  Expr *Chain = BuiltinCall->getArg(1);
696 
697  if (Call->getStmtClass() != Stmt::CallExprClass) {
698  S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
699  << Call->getSourceRange();
700  return true;
701  }
702 
703  auto CE = cast<CallExpr>(Call);
704  if (CE->getCallee()->getType()->isBlockPointerType()) {
705  S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
706  << Call->getSourceRange();
707  return true;
708  }
709 
710  const Decl *TargetDecl = CE->getCalleeDecl();
711  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
712  if (FD->getBuiltinID()) {
713  S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
714  << Call->getSourceRange();
715  return true;
716  }
717 
718  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
719  S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
720  << Call->getSourceRange();
721  return true;
722  }
723 
724  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
725  if (ChainResult.isInvalid())
726  return true;
727  if (!ChainResult.get()->getType()->isPointerType()) {
728  S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
729  << Chain->getSourceRange();
730  return true;
731  }
732 
733  QualType ReturnTy = CE->getCallReturnType(S.Context);
734  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
735  QualType BuiltinTy = S.Context.getFunctionType(
736  ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
737  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);
738 
739  Builtin =
740  S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();
741 
742  BuiltinCall->setType(CE->getType());
743  BuiltinCall->setValueKind(CE->getValueKind());
744  BuiltinCall->setObjectKind(CE->getObjectKind());
745  BuiltinCall->setCallee(Builtin);
746  BuiltinCall->setArg(1, ChainResult.get());
747 
748  return false;
749 }
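// For example:
//   __builtin_call_with_static_chain(f(X), Chain);
// where 'f(X)' is a direct (non-block, non-builtin) call and 'Chain' is a
// pointer; the result has the type and value category of 'f(X)'.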
750 
751 namespace {
752 
753 class ScanfDiagnosticFormatHandler
754  : public analyze_format_string::FormatStringHandler {
755  // Accepts the argument index (relative to the first destination index) of the
756  // argument whose size we want.
757  using ComputeSizeFunction =
758  llvm::function_ref<Optional<llvm::APSInt>(unsigned)>;
759 
760  // Accepts the argument index (relative to the first destination index), the
761  // destination size, and the source size.
762  using DiagnoseFunction =
763  llvm::function_ref<void(unsigned, unsigned, unsigned)>;
764 
765  ComputeSizeFunction ComputeSizeArgument;
766  DiagnoseFunction Diagnose;
767 
768 public:
769  ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
770  DiagnoseFunction Diagnose)
771  : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}
772 
773  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
774  const char *StartSpecifier,
775  unsigned specifierLen) override {
776  if (!FS.consumesDataArgument())
777  return true;
778 
779  unsigned NulByte = 0;
780  switch ((FS.getConversionSpecifier().getKind())) {
781  default:
782  return true;
783  case analyze_format_string::ConversionSpecifier::sArg:
784  case analyze_format_string::ConversionSpecifier::ScanListArg:
785  NulByte = 1;
786  break;
787  case analyze_format_string::ConversionSpecifier::cArg:
788  break;
789  }
790 
791  analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
792  if (FW.getHowSpecified() !=
793  analyze_format_string::OptionalAmount::HowSpecified::Constant)
794  return true;
795 
796  unsigned SourceSize = FW.getConstantAmount() + NulByte;
797 
798  Optional<llvm::APSInt> DestSizeAPS = ComputeSizeArgument(FS.getArgIndex());
799  if (!DestSizeAPS)
800  return true;
801 
802  unsigned DestSize = DestSizeAPS->getZExtValue();
803 
804  if (DestSize < SourceSize)
805  Diagnose(FS.getArgIndex(), DestSize, SourceSize);
806 
807  return true;
808  }
809 };
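// This handler backs the fortify check for scanf-family calls, e.g.
//   char Buf[4];
//   sscanf(In, "%5s", Buf);   // field width 5 plus the NUL byte overflows Buf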
810 
811 class EstimateSizeFormatHandler
812  : public analyze_format_string::FormatStringHandler {
813  size_t Size;
814 
815 public:
816  EstimateSizeFormatHandler(StringRef Format)
817  : Size(std::min(Format.find(0), Format.size()) +
818  1 /* null byte always written by sprintf */) {}
819 
820  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
821  const char *, unsigned SpecifierLen,
822  const TargetInfo &) override {
823 
824  const size_t FieldWidth = computeFieldWidth(FS);
825  const size_t Precision = computePrecision(FS);
826 
827  // The actual format.
828  switch (FS.getConversionSpecifier().getKind()) {
829  // Just a char.
832  Size += std::max(FieldWidth, (size_t)1);
833  break;
834  // Just an integer.
844  Size += std::max(FieldWidth, Precision);
845  break;
846 
847  // %g style conversion switches between %f or %e style dynamically.
848  // %f always takes less space, so default to it.
851 
852  // Floating point number in the form '[+]ddd.ddd'.
855  Size += std::max(FieldWidth, 1 /* integer part */ +
856  (Precision ? 1 + Precision
857  : 0) /* period + decimal */);
858  break;
859 
860  // Floating point number in the form '[-]d.ddde[+-]dd'.
863  Size +=
864  std::max(FieldWidth,
865  1 /* integer part */ +
866  (Precision ? 1 + Precision : 0) /* period + decimal */ +
867  1 /* e or E letter */ + 2 /* exponent */);
868  break;
869 
870  // Floating point number in the form '[-]0xh.hhhhp±dd'.
873  Size +=
874  std::max(FieldWidth,
875  2 /* 0x */ + 1 /* integer part */ +
876  (Precision ? 1 + Precision : 0) /* period + decimal */ +
877  1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
878  break;
879 
880  // Just a string.
883  Size += FieldWidth;
884  break;
885 
886  // Just a pointer in the form '0xddd'.
888  Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
889  break;
890 
891  // A plain percent.
893  Size += 1;
894  break;
895 
896  default:
897  break;
898  }
899 
900  Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();
901 
902  if (FS.hasAlternativeForm()) {
903  switch (FS.getConversionSpecifier().getKind()) {
904  default:
905  break;
906  // Force a leading '0'.
908  Size += 1;
909  break;
910  // Force a leading '0x'.
913  Size += 2;
914  break;
915  // Force a period '.' before decimal, even if precision is 0.
924  Size += (Precision ? 0 : 1);
925  break;
926  }
927  }
928  assert(SpecifierLen <= Size && "no underflow");
929  Size -= SpecifierLen;
930  return true;
931  }
932 
933  size_t getSizeLowerBound() const { return Size; }
934 
935 private:
936  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
937  const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
938  size_t FieldWidth = 0;
939  if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::HowSpecified::Constant)
940  FieldWidth = FW.getConstantAmount();
941  return FieldWidth;
942  }
943 
944  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
945  const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
946  size_t Precision = 0;
947 
948  // See man 3 printf for default precision value based on the specifier.
949  switch (FW.getHowSpecified()) {
951  switch (FS.getConversionSpecifier().getKind()) {
952  default:
953  break;
957  Precision = 1;
958  break;
965  Precision = 1;
966  break;
973  Precision = 6;
974  break;
976  Precision = 1;
977  break;
978  }
979  break;
981  Precision = FW.getConstantAmount();
982  break;
983  default:
984  break;
985  }
986  return Precision;
987  }
988 };
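// Example of the estimate in action:
//   char Buf[4];
//   sprintf(Buf, "%10d", N);   // lower bound is 11 bytes (10 + NUL), diagnosed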
989 
990 } // namespace
991 
992 void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
993  CallExpr *TheCall) {
994  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
995  isConstantEvaluated())
996  return;
997 
998  bool UseDABAttr = false;
999  const FunctionDecl *UseDecl = FD;
1000 
1001  const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
1002  if (DABAttr) {
1003  UseDecl = DABAttr->getFunction();
1004  assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
1005  UseDABAttr = true;
1006  }
1007 
1008  unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true);
1009 
1010  if (!BuiltinID)
1011  return;
1012 
1013  const TargetInfo &TI = getASTContext().getTargetInfo();
1014  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
1015 
1016  auto TranslateIndex = [&](unsigned Index) -> Optional<unsigned> {
1017  // If we refer to a diagnose_as_builtin attribute, we need to change the
1018  // argument index to refer to the arguments of the called function. Unless
1019  // the index is out of bounds, which presumably means it's a variadic
1020  // function.
1021  if (!UseDABAttr)
1022  return Index;
1023  unsigned DABIndices = DABAttr->argIndices_size();
1024  unsigned NewIndex = Index < DABIndices
1025  ? DABAttr->argIndices_begin()[Index]
1026  : Index - DABIndices + FD->getNumParams();
1027  if (NewIndex >= TheCall->getNumArgs())
1028  return llvm::None;
1029  return NewIndex;
1030  };
1031 
1032  auto ComputeExplicitObjectSizeArgument =
1033  [&](unsigned Index) -> Optional<llvm::APSInt> {
1034  Optional<unsigned> IndexOptional = TranslateIndex(Index);
1035  if (!IndexOptional)
1036  return llvm::None;
1037  unsigned NewIndex = IndexOptional.getValue();
1038  Expr::EvalResult Result;
1039  Expr *SizeArg = TheCall->getArg(NewIndex);
1040  if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
1041  return llvm::None;
1042  llvm::APSInt Integer = Result.Val.getInt();
1043  Integer.setIsUnsigned(true);
1044  return Integer;
1045  };
1046 
1047  auto ComputeSizeArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
1048  // If the parameter has a pass_object_size attribute, then we should use its
1049  // (potentially) more strict checking mode. Otherwise, conservatively assume
1050  // type 0.
1051  int BOSType = 0;
1052  // This check can fail for variadic functions.
1053  if (Index < FD->getNumParams()) {
1054  if (const auto *POS =
1055  FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
1056  BOSType = POS->getType();
1057  }
1058 
1059  Optional<unsigned> IndexOptional = TranslateIndex(Index);
1060  if (!IndexOptional)
1061  return llvm::None;
1062  unsigned NewIndex = IndexOptional.getValue();
1063 
1064  const Expr *ObjArg = TheCall->getArg(NewIndex);
1065  uint64_t Result;
1066  if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
1067  return llvm::None;
1068 
1069  // Get the object size in the target's size_t width.
1070  return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
1071  };
1072 
1073  auto ComputeStrLenArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
1074  Optional<unsigned> IndexOptional = TranslateIndex(Index);
1075  if (!IndexOptional)
1076  return llvm::None;
1077  unsigned NewIndex = IndexOptional.getValue();
1078 
1079  const Expr *ObjArg = TheCall->getArg(NewIndex);
1080  uint64_t Result;
1081  if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
1082  return llvm::None;
1083  // Add 1 for null byte.
1084  return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
1085  };
1086 
1087  Optional<llvm::APSInt> SourceSize;
1088  Optional<llvm::APSInt> DestinationSize;
1089  unsigned DiagID = 0;
1090  bool IsChkVariant = false;
1091 
1092  auto GetFunctionName = [&]() {
1093  StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
1094  // Skim off the details of whichever builtin was called to produce a better
1095  // diagnostic, as it's unlikely that the user wrote the __builtin
1096  // explicitly.
1097  if (IsChkVariant) {
1098  FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
1099  FunctionName = FunctionName.drop_back(std::strlen("_chk"));
1100  } else if (FunctionName.startswith("__builtin_")) {
1101  FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
1102  }
1103  return FunctionName;
1104  };
1105 
1106  switch (BuiltinID) {
1107  default:
1108  return;
1109  case Builtin::BI__builtin_strcpy:
1110  case Builtin::BIstrcpy: {
1111  DiagID = diag::warn_fortify_strlen_overflow;
1112  SourceSize = ComputeStrLenArgument(1);
1113  DestinationSize = ComputeSizeArgument(0);
1114  break;
1115  }
1116 
1117  case Builtin::BI__builtin___strcpy_chk: {
1118  DiagID = diag::warn_fortify_strlen_overflow;
1119  SourceSize = ComputeStrLenArgument(1);
1120  DestinationSize = ComputeExplicitObjectSizeArgument(2);
1121  IsChkVariant = true;
1122  break;
1123  }
1124 
1125  case Builtin::BIscanf:
1126  case Builtin::BIfscanf:
1127  case Builtin::BIsscanf: {
1128  unsigned FormatIndex = 1;
1129  unsigned DataIndex = 2;
1130  if (BuiltinID == Builtin::BIscanf) {
1131  FormatIndex = 0;
1132  DataIndex = 1;
1133  }
1134 
1135  const auto *FormatExpr =
1136  TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();
1137 
1138  const auto *Format = dyn_cast<StringLiteral>(FormatExpr);
1139  if (!Format)
1140  return;
1141 
1142  if (!Format->isAscii() && !Format->isUTF8())
1143  return;
1144 
1145  auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
1146  unsigned SourceSize) {
1147  DiagID = diag::warn_fortify_scanf_overflow;
1148  unsigned Index = ArgIndex + DataIndex;
1149  StringRef FunctionName = GetFunctionName();
1150  DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall,
1151  PDiag(DiagID) << FunctionName << (Index + 1)
1152  << DestSize << SourceSize);
1153  };
1154 
1155  StringRef FormatStrRef = Format->getString();
1156  auto ShiftedComputeSizeArgument = [&](unsigned Index) {
1157  return ComputeSizeArgument(Index + DataIndex);
1158  };
1159  ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose);
1160  const char *FormatBytes = FormatStrRef.data();
1161  const ConstantArrayType *T =
1162  Context.getAsConstantArrayType(Format->getType());
1163  assert(T && "String literal not of constant array type!");
1164  size_t TypeSize = T->getSize().getZExtValue();
1165 
1166  // In case there's a null byte somewhere.
1167  size_t StrLen =
1168  std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
1169 
1170  analyze_format_string::ParseScanfString(H, FormatBytes,
1171  FormatBytes + StrLen, getLangOpts(),
1172  Context.getTargetInfo());
1173 
1174  // Unlike the other cases, in this one we have already issued the diagnostic
1175  // here, so no need to continue (because unlike the other cases, here the
1176  // diagnostic refers to the argument number).
1177  return;
1178  }
1179 
1180  case Builtin::BIsprintf:
1181  case Builtin::BI__builtin___sprintf_chk: {
1182  size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
1183  auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();
1184 
1185  if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {
1186 
1187  if (!Format->isAscii() && !Format->isUTF8())
1188  return;
1189 
1190  StringRef FormatStrRef = Format->getString();
1191  EstimateSizeFormatHandler H(FormatStrRef);
1192  const char *FormatBytes = FormatStrRef.data();
1193  const ConstantArrayType *T =
1194  Context.getAsConstantArrayType(Format->getType());
1195  assert(T && "String literal not of constant array type!");
1196  size_t TypeSize = T->getSize().getZExtValue();
1197 
1198  // In case there's a null byte somewhere.
1199  size_t StrLen =
1200  std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
1201  if (!analyze_format_string::ParsePrintfString(
1202  H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
1203  Context.getTargetInfo(), false)) {
1204  DiagID = diag::warn_fortify_source_format_overflow;
1205  SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
1206  .extOrTrunc(SizeTypeWidth);
1207  if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
1208  DestinationSize = ComputeExplicitObjectSizeArgument(2);
1209  IsChkVariant = true;
1210  } else {
1211  DestinationSize = ComputeSizeArgument(0);
1212  }
1213  break;
1214  }
1215  }
1216  return;
1217  }
1218  case Builtin::BI__builtin___memcpy_chk:
1219  case Builtin::BI__builtin___memmove_chk:
1220  case Builtin::BI__builtin___memset_chk:
1221  case Builtin::BI__builtin___strlcat_chk:
1222  case Builtin::BI__builtin___strlcpy_chk:
1223  case Builtin::BI__builtin___strncat_chk:
1224  case Builtin::BI__builtin___strncpy_chk:
1225  case Builtin::BI__builtin___stpncpy_chk:
1226  case Builtin::BI__builtin___memccpy_chk:
1227  case Builtin::BI__builtin___mempcpy_chk: {
1228  DiagID = diag::warn_builtin_chk_overflow;
1229  SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
1230  DestinationSize =
1231  ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
1232  IsChkVariant = true;
1233  break;
1234  }
1235 
1236  case Builtin::BI__builtin___snprintf_chk:
1237  case Builtin::BI__builtin___vsnprintf_chk: {
1238  DiagID = diag::warn_builtin_chk_overflow;
1239  SourceSize = ComputeExplicitObjectSizeArgument(1);
1240  DestinationSize = ComputeExplicitObjectSizeArgument(3);
1241  IsChkVariant = true;
1242  break;
1243  }
1244 
1245  case Builtin::BIstrncat:
1246  case Builtin::BI__builtin_strncat:
1247  case Builtin::BIstrncpy:
1248  case Builtin::BI__builtin_strncpy:
1249  case Builtin::BIstpncpy:
1250  case Builtin::BI__builtin_stpncpy: {
1251  // Whether these functions overflow depends on the runtime strlen of the
1252  // string, not just the buffer size, so emitting the "always overflow"
1253  // diagnostic isn't quite right. We should still diagnose passing a buffer
1254  // size larger than the destination buffer though; this is a runtime abort
1255  // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
1256  DiagID = diag::warn_fortify_source_size_mismatch;
1257  SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
1258  DestinationSize = ComputeSizeArgument(0);
1259  break;
1260  }
1261 
1262  case Builtin::BImemcpy:
1263  case Builtin::BI__builtin_memcpy:
1264  case Builtin::BImemmove:
1265  case Builtin::BI__builtin_memmove:
1266  case Builtin::BImemset:
1267  case Builtin::BI__builtin_memset:
1268  case Builtin::BImempcpy:
1269  case Builtin::BI__builtin_mempcpy: {
1270  DiagID = diag::warn_fortify_source_overflow;
1271  SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
1272  DestinationSize = ComputeSizeArgument(0);
1273  break;
1274  }
1275  case Builtin::BIsnprintf:
1276  case Builtin::BI__builtin_snprintf:
1277  case Builtin::BIvsnprintf:
1278  case Builtin::BI__builtin_vsnprintf: {
1279  DiagID = diag::warn_fortify_source_size_mismatch;
1280  SourceSize = ComputeExplicitObjectSizeArgument(1);
1281  DestinationSize = ComputeSizeArgument(0);
1282  break;
1283  }
1284  }
1285 
1286  if (!SourceSize || !DestinationSize ||
1287  llvm::APSInt::compareValues(SourceSize.getValue(),
1288  DestinationSize.getValue()) <= 0)
1289  return;
1290 
1291  StringRef FunctionName = GetFunctionName();
1292 
1293  SmallString<16> DestinationStr;
1294  SmallString<16> SourceStr;
1295  DestinationSize->toString(DestinationStr, /*Radix=*/10);
1296  SourceSize->toString(SourceStr, /*Radix=*/10);
1297  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
1298  PDiag(DiagID)
1299  << FunctionName << DestinationStr << SourceStr);
1300 }
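// Typical code diagnosed by this check:
//   char Buf[4];
//   memcpy(Buf, Src, 8);          // size argument exceeds the destination size
//   strcpy(Buf, "overflows");     // string length + NUL exceeds the destination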
1301 
1302 static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
1303  Scope::ScopeFlags NeededScopeFlags,
1304  unsigned DiagID) {
1305  // Scopes aren't available during instantiation. Fortunately, builtin
1306  // functions cannot be template args so they cannot be formed through template
1307  // instantiation. Therefore checking once during the parse is sufficient.
1308  if (SemaRef.inTemplateInstantiation())
1309  return false;
1310 
1311  Scope *S = SemaRef.getCurScope();
1312  while (S && !S->isSEHExceptScope())
1313  S = S->getParent();
1314  if (!S || !(S->getFlags() & NeededScopeFlags)) {
1315  auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
1316  SemaRef.Diag(TheCall->getExprLoc(), DiagID)
1317  << DRE->getDecl()->getIdentifier();
1318  return true;
1319  }
1320 
1321  return false;
1322 }
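// e.g. '_exception_code()' is accepted inside an '__except' block:
//   __try { ... } __except (1) { (void)_exception_code(); }
// but is diagnosed when no suitable SEH scope encloses the call.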
1323 
1324 static inline bool isBlockPointer(Expr *Arg) {
1325  return Arg->getType()->isBlockPointerType();
1326 }
1327 
1328 /// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
1329 /// void*, which is a requirement of device side enqueue.
1330 static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
1331  const BlockPointerType *BPT =
1332  cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
1333  ArrayRef<QualType> Params =
1334  BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
1335  unsigned ArgCounter = 0;
1336  bool IllegalParams = false;
1337  // Iterate through the block parameters until either one is found that is not
1338  // a local void*, or the block is valid.
1339  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
1340  I != E; ++I, ++ArgCounter) {
1341  if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
1342  (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
1343  LangAS::opencl_local) {
1344  // Get the location of the error. If a block literal has been passed
1345  // (BlockExpr) then we can point straight to the offending argument,
1346  // else we just point to the variable reference.
1347  SourceLocation ErrorLoc;
1348  if (isa<BlockExpr>(BlockArg)) {
1349  BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
1350  ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
1351  } else if (isa<DeclRefExpr>(BlockArg)) {
1352  ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
1353  }
1354  S.Diag(ErrorLoc,
1355  diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
1356  IllegalParams = true;
1357  }
1358  }
1359 
1360  return IllegalParams;
1361 }
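// A block acceptable to these builtins therefore looks like
//   ^(local void *P, local void *Q) { ... }
// Any parameter that is not a 'local void *' is diagnosed above.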
1362 
1363 static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
1364  // An OpenCL device can support the cl_khr_subgroups extension but not the
1365  // __opencl_c_subgroups feature: the extension requires subgroup independent
1366  // forward progress, which is optional for the OpenCL C 3.0 feature.
1367  if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts()) &&
1368  !S.getOpenCLOptions().isSupported("__opencl_c_subgroups",
1369  S.getLangOpts())) {
1370  S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
1371  << 1 << Call->getDirectCallee()
1372  << "cl_khr_subgroups or __opencl_c_subgroups";
1373  return true;
1374  }
1375  return false;
1376 }
1377 
1378 static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
1379  if (checkArgCount(S, TheCall, 2))
1380  return true;
1381 
1382  if (checkOpenCLSubgroupExt(S, TheCall))
1383  return true;
1384 
1385  // First argument is an ndrange_t type.
1386  Expr *NDRangeArg = TheCall->getArg(0);
1387  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
1388  S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1389  << TheCall->getDirectCallee() << "'ndrange_t'";
1390  return true;
1391  }
1392 
1393  Expr *BlockArg = TheCall->getArg(1);
1394  if (!isBlockPointer(BlockArg)) {
1395  S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1396  << TheCall->getDirectCallee() << "block";
1397  return true;
1398  }
1399  return checkOpenCLBlockArgs(S, BlockArg);
1400 }
1401 
1402 /// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
1403 /// get_kernel_work_group_size
1404 /// and get_kernel_preferred_work_group_size_multiple builtin functions.
1405 static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
1406  if (checkArgCount(S, TheCall, 1))
1407  return true;
1408 
1409  Expr *BlockArg = TheCall->getArg(0);
1410  if (!isBlockPointer(BlockArg)) {
1411  S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1412  << TheCall->getDirectCallee() << "block";
1413  return true;
1414  }
1415  return checkOpenCLBlockArgs(S, BlockArg);
1416 }
1417 
1418 /// Diagnose integer type and any valid implicit conversion to it.
1419 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
1420  const QualType &IntType);
1421 
1422 static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
1423  unsigned Start, unsigned End) {
1424  bool IllegalParams = false;
1425  for (unsigned I = Start; I <= End; ++I)
1426  IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
1427  S.Context.getSizeType());
1428  return IllegalParams;
1429 }
1430 
1431 /// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
1432 /// 'local void*' parameters of the passed block.
1433 static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
1434  Expr *BlockArg,
1435  unsigned NumNonVarArgs) {
1436  const BlockPointerType *BPT =
1437  cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
1438  unsigned NumBlockParams =
1439  BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
1440  unsigned TotalNumArgs = TheCall->getNumArgs();
1441 
1442  // For each argument passed to the block, a corresponding uint needs to
1443  // be passed to describe the size of the local memory.
1444  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
1445  S.Diag(TheCall->getBeginLoc(),
1446  diag::err_opencl_enqueue_kernel_local_size_args);
1447  return true;
1448  }
1449 
1450  // Check that the sizes of the local memory are specified by integers.
1451  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
1452  TotalNumArgs - 1);
1453 }
1454 
1455 /// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
1456 /// overload formats specified in Table 6.13.17.1.
1457 /// int enqueue_kernel(queue_t queue,
1458 /// kernel_enqueue_flags_t flags,
1459 /// const ndrange_t ndrange,
1460 /// void (^block)(void))
1461 /// int enqueue_kernel(queue_t queue,
1462 /// kernel_enqueue_flags_t flags,
1463 /// const ndrange_t ndrange,
1464 /// uint num_events_in_wait_list,
1465 /// clk_event_t *event_wait_list,
1466 /// clk_event_t *event_ret,
1467 /// void (^block)(void))
1468 /// int enqueue_kernel(queue_t queue,
1469 /// kernel_enqueue_flags_t flags,
1470 /// const ndrange_t ndrange,
1471 /// void (^block)(local void*, ...),
1472 /// uint size0, ...)
1473 /// int enqueue_kernel(queue_t queue,
1474 /// kernel_enqueue_flags_t flags,
1475 /// const ndrange_t ndrange,
1476 /// uint num_events_in_wait_list,
1477 /// clk_event_t *event_wait_list,
1478 /// clk_event_t *event_ret,
1479 /// void (^block)(local void*, ...),
1480 /// uint size0, ...)
1481 static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
1482  unsigned NumArgs = TheCall->getNumArgs();
1483 
1484  if (NumArgs < 4) {
1485  S.Diag(TheCall->getBeginLoc(),
1486  diag::err_typecheck_call_too_few_args_at_least)
1487  << 0 << 4 << NumArgs;
1488  return true;
1489  }
1490 
1491  Expr *Arg0 = TheCall->getArg(0);
1492  Expr *Arg1 = TheCall->getArg(1);
1493  Expr *Arg2 = TheCall->getArg(2);
1494  Expr *Arg3 = TheCall->getArg(3);
1495 
1496  // First argument always needs to be a queue_t type.
1497  if (!Arg0->getType()->isQueueT()) {
1498  S.Diag(TheCall->getArg(0)->getBeginLoc(),
1499  diag::err_opencl_builtin_expected_type)
1500  << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
1501  return true;
1502  }
1503 
1504  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
1505  if (!Arg1->getType()->isIntegerType()) {
1506  S.Diag(TheCall->getArg(1)->getBeginLoc(),
1507  diag::err_opencl_builtin_expected_type)
1508  << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
1509  return true;
1510  }
1511 
1512  // Third argument is always an ndrange_t type.
1513  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
1514  S.Diag(TheCall->getArg(2)->getBeginLoc(),
1515  diag::err_opencl_builtin_expected_type)
1516  << TheCall->getDirectCallee() << "'ndrange_t'";
1517  return true;
1518  }
1519 
1520  // With four arguments, there is only one form that the function could be
1521  // called in: no events and no variable arguments.
1522  if (NumArgs == 4) {
1523  // check that the last argument is the right block type.
1524  if (!isBlockPointer(Arg3)) {
1525  S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1526  << TheCall->getDirectCallee() << "block";
1527  return true;
1528  }
1529  // we have a block type, check the prototype
1530  const BlockPointerType *BPT =
1531  cast<BlockPointerType>(Arg3->getType().getCanonicalType());
1532  if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
1533  S.Diag(Arg3->getBeginLoc(),
1534  diag::err_opencl_enqueue_kernel_blocks_no_args);
1535  return true;
1536  }
1537  return false;
1538  }
1539  // we can have block + varargs.
1540  if (isBlockPointer(Arg3))
1541  return (checkOpenCLBlockArgs(S, Arg3) ||
1542  checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
1543  // last two cases with either exactly 7 args or 7 args and varargs.
1544  if (NumArgs >= 7) {
1545  // check common block argument.
1546  Expr *Arg6 = TheCall->getArg(6);
1547  if (!isBlockPointer(Arg6)) {
1548  S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1549  << TheCall->getDirectCallee() << "block";
1550  return true;
1551  }
1552  if (checkOpenCLBlockArgs(S, Arg6))
1553  return true;
1554 
1555  // Fourth argument has to be an integer type.
1556  if (!Arg3->getType()->isIntegerType()) {
1557  S.Diag(TheCall->getArg(3)->getBeginLoc(),
1558  diag::err_opencl_builtin_expected_type)
1559  << TheCall->getDirectCallee() << "integer";
1560  return true;
1561  }
1562  // check remaining common arguments.
1563  Expr *Arg4 = TheCall->getArg(4);
1564  Expr *Arg5 = TheCall->getArg(5);
1565 
1566  // Fifth argument is always passed as a pointer to clk_event_t.
1567  if (!Arg4->isNullPointerConstant(S.Context,
1570  S.Diag(TheCall->getArg(4)->getBeginLoc(),
1571  diag::err_opencl_builtin_expected_type)
1572  << TheCall->getDirectCallee()
1574  return true;
1575  }
1576 
1577  // Sixth argument is always passed as a pointer to clk_event_t.
1578  if (!Arg5->isNullPointerConstant(S.Context,
1580  !(Arg5->getType()->isPointerType() &&
1581  Arg5->getType()->getPointeeType()->isClkEventT())) {
1582  S.Diag(TheCall->getArg(5)->getBeginLoc(),
1583  diag::err_opencl_builtin_expected_type)
1584  << TheCall->getDirectCallee()
1586  return true;
1587  }
1588 
1589  if (NumArgs == 7)
1590  return false;
1591 
1592  return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
1593  }
1594 
1595  // None of the specific cases was matched; give a generic error.
1596  S.Diag(TheCall->getBeginLoc(),
1597  diag::err_opencl_enqueue_kernel_incorrect_args);
1598  return true;
1599 }
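// For example, the simplest form (no events, no variadic size arguments) is
//   enqueue_kernel(Q, Flags, ND, ^(void){ ... });
// with 'Q' a queue_t, 'Flags' a kernel_enqueue_flags_t value and 'ND' an
// ndrange_t; a block taking 'local void *' parameters must be followed by one
// uint size argument per such parameter.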
1600 
1601 /// Returns OpenCL access qual.
1602 static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
1603  return D->getAttr<OpenCLAccessAttr>();
1604 }
1605 
1606 /// Returns true if the first argument is not a pipe, or if its access qualifier does not match the built-in being called.
1607 static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
1608  const Expr *Arg0 = Call->getArg(0);
1609  // First argument type should always be pipe.
1610  if (!Arg0->getType()->isPipeType()) {
1611  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1612  << Call->getDirectCallee() << Arg0->getSourceRange();
1613  return true;
1614  }
1615  OpenCLAccessAttr *AccessQual =
1616  getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
1617  // Validates the access qualifier is compatible with the call.
1618  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
1619  // read_only and write_only, and assumed to be read_only if no qualifier is
1620  // specified.
1621  switch (Call->getDirectCallee()->getBuiltinID()) {
1622  case Builtin::BIread_pipe:
1623  case Builtin::BIreserve_read_pipe:
1624  case Builtin::BIcommit_read_pipe:
1625  case Builtin::BIwork_group_reserve_read_pipe:
1626  case Builtin::BIsub_group_reserve_read_pipe:
1627  case Builtin::BIwork_group_commit_read_pipe:
1628  case Builtin::BIsub_group_commit_read_pipe:
1629  if (!(!AccessQual || AccessQual->isReadOnly())) {
1630  S.Diag(Arg0->getBeginLoc(),
1631  diag::err_opencl_builtin_pipe_invalid_access_modifier)
1632  << "read_only" << Arg0->getSourceRange();
1633  return true;
1634  }
1635  break;
1636  case Builtin::BIwrite_pipe:
1637  case Builtin::BIreserve_write_pipe:
1638  case Builtin::BIcommit_write_pipe:
1639  case Builtin::BIwork_group_reserve_write_pipe:
1640  case Builtin::BIsub_group_reserve_write_pipe:
1641  case Builtin::BIwork_group_commit_write_pipe:
1642  case Builtin::BIsub_group_commit_write_pipe:
1643  if (!(AccessQual && AccessQual->isWriteOnly())) {
1644  S.Diag(Arg0->getBeginLoc(),
1645  diag::err_opencl_builtin_pipe_invalid_access_modifier)
1646  << "write_only" << Arg0->getSourceRange();
1647  return true;
1648  }
1649  break;
1650  default:
1651  break;
1652  }
1653  return false;
1654 }
1655 
1656 /// Returns true if the pipe element type differs from the pointee type of the argument at Idx.
1657 static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
1658  const Expr *Arg0 = Call->getArg(0);
1659  const Expr *ArgIdx = Call->getArg(Idx);
1660  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
1661  const QualType EltTy = PipeTy->getElementType();
1662  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
1663  // The argument at Idx should be a pointer, and its pointee type should
1664  // match the pipe element type.
1665  if (!ArgTy ||
1666  !S.Context.hasSameType(
1667  EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
1668  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1669  << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
1670  << ArgIdx->getType() << ArgIdx->getSourceRange();
1671  return true;
1672  }
1673  return false;
1674 }
1675 
1676 // Performs semantic analysis for the read/write_pipe call.
1677 // \param S Reference to the semantic analyzer.
1678 // \param Call A pointer to the builtin call.
1679 // \return True if a semantic error has been found, false otherwise.
1680 static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
1681  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
1682  // functions have two forms.
1683  switch (Call->getNumArgs()) {
1684  case 2:
1685  if (checkOpenCLPipeArg(S, Call))
1686  return true;
1687  // The call with 2 arguments should be
1688  // read/write_pipe(pipe T, T*).
1689  // Check packet type T.
1690  if (checkOpenCLPipePacketType(S, Call, 1))
1691  return true;
1692  break;
1693 
1694  case 4: {
1695  if (checkOpenCLPipeArg(S, Call))
1696  return true;
1697  // The call with 4 arguments should be
1698  // read/write_pipe(pipe T, reserve_id_t, uint, T*).
1699  // Check reserve_id_t.
1700  if (!Call->getArg(1)->getType()->isReserveIDT()) {
1701  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1702  << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1703  << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1704  return true;
1705  }
1706 
1707  // Check the index.
1708  const Expr *Arg2 = Call->getArg(2);
1709  if (!Arg2->getType()->isIntegerType() &&
1710  !Arg2->getType()->isUnsignedIntegerType()) {
1711  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1712  << Call->getDirectCallee() << S.Context.UnsignedIntTy
1713  << Arg2->getType() << Arg2->getSourceRange();
1714  return true;
1715  }
1716 
1717  // Check packet type T.
1718  if (checkOpenCLPipePacketType(S, Call, 3))
1719  return true;
1720  } break;
1721  default:
1722  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
1723  << Call->getDirectCallee() << Call->getSourceRange();
1724  return true;
1725  }
1726 
1727  return false;
1728 }
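// The two accepted call forms, as a sketch with hypothetical names:
//
//   int ok1 = read_pipe(p, &val);               // 2-arg form: (pipe T, T *)
//   reserve_id_t rid = reserve_read_pipe(p, 4);
//   int ok2 = read_pipe(p, rid, 2, &val);       // 4-arg form: adds reserve_id_t
//                                               // and a uint packet index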
1729 
1730 // Performs semantic analysis on the {work_group_|sub_group_|}
1731 // reserve_{read|write}_pipe builtins.
1732 // \param S Reference to the semantic analyzer.
1733 // \param Call The call to the builtin function to be analyzed.
1734 // \return True if a semantic error was found, false otherwise.
1735 static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
1736  if (checkArgCount(S, Call, 2))
1737  return true;
1738 
1739  if (checkOpenCLPipeArg(S, Call))
1740  return true;
1741 
1742  // Check the reserve size.
1743  if (!Call->getArg(1)->getType()->isIntegerType() &&
1744  !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
1745  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1746  << Call->getDirectCallee() << S.Context.UnsignedIntTy
1747  << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1748  return true;
1749  }
1750 
1751  // Since the return type of the reserve_read/write_pipe built-in functions is
1752  // reserve_id_t, which is not defined in the builtin def file, we use int as
1753  // the return type there and need to override it here.
1754  Call->setType(S.Context.OCLReserveIDTy);
1755 
1756  return false;
1757 }
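// Because of the return-type override above, a sketch like the following
// (hypothetical names) type-checks with reserve_id_t rather than int:
//
//   reserve_id_t rid = reserve_read_pipe(p, 2);
//   if (is_valid_reserve_id(rid)) { /* use the reservation */ }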
1758 
1759 // Performs semantic analysis on the {work_group_|sub_group_|}
1760 // commit_{read|write}_pipe builtins.
1761 // \param S Reference to the semantic analyzer.
1762 // \param Call The call to the builtin function to be analyzed.
1763 // \return True if a semantic error was found, false otherwise.
1764 static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
1765  if (checkArgCount(S, Call, 2))
1766  return true;
1767 
1768  if (checkOpenCLPipeArg(S, Call))
1769  return true;
1770 
1771  // Check reserve_id_t.
1772  if (!Call->getArg(1)->getType()->isReserveIDT()) {
1773  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1774  << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1775  << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1776  return true;
1777  }
1778 
1779  return false;
1780 }
1781 
1782 // Performs a semantic analysis on the call to built-in Pipe
1783 // Query Functions.
1784 // \param S Reference to the semantic analyzer.
1785 // \param Call The call to the builtin function to be analyzed.
1786 // \return True if a semantic error was found, false otherwise.
1787 static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
1788  if (checkArgCount(S, Call, 1))
1789  return true;
1790 
1791  if (!Call->getArg(0)->getType()->isPipeType()) {
1792  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1793  << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
1794  return true;
1795  }
1796 
1797  return false;
1798 }
1799 
1800 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
1801 // Performs semantic analysis for the to_global/local/private call.
1802 // \param S Reference to the semantic analyzer.
1803 // \param BuiltinID ID of the builtin function.
1804 // \param Call A pointer to the builtin call.
1805 // \return True if a semantic error has been found, false otherwise.
1806 static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
1807  CallExpr *Call) {
1808  if (checkArgCount(S, Call, 1))
1809  return true;
1810 
1811  auto RT = Call->getArg(0)->getType();
1812  if (!RT->isPointerType() || RT->getPointeeType()
1813  .getAddressSpace() == LangAS::opencl_constant) {
1814  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
1815  << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
1816  return true;
1817  }
1818 
1819  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
1820  S.Diag(Call->getArg(0)->getBeginLoc(),
1821  diag::warn_opencl_generic_address_space_arg)
1822  << Call->getDirectCallee()->getNameInfo().getAsString()
1823  << Call->getArg(0)->getSourceRange();
1824  }
1825 
1826  RT = RT->getPointeeType();
1827  auto Qual = RT.getQualifiers();
1828  switch (BuiltinID) {
1829  case Builtin::BIto_global:
1830  Qual.setAddressSpace(LangAS::opencl_global);
1831  break;
1832  case Builtin::BIto_local:
1833  Qual.setAddressSpace(LangAS::opencl_local);
1834  break;
1835  case Builtin::BIto_private:
1836  Qual.setAddressSpace(LangAS::opencl_private);
1837  break;
1838  default:
1839  llvm_unreachable("Invalid builtin function");
1840  }
1841  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
1842  RT.getUnqualifiedType(), Qual)));
1843 
1844  return false;
1845 }
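// Sketch (hypothetical names): the argument must be a pointer that is not in
// the constant address space, and the call is retyped to carry the target
// address space.
//
//   generic int *g = ...;
//   global int *gp = to_global(g);   // OK; result has a __global pointee
//   constant int *c = ...;
//   to_global(c);                    // diagnosed: constant pointers rejected
//   local int *l = ...;
//   to_private(l);                   // accepted, but warns: argument is not
//                                    // in the generic address space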
1846 
1847 static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
1848  if (checkArgCount(S, TheCall, 1))
1849  return ExprError();
1850 
1851  // Compute __builtin_launder's parameter type from the argument.
1852  // The parameter type is:
1853  // * The type of the argument if it's not an array or function type,
1854  // Otherwise,
1855  // * The decayed argument type.
1856  QualType ParamTy = [&]() {
1857  QualType ArgTy = TheCall->getArg(0)->getType();
1858  if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
1859  return S.Context.getPointerType(Ty->getElementType());
1860  if (ArgTy->isFunctionType()) {
1861  return S.Context.getPointerType(ArgTy);
1862  }
1863  return ArgTy;
1864  }();
1865 
1866  TheCall->setType(ParamTy);
1867 
1868  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
1869  if (!ParamTy->isPointerType())
1870  return 0;
1871  if (ParamTy->isFunctionPointerType())
1872  return 1;
1873  if (ParamTy->isVoidPointerType())
1874  return 2;
1875  return llvm::Optional<unsigned>{};
1876  }();
1877  if (DiagSelect.hasValue()) {
1878  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
1879  << DiagSelect.getValue() << TheCall->getSourceRange();
1880  return ExprError();
1881  }
1882 
1883  // We either have an incomplete class type, or we have a class template
1884  // whose instantiation has not been forced. Example:
1885  //
1886  // template <class T> struct Foo { T value; };
1887  // Foo<int> *p = nullptr;
1888  // auto *d = __builtin_launder(p);
1889  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
1890  diag::err_incomplete_type))
1891  return ExprError();
1892 
1893  assert(ParamTy->getPointeeType()->isObjectType() &&
1894  "Unhandled non-object pointer case");
1895 
1896  InitializedEntity Entity =
1897  InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
1898  ExprResult Arg =
1899  S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
1900  if (Arg.isInvalid())
1901  return ExprError();
1902  TheCall->setArg(0, Arg.get());
1903 
1904  return TheCall;
1905 }
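// Sketch of the parameter-type computation above (hypothetical names):
//
//   int a[4];
//   auto *p = __builtin_launder(a);  // array decays; the call is typed int *
//   void *v = nullptr;
//   __builtin_launder(v);            // diagnosed: void pointers are rejected
//   __builtin_launder(42);           // diagnosed: argument is not a pointer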
1906 
1907 // Emit an error and return true if the current object format type is in the
1908 // list of unsupported types.
1909 static bool CheckBuiltinTargetNotInUnsupported(
1910  Sema &S, unsigned BuiltinID, CallExpr *TheCall,
1911  ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) {
1912  llvm::Triple::ObjectFormatType CurObjFormat =
1913  S.getASTContext().getTargetInfo().getTriple().getObjectFormat();
1914  if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) {
1915  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
1916  << TheCall->getSourceRange();
1917  return true;
1918  }
1919  return false;
1920 }
1921 
1922 // Emit an error and return true if the current architecture is not in the list
1923 // of supported architectures.
1924 static bool
1925 CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
1926  ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
1927  llvm::Triple::ArchType CurArch =
1928  S.getASTContext().getTargetInfo().getTriple().getArch();
1929  if (llvm::is_contained(SupportedArchs, CurArch))
1930  return false;
1931  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
1932  << TheCall->getSourceRange();
1933  return true;
1934 }
1935 
1936 static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
1937  SourceLocation CallSiteLoc);
1938 
1939 bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
1940  CallExpr *TheCall) {
1941  switch (TI.getTriple().getArch()) {
1942  default:
1943  // Some builtins don't require additional checking, so just consider these
1944  // acceptable.
1945  return false;
1946  case llvm::Triple::arm:
1947  case llvm::Triple::armeb:
1948  case llvm::Triple::thumb:
1949  case llvm::Triple::thumbeb:
1950  return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
1951  case llvm::Triple::aarch64:
1952  case llvm::Triple::aarch64_32:
1953  case llvm::Triple::aarch64_be:
1954  return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
1955  case llvm::Triple::bpfeb:
1956  case llvm::Triple::bpfel:
1957  return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
1958  case llvm::Triple::hexagon:
1959  return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
1960  case llvm::Triple::mips:
1961  case llvm::Triple::mipsel:
1962  case llvm::Triple::mips64:
1963  case llvm::Triple::mips64el:
1964  return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
1965  case llvm::Triple::systemz:
1966  return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
1967  case llvm::Triple::x86:
1968  case llvm::Triple::x86_64:
1969  return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
1970  case llvm::Triple::ppc:
1971  case llvm::Triple::ppcle:
1972  case llvm::Triple::ppc64:
1973  case llvm::Triple::ppc64le:
1974  return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
1975  case llvm::Triple::amdgcn:
1976  return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
1977  case llvm::Triple::riscv32:
1978  case llvm::Triple::riscv64:
1979  return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
1980  }
1981 }
1982 
1983 ExprResult
1984 Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
1985  CallExpr *TheCall) {
1986  ExprResult TheCallResult(TheCall);
1987 
1988  // Find out if any arguments are required to be integer constant expressions.
1989  unsigned ICEArguments = 0;
1990  ASTContext::GetBuiltinTypeError Error;
1991  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
1992  if (Error != ASTContext::GE_None)
1993  ICEArguments = 0; // Don't diagnose previously diagnosed errors.
1994 
1995  // If any arguments are required to be ICEs, check and diagnose.
1996  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
1997  // Skip arguments not required to be ICEs.
1998  if ((ICEArguments & (1 << ArgNo)) == 0) continue;
1999 
2000  llvm::APSInt Result;
2001  // If we don't have enough arguments, continue so we can issue a better
2002  // diagnostic in checkArgCount(...).
2003  if (ArgNo < TheCall->getNumArgs() &&
2004  SemaBuiltinConstantArg(TheCall, ArgNo, Result))
2005  return true;
2006  ICEArguments &= ~(1 << ArgNo);
2007  }
2008 
2009  switch (BuiltinID) {
2010  case Builtin::BI__builtin___CFStringMakeConstantString:
2011  // CFStringMakeConstantString is currently not implemented for GOFF (i.e.,
2012  // on z/OS) and for XCOFF (i.e., on AIX). Emit an error on those targets.
2013  if (CheckBuiltinTargetNotInUnsupported(
2014  *this, BuiltinID, TheCall,
2015  {llvm::Triple::GOFF, llvm::Triple::XCOFF}))
2016  return ExprError();
2017  assert(TheCall->getNumArgs() == 1 &&
2018  "Wrong # arguments to builtin CFStringMakeConstantString");
2019  if (CheckObjCString(TheCall->getArg(0)))
2020  return ExprError();
2021  break;
2022  case Builtin::BI__builtin_ms_va_start:
2023  case Builtin::BI__builtin_stdarg_start:
2024  case Builtin::BI__builtin_va_start:
2025  if (SemaBuiltinVAStart(BuiltinID, TheCall))
2026  return ExprError();
2027  break;
2028  case Builtin::BI__va_start: {
2029  switch (Context.getTargetInfo().getTriple().getArch()) {
2030  case llvm::Triple::aarch64:
2031  case llvm::Triple::arm:
2032  case llvm::Triple::thumb:
2033  if (SemaBuiltinVAStartARMMicrosoft(TheCall))
2034  return ExprError();
2035  break;
2036  default:
2037  if (SemaBuiltinVAStart(BuiltinID, TheCall))
2038  return ExprError();
2039  break;
2040  }
2041  break;
2042  }
2043 
2044  // The acquire, release, and no fence variants are ARM and AArch64 only.
2045  case Builtin::BI_interlockedbittestandset_acq:
2046  case Builtin::BI_interlockedbittestandset_rel:
2047  case Builtin::BI_interlockedbittestandset_nf:
2048  case Builtin::BI_interlockedbittestandreset_acq:
2049  case Builtin::BI_interlockedbittestandreset_rel:
2050  case Builtin::BI_interlockedbittestandreset_nf:
2051  if (CheckBuiltinTargetInSupported(
2052  *this, BuiltinID, TheCall,
2053  {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
2054  return ExprError();
2055  break;
2056 
2057  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
2058  case Builtin::BI_bittest64:
2059  case Builtin::BI_bittestandcomplement64:
2060  case Builtin::BI_bittestandreset64:
2061  case Builtin::BI_bittestandset64:
2062  case Builtin::BI_interlockedbittestandreset64:
2063  case Builtin::BI_interlockedbittestandset64:
2064  if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall,
2065  {llvm::Triple::x86_64, llvm::Triple::arm,
2066  llvm::Triple::thumb,
2067  llvm::Triple::aarch64}))
2068  return ExprError();
2069  break;
2070 
2071  case Builtin::BI__builtin_isgreater:
2072  case Builtin::BI__builtin_isgreaterequal:
2073  case Builtin::BI__builtin_isless:
2074  case Builtin::BI__builtin_islessequal:
2075  case Builtin::BI__builtin_islessgreater:
2076  case Builtin::BI__builtin_isunordered:
2077  if (SemaBuiltinUnorderedCompare(TheCall))
2078  return ExprError();
2079  break;
2080  case Builtin::BI__builtin_fpclassify:
2081  if (SemaBuiltinFPClassification(TheCall, 6))
2082  return ExprError();
2083  break;
2084  case Builtin::BI__builtin_isfinite:
2085  case Builtin::BI__builtin_isinf:
2086  case Builtin::BI__builtin_isinf_sign:
2087  case Builtin::BI__builtin_isnan:
2088  case Builtin::BI__builtin_isnormal:
2089  case Builtin::BI__builtin_signbit:
2090  case Builtin::BI__builtin_signbitf:
2091  case Builtin::BI__builtin_signbitl:
2092  if (SemaBuiltinFPClassification(TheCall, 1))
2093  return ExprError();
2094  break;
2095  case Builtin::BI__builtin_shufflevector:
2096  return SemaBuiltinShuffleVector(TheCall);
2097  // TheCall will be freed by the smart pointer here, but that's fine, since
2098  // SemaBuiltinShuffleVector guts it, but then doesn't release it.
2099  case Builtin::BI__builtin_prefetch:
2100  if (SemaBuiltinPrefetch(TheCall))
2101  return ExprError();
2102  break;
2103  case Builtin::BI__builtin_alloca_with_align:
2104  case Builtin::BI__builtin_alloca_with_align_uninitialized:
2105  if (SemaBuiltinAllocaWithAlign(TheCall))
2106  return ExprError();
2107  LLVM_FALLTHROUGH;
2108  case Builtin::BI__builtin_alloca:
2109  case Builtin::BI__builtin_alloca_uninitialized:
2110  Diag(TheCall->getBeginLoc(), diag::warn_alloca)
2111  << TheCall->getDirectCallee();
2112  break;
2113  case Builtin::BI__arithmetic_fence:
2114  if (SemaBuiltinArithmeticFence(TheCall))
2115  return ExprError();
2116  break;
2117  case Builtin::BI__assume:
2118  case Builtin::BI__builtin_assume:
2119  if (SemaBuiltinAssume(TheCall))
2120  return ExprError();
2121  break;
2122  case Builtin::BI__builtin_assume_aligned:
2123  if (SemaBuiltinAssumeAligned(TheCall))
2124  return ExprError();
2125  break;
2126  case Builtin::BI__builtin_dynamic_object_size:
2127  case Builtin::BI__builtin_object_size:
2128  if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
2129  return ExprError();
2130  break;
2131  case Builtin::BI__builtin_longjmp:
2132  if (SemaBuiltinLongjmp(TheCall))
2133  return ExprError();
2134  break;
2135  case Builtin::BI__builtin_setjmp:
2136  if (SemaBuiltinSetjmp(TheCall))
2137  return ExprError();
2138  break;
2139  case Builtin::BI__builtin_classify_type:
2140  if (checkArgCount(*this, TheCall, 1)) return true;
2141  TheCall->setType(Context.IntTy);
2142  break;
2143  case Builtin::BI__builtin_complex:
2144  if (SemaBuiltinComplex(TheCall))
2145  return ExprError();
2146  break;
2147  case Builtin::BI__builtin_constant_p: {
2148  if (checkArgCount(*this, TheCall, 1)) return true;
2149  ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
2150  if (Arg.isInvalid()) return true;
2151  TheCall->setArg(0, Arg.get());
2152  TheCall->setType(Context.IntTy);
2153  break;
2154  }
2155  case Builtin::BI__builtin_launder:
2156  return SemaBuiltinLaunder(*this, TheCall);
2157  case Builtin::BI__sync_fetch_and_add:
2158  case Builtin::BI__sync_fetch_and_add_1:
2159  case Builtin::BI__sync_fetch_and_add_2:
2160  case Builtin::BI__sync_fetch_and_add_4:
2161  case Builtin::BI__sync_fetch_and_add_8:
2162  case Builtin::BI__sync_fetch_and_add_16:
2163  case Builtin::BI__sync_fetch_and_sub:
2164  case Builtin::BI__sync_fetch_and_sub_1:
2165  case Builtin::BI__sync_fetch_and_sub_2:
2166  case Builtin::BI__sync_fetch_and_sub_4:
2167  case Builtin::BI__sync_fetch_and_sub_8:
2168  case Builtin::BI__sync_fetch_and_sub_16:
2169  case Builtin::BI__sync_fetch_and_or:
2170  case Builtin::BI__sync_fetch_and_or_1:
2171  case Builtin::BI__sync_fetch_and_or_2:
2172  case Builtin::BI__sync_fetch_and_or_4:
2173  case Builtin::BI__sync_fetch_and_or_8:
2174  case Builtin::BI__sync_fetch_and_or_16:
2175  case Builtin::BI__sync_fetch_and_and:
2176  case Builtin::BI__sync_fetch_and_and_1:
2177  case Builtin::BI__sync_fetch_and_and_2:
2178  case Builtin::BI__sync_fetch_and_and_4:
2179  case Builtin::BI__sync_fetch_and_and_8:
2180  case Builtin::BI__sync_fetch_and_and_16:
2181  case Builtin::BI__sync_fetch_and_xor:
2182  case Builtin::BI__sync_fetch_and_xor_1:
2183  case Builtin::BI__sync_fetch_and_xor_2:
2184  case Builtin::BI__sync_fetch_and_xor_4:
2185  case Builtin::BI__sync_fetch_and_xor_8:
2186  case Builtin::BI__sync_fetch_and_xor_16:
2187  case Builtin::BI__sync_fetch_and_nand:
2188  case Builtin::BI__sync_fetch_and_nand_1:
2189  case Builtin::BI__sync_fetch_and_nand_2:
2190  case Builtin::BI__sync_fetch_and_nand_4:
2191  case Builtin::BI__sync_fetch_and_nand_8:
2192  case Builtin::BI__sync_fetch_and_nand_16:
2193  case Builtin::BI__sync_add_and_fetch:
2194  case Builtin::BI__sync_add_and_fetch_1:
2195  case Builtin::BI__sync_add_and_fetch_2:
2196  case Builtin::BI__sync_add_and_fetch_4:
2197  case Builtin::BI__sync_add_and_fetch_8:
2198  case Builtin::BI__sync_add_and_fetch_16:
2199  case Builtin::BI__sync_sub_and_fetch:
2200  case Builtin::BI__sync_sub_and_fetch_1:
2201  case Builtin::BI__sync_sub_and_fetch_2:
2202  case Builtin::BI__sync_sub_and_fetch_4:
2203  case Builtin::BI__sync_sub_and_fetch_8:
2204  case Builtin::BI__sync_sub_and_fetch_16:
2205  case Builtin::BI__sync_and_and_fetch:
2206  case Builtin::BI__sync_and_and_fetch_1:
2207  case Builtin::BI__sync_and_and_fetch_2:
2208  case Builtin::BI__sync_and_and_fetch_4:
2209  case Builtin::BI__sync_and_and_fetch_8:
2210  case Builtin::BI__sync_and_and_fetch_16:
2211  case Builtin::BI__sync_or_and_fetch:
2212  case Builtin::BI__sync_or_and_fetch_1:
2213  case Builtin::BI__sync_or_and_fetch_2:
2214  case Builtin::BI__sync_or_and_fetch_4:
2215  case Builtin::BI__sync_or_and_fetch_8:
2216  case Builtin::BI__sync_or_and_fetch_16:
2217  case Builtin::BI__sync_xor_and_fetch:
2218  case Builtin::BI__sync_xor_and_fetch_1:
2219  case Builtin::BI__sync_xor_and_fetch_2:
2220  case Builtin::BI__sync_xor_and_fetch_4:
2221  case Builtin::BI__sync_xor_and_fetch_8:
2222  case Builtin::BI__sync_xor_and_fetch_16:
2223  case Builtin::BI__sync_nand_and_fetch:
2224  case Builtin::BI__sync_nand_and_fetch_1:
2225  case Builtin::BI__sync_nand_and_fetch_2:
2226  case Builtin::BI__sync_nand_and_fetch_4:
2227  case Builtin::BI__sync_nand_and_fetch_8:
2228  case Builtin::BI__sync_nand_and_fetch_16:
2229  case Builtin::BI__sync_val_compare_and_swap:
2230  case Builtin::BI__sync_val_compare_and_swap_1:
2231  case Builtin::BI__sync_val_compare_and_swap_2:
2232  case Builtin::BI__sync_val_compare_and_swap_4:
2233  case Builtin::BI__sync_val_compare_and_swap_8:
2234  case Builtin::BI__sync_val_compare_and_swap_16:
2235  case Builtin::BI__sync_bool_compare_and_swap:
2236  case Builtin::BI__sync_bool_compare_and_swap_1:
2237  case Builtin::BI__sync_bool_compare_and_swap_2:
2238  case Builtin::BI__sync_bool_compare_and_swap_4:
2239  case Builtin::BI__sync_bool_compare_and_swap_8:
2240  case Builtin::BI__sync_bool_compare_and_swap_16:
2241  case Builtin::BI__sync_lock_test_and_set:
2242  case Builtin::BI__sync_lock_test_and_set_1:
2243  case Builtin::BI__sync_lock_test_and_set_2:
2244  case Builtin::BI__sync_lock_test_and_set_4:
2245  case Builtin::BI__sync_lock_test_and_set_8:
2246  case Builtin::BI__sync_lock_test_and_set_16:
2247  case Builtin::BI__sync_lock_release:
2248  case Builtin::BI__sync_lock_release_1:
2249  case Builtin::BI__sync_lock_release_2:
2250  case Builtin::BI__sync_lock_release_4:
2251  case Builtin::BI__sync_lock_release_8:
2252  case Builtin::BI__sync_lock_release_16:
2253  case Builtin::BI__sync_swap:
2254  case Builtin::BI__sync_swap_1:
2255  case Builtin::BI__sync_swap_2:
2256  case Builtin::BI__sync_swap_4:
2257  case Builtin::BI__sync_swap_8:
2258  case Builtin::BI__sync_swap_16:
2259  return SemaBuiltinAtomicOverloaded(TheCallResult);
2260  case Builtin::BI__sync_synchronize:
2261  Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
2262  << TheCall->getCallee()->getSourceRange();
2263  break;
2264  case Builtin::BI__builtin_nontemporal_load:
2265  case Builtin::BI__builtin_nontemporal_store:
2266  return SemaBuiltinNontemporalOverloaded(TheCallResult);
2267  case Builtin::BI__builtin_memcpy_inline: {
2268  if (checkArgCount(*this, TheCall, 3))
2269  return ExprError();
2270  auto ArgArrayConversionFailed = [&](unsigned Arg) {
2271  ExprResult ArgExpr =
2272  DefaultFunctionArrayLvalueConversion(TheCall->getArg(Arg));
2273  if (ArgExpr.isInvalid())
2274  return true;
2275  TheCall->setArg(Arg, ArgExpr.get());
2276  return false;
2277  };
2278 
2279  if (ArgArrayConversionFailed(0) || ArgArrayConversionFailed(1))
2280  return true;
2281  clang::Expr *SizeOp = TheCall->getArg(2);
2282  // We warn about copying to or from `nullptr` pointers when `size` is
2283  // greater than 0. When `size` is value dependent we cannot evaluate its
2284  // value so we bail out.
2285  if (SizeOp->isValueDependent())
2286  break;
2287  if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) {
2288  CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
2289  CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc());
2290  }
2291  break;
2292  }
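// Sketch of the non-null check above (hypothetical dst/src): with a constant
// non-zero size the pointer arguments are checked for null, while a zero size
// skips that check.
//
//   __builtin_memcpy_inline(dst, src, 16);     // dst and src checked for null
//   __builtin_memcpy_inline(dst, nullptr, 0);  // size 0: no null diagnostic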
2293 #define BUILTIN(ID, TYPE, ATTRS)
2294 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
2295  case Builtin::BI##ID: \
2296  return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
2297 #include "clang/Basic/Builtins.def"
2298  case Builtin::BI__annotation:
2299  if (SemaBuiltinMSVCAnnotation(*this, TheCall))
2300  return ExprError();
2301  break;
2302  case Builtin::BI__builtin_annotation:
2303  if (SemaBuiltinAnnotation(*this, TheCall))
2304  return ExprError();
2305  break;
2306  case Builtin::BI__builtin_addressof:
2307  if (SemaBuiltinAddressof(*this, TheCall))
2308  return ExprError();
2309  break;
2310  case Builtin::BI__builtin_function_start:
2311  if (SemaBuiltinFunctionStart(*this, TheCall))
2312  return ExprError();
2313  break;
2314  case Builtin::BI__builtin_is_aligned:
2315  case Builtin::BI__builtin_align_up:
2316  case Builtin::BI__builtin_align_down:
2317  if (SemaBuiltinAlignment(*this, TheCall, BuiltinID))
2318  return ExprError();
2319  break;
2320  case Builtin::BI__builtin_add_overflow:
2321  case Builtin::BI__builtin_sub_overflow:
2322  case Builtin::BI__builtin_mul_overflow:
2323  if (SemaBuiltinOverflow(*this, TheCall, BuiltinID))
2324  return ExprError();
2325  break;
2326  case Builtin::BI__builtin_operator_new:
2327  case Builtin::BI__builtin_operator_delete: {
2328  bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
2329  ExprResult Res =
2330  SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
2331  if (Res.isInvalid())
2332  CorrectDelayedTyposInExpr(TheCallResult.get());
2333  return Res;
2334  }
2335  case Builtin::BI__builtin_dump_struct:
2336  return SemaBuiltinDumpStruct(*this, TheCall);
2337  case Builtin::BI__builtin_expect_with_probability: {
2338  // We first want to ensure we are called with 3 arguments
2339  if (checkArgCount(*this, TheCall, 3))
2340  return ExprError();
2341  // then check that the probability is a constant float in the range [0.0, 1.0].
2342  const Expr *ProbArg = TheCall->getArg(2);
2343  SmallVector<PartialDiagnosticAt, 8> Notes;
2344  Expr::EvalResult Eval;
2345  Eval.Diag = &Notes;
2346  if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) ||
2347  !Eval.Val.isFloat()) {
2348  Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float)
2349  << ProbArg->getSourceRange();
2350  for (const PartialDiagnosticAt &PDiag : Notes)
2351  Diag(PDiag.first, PDiag.second);
2352  return ExprError();
2353  }
2354  llvm::APFloat Probability = Eval.Val.getFloat();
2355  bool LoseInfo = false;
2356  Probability.convert(llvm::APFloat::IEEEdouble(),
2357  llvm::RoundingMode::Dynamic, &LoseInfo);
2358  if (!(Probability >= llvm::APFloat(0.0) &&
2359  Probability <= llvm::APFloat(1.0))) {
2360  Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range)
2361  << ProbArg->getSourceRange();
2362  return ExprError();
2363  }
2364  break;
2365  }
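// Sketch (hypothetical condition `cond`): the third argument must fold to a
// constant floating-point value in [0.0, 1.0].
//
//   __builtin_expect_with_probability(cond, 1, 0.9);  // OK
//   __builtin_expect_with_probability(cond, 1, 1.5);  // diagnosed: out of range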
2366  case Builtin::BI__builtin_preserve_access_index:
2367  if (SemaBuiltinPreserveAI(*this, TheCall))
2368  return ExprError();
2369  break;
2370  case Builtin::BI__builtin_call_with_static_chain:
2371  if (SemaBuiltinCallWithStaticChain(*this, TheCall))
2372  return ExprError();
2373  break;
2374  case Builtin::BI__exception_code:
2375  case Builtin::BI_exception_code:
2376  if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
2377  diag::err_seh___except_block))
2378  return ExprError();
2379  break;
2380  case Builtin::BI__exception_info:
2381  case Builtin::BI_exception_info:
2382  if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
2383  diag::err_seh___except_filter))
2384  return ExprError();
2385  break;
2386  case Builtin::BI__GetExceptionInfo:
2387  if (checkArgCount(*this, TheCall, 1))
2388  return ExprError();
2389 
2390  if (CheckCXXThrowOperand(
2391  TheCall->getBeginLoc(),
2392  Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
2393  TheCall))
2394  return ExprError();
2395 
2396  TheCall->setType(Context.VoidPtrTy);
2397  break;
2398  case Builtin::BIaddressof:
2399  case Builtin::BI__addressof:
2400  case Builtin::BIforward:
2401  case Builtin::BImove:
2402  case Builtin::BImove_if_noexcept:
2403  case Builtin::BIas_const: {
2404  // These are all expected to be of the form
2405  // T &/&&/* f(U &/&&)
2406  // where T and U only differ in qualification.
2407  if (checkArgCount(*this, TheCall, 1))
2408  return ExprError();
2409  QualType Param = FDecl->getParamDecl(0)->getType();
2410  QualType Result = FDecl->getReturnType();
2411  bool ReturnsPointer = BuiltinID == Builtin::BIaddressof ||
2412  BuiltinID == Builtin::BI__addressof;
2413  if (!(Param->isReferenceType() &&
2414  (ReturnsPointer ? Result->isPointerType()
2415  : Result->isReferenceType()) &&
2416  Context.hasSameUnqualifiedType(Param->getPointeeType(),
2417  Result->getPointeeType()))) {
2418  Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported)
2419  << FDecl;
2420  return ExprError();
2421  }
2422  break;
2423  }
2424  // OpenCL v2.0, s6.13.16 - Pipe functions
2425  case Builtin::BIread_pipe:
2426  case Builtin::BIwrite_pipe:
2427  // Since these two functions are declared with varargs, we need a semantic
2428  // check for the arguments.
2429  if (SemaBuiltinRWPipe(*this, TheCall))
2430  return ExprError();
2431  break;
2432  case Builtin::BIreserve_read_pipe:
2433  case Builtin::BIreserve_write_pipe:
2434  case Builtin::BIwork_group_reserve_read_pipe:
2435  case Builtin::BIwork_group_reserve_write_pipe:
2436  if (SemaBuiltinReserveRWPipe(*this, TheCall))
2437  return ExprError();
2438  break;
2439  case Builtin::BIsub_group_reserve_read_pipe:
2440  case Builtin::BIsub_group_reserve_write_pipe:
2441  if (checkOpenCLSubgroupExt(*this, TheCall) ||
2442  SemaBuiltinReserveRWPipe(*this, TheCall))
2443  return ExprError();
2444  break;
2445  case Builtin::BIcommit_read_pipe:
2446  case Builtin::BIcommit_write_pipe:
2447  case Builtin::BIwork_group_commit_read_pipe:
2448  case Builtin::BIwork_group_commit_write_pipe:
2449  if (SemaBuiltinCommitRWPipe(*this, TheCall))
2450  return ExprError();
2451  break;
2452  case Builtin::BIsub_group_commit_read_pipe:
2453  case Builtin::BIsub_group_commit_write_pipe:
2454  if (checkOpenCLSubgroupExt(*this, TheCall) ||
2455  SemaBuiltinCommitRWPipe(*this, TheCall))
2456  return ExprError();
2457  break;
2458  case Builtin::BIget_pipe_num_packets:
2459  case Builtin::BIget_pipe_max_packets:
2460  if (SemaBuiltinPipePackets(*this, TheCall))
2461  return ExprError();
2462  break;
2463  case Builtin::BIto_global:
2464  case Builtin::BIto_local:
2465  case Builtin::BIto_private:
2466  if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
2467  return ExprError();
2468  break;
2469  // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
2470  case Builtin::BIenqueue_kernel:
2471  if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
2472  return ExprError();
2473  break;
2474  case Builtin::BIget_kernel_work_group_size:
2475  case Builtin::BIget_kernel_preferred_work_group_size_multiple:
2476  if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
2477  return ExprError();
2478  break;
2479  case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
2480  case Builtin::BIget_kernel_sub_group_count_for_ndrange:
2481  if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
2482  return ExprError();
2483  break;
2484  case Builtin::BI__builtin_os_log_format:
2485  Cleanup.setExprNeedsCleanups(true);
2486  LLVM_FALLTHROUGH;
2487  case Builtin::BI__builtin_os_log_format_buffer_size:
2488  if (SemaBuiltinOSLogFormat(TheCall))
2489  return ExprError();
2490  break;
2491  case Builtin::BI__builtin_frame_address:
2492  case Builtin::BI__builtin_return_address: {
2493  if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF))
2494  return ExprError();
2495 
2496  // Emit a -Wframe-address warning if a non-zero argument is passed to
2497  // __builtin_return_address or __builtin_frame_address.
2498  Expr::EvalResult Result;
2499  if (!TheCall->getArg(0)->isValueDependent() &&
2500  TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
2501  Result.Val.getInt() != 0)
2502  Diag(TheCall->getBeginLoc(), diag::warn_frame_address)
2503  << ((BuiltinID == Builtin::BI__builtin_return_address)
2504  ? "__builtin_return_address"
2505  : "__builtin_frame_address")
2506  << TheCall->getSourceRange();
2507  break;
2508  }
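// Sketch: the level must be a constant in [0, 0xFFFF], and any non-zero level
// additionally triggers -Wframe-address.
//
//   void *r0 = __builtin_return_address(0);  // OK, no warning
//   void *r1 = __builtin_return_address(1);  // warns under -Wframe-address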
2509 
2510  // __builtin_elementwise_abs restricts the element type to signed integers or
2511  // floating point types only.
2512  case Builtin::BI__builtin_elementwise_abs: {
2513  if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
2514  return ExprError();
2515 
2516  QualType ArgTy = TheCall->getArg(0)->getType();
2517  QualType EltTy = ArgTy;
2518 
2519  if (auto *VecTy = EltTy->getAs<VectorType>())
2520  EltTy = VecTy->getElementType();
2521  if (EltTy->isUnsignedIntegerType()) {
2522  Diag(TheCall->getArg(0)->getBeginLoc(),
2523  diag::err_builtin_invalid_arg_type)
2524  << 1 << /* signed integer or float ty*/ 3 << ArgTy;
2525  return ExprError();
2526  }
2527  break;
2528  }
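// Sketch (hypothetical vector typedefs): unsigned element types are rejected
// for __builtin_elementwise_abs.
//
//   typedef int v4si __attribute__((ext_vector_type(4)));
//   typedef unsigned int v4ui __attribute__((ext_vector_type(4)));
//   v4si a; __builtin_elementwise_abs(a);  // OK
//   v4ui b; __builtin_elementwise_abs(b);  // diagnosed: unsigned element type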
2529 
2530  // These builtins restrict the element type to floating point
2531  // types only.
2532  case Builtin::BI__builtin_elementwise_ceil:
2533  case Builtin::BI__builtin_elementwise_floor:
2534  case Builtin::BI__builtin_elementwise_roundeven:
2535  case Builtin::BI__builtin_elementwise_trunc: {
2536  if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
2537  return ExprError();
2538 
2539  QualType ArgTy = TheCall->getArg(0)->getType();
2540  QualType EltTy = ArgTy;
2541 
2542  if (auto *VecTy = EltTy->getAs<VectorType>())
2543  EltTy = VecTy->getElementType();
2544  if (!EltTy->isFloatingType()) {
2545  Diag(TheCall->getArg(0)->getBeginLoc(),
2546  diag::err_builtin_invalid_arg_type)
2547  << 1 << /* float ty*/ 5 << ArgTy;
2548 
2549  return ExprError();
2550  }
2551  break;
2552  }
2553 
2554  // These builtins restrict the element type to integer
2555  // types only.
2556  case Builtin::BI__builtin_elementwise_add_sat:
2557  case Builtin::BI__builtin_elementwise_sub_sat: {
2558  if (SemaBuiltinElementwiseMath(TheCall))
2559  return ExprError();
2560 
2561  const Expr *Arg = TheCall->getArg(0);
2562  QualType ArgTy = Arg->getType();
2563  QualType EltTy = ArgTy;
2564 
2565  if (auto *VecTy = EltTy->getAs<VectorType>())
2566  EltTy = VecTy->getElementType();
2567 
2568  if (!EltTy->isIntegerType()) {
2569  Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2570  << 1 << /* integer ty */ 6 << ArgTy;
2571  return ExprError();
2572  }
2573  break;
2574  }
2575 
2576  case Builtin::BI__builtin_elementwise_min:
2577  case Builtin::BI__builtin_elementwise_max:
2578  if (SemaBuiltinElementwiseMath(TheCall))
2579  return ExprError();
2580  break;
2581  case Builtin::BI__builtin_reduce_max:
2582  case Builtin::BI__builtin_reduce_min: {
2583  if (PrepareBuiltinReduceMathOneArgCall(TheCall))
2584  return ExprError();
2585 
2586  const Expr *Arg = TheCall->getArg(0);
2587  const auto *TyA = Arg->getType()->getAs<VectorType>();
2588  if (!TyA) {
2589  Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2590  << 1 << /* vector ty*/ 4 << Arg->getType();
2591  return ExprError();
2592  }
2593 
2594  TheCall->setType(TyA->getElementType());
2595  break;
2596  }
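// Sketch (hypothetical vector typedef): the operand must be a vector, and the
// call's result type becomes the element type.
//
//   typedef float v4f __attribute__((ext_vector_type(4)));
//   v4f v;
//   float m = __builtin_reduce_max(v);  // result typed as float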
2597 
2598  // These builtins support vectors of integers only.
2599  // TODO: ADD/MUL should support floating-point types.
2600  case Builtin::BI__builtin_reduce_add:
2601  case Builtin::BI__builtin_reduce_mul:
2602  case Builtin::BI__builtin_reduce_xor:
2603  case Builtin::BI__builtin_reduce_or:
2604  case Builtin::BI__builtin_reduce_and: {
2605  if (PrepareBuiltinReduceMathOneArgCall(TheCall))
2606  return ExprError();
2607 
2608  const Expr *Arg = TheCall->getArg(0);
2609  const auto *TyA = Arg->getType()->getAs<VectorType>();
2610  if (!TyA || !TyA->getElementType()->isIntegerType()) {
2611  Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2612  << 1 << /* vector of integers */ 6 << Arg->getType();
2613  return ExprError();
2614  }
2615  TheCall->setType(TyA->getElementType());
2616  break;
2617  }
2618 
2619  case Builtin::BI__builtin_matrix_transpose:
2620  return SemaBuiltinMatrixTranspose(TheCall, TheCallResult);
2621 
2622  case Builtin::BI__builtin_matrix_column_major_load:
2623  return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult);
2624 
2625  case Builtin::BI__builtin_matrix_column_major_store:
2626  return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult);
2627 
2628  case Builtin::BI__builtin_get_device_side_mangled_name: {
2629  auto Check = [](CallExpr *TheCall) {
2630  if (TheCall->getNumArgs() != 1)
2631  return false;
2632  auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts());
2633  if (!DRE)
2634  return false;
2635  auto *D = DRE->getDecl();
2636  if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D))
2637  return false;
2638  return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() ||
2639  D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>();
2640  };
2641  if (!Check(TheCall)) {
2642  Diag(TheCall->getBeginLoc(),
2643  diag::err_hip_invalid_args_builtin_mangled_name);
2644  return ExprError();
2645  }
2646  }
2647  }
2648 
2649  // Since the target specific builtins for each arch overlap, only check those
2650  // of the arch we are compiling for.
2651  if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
2652  if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
2653  assert(Context.getAuxTargetInfo() &&
2654  "Aux Target Builtin, but not an aux target?");
2655 
2656  if (CheckTSBuiltinFunctionCall(
2657  *Context.getAuxTargetInfo(),
2658  Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
2659  return ExprError();
2660  } else {
2661  if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
2662  TheCall))
2663  return ExprError();
2664  }
2665  }
2666 
2667  return TheCallResult;
2668 }
2669 
2670 // Get the valid immediate range for the specified NEON type code.
2671 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
2672  NeonTypeFlags Type(t);
2673  int IsQuad = ForceQuad ? true : Type.isQuad();
2674  switch (Type.getEltType()) {
2675  case NeonTypeFlags::Int8:
2676  case NeonTypeFlags::Poly8:
2677  return shift ? 7 : (8 << IsQuad) - 1;
2678  case NeonTypeFlags::Int16:
2679  case NeonTypeFlags::Poly16:
2680  return shift ? 15 : (4 << IsQuad) - 1;
2681  case NeonTypeFlags::Int32:
2682  return shift ? 31 : (2 << IsQuad) - 1;
2683  case NeonTypeFlags::Int64:
2684  case NeonTypeFlags::Poly64:
2685  return shift ? 63 : (1 << IsQuad) - 1;
2686  case NeonTypeFlags::Poly128:
2687  return shift ? 127 : (1 << IsQuad) - 1;
2688  case NeonTypeFlags::Float16:
2689  assert(!shift && "cannot shift float types!");
2690  return (4 << IsQuad) - 1;
2691  case NeonTypeFlags::Float32:
2692  assert(!shift && "cannot shift float types!");
2693  return (2 << IsQuad) - 1;
2694  case NeonTypeFlags::Float64:
2695  assert(!shift && "cannot shift float types!");
2696  return (1 << IsQuad) - 1;
2697  case NeonTypeFlags::BFloat16:
2698  assert(!shift && "cannot shift float types!");
2699  return (4 << IsQuad) - 1;
2700  }
2701  llvm_unreachable("Invalid NeonTypeFlag!");
2702 }
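// Worked example of the ranges above: a quad (128-bit) Int8 vector has 16
// lanes, so a lane index may be 0..15 ((8 << 1) - 1), while an Int8 shift
// amount is limited to 0..7.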
2703 
2704 /// getNeonEltType - Return the QualType corresponding to the elements of
2705 /// the vector type specified by the NeonTypeFlags. This is used to check
2706 /// the pointer arguments for Neon load/store intrinsics.
2707 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
2708  bool IsPolyUnsigned, bool IsInt64Long) {
2709  switch (Flags.getEltType()) {
2710  case NeonTypeFlags::Int8:
2711  return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
2712  case NeonTypeFlags::Int16:
2713  return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
2714  case NeonTypeFlags::Int32:
2715  return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
2716  case NeonTypeFlags::Int64:
2717  if (IsInt64Long)
2718  return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
2719  else
2720  return Flags.isUnsigned() ? Context.UnsignedLongLongTy
2721  : Context.LongLongTy;
2722  case NeonTypeFlags::Poly8:
2723  return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
2724  case NeonTypeFlags::Poly16:
2725  return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
2726  case NeonTypeFlags::Poly64:
2727  if (IsInt64Long)
2728  return Context.UnsignedLongTy;
2729  else
2730  return Context.UnsignedLongLongTy;
2731  case NeonTypeFlags::Poly128:
2732  break;
2733  case NeonTypeFlags::Float16:
2734  return Context.HalfTy;
2735  case NeonTypeFlags::Float32:
2736  return Context.FloatTy;
2737  case NeonTypeFlags::Float64:
2738  return Context.DoubleTy;
2739  case NeonTypeFlags::BFloat16:
2740  return Context.BFloat16Ty;
2741  }
2742  llvm_unreachable("Invalid NeonTypeFlag!");
2743 }
2744 
2745 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
2746  // Range check SVE intrinsics that take immediate values.
2747  SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
2748 
2749  switch (BuiltinID) {
2750  default:
2751  return false;
2752 #define GET_SVE_IMMEDIATE_CHECK
2753 #include "clang/Basic/arm_sve_sema_rangechecks.inc"
2754 #undef GET_SVE_IMMEDIATE_CHECK
2755  }
2756 
2757  // Perform all the immediate checks for this builtin call.
2758  bool HasError = false;
2759  for (auto &I : ImmChecks) {
2760  int ArgNum, CheckTy, ElementSizeInBits;
2761  std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;
2762 
2763  typedef bool(*OptionSetCheckFnTy)(int64_t Value);
2764 
2765  // Function that checks whether the operand (ArgNum) is an immediate
2766  // that is one of the predefined values.
2767  auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
2768  int ErrDiag) -> bool {
2769  // We can't check the value of a dependent argument.
2770  Expr *Arg = TheCall->getArg(ArgNum);
2771  if (Arg->isTypeDependent() || Arg->isValueDependent())
2772  return false;
2773 
2774  // Check constant-ness first.
2775  llvm::APSInt Imm;
2776  if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm))
2777  return true;
2778 
2779  if (!CheckImm(Imm.getSExtValue()))
2780  return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
2781  return false;
2782  };
2783 
2784  switch ((SVETypeFlags::ImmCheckType)CheckTy) {
2785  case SVETypeFlags::ImmCheck0_31:
2786  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
2787  HasError = true;
2788  break;
2789  case SVETypeFlags::ImmCheck0_13:
2790  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
2791  HasError = true;
2792  break;
2793  case SVETypeFlags::ImmCheck1_16:
2794  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
2795  HasError = true;
2796  break;
2797  case SVETypeFlags::ImmCheck0_7:
2798  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
2799  HasError = true;
2800  break;
2801  case SVETypeFlags::ImmCheckExtract:
2802  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2803  (2048 / ElementSizeInBits) - 1))
2804  HasError = true;
2805  break;
2806  case SVETypeFlags::ImmCheckShiftRight:
2807  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
2808  HasError = true;
2809  break;
2810  case SVETypeFlags::ImmCheckShiftRightNarrow:
2811  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1,
2812  ElementSizeInBits / 2))
2813  HasError = true;
2814  break;
2815  case SVETypeFlags::ImmCheckShiftLeft:
2816  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2817  ElementSizeInBits - 1))
2818  HasError = true;
2819  break;
2820  case SVETypeFlags::ImmCheckLaneIndex:
2821  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2822  (128 / (1 * ElementSizeInBits)) - 1))
2823  HasError = true;
2824  break;
2825  case SVETypeFlags::ImmCheckLaneIndexCompRotate:
2826  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2827  (128 / (2 * ElementSizeInBits)) - 1))
2828  HasError = true;
2829  break;
2830  case SVETypeFlags::ImmCheckLaneIndexDot:
2831  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2832  (128 / (4 * ElementSizeInBits)) - 1))
2833  HasError = true;
2834  break;
2835  case SVETypeFlags::ImmCheckComplexRot90_270:
2836  if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
2837  diag::err_rotation_argument_to_cadd))
2838  HasError = true;
2839  break;
2840  case SVETypeFlags::ImmCheckComplexRotAll90:
2841  if (CheckImmediateInSet(
2842  [](int64_t V) {
2843  return V == 0 || V == 90 || V == 180 || V == 270;
2844  },
2845  diag::err_rotation_argument_to_cmla))
2846  HasError = true;
2847  break;
2848  case SVETypeFlags::ImmCheck0_1:
2849  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
2850  HasError = true;
2851  break;
2852  case SVETypeFlags::ImmCheck0_2:
2853  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
2854  HasError = true;
2855  break;
2856  case SVETypeFlags::ImmCheck0_3:
2857  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
2858  HasError = true;
2859  break;
2860  }
2861  }
2862 
2863  return HasError;
2864 }
2865 
2866 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
2867  unsigned BuiltinID, CallExpr *TheCall) {
2868  llvm::APSInt Result;
2869  uint64_t mask = 0;
2870  unsigned TV = 0;
2871  int PtrArgNum = -1;
2872  bool HasConstPtr = false;
2873  switch (BuiltinID) {
2874 #define GET_NEON_OVERLOAD_CHECK
2875 #include "clang/Basic/arm_neon.inc"
2876 #include "clang/Basic/arm_fp16.inc"
2877 #undef GET_NEON_OVERLOAD_CHECK
2878  }
2879 
2880  // For NEON intrinsics which are overloaded on vector element type, validate
2881  // the immediate which specifies which variant to emit.
2882  unsigned ImmArg = TheCall->getNumArgs()-1;
2883  if (mask) {
2884  if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
2885  return true;
2886 
2887  TV = Result.getLimitedValue(64);
2888  if ((TV > 63) || (mask & (1ULL << TV)) == 0)
2889  return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
2890  << TheCall->getArg(ImmArg)->getSourceRange();
2891  }
2892 
2893  if (PtrArgNum >= 0) {
2894  // Check that pointer arguments have the specified type.
2895  Expr *Arg = TheCall->getArg(PtrArgNum);
2896  if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
2897  Arg = ICE->getSubExpr();
2898  ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
2899  QualType RHSTy = RHS.get()->getType();
2900 
2901  llvm::Triple::ArchType Arch = TI.getTriple().getArch();
2902  bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
2903  Arch == llvm::Triple::aarch64_32 ||
2904  Arch == llvm::Triple::aarch64_be;
2905  bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
2906  QualType EltTy =
2907  getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
2908  if (HasConstPtr)
2909  EltTy = EltTy.withConst();
2910  QualType LHSTy = Context.getPointerType(EltTy);
2911  AssignConvertType ConvTy;
2912  ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
2913  if (RHS.isInvalid())
2914  return true;
2915  if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
2916  RHS.get(), AA_Assigning))
2917  return true;
2918  }
2919 
2920  // For NEON intrinsics which take an immediate value as part of the
2921  // instruction, range check them here.
2922  unsigned i = 0, l = 0, u = 0;
2923  switch (BuiltinID) {
2924  default:
2925  return false;
2926  #define GET_NEON_IMMEDIATE_CHECK
2927  #include "clang/Basic/arm_neon.inc"
2928  #include "clang/Basic/arm_fp16.inc"
2929  #undef GET_NEON_IMMEDIATE_CHECK
2930  }
2931 
2932  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
2933 }
2934 
2935 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
2936  switch (BuiltinID) {
2937  default:
2938  return false;
2939  #include "clang/Basic/arm_mve_builtin_sema.inc"
2940  }
2941 }
2942 
2943 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
2944  CallExpr *TheCall) {
2945  bool Err = false;
2946  switch (BuiltinID) {
2947  default:
2948  return false;
2949 #include "clang/Basic/arm_cde_builtin_sema.inc"
2950  }
2951 
2952  if (Err)
2953  return true;
2954 
2955  return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
2956 }
2957 
2958 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
2959  const Expr *CoprocArg, bool WantCDE) {
2960  if (isConstantEvaluated())
2961  return false;
2962 
2963  // We can't check the value of a dependent argument.
2964  if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
2965  return false;
2966 
2967  llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
2968  int64_t CoprocNo = CoprocNoAP.getExtValue();
2969  assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
2970 
2971  uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
2972  bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
2973 
2974  if (IsCDECoproc != WantCDE)
2975  return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
2976  << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
2977 
2978  return false;
2979 }
2980 
2981 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
2982  unsigned MaxWidth) {
2983  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
2984  BuiltinID == ARM::BI__builtin_arm_ldaex ||
2985  BuiltinID == ARM::BI__builtin_arm_strex ||
2986  BuiltinID == ARM::BI__builtin_arm_stlex ||
2987  BuiltinID == AArch64::BI__builtin_arm_ldrex ||
2988  BuiltinID == AArch64::BI__builtin_arm_ldaex ||
2989  BuiltinID == AArch64::BI__builtin_arm_strex ||
2990  BuiltinID == AArch64::BI__builtin_arm_stlex) &&
2991  "unexpected ARM builtin");
2992  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
2993  BuiltinID == ARM::BI__builtin_arm_ldaex ||
2994  BuiltinID == AArch64::BI__builtin_arm_ldrex ||
2995  BuiltinID == AArch64::BI__builtin_arm_ldaex;
2996 
2997  DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
2998 
2999  // Ensure that we have the proper number of arguments.
3000  if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
3001  return true;
3002 
3003  // Inspect the pointer argument of the atomic builtin. This should always be
3004  // a pointer type, whose element is an integral scalar or pointer type.
3005  // Because it is a pointer type, we don't have to worry about any implicit
3006  // casts here.
3007  Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
3008  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
3009  if (PointerArgRes.isInvalid())
3010  return true;
3011  PointerArg = PointerArgRes.get();
3012 
3013  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
3014  if (!pointerType) {
3015  Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
3016  << PointerArg->getType() << PointerArg->getSourceRange();
3017  return true;
3018  }
3019 
3020  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
3021  // task is to insert the appropriate casts into the AST. First work out just
3022  // what the appropriate type is.
3023  QualType ValType = pointerType->getPointeeType();
3024  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
3025  if (IsLdrex)
3026  AddrType.addConst();
3027 
3028  // Issue a warning if the cast is dodgy.
3029  CastKind CastNeeded = CK_NoOp;
3030  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
3031  CastNeeded = CK_BitCast;
3032  Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
3033  << PointerArg->getType() << Context.getPointerType(AddrType)
3034  << AA_Passing << PointerArg->getSourceRange();
3035  }
3036 
3037  // Finally, do the cast and replace the argument with the corrected version.
3038  AddrType = Context.getPointerType(AddrType);
3039  PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
3040  if (PointerArgRes.isInvalid())
3041  return true;
3042  PointerArg = PointerArgRes.get();
3043 
3044  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);
3045 
3046  // In general, we allow ints, floats and pointers to be loaded and stored.
3047  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
3048  !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
3049  Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
3050  << PointerArg->getType() << PointerArg->getSourceRange();
3051  return true;
3052  }
3053 
3054  // But ARM doesn't have instructions to deal with 128-bit versions.
3055  if (Context.getTypeSize(ValType) > MaxWidth) {
3056  assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
3057  Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
3058  << PointerArg->getType() << PointerArg->getSourceRange();
3059  return true;
3060  }
3061 
3062  switch (ValType.getObjCLifetime()) {
3063  case Qualifiers::OCL_None:
3064  case Qualifiers::OCL_ExplicitNone:
3065  // okay
3066  break;
3067 
3068  case Qualifiers::OCL_Weak:
3069  case Qualifiers::OCL_Strong:
3070  case Qualifiers::OCL_Autoreleasing:
3071  Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
3072  << ValType << PointerArg->getSourceRange();
3073  return true;
3074  }
3075 
3076  if (IsLdrex) {
3077  TheCall->setType(ValType);
3078  return false;
3079  }
3080 
3081  // Initialize the argument to be stored.
3082  ExprResult ValArg = TheCall->getArg(0);
3083  InitializedEntity Entity = InitializedEntity::InitializeParameter(
3084  Context, ValType, /*consume*/ false);
3085  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
3086  if (ValArg.isInvalid())
3087  return true;
3088  TheCall->setArg(0, ValArg.get());
3089 
3090  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
3091  // but the custom checker bypasses all default analysis.
3092  TheCall->setType(Context.IntTy);
3093  return false;
3094 }
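// Sketch (hypothetical names): ldrex-style builtins load through the pointer
// and the call is retyped to the pointee, while strex-style builtins store a
// value and are always typed int.
//
//   int *p = get_slot();
//   int v = __builtin_arm_ldrex(p);          // call typed as int
//   int failed = __builtin_arm_strex(v, p);  // returns 0 on success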
3095 
3096 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
3097  CallExpr *TheCall) {
3098  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
3099  BuiltinID == ARM::BI__builtin_arm_ldaex ||
3100  BuiltinID == ARM::BI__builtin_arm_strex ||
3101  BuiltinID == ARM::BI__builtin_arm_stlex) {
3102  return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
3103  }
3104 
3105  if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
3106  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
3107  SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
3108  }
3109 
3110  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
3111  BuiltinID == ARM::BI__builtin_arm_wsr64)
3112  return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);
3113 
3114  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
3115  BuiltinID == ARM::BI__builtin_arm_rsrp ||
3116  BuiltinID == ARM::BI__builtin_arm_wsr ||
3117  BuiltinID == ARM::BI__builtin_arm_wsrp)
3118  return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
3119 
3120  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
3121  return true;
3122  if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
3123  return true;
3124  if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
3125  return true;
3126 
3127  // For intrinsics which take an immediate value as part of the instruction,
3128  // range check them here.
3129  // FIXME: VFP Intrinsics should error if VFP not present.
3130  switch (BuiltinID) {
3131  default: return false;
3132  case ARM::BI__builtin_arm_ssat:
3133  return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
3134  case ARM::BI__builtin_arm_usat:
3135  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
3136  case ARM::BI__builtin_arm_ssat16:
3137  return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
3138  case ARM::BI__builtin_arm_usat16:
3139  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
3140  case ARM::BI__builtin_arm_vcvtr_f:
3141  case ARM::BI__builtin_arm_vcvtr_d:
3142  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
3143  case ARM::BI__builtin_arm_dmb:
3144  case ARM::BI__builtin_arm_dsb:
3145  case ARM::BI__builtin_arm_isb:
3146  case ARM::BI__builtin_arm_dbg:
3147  return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
3148  case ARM::BI__builtin_arm_cdp:
3149  case ARM::BI__builtin_arm_cdp2:
3150  case ARM::BI__builtin_arm_mcr:
3151  case ARM::BI__builtin_arm_mcr2:
3152  case ARM::BI__builtin_arm_mrc:
3153  case ARM::BI__builtin_arm_mrc2:
3154  case ARM::BI__builtin_arm_mcrr:
3155  case ARM::BI__builtin_arm_mcrr2:
3156  case ARM::BI__builtin_arm_mrrc:
3157  case ARM::BI__builtin_arm_mrrc2:
3158  case ARM::BI__builtin_arm_ldc:
3159  case ARM::BI__builtin_arm_ldcl:
3160  case ARM::BI__builtin_arm_ldc2:
3161  case ARM::BI__builtin_arm_ldc2l:
3162  case ARM::BI__builtin_arm_stc:
3163  case ARM::BI__builtin_arm_stcl:
3164  case ARM::BI__builtin_arm_stc2:
3165  case ARM::BI__builtin_arm_stc2l:
3166  return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) ||
3167  CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
3168  /*WantCDE*/ false);
3169  }
3170 }
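// Sketch of the immediate range checks above (hypothetical `x`):
//
//   int s = __builtin_arm_ssat(x, 8);  // OK: saturation position in [1, 32]
//   __builtin_arm_ssat(x, 0);          // diagnosed: constant out of range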
3171 
3172 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
3173  unsigned BuiltinID,
3174  CallExpr *TheCall) {
3175  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
3176  BuiltinID == AArch64::BI__builtin_arm_ldaex ||
3177  BuiltinID == AArch64::BI__builtin_arm_strex ||
3178  BuiltinID == AArch64::BI__builtin_arm_stlex) {
3179  return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
3180  }
3181 
3182  if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
3183  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
3184  SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) ||
3185  SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
3186  SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
3187  }
3188 
3189  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
3190  BuiltinID == AArch64::BI__builtin_arm_wsr64)
3191  return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
3192 
3193  // Memory Tagging Extensions (MTE) Intrinsics
3194  if (BuiltinID == AArch64::BI__builtin_arm_irg ||
3195  BuiltinID == AArch64::BI__builtin_arm_addg ||
3196  BuiltinID == AArch64::BI__builtin_arm_gmi ||
3197  BuiltinID == AArch64::BI__builtin_arm_ldg ||
3198  BuiltinID == AArch64::BI__builtin_arm_stg ||
3199  BuiltinID == AArch64::BI__builtin_arm_subp) {
3200  return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
3201  }
3202 
3203  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
3204  BuiltinID == AArch64::BI__builtin_arm_rsrp ||
3205  BuiltinID == AArch64::BI__builtin_arm_wsr ||
3206  BuiltinID == AArch64::BI__builtin_arm_wsrp)
3207  return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
3208 
3209  // Only check the valid encoding range. Any constant in this range would be
3210  // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
3211  // an exception for incorrect registers. This matches MSVC behavior.
3212  if (BuiltinID == AArch64::BI_ReadStatusReg ||
3213  BuiltinID == AArch64::BI_WriteStatusReg)
3214  return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);
3215 
3216  if (BuiltinID == AArch64::BI__getReg)
3217  return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
3218 
3219  if (BuiltinID == AArch64::BI__break)
3220  return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff);
3221 
3222  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
3223  return true;
3224 
3225  if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
3226  return true;
3227 
3228  // For intrinsics which take an immediate value as part of the instruction,
3229  // range check them here.
3230  unsigned i = 0, l = 0, u = 0;
3231  switch (BuiltinID) {
3232  default: return false;
3233  case AArch64::BI__builtin_arm_dmb:
3234  case AArch64::BI__builtin_arm_dsb:
3235  case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
3236  case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
3237  }
3238 
3239  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
3240 }
3241 
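// Editor's note, illustrative only: on AArch64 the same style of immediate
// range checking applies. A minimal sketch, assuming compilation for an
// AArch64 target (the function name is hypothetical; the operand values are
// chosen only to match the ranges checked above):
//
//   void aarch64_imm_examples(const void *p) {
//     __builtin_arm_prefetch(p, 0, 2, 0, 1);  // operands 1..4 within [0,1], [0,2], [0,1], [0,1]
//     __builtin_arm_prefetch(p, 2, 0, 0, 1);  // operand 1 out of [0, 1]: diagnosed
//   }
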
3242 static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) {
3243  if (Arg->getType()->getAsPlaceholderType())
3244  return false;
3245 
3246  // The first argument needs to be a record field access.
3247  // If it is an array element access, we delay the decision
3248  // to the BPF backend, which checks whether the access is a
3249  // field access or not.
3250  return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
3251  isa<MemberExpr>(Arg->IgnoreParens()) ||
3252  isa<ArraySubscriptExpr>(Arg->IgnoreParens()));
3253 }
3254 
3255 static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S,
3256  QualType VectorTy, QualType EltTy) {
3257  QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType();
3258  if (!Context.hasSameType(VectorEltTy, EltTy)) {
3259  S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types)
3260  << Call->getSourceRange() << VectorEltTy << EltTy;
3261  return false;
3262  }
3263  return true;
3264 }
3265 
3266 static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) {
3267  QualType ArgType = Arg->getType();
3268  if (ArgType->getAsPlaceholderType())
3269  return false;
3270 
3271  // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type
3272  // format:
3273  // 1. __builtin_preserve_type_info(*(<type> *)0, flag);
3274  // 2. <type> var;
3275  // __builtin_preserve_type_info(var, flag);
3276  if (!isa<DeclRefExpr>(Arg->IgnoreParens()) &&
3277  !isa<UnaryOperator>(Arg->IgnoreParens()))
3278  return false;
3279 
3280  // Typedef type.
3281  if (ArgType->getAs<TypedefType>())
3282  return true;
3283 
3284  // Record type or Enum type.
3285  const Type *Ty = ArgType->getUnqualifiedDesugaredType();
3286  if (const auto *RT = Ty->getAs<RecordType>()) {
3287  if (!RT->getDecl()->getDeclName().isEmpty())
3288  return true;
3289  } else if (const auto *ET = Ty->getAs<EnumType>()) {
3290  if (!ET->getDecl()->getDeclName().isEmpty())
3291  return true;
3292  }
3293 
3294  return false;
3295 }
3296 
3297 static bool isValidBPFPreserveEnumValueArg(Expr *Arg) {
3298  QualType ArgType = Arg->getType();
3299  if (ArgType->getAsPlaceholderType())
3300  return false;
3301 
3302  // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type
3303  // format:
3304  // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>,
3305  // flag);
3306  const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
3307  if (!UO)
3308  return false;
3309 
3310  const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
3311  if (!CE)
3312  return false;
3313  if (CE->getCastKind() != CK_IntegralToPointer &&
3314  CE->getCastKind() != CK_NullToPointer)
3315  return false;
3316 
3317  // The integer must be from an EnumConstantDecl.
3318  const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
3319  if (!DR)
3320  return false;
3321 
3322  const EnumConstantDecl *Enumerator =
3323  dyn_cast<EnumConstantDecl>(DR->getDecl());
3324  if (!Enumerator)
3325  return false;
3326 
3327  // The type must be EnumType.
3328  const Type *Ty = ArgType->getUnqualifiedDesugaredType();
3329  const auto *ET = Ty->getAs<EnumType>();
3330  if (!ET)
3331  return false;
3332 
3333  // The enumerator must belong to the argument's enum type.
3334  return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator);
3335 }
3336 
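// Editor's note, illustrative only: the argument shape accepted by
// isValidBPFPreserveEnumValueArg above mirrors the documented format. For
// example (the enum and enumerator names are hypothetical):
//
//   enum AA { VAL1 = 2, VAL2 = 10 };
//   ... __builtin_preserve_enum_value(*(enum AA *)VAL1, 1);  // cast-and-deref of an enumerator: accepted shape
//   ... __builtin_preserve_enum_value(VAL1, 1);              // no cast/dereference: rejected
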
3337 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
3338  CallExpr *TheCall) {
3339  assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
3340  BuiltinID == BPF::BI__builtin_btf_type_id ||
3341  BuiltinID == BPF::BI__builtin_preserve_type_info ||
3342  BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
3343  "unexpected BPF builtin");
3344 
3345  if (checkArgCount(*this, TheCall, 2))
3346  return true;
3347 
3348  // The second argument needs to be a constant int
3349  Expr *Arg = TheCall->getArg(1);
3350  Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context);
3351  diag::kind kind;
3352  if (!Value) {
3353  if (BuiltinID == BPF::BI__builtin_preserve_field_info)
3354  kind = diag::err_preserve_field_info_not_const;
3355  else if (BuiltinID == BPF::BI__builtin_btf_type_id)
3356  kind = diag::err_btf_type_id_not_const;
3357  else if (BuiltinID == BPF::BI__builtin_preserve_type_info)
3358  kind = diag::err_preserve_type_info_not_const;
3359  else
3360  kind = diag::err_preserve_enum_value_not_const;
3361  Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange();
3362  return true;
3363  }
3364 
3365  // Check the first argument.
3366  Arg = TheCall->getArg(0);
3367  bool InvalidArg = false;
3368  bool ReturnUnsignedInt = true;
3369  if (BuiltinID == BPF::BI__builtin_preserve_field_info) {
3370  if (!isValidBPFPreserveFieldInfoArg(Arg)) {
3371  InvalidArg = true;
3372  kind = diag::err_preserve_field_info_not_field;
3373  }
3374  } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) {
3375  if (!isValidBPFPreserveTypeInfoArg(Arg)) {
3376  InvalidArg = true;
3377  kind = diag::err_preserve_type_info_invalid;
3378  }
3379  } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) {
3380  if (!isValidBPFPreserveEnumValueArg(Arg)) {
3381  InvalidArg = true;
3382  kind = diag::err_preserve_enum_value_invalid;
3383  }
3384  ReturnUnsignedInt = false;
3385  } else if (BuiltinID == BPF::BI__builtin_btf_type_id) {
3386  ReturnUnsignedInt = false;
3387  }
3388 
3389  if (InvalidArg) {
3390  Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange();
3391  return true;
3392  }
3393 
3394  if (ReturnUnsignedInt)
3395  TheCall->setType(Context.UnsignedIntTy);
3396  else
3397  TheCall->setType(Context.UnsignedLongTy);
3398  return false;
3399 }
3400 
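// Editor's note, illustrative only: putting the checks above together for
// __builtin_preserve_field_info (struct and variable names are hypothetical).
// The first argument must be a field-like access and the second a constant
// integer; the call is then given type 'unsigned int':
//
//   struct S { int a:4; int b; } s;
//   unsigned r1 = __builtin_preserve_field_info(s.a, 0);   // bit-field access, constant flag: accepted
//   unsigned r2 = __builtin_preserve_field_info(s.b, cnt); // non-constant second argument: diagnosed
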
3401 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
3402  struct ArgInfo {
3403  uint8_t OpNum;
3404  bool IsSigned;
3405  uint8_t BitWidth;
3406  uint8_t Align;
3407  };
3408  struct BuiltinInfo {
3409  unsigned BuiltinID;
3410  ArgInfo Infos[2];
3411  };
3412 
3413  static BuiltinInfo Infos[] = {
3414  { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} },
3415  { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} },
3416  { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} },
3417  { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} },
3418  { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} },
3419  { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} },
3420  { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} },
3421  { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} },
3422  { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} },
3423  { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} },
3424  { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} },
3425 
3426  { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} },
3427  { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} },
3428  { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} },
3429  { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} },
3430  { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} },
3431  { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} },
3432  { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} },
3433  { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} },
3434  { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} },
3435  { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} },
3436  { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} },
3437 
3438  { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} },
3439  { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} },
3440  { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} },
3441  { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} },
3442  { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} },
3443  { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} },
3444  { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} },
3445  { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} },
3446  { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} },
3447  { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} },
3448  { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} },
3449  { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} },
3450  { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} },
3451  { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} },
3452  { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} },
3453  { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} },
3454  { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} },
3455  { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} },
3456  { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} },
3457  { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} },
3458  { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} },
3459  { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} },
3460  { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} },
3461  { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} },
3462  { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} },
3463  { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} },
3464  { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} },
3465  { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} },
3466  { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} },
3467  { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} },
3468  { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} },
3469  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} },
3470  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} },
3471  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} },
3472  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} },
3473  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} },
3474  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} },
3475  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} },
3476  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} },
3477  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} },
3478  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} },
3479  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} },
3480  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} },
3481  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} },
3482  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} },
3483  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} },
3484  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} },
3485  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} },
3486  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} },
3487  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} },
3488  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} },
3489  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
3490  {{ 1, false, 6, 0 }} },
3491  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} },
3492  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} },
3493  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} },
3494  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} },
3495  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} },
3496  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} },
3497  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
3498  {{ 1, false, 5, 0 }} },
3499  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} },
3500  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} },
3501  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} },
3502  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} },
3503  { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} },
3504  { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 },
3505  { 2, false, 5, 0 }} },
3506  { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 },
3507  { 2, false, 6, 0 }} },
3508  { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 },
3509  { 3, false, 5, 0 }} },
3510  { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 },
3511  { 3, false, 6, 0 }} },
3512  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} },
3513  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} },
3514  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} },
3515  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} },
3516  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} },
3517  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} },
3518  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} },
3519  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} },
3520  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} },
3521  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} },
3522  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} },
3523  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} },
3524  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} },
3525  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} },
3526  { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} },
3527  { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
3528  {{ 2, false, 4, 0 },
3529  { 3, false, 5, 0 }} },
3530  { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
3531  {{ 2, false, 4, 0 },
3532  { 3, false, 5, 0 }} },
3533  { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
3534  {{ 2, false, 4, 0 },
3535  { 3, false, 5, 0 }} },
3536  { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
3537  {{ 2, false, 4, 0 },
3538  { 3, false, 5, 0 }} },
3539  { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} },
3540  { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} },
3541  { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} },
3542  { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} },
3543  { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} },
3544  { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} },
3545  { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} },
3546  { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} },
3547  { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true, 6, 0 }} },
3548  { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} },
3549  { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 },
3550  { 2, false, 5, 0 }} },
3551  { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 },
3552  { 2, false, 6, 0 }} },
3553  { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} },
3554  { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} },
3555  { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} },
3556  { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} },
3557  { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} },
3558  { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} },
3559  { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} },
3560  { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} },
3561  { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
3562  {{ 1, false, 4, 0 }} },
3563  { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} },
3564  { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
3565  {{ 1, false, 4, 0 }} },
3566  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} },
3567  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} },
3568  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} },
3569  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} },
3570  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} },
3571  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} },
3572  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} },
3573  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} },
3574  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} },
3575  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} },
3576  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} },
3577  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} },
3578  { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} },
3579  { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} },
3580  { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} },
3581  { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} },
3582  { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} },
3583  { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} },
3584  { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} },
3585  { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
3586  {{ 3, false, 1, 0 }} },
3587  { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} },
3588  { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} },
3589  { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} },
3590  { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
3591  {{ 3, false, 1, 0 }} },
3592  { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} },
3593  { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} },
3594  { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} },
3595  { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
3596  {{ 3, false, 1, 0 }} },
3597  };
3598 
3599  // Use a dynamically initialized static to sort the table exactly once on
3600  // first run.
3601  static const bool SortOnce =
3602  (llvm::sort(Infos,
3603  [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
3604  return LHS.BuiltinID < RHS.BuiltinID;
3605  }),
3606  true);
3607  (void)SortOnce;
3608 
3609  const BuiltinInfo *F = llvm::partition_point(
3610  Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; });
3611  if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
3612  return false;
3613 
3614  bool Error = false;
3615 
3616  for (const ArgInfo &A : F->Infos) {
3617  // Ignore empty ArgInfo elements.
3618  if (A.BitWidth == 0)
3619  continue;
3620 
3621  int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
3622  int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
3623  if (!A.Align) {
3624  Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
3625  } else {
3626  unsigned M = 1 << A.Align;
3627  Min *= M;
3628  Max *= M;
3629  Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
3630  Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
3631  }
3632  }
3633  return Error;
3634 }
3635 
3636 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
3637  CallExpr *TheCall) {
3638  return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
3639 }
3640 
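// Editor's note, illustrative only: how one ArgInfo entry from the table above
// becomes a range-plus-alignment constraint. For the { 3, true, 4, 2 } entry of
// __builtin_circ_ldw, a signed 4-bit field gives [-8, 7], and Align == 2 scales
// both bounds by 1 << 2 = 4:
//
//   Min = -(1 << (4 - 1)) * (1 << 2) = -32
//   Max = ((1 << (4 - 1)) - 1) * (1 << 2) = 28
//
// so operand 3 must be a constant multiple of 4 in [-32, 28].
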
3641 bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
3642  unsigned BuiltinID, CallExpr *TheCall) {
3643  return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
3644  CheckMipsBuiltinArgument(BuiltinID, TheCall);
3645 }
3646 
3647 bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
3648  CallExpr *TheCall) {
3649 
3650  if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
3651  BuiltinID <= Mips::BI__builtin_mips_lwx) {
3652  if (!TI.hasFeature("dsp"))
3653  return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
3654  }
3655 
3656  if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
3657  BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
3658  if (!TI.hasFeature("dspr2"))
3659  return Diag(TheCall->getBeginLoc(),
3660  diag::err_mips_builtin_requires_dspr2);
3661  }
3662 
3663  if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
3664  BuiltinID <= Mips::BI__builtin_msa_xori_b) {
3665  if (!TI.hasFeature("msa"))
3666  return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
3667  }
3668 
3669  return false;
3670 }
3671 
3672 // CheckMipsBuiltinArgument - Checks that the constant value passed to the
3673 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The
3674 // ordering for DSP is unspecified. MSA is ordered by the data format used
3675 // by the underlying instruction i.e., df/m, df/n and then by size.
3676 //
3677 // FIXME: The size tests here should instead be tablegen'd along with the
3678 // definitions from include/clang/Basic/BuiltinsMips.def.
3679 // FIXME: GCC is strict on signedness for some of these intrinsics, we should
3680 // be too.
3681 bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
3682  unsigned i = 0, l = 0, u = 0, m = 0;
3683  switch (BuiltinID) {
3684  default: return false;
3685  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
3686  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
3687  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
3688  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
3689  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
3690  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
3691  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
3692  // MSA intrinsics. Instructions (which the intrinsics map to) that use the
3693  // df/m field.
3694  // These intrinsics take an unsigned 3 bit immediate.
3695  case Mips::BI__builtin_msa_bclri_b:
3696  case Mips::BI__builtin_msa_bnegi_b:
3697  case Mips::BI__builtin_msa_bseti_b:
3698  case Mips::BI__builtin_msa_sat_s_b:
3699  case Mips::BI__builtin_msa_sat_u_b:
3700  case Mips::BI__builtin_msa_slli_b:
3701  case Mips::BI__builtin_msa_srai_b:
3702  case Mips::BI__builtin_msa_srari_b:
3703  case Mips::BI__builtin_msa_srli_b:
3704  case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
3705  case Mips::BI__builtin_msa_binsli_b:
3706  case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
3707  // These intrinsics take an unsigned 4 bit immediate.
3708  case Mips::BI__builtin_msa_bclri_h:
3709  case Mips::BI__builtin_msa_bnegi_h:
3710  case Mips::BI__builtin_msa_bseti_h:
3711  case Mips::BI__builtin_msa_sat_s_h:
3712  case Mips::BI__builtin_msa_sat_u_h:
3713  case Mips::BI__builtin_msa_slli_h:
3714  case Mips::BI__builtin_msa_srai_h:
3715  case Mips::BI__builtin_msa_srari_h:
3716  case Mips::BI__builtin_msa_srli_h:
3717  case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
3718  case Mips::BI__builtin_msa_binsli_h:
3719  case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
3720  // These intrinsics take an unsigned 5 bit immediate.
3721  // The first block of intrinsics actually have an unsigned 5 bit field,
3722  // not a df/n field.
3723  case Mips::BI__builtin_msa_cfcmsa:
3724  case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
3725  case Mips::BI__builtin_msa_clei_u_b:
3726  case Mips::BI__builtin_msa_clei_u_h:
3727  case Mips::BI__builtin_msa_clei_u_w:
3728  case Mips::BI__builtin_msa_clei_u_d:
3729  case Mips::BI__builtin_msa_clti_u_b:
3730  case Mips::BI__builtin_msa_clti_u_h:
3731  case Mips::BI__builtin_msa_clti_u_w:
3732  case Mips::BI__builtin_msa_clti_u_d:
3733  case Mips::BI__builtin_msa_maxi_u_b:
3734  case Mips::BI__builtin_msa_maxi_u_h:
3735  case Mips::BI__builtin_msa_maxi_u_w:
3736  case Mips::BI__builtin_msa_maxi_u_d:
3737  case Mips::BI__builtin_msa_mini_u_b:
3738  case Mips::BI__builtin_msa_mini_u_h:
3739  case Mips::BI__builtin_msa_mini_u_w:
3740  case Mips::BI__builtin_msa_mini_u_d:
3741  case Mips::BI__builtin_msa_addvi_b:
3742  case Mips::BI__builtin_msa_addvi_h:
3743  case Mips::BI__builtin_msa_addvi_w:
3744  case Mips::BI__builtin_msa_addvi_d:
3745  case Mips::BI__builtin_msa_bclri_w:
3746  case Mips::BI__builtin_msa_bnegi_w:
3747  case Mips::BI__builtin_msa_bseti_w:
3748  case Mips::BI__builtin_msa_sat_s_w:
3749  case Mips::BI__builtin_msa_sat_u_w:
3750  case Mips::BI__builtin_msa_slli_w:
3751  case Mips::BI__builtin_msa_srai_w:
3752  case Mips::BI__builtin_msa_srari_w:
3753  case Mips::BI__builtin_msa_srli_w:
3754  case Mips::BI__builtin_msa_srlri_w:
3755  case Mips::BI__builtin_msa_subvi_b:
3756  case Mips::BI__builtin_msa_subvi_h:
3757  case Mips::BI__builtin_msa_subvi_w:
3758  case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
3759  case Mips::BI__builtin_msa_binsli_w:
3760  case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
3761  // These intrinsics take an unsigned 6 bit immediate.
3762  case Mips::BI__builtin_msa_bclri_d:
3763  case Mips::BI__builtin_msa_bnegi_d:
3764  case Mips::BI__builtin_msa_bseti_d:
3765  case Mips::BI__builtin_msa_sat_s_d:
3766  case Mips::BI__builtin_msa_sat_u_d:
3767  case Mips::BI__builtin_msa_slli_d:
3768  case Mips::BI__builtin_msa_srai_d:
3769  case Mips::BI__builtin_msa_srari_d:
3770  case Mips::BI__builtin_msa_srli_d:
3771  case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
3772  case Mips::BI__builtin_msa_binsli_d:
3773  case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
3774  // These intrinsics take a signed 5 bit immediate.
3775  case Mips::BI__builtin_msa_ceqi_b:
3776  case Mips::BI__builtin_msa_ceqi_h:
3777  case Mips::BI__builtin_msa_ceqi_w:
3778  case Mips::BI__builtin_msa_ceqi_d:
3779  case Mips::BI__builtin_msa_clti_s_b:
3780  case Mips::BI__builtin_msa_clti_s_h:
3781  case Mips::BI__builtin_msa_clti_s_w:
3782  case Mips::BI__builtin_msa_clti_s_d:
3783  case Mips::BI__builtin_msa_clei_s_b:
3784  case Mips::BI__builtin_msa_clei_s_h:
3785  case Mips::BI__builtin_msa_clei_s_w:
3786  case Mips::BI__builtin_msa_clei_s_d:
3787  case Mips::BI__builtin_msa_maxi_s_b:
3788  case Mips::BI__builtin_msa_maxi_s_h:
3789  case Mips::BI__builtin_msa_maxi_s_w:
3790  case Mips::BI__builtin_msa_maxi_s_d:
3791  case Mips::BI__builtin_msa_mini_s_b:
3792  case Mips::BI__builtin_msa_mini_s_h:
3793  case Mips::BI__builtin_msa_mini_s_w:
3794  case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
3795  // These intrinsics take an unsigned 8 bit immediate.
3796  case Mips::BI__builtin_msa_andi_b:
3797  case Mips::BI__builtin_msa_nori_b:
3798  case Mips::BI__builtin_msa_ori_b:
3799  case Mips::BI__builtin_msa_shf_b:
3800  case Mips::BI__builtin_msa_shf_h:
3801  case Mips::BI__builtin_msa_shf_w:
3802  case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
3803  case Mips::BI__builtin_msa_bseli_b:
3804  case Mips::BI__builtin_msa_bmnzi_b:
3805  case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
3806  // df/n format
3807  // These intrinsics take an unsigned 4 bit immediate.
3808  case Mips::BI__builtin_msa_copy_s_b:
3809  case Mips::BI__builtin_msa_copy_u_b:
3810  case Mips::BI__builtin_msa_insve_b:
3811  case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
3812  case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
3813  // These intrinsics take an unsigned 3 bit immediate.
3814  case Mips::BI__builtin_msa_copy_s_h:
3815  case Mips::BI__builtin_msa_copy_u_h:
3816  case Mips::BI__builtin_msa_insve_h:
3817  case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
3818  case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
3819  // These intrinsics take an unsigned 2 bit immediate.
3820  case Mips::BI__builtin_msa_copy_s_w:
3821  case Mips::BI__builtin_msa_copy_u_w:
3822  case Mips::BI__builtin_msa_insve_w:
3823  case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
3824  case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
3825  // These intrinsics take an unsigned 1 bit immediate.
3826  case Mips::BI__builtin_msa_copy_s_d:
3827  case Mips::BI__builtin_msa_copy_u_d:
3828  case Mips::BI__builtin_msa_insve_d:
3829  case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
3830  case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
3831  // Memory offsets and immediate loads.
3832  // These intrinsics take a signed 10 bit immediate.
3833  case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
3834  case Mips::BI__builtin_msa_ldi_h:
3835  case Mips::BI__builtin_msa_ldi_w:
3836  case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
3837  case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
3838  case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
3839  case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
3840  case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
3841  case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
3842  case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
3843  case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
3844  case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
3845  case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
3846  case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
3847  case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
3848  case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
3849  }
3850 
3851  if (!m)
3852  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
3853 
3854  return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
3855  SemaBuiltinConstantArgMultiple(TheCall, i, m);
3856 }
3857 
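// Editor's note, illustrative only: the MSA memory builtins above combine a
// range check with a multiple-of check. A minimal sketch, assuming an
// MSA-enabled MIPS target (the typedef and names are hypothetical):
//
//   typedef int v4i32 __attribute__((vector_size(16)));
//   v4i32 load(void *p) {
//     return __builtin_msa_ld_w(p, 2044);  // in [-2048, 2044] and a multiple of 4: accepted
//     // __builtin_msa_ld_w(p, 2045)       // not a multiple of 4: diagnosed
//   }
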
3858 /// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
3859 /// advancing the pointer over the consumed characters. The decoded type is
3860 /// returned. If the decoded type represents a constant integer with a
3861 /// constraint on its value then Mask is set to that value. The type descriptors
3862 /// used in Str are specific to PPC MMA builtins and are documented in the file
3863 /// defining the PPC builtins.
3864 static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
3865  unsigned &Mask) {
3866  bool RequireICE = false;
3867  ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
3868  switch (*Str++) {
3869  case 'V':
3870  return Context.getVectorType(Context.UnsignedCharTy, 16,
3871  VectorType::VectorKind::AltiVecVector);
3872  case 'i': {
3873  char *End;
3874  unsigned size = strtoul(Str, &End, 10);
3875  assert(End != Str && "Missing constant parameter constraint");
3876  Str = End;
3877  Mask = size;
3878  return Context.IntTy;
3879  }
3880  case 'W': {
3881  char *End;
3882  unsigned size = strtoul(Str, &End, 10);
3883  assert(End != Str && "Missing PowerPC MMA type size");
3884  Str = End;
3885  QualType Type;
3886  switch (size) {
3887  #define PPC_VECTOR_TYPE(typeName, Id, size) \
3888  case size: Type = Context.Id##Ty; break;
3889  #include "clang/Basic/PPCTypes.def"
3890  default: llvm_unreachable("Invalid PowerPC MMA vector type");
3891  }
3892  bool CheckVectorArgs = false;
3893  while (!CheckVectorArgs) {
3894  switch (*Str++) {
3895  case '*':
3896  Type = Context.getPointerType(Type);
3897  break;
3898  case 'C':
3899  Type = Type.withConst();
3900  break;
3901  default:
3902  CheckVectorArgs = true;
3903  --Str;
3904  break;
3905  }
3906  }
3907  return Type;
3908  }
3909  default:
3910  return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true);
3911  }
3912 }
3913 
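// Editor's note, illustrative only: a walkthrough of the descriptor grammar
// handled above, using a hypothetical string "W512*Vi15":
//   "W512" then '*'  -> pointer to the 512-bit type listed in PPCTypes.def
//   'V'              -> an AltiVec vector of 16 unsigned char
//   "i15"            -> int, with Mask set to 15 for the caller's constant-argument check
// Any other leading character falls back to the generic type-string decoder.
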
3914 static bool isPPC_64Builtin(unsigned BuiltinID) {
3915  // These builtins only work on PPC 64bit targets.
3916  switch (BuiltinID) {
3917  case PPC::BI__builtin_divde:
3918  case PPC::BI__builtin_divdeu:
3919  case PPC::BI__builtin_bpermd:
3920  case PPC::BI__builtin_pdepd:
3921  case PPC::BI__builtin_pextd:
3922  case PPC::BI__builtin_ppc_ldarx:
3923  case PPC::BI__builtin_ppc_stdcx:
3924  case PPC::BI__builtin_ppc_tdw:
3925  case PPC::BI__builtin_ppc_trapd:
3926  case PPC::BI__builtin_ppc_cmpeqb:
3927  case PPC::BI__builtin_ppc_setb:
3928  case PPC::BI__builtin_ppc_mulhd:
3929  case PPC::BI__builtin_ppc_mulhdu:
3930  case PPC::BI__builtin_ppc_maddhd:
3931  case PPC::BI__builtin_ppc_maddhdu:
3932  case PPC::BI__builtin_ppc_maddld:
3933  case PPC::BI__builtin_ppc_load8r:
3934  case PPC::BI__builtin_ppc_store8r:
3935  case PPC::BI__builtin_ppc_insert_exp:
3936  case PPC::BI__builtin_ppc_extract_sig:
3937  case PPC::BI__builtin_ppc_addex:
3938  case PPC::BI__builtin_darn:
3939  case PPC::BI__builtin_darn_raw:
3940  case PPC::BI__builtin_ppc_compare_and_swaplp:
3941  case PPC::BI__builtin_ppc_fetch_and_addlp:
3942  case PPC::BI__builtin_ppc_fetch_and_andlp:
3943  case PPC::BI__builtin_ppc_fetch_and_orlp:
3944  case PPC::BI__builtin_ppc_fetch_and_swaplp:
3945  return true;
3946  }
3947  return false;
3948 }
3949 
3950 static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall,
3951  StringRef FeatureToCheck, unsigned DiagID,
3952  StringRef DiagArg = "") {
3953  if (S.Context.getTargetInfo().hasFeature(FeatureToCheck))
3954  return false;
3955 
3956  if (DiagArg.empty())
3957  S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange();
3958  else
3959  S.Diag(TheCall->getBeginLoc(), DiagID)
3960  << DiagArg << TheCall->getSourceRange();
3961 
3962  return true;
3963 }
3964 
3965 /// Checks that the argument is a constant whose 1 bits form one contiguous run
3966 /// with any number of 0s on either side. The run may wrap from LSB to MSB, so
3967 /// 0x000FFF0, 0x0000FFFF, 0xFF0000FF and 0x0 all qualify; 0x0F0F0000 does not.
3968 /// Returns true (after emitting a diagnostic) when the check fails.
3969 bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
3970  llvm::APSInt Result;
3971  // We can't check the value of a dependent argument.
3972  Expr *Arg = TheCall->getArg(ArgNum);
3973  if (Arg->isTypeDependent() || Arg->isValueDependent())
3974  return false;
3975 
3976  // Check constant-ness first.
3977  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
3978  return true;
3979 
3980  // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s.
3981  if (Result.isShiftedMask() || (~Result).isShiftedMask())
3982  return false;
3983 
3984  return Diag(TheCall->getBeginLoc(),
3985  diag::err_argument_not_contiguous_bit_field)
3986  << ArgNum << Arg->getSourceRange();
3987 }
3988 
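// Editor's note, illustrative only: masks accepted and rejected by the
// run-of-ones check above, e.g. for __builtin_ppc_rlwnm on a PowerPC target
// (the variable name is hypothetical):
//
//   (void)__builtin_ppc_rlwnm(x, 4, 0x00FFFF00);  // one contiguous run of 1s: accepted
//   (void)__builtin_ppc_rlwnm(x, 4, 0xFF0000FF);  // run wrapping MSB to LSB: accepted
//   (void)__builtin_ppc_rlwnm(x, 4, 0x0F0F0000);  // two separate runs: diagnosed
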
3989 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
3990  CallExpr *TheCall) {
3991  unsigned i = 0, l = 0, u = 0;
3992  bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
3993  llvm::APSInt Result;
3994 
3995  if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
3996  return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
3997  << TheCall->getSourceRange();
3998 
3999  switch (BuiltinID) {
4000  default: return false;
4001  case PPC::BI__builtin_altivec_crypto_vshasigmaw:
4002  case PPC::BI__builtin_altivec_crypto_vshasigmad:
4003  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
4004  SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
4005  case PPC::BI__builtin_altivec_dss:
4006  return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
4007  case PPC::BI__builtin_tbegin:
4008  case PPC::BI__builtin_tend:
4009  return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) ||
4010  SemaFeatureCheck(*this, TheCall, "htm",
4011  diag::err_ppc_builtin_requires_htm);
4012  case PPC::BI__builtin_tsr:
4013  return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
4014  SemaFeatureCheck(*this, TheCall, "htm",
4015  diag::err_ppc_builtin_requires_htm);
4016  case PPC::BI__builtin_tabortwc:
4017  case PPC::BI__builtin_tabortdc:
4018  return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
4019  SemaFeatureCheck(*this, TheCall, "htm",
4020  diag::err_ppc_builtin_requires_htm);
4021  case PPC::BI__builtin_tabortwci:
4022  case PPC::BI__builtin_tabortdci:
4023  return SemaFeatureCheck(*this, TheCall, "htm",
4024  diag::err_ppc_builtin_requires_htm) ||
4025  (SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
4026  SemaBuiltinConstantArgRange(TheCall, 2, 0, 31));
4027  case PPC::BI__builtin_tabort:
4028  case PPC::BI__builtin_tcheck:
4029  case PPC::BI__builtin_treclaim:
4030  case PPC::BI__builtin_trechkpt:
4031  case PPC::BI__builtin_tendall:
4032  case PPC::BI__builtin_tresume:
4033  case PPC::BI__builtin_tsuspend:
4034  case PPC::BI__builtin_get_texasr:
4035  case PPC::BI__builtin_get_texasru:
4036  case PPC::BI__builtin_get_tfhar:
4037  case PPC::BI__builtin_get_tfiar:
4038  case PPC::BI__builtin_set_texasr:
4039  case PPC::BI__builtin_set_texasru:
4040  case PPC::BI__builtin_set_tfhar:
4041  case PPC::BI__builtin_set_tfiar:
4042  case PPC::BI__builtin_ttest:
4043  return SemaFeatureCheck(*this, TheCall, "htm",
4044  diag::err_ppc_builtin_requires_htm);
4045  // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05',
4046  // __builtin_(un)pack_longdouble are available only if long double uses IBM
4047  // extended double representation.
4048  case PPC::BI__builtin_unpack_longdouble:
4049  if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1))
4050  return true;
4051  LLVM_FALLTHROUGH;
4052  case PPC::BI__builtin_pack_longdouble:
4053  if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble())
4054  return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi)
4055  << "ibmlongdouble";
4056  return false;
4057  case PPC::BI__builtin_altivec_dst:
4058  case PPC::BI__builtin_altivec_dstt:
4059  case PPC::BI__builtin_altivec_dstst:
4060  case PPC::BI__builtin_altivec_dststt:
4061  return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
4062  case PPC::BI__builtin_vsx_xxpermdi:
4063  case PPC::BI__builtin_vsx_xxsldwi:
4064  return SemaBuiltinVSX(TheCall);
4065  case PPC::BI__builtin_divwe:
4066  case PPC::BI__builtin_divweu:
4067  case PPC::BI__builtin_divde:
4068  case PPC::BI__builtin_divdeu:
4069  return SemaFeatureCheck(*this, TheCall, "extdiv",
4070  diag::err_ppc_builtin_only_on_arch, "7");
4071  case PPC::BI__builtin_bpermd:
4072  return SemaFeatureCheck(*this, TheCall, "bpermd",
4073  diag::err_ppc_builtin_only_on_arch, "7");
4074  case PPC::BI__builtin_unpack_vector_int128:
4075  return SemaFeatureCheck(*this, TheCall, "vsx",
4076  diag::err_ppc_builtin_only_on_arch, "7") ||
4077  SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
4078  case PPC::BI__builtin_pack_vector_int128:
4079  return SemaFeatureCheck(*this, TheCall, "vsx",
4080  diag::err_ppc_builtin_only_on_arch, "7");
4081  case PPC::BI__builtin_pdepd:
4082  case PPC::BI__builtin_pextd:
4083  return SemaFeatureCheck(*this, TheCall, "isa-v31-instructions",
4084  diag::err_ppc_builtin_only_on_arch, "10");
4085  case PPC::BI__builtin_altivec_vgnb:
4086  return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
4087  case PPC::BI__builtin_altivec_vec_replace_elt:
4088  case PPC::BI__builtin_altivec_vec_replace_unaligned: {
4089  QualType VecTy = TheCall->getArg(0)->getType();
4090  QualType EltTy = TheCall->getArg(1)->getType();
4091  unsigned Width = Context.getIntWidth(EltTy);
4092  return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 12 : 8) ||
4093  !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy);
4094  }
4095  case PPC::BI__builtin_vsx_xxeval:
4096  return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
4097  case PPC::BI__builtin_altivec_vsldbi:
4098  return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
4099  case PPC::BI__builtin_altivec_vsrdbi:
4100  return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
4101  case PPC::BI__builtin_vsx_xxpermx:
4102  return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
4103  case PPC::BI__builtin_ppc_tw:
4104  case PPC::BI__builtin_ppc_tdw:
4105  return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31);
4106  case PPC::BI__builtin_ppc_cmpeqb:
4107  case PPC::BI__builtin_ppc_setb:
4108  case PPC::BI__builtin_ppc_maddhd:
4109  case PPC::BI__builtin_ppc_maddhdu:
4110  case PPC::BI__builtin_ppc_maddld:
4111  return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
4112  diag::err_ppc_builtin_only_on_arch, "9");
4113  case PPC::BI__builtin_ppc_cmprb:
4114  return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
4115  diag::err_ppc_builtin_only_on_arch, "9") ||
4116  SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
4117  // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
4118  // be a constant that represents a contiguous bit field.
4119  case PPC::BI__builtin_ppc_rlwnm:
4120  return SemaValueIsRunOfOnes(TheCall, 2);
4121  case PPC::BI__builtin_ppc_rlwimi:
4122  case PPC::BI__builtin_ppc_rldimi:
4123  return SemaBuiltinConstantArg(TheCall, 2, Result) ||
4124  SemaValueIsRunOfOnes(TheCall, 3);
4125  case PPC::BI__builtin_ppc_extract_exp:
4126  case PPC::BI__builtin_ppc_extract_sig:
4127  case PPC::BI__builtin_ppc_insert_exp:
4128  return SemaFeatureCheck(*this, TheCall, "power9-vector",
4129  diag::err_ppc_builtin_only_on_arch, "9");
4130  case PPC::BI__builtin_ppc_addex: {
4131  if (SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
4132  diag::err_ppc_builtin_only_on_arch, "9") ||
4133  SemaBuiltinConstantArgRange(TheCall, 2, 0, 3))
4134  return true;
4135  // Output warning for reserved values 1 to 3.
4136  int ArgValue =
4137  TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue();
4138  if (ArgValue != 0)
4139  Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour)
4140  << ArgValue;
4141  return false;
4142  }
4143  case PPC::BI__builtin_ppc_mtfsb0:
4144  case PPC::BI__builtin_ppc_mtfsb1:
4145  return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
4146  case PPC::BI__builtin_ppc_mtfsf:
4147  return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255);
4148  case PPC::BI__builtin_ppc_mtfsfi:
4149  return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
4150  SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
4151  case PPC::BI__builtin_ppc_alignx:
4152  return SemaBuiltinConstantArgPower2(TheCall, 0);
4153  case PPC::BI__builtin_ppc_rdlam:
4154  return SemaValueIsRunOfOnes(TheCall, 2);
4155  case PPC::BI__builtin_ppc_icbt:
4156  case PPC::BI__builtin_ppc_sthcx:
4157  case PPC::BI__builtin_ppc_stbcx:
4158  case PPC::BI__builtin_ppc_lharx:
4159  case PPC::BI__builtin_ppc_lbarx:
4160  return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
4161  diag::err_ppc_builtin_only_on_arch, "8");
4162  case PPC::BI__builtin_vsx_ldrmb:
4163  case PPC::BI__builtin_vsx_strmb:
4164  return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
4165  diag::err_ppc_builtin_only_on_arch, "8") ||
4166  SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
4167  case PPC::BI__builtin_altivec_vcntmbb:
4168  case PPC::BI__builtin_altivec_vcntmbh:
4169  case PPC::BI__builtin_altivec_vcntmbw:
4170  case PPC::BI__builtin_altivec_vcntmbd:
4171  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
4172  case PPC::BI__builtin_darn:
4173  case PPC::BI__builtin_darn_raw:
4174  case PPC::BI__builtin_darn_32:
4175  return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
4176  diag::err_ppc_builtin_only_on_arch, "9");
4177  case PPC::BI__builtin_vsx_xxgenpcvbm:
4178  case PPC::BI__builtin_vsx_xxgenpcvhm:
4179  case PPC::BI__builtin_vsx_xxgenpcvwm:
4180  case PPC::BI__builtin_vsx_xxgenpcvdm:
4181  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
4182  case PPC::BI__builtin_ppc_compare_exp_uo:
4183  case PPC::BI__builtin_ppc_compare_exp_lt:
4184  case PPC::BI__builtin_ppc_compare_exp_gt:
4185  case PPC::BI__builtin_ppc_compare_exp_eq:
4186  return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
4187  diag::err_ppc_builtin_only_on_arch, "9") ||
4188  SemaFeatureCheck(*this, TheCall, "vsx",
4189  diag::err_ppc_builtin_requires_vsx);
4190  case PPC::BI__builtin_ppc_test_data_class: {
4191  // Check if the first argument of the __builtin_ppc_test_data_class call is
4192  // valid. The argument must be either a 'float' or a 'double'.
4193  QualType ArgType = TheCall->getArg(0)->getType();
4194  if (ArgType != QualType(Context.FloatTy) &&
4195  ArgType != QualType(Context.DoubleTy))
4196  return Diag(TheCall->getBeginLoc(),
4197  diag::err_ppc_invalid_test_data_class_type);
4198  return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
4199  diag::err_ppc_builtin_only_on_arch, "9") ||
4200  SemaFeatureCheck(*this, TheCall, "vsx",
4201  diag::err_ppc_builtin_requires_vsx) ||
4202  SemaBuiltinConstantArgRange(TheCall, 1, 0, 127);
4203  }
4204  case PPC::BI__builtin_ppc_maxfe:
4205  case PPC::BI__builtin_ppc_minfe:
4206  case PPC::BI__builtin_ppc_maxfl:
4207  case PPC::BI__builtin_ppc_minfl:
4208  case PPC::BI__builtin_ppc_maxfs:
4209  case PPC::BI__builtin_ppc_minfs: {
4210  if (Context.getTargetInfo().getTriple().isOSAIX() &&
4211  (BuiltinID == PPC::BI__builtin_ppc_maxfe ||
4212  BuiltinID == PPC::BI__builtin_ppc_minfe))
4213  return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type)
4214  << "builtin" << true << 128 << QualType(Context.LongDoubleTy)
4215  << false << Context.getTargetInfo().getTriple().str();
4216  // Argument type should be exact.
4217  QualType ArgType = QualType(Context.LongDoubleTy);
4218  if (BuiltinID == PPC::BI__builtin_ppc_maxfl ||
4219  BuiltinID == PPC::BI__builtin_ppc_minfl)
4220  ArgType = QualType(Context.DoubleTy);
4221  else if (BuiltinID == PPC::BI__builtin_ppc_maxfs ||
4222  BuiltinID == PPC::BI__builtin_ppc_minfs)
4223  ArgType = QualType(Context.FloatTy);
4224  for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I)
4225  if (TheCall->getArg(I)->getType() != ArgType)
4226  return Diag(TheCall->getBeginLoc(),
4227  diag::err_typecheck_convert_incompatible)
4228  << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0;
4229  return false;
4230  }
4231  case PPC::BI__builtin_ppc_load8r:
4232  case PPC::BI__builtin_ppc_store8r:
4233  return SemaFeatureCheck(*this, TheCall, "isa-v206-instructions",
4234  diag::err_ppc_builtin_only_on_arch, "7");
4235 #define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
4236  case PPC::BI__builtin_##Name: \
4237  return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types);
4238 #include "clang/Basic/BuiltinsPPC.def"
4239  }
4240  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
4241 }
4242 
4243 // Check if the given type is a non-pointer PPC MMA type. This function is used
4244 // in Sema to prevent invalid uses of restricted PPC MMA types.
4245 bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
4246  if (Type->isPointerType() || Type->isArrayType())
4247  return false;
4248 
4249  QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
4250 #define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
4251  if (false
4252 #include "clang/Basic/PPCTypes.def"
4253  ) {
4254  Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
4255  return true;
4256  }
4257  return false;
4258 }
4259 
4260 bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
4261  CallExpr *TheCall) {
4262  // position of memory order and scope arguments in the builtin
4263  unsigned OrderIndex, ScopeIndex;
4264  switch (BuiltinID) {
4265  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
4266  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
4267  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
4268  case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
4269  OrderIndex = 2;
4270  ScopeIndex = 3;
4271  break;
4272  case AMDGPU::BI__builtin_amdgcn_fence:
4273  OrderIndex = 0;
4274  ScopeIndex = 1;
4275  break;
4276  default:
4277  return false;
4278  }
4279 
4280  ExprResult Arg = TheCall->getArg(OrderIndex);
4281  auto ArgExpr = Arg.get();
4282  Expr::EvalResult ArgResult;
4283 
4284  if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
4285  return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
4286  << ArgExpr->getType();
4287  auto Ord = ArgResult.Val.getInt().getZExtValue();
4288 
4289  // Check validity of memory ordering as per C11 / C++11's memory model.
4290  // Only fence needs check. Atomic dec/inc allow all memory orders.
4291  if (!llvm::isValidAtomicOrderingCABI(Ord))
4292  return Diag(ArgExpr->getBeginLoc(),
4293  diag::warn_atomic_op_has_invalid_memory_order)
4294  << ArgExpr->getSourceRange();
4295  switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
4296  case llvm::AtomicOrderingCABI::relaxed:
4297  case llvm::AtomicOrderingCABI::consume:
4298  if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
4299  return Diag(ArgExpr->getBeginLoc(),
4300  diag::warn_atomic_op_has_invalid_memory_order)
4301  << ArgExpr->getSourceRange();
4302  break;
4303  case llvm::AtomicOrderingCABI::acquire:
4304  case llvm::AtomicOrderingCABI::release:
4305  case llvm::AtomicOrderingCABI::acq_rel:
4306  case llvm::AtomicOrderingCABI::seq_cst:
4307  break;
4308  }
4309 
4310  Arg = TheCall->getArg(ScopeIndex);
4311  ArgExpr = Arg.get();
4312  Expr::EvalResult ArgResult1;
4313  // Check that sync scope is a constant literal
4314  if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context))
4315  return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
4316  << ArgExpr->getType();
4317 
4318  return false;
4319 }
4320 
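// Editor's note, illustrative only: the ordering and scope checks above apply
// to calls such as the following, assuming an AMDGCN target (the scope string
// is one example; any constant expression passes the scope check):
//
//   __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");  // valid order, constant scope: accepted
//   __builtin_amdgcn_fence(__ATOMIC_RELAXED, "workgroup");  // relaxed is not allowed for fence: warned
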
4321 bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
4322  llvm::APSInt Result;
4323 
4324  // We can't check the value of a dependent argument.
4325  Expr *Arg = TheCall->getArg(ArgNum);
4326  if (Arg->isTypeDependent() || Arg->isValueDependent())
4327  return false;
4328 
4329  // Check constant-ness first.
4330  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4331  return true;
4332 
4333  int64_t Val = Result.getSExtValue();
4334  if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
4335  return false;
4336 
4337  return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
4338  << Arg->getSourceRange();
4339 }
4340 
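// Editor's note, illustrative only: the LMUL encodings accepted above are 0..3
// and 5..7; the value 4 is rejected. For example, a hypothetical call such as
// __builtin_rvv_vsetvli(avl, /*sew*/ 0, /*lmul*/ 4) would be diagnosed by this
// check, while any lmul operand of 0..3 or 5..7 passes (the argument names here
// are assumptions for illustration only).
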
4341 static bool isRISCV32Builtin(unsigned BuiltinID) {
4342  // These builtins only work on riscv32 targets.
4343  switch (BuiltinID) {
4344  case RISCV::BI__builtin_riscv_zip_32:
4345  case RISCV::BI__builtin_riscv_unzip_32:
4346  case RISCV::BI__builtin_riscv_aes32dsi_32:
4347  case RISCV::BI__builtin_riscv_aes32dsmi_32:
4348  case RISCV::BI__builtin_riscv_aes32esi_32:
4349  case RISCV::BI__builtin_riscv_aes32esmi_32:
4350  case RISCV::BI__builtin_riscv_sha512sig0h_32:
4351  case RISCV::BI__builtin_riscv_sha512sig0l_32:
4352  case RISCV::BI__builtin_riscv_sha512sig1h_32:
4353  case RISCV::BI__builtin_riscv_sha512sig1l_32:
4354  case RISCV::BI__builtin_riscv_sha512sum0r_32:
4355  case RISCV::BI__builtin_riscv_sha512sum1r_32:
4356  return true;
4357  }
4358 
4359  return false;
4360 }
4361 
4362 bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
4363  unsigned BuiltinID,
4364  CallExpr *TheCall) {
4365  // CodeGenFunction can also detect this, but this gives a better error
4366  // message.
4367  bool FeatureMissing = false;
4368  SmallVector<StringRef> ReqFeatures;
4369  StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID);
4370  Features.split(ReqFeatures, ',');
4371 
4372  // Check for 32-bit only builtins on a 64-bit target.
4373  const llvm::Triple &TT = TI.getTriple();
4374  if (TT.getArch() != llvm::Triple::riscv32 && isRISCV32Builtin(BuiltinID))
4375  return Diag(TheCall->getCallee()->getBeginLoc(),
4376  diag::err_32_bit_builtin_64_bit_tgt);
4377 
4378  // Check if each required feature is included
4379  for (StringRef F : ReqFeatures) {
4380  SmallVector<StringRef> ReqOpFeatures;
4381  F.split(ReqOpFeatures, '|');
4382  bool HasFeature = false;
4383  for (StringRef OF : ReqOpFeatures) {
4384  if (TI.hasFeature(OF)) {
4385  HasFeature = true;
4386  continue;
4387  }
4388  }
4389 
4390  if (!HasFeature) {
4391  std::string FeatureStrs;
4392  for (StringRef OF : ReqOpFeatures) {
4393  // If the feature is 64bit, alter the string so it will print better in
4394  // the diagnostic.
4395  if (OF == "64bit")
4396  OF = "RV64";
4397 
4398  // Convert features like "zbr" and "experimental-zbr" to "Zbr".
4399  OF.consume_front("experimental-");
4400  std::string FeatureStr = OF.str();
4401  FeatureStr[0] = std::toupper(FeatureStr[0]);
4402  // Combine strings.
4403  FeatureStrs += FeatureStrs == "" ? "" : ", ";
4404  FeatureStrs += "'";
4405  FeatureStrs += FeatureStr;
4406  FeatureStrs += "'";
4407  }
4408  // Error message
4409  FeatureMissing = true;
4410  Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension)
4411  << TheCall->getSourceRange() << StringRef(FeatureStrs);
4412  }
4413  }
4414 
4415  if (FeatureMissing)
4416  return true;
4417 
4418  switch (BuiltinID) {
4419  case RISCVVector::BI__builtin_rvv_vsetvli:
4420  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) ||
4421  CheckRISCVLMUL(TheCall, 2);
4422  case RISCVVector::BI__builtin_rvv_vsetvlimax:
4423  return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
4424  CheckRISCVLMUL(TheCall, 1);
4425  case RISCVVector::BI__builtin_rvv_vget_v: {
4426  ASTContext::BuiltinVectorTypeInfo ResVecInfo =
4427  Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
4428  TheCall->getType().getCanonicalType().getTypePtr()));
4429  ASTContext::BuiltinVectorTypeInfo VecInfo =
4430  Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
4431  TheCall->getArg(0)->getType().getCanonicalType().getTypePtr()));
4432  unsigned MaxIndex =
4433  (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) /
4434  (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors);
4435  return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
4436  }
4437  case RISCVVector::BI__builtin_rvv_vset_v: {
4438  ASTContext::BuiltinVectorTypeInfo ResVecInfo =
4439  Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
4440  TheCall->getType().getCanonicalType().getTypePtr()));
4441  ASTContext::BuiltinVectorTypeInfo VecInfo =
4442  Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
4443  TheCall->getArg(2)->getType().getCanonicalType().getTypePtr()));
4444  unsigned MaxIndex =
4445  (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) /
4446  (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors);
4447  return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
4448  }
4449  // Check if byteselect is in [0, 3]
4450  case RISCV::BI__builtin_riscv_aes32dsi_32:
4451  case RISCV::BI__builtin_riscv_aes32dsmi_32:
4452  case RISCV::BI__builtin_riscv_aes32esi_32:
4453  case RISCV::BI__builtin_riscv_aes32esmi_32:
4454  case RISCV::BI__builtin_riscv_sm4ks:
4455  case RISCV::BI__builtin_riscv_sm4ed:
4456  return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
4457  // Check if rnum is in [0, 10]
4458  case RISCV::BI__builtin_riscv_aes64ks1i_64:
4459  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10);
4460  }
4461 
4462  return false;
4463 }
4464 
4465 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
4466  CallExpr *TheCall) {
4467  if (BuiltinID == SystemZ::BI__builtin_tabort) {
4468  Expr *Arg = TheCall->getArg(0);
4469  if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context))
4470  if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
4471  return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
4472  << Arg->getSourceRange();
4473  }
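 // For illustration: a call such as __builtin_tabort(42) is rejected by the
 // check above, since abort codes below 256 are reserved, while
 // __builtin_tabort(256) passes it.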
4474 
4475  // For intrinsics which take an immediate value as part of the instruction,
4476  // range check them here.
4477  unsigned i = 0, l = 0, u = 0;
4478  switch (BuiltinID) {
4479  default: return false;
4480  case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
4481  case SystemZ::BI__builtin_s390_verimb:
4482  case SystemZ::BI__builtin_s390_verimh:
4483  case SystemZ::BI__builtin_s390_verimf:
4484  case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
4485  case SystemZ::BI__builtin_s390_vfaeb:
4486  case SystemZ::BI__builtin_s390_vfaeh:
4487  case SystemZ::BI__builtin_s390_vfaef:
4488  case SystemZ::BI__builtin_s390_vfaebs:
4489  case SystemZ::BI__builtin_s390_vfaehs:
4490  case SystemZ::BI__builtin_s390_vfaefs:
4491  case SystemZ::BI__builtin_s390_vfaezb:
4492  case SystemZ::BI__builtin_s390_vfaezh:
4493  case SystemZ::BI__builtin_s390_vfaezf:
4494  case SystemZ::BI__builtin_s390_vfaezbs:
4495  case SystemZ::BI__builtin_s390_vfaezhs:
4496  case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
4497  case SystemZ::BI__builtin_s390_vfisb:
4498  case SystemZ::BI__builtin_s390_vfidb:
4499  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
4500  SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
4501  case SystemZ::BI__builtin_s390_vftcisb:
4502  case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
4503  case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
4504  case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
4505  case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
4506  case SystemZ::BI__builtin_s390_vstrcb:
4507  case SystemZ::BI__builtin_s390_vstrch:
4508  case SystemZ::BI__builtin_s390_vstrcf:
4509  case SystemZ::BI__builtin_s390_vstrczb:
4510  case SystemZ::BI__builtin_s390_vstrczh:
4511  case SystemZ::BI__builtin_s390_vstrczf:
4512  case SystemZ::BI__builtin_s390_vstrcbs:
4513  case SystemZ::BI__builtin_s390_vstrchs:
4514  case SystemZ::BI__builtin_s390_vstrcfs:
4515  case SystemZ::BI__builtin_s390_vstrczbs:
4516  case SystemZ::BI__builtin_s390_vstrczhs:
4517  case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
4518  case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
4519  case SystemZ::BI__builtin_s390_vfminsb:
4520  case SystemZ::BI__builtin_s390_vfmaxsb:
4521  case SystemZ::BI__builtin_s390_vfmindb:
4522  case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
4523  case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
4524  case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
4525  case SystemZ::BI__builtin_s390_vclfnhs:
4526  case SystemZ::BI__builtin_s390_vclfnls:
4527  case SystemZ::BI__builtin_s390_vcfn:
4528  case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break;
4529  case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break;
4530  }
4531  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
4532 }
4533 
4534 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
4535 /// This checks that the target supports __builtin_cpu_supports and
4536 /// that the string argument is constant and valid.
4537 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI,
4538  CallExpr *TheCall) {
4539  Expr *Arg = TheCall->getArg(0);
4540 
4541  // Check if the argument is a string literal.
4542  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
4543  return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
4544  << Arg->getSourceRange();
4545 
4546  // Check the contents of the string.
4547  StringRef Feature =
4548  cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
4549  if (!TI.validateCpuSupports(Feature))
4550  return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
4551  << Arg->getSourceRange();
4552  return false;
4553 }
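// A sketch of the user-facing usage this validates (illustrative only):
//   if (__builtin_cpu_supports("avx2")) { /* take the AVX2 path */ }
// A non-literal argument, or an unknown feature string, is diagnosed by the
// checks above.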
4554 
4555 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
4556 /// This checks that the target supports __builtin_cpu_is and
4557 /// that the string argument is constant and valid.
4558 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) {
4559  Expr *Arg = TheCall->getArg(0);
4560 
4561  // Check if the argument is a string literal.
4562  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
4563  return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
4564  << Arg->getSourceRange();
4565 
4566  // Check the contents of the string.
4567  StringRef Feature =
4568  cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
4569  if (!TI.validateCpuIs(Feature))
4570  return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
4571  << Arg->getSourceRange();
4572  return false;
4573 }
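// Likewise for __builtin_cpu_is (illustrative only):
//   if (__builtin_cpu_is("amd")) { /* vendor-specific path */ }
// where the argument must be a string literal naming a CPU or vendor known to
// the target.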
4574 
4575 // Check if the rounding mode is legal.
4576 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
4577  // Indicates if this instruction has rounding control or just SAE.
4578  bool HasRC = false;
4579 
4580  unsigned ArgNum = 0;
4581  switch (BuiltinID) {
4582  default:
4583  return false;
4584  case X86::BI__builtin_ia32_vcvttsd2si32:
4585  case X86::BI__builtin_ia32_vcvttsd2si64:
4586  case X86::BI__builtin_ia32_vcvttsd2usi32:
4587  case X86::BI__builtin_ia32_vcvttsd2usi64:
4588  case X86::BI__builtin_ia32_vcvttss2si32:
4589  case X86::BI__builtin_ia32_vcvttss2si64:
4590  case X86::BI__builtin_ia32_vcvttss2usi32:
4591  case X86::BI__builtin_ia32_vcvttss2usi64:
4592  case X86::BI__builtin_ia32_vcvttsh2si32:
4593  case X86::BI__builtin_ia32_vcvttsh2si64:
4594  case X86::BI__builtin_ia32_vcvttsh2usi32:
4595  case X86::BI__builtin_ia32_vcvttsh2usi64:
4596  ArgNum = 1;
4597  break;
4598  case X86::BI__builtin_ia32_maxpd512:
4599  case X86::BI__builtin_ia32_maxps512:
4600  case X86::BI__builtin_ia32_minpd512:
4601  case X86::BI__builtin_ia32_minps512:
4602  case X86::BI__builtin_ia32_maxph512:
4603  case X86::BI__builtin_ia32_minph512:
4604  ArgNum = 2;
4605  break;
4606  case X86::BI__builtin_ia32_vcvtph2pd512_mask:
4607  case X86::BI__builtin_ia32_vcvtph2psx512_mask:
4608  case X86::BI__builtin_ia32_cvtps2pd512_mask:
4609  case X86::BI__builtin_ia32_cvttpd2dq512_mask:
4610  case X86::BI__builtin_ia32_cvttpd2qq512_mask:
4611  case X86::BI__builtin_ia32_cvttpd2udq512_mask:
4612  case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
4613  case X86::BI__builtin_ia32_cvttps2dq512_mask:
4614  case X86::BI__builtin_ia32_cvttps2qq512_mask:
4615  case X86::BI__builtin_ia32_cvttps2udq512_mask:
4616  case X86::BI__builtin_ia32_cvttps2uqq512_mask:
4617  case X86::BI__builtin_ia32_vcvttph2w512_mask:
4618  case X86::BI__builtin_ia32_vcvttph2uw512_mask:
4619  case X86::BI__builtin_ia32_vcvttph2dq512_mask:
4620  case X86::BI__builtin_ia32_vcvttph2udq512_mask:
4621  case X86::BI__builtin_ia32_vcvttph2qq512_mask:
4622  case X86::BI__builtin_ia32_vcvttph2uqq512_mask:
4623  case X86::BI__builtin_ia32_exp2pd_mask:
4624  case X86::BI__builtin_ia32_exp2ps_mask:
4625  case X86::BI__builtin_ia32_getexppd512_mask:
4626  case X86::BI__builtin_ia32_getexpps512_mask:
4627  case X86::BI__builtin_ia32_getexpph512_mask:
4628  case X86::BI__builtin_ia32_rcp28pd_mask:
4629  case X86::BI__builtin_ia32_rcp28ps_mask:
4630  case X86::BI__builtin_ia32_rsqrt28pd_mask:
4631  case X86::BI__builtin_ia32_rsqrt28ps_mask:
4632  case X86::BI__builtin_ia32_vcomisd:
4633  case X86::BI__builtin_ia32_vcomiss:
4634  case X86::BI__builtin_ia32_vcomish:
4635  case X86::BI__builtin_ia32_vcvtph2ps512_mask:
4636  ArgNum = 3;
4637  break;
4638  case X86::BI__builtin_ia32_cmppd512_mask:
4639  case X86::BI__builtin_ia32_cmpps512_mask:
4640  case X86::BI__builtin_ia32_cmpsd_mask:
4641  case X86::BI__builtin_ia32_cmpss_mask:
4642  case X86::BI__builtin_ia32_cmpsh_mask:
4643  case X86::BI__builtin_ia32_vcvtsh2sd_round_mask:
4644  case X86::BI__builtin_ia32_vcvtsh2ss_round_mask:
4645  case X86::BI__builtin_ia32_cvtss2sd_round_mask:
4646  case X86::BI__builtin_ia32_getexpsd128_round_mask:
4647  case X86::BI__builtin_ia32_getexpss128_round_mask:
4648  case X86::BI__builtin_ia32_getexpsh128_round_mask:
4649  case X86::BI__builtin_ia32_getmantpd512_mask:
4650  case X86::BI__builtin_ia32_getmantps512_mask:
4651  case X86::BI__builtin_ia32_getmantph512_mask:
4652  case X86::BI__builtin_ia32_maxsd_round_mask:
4653  case X86::BI__builtin_ia32_maxss_round_mask:
4654  case X86::BI__builtin_ia32_maxsh_round_mask:
4655  case X86::BI__builtin_ia32_minsd_round_mask:
4656  case X86::BI__builtin_ia32_minss_round_mask:
4657  case X86::BI__builtin_ia32_minsh_round_mask:
4658  case X86::BI__builtin_ia32_rcp28sd_round_mask:
4659  case X86::BI__builtin_ia32_rcp28ss_round_mask:
4660  case X86::BI__builtin_ia32_reducepd512_mask:
4661  case X86::BI__builtin_ia32_reduceps512_mask:
4662  case X86::BI__builtin_ia32_reduceph512_mask:
4663  case X86::BI__builtin_ia32_rndscalepd_mask:
4664  case X86::BI__builtin_ia32_rndscaleps_mask:
4665  case X86::BI__builtin_ia32_rndscaleph_mask:
4666  case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
4667  case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
4668  ArgNum = 4;
4669  break;
4670  case X86::BI__builtin_ia32_fixupimmpd512_mask:
4671  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
4672  case X86::BI__builtin_ia32_fixupimmps512_mask:
4673  case X86::BI__builtin_ia32_fixupimmps512_maskz:
4674  case X86::BI__builtin_ia32_fixupimmsd_mask:
4675  case X86::BI__builtin_ia32_fixupimmsd_maskz:
4676  case X86::BI__builtin_ia32_fixupimmss_mask:
4677  case X86::BI__builtin_ia32_fixupimmss_maskz:
4678  case X86::BI__builtin_ia32_getmantsd_round_mask:
4679  case X86::BI__builtin_ia32_getmantss_round_mask:
4680  case X86::BI__builtin_ia32_getmantsh_round_mask:
4681  case X86::BI__builtin_ia32_rangepd512_mask:
4682  case X86::BI__builtin_ia32_rangeps512_mask:
4683  case X86::BI__builtin_ia32_rangesd128_round_mask:
4684  case X86::BI__builtin_ia32_rangess128_round_mask:
4685  case X86::BI__builtin_ia32_reducesd_mask:
4686  case X86::BI__builtin_ia32_reducess_mask:
4687  case X86::BI__builtin_ia32_reducesh_mask:
4688  case X86::BI__builtin_ia32_rndscalesd_round_mask:
4689  case X86::BI__builtin_ia32_rndscaless_round_mask:
4690  case X86::BI__builtin_ia32_rndscalesh_round_mask:
4691  ArgNum = 5;
4692  break;
4693  case X86::BI__builtin_ia32_vcvtsd2si64:
4694  case X86::BI__builtin_ia32_vcvtsd2si32:
4695  case X86::BI__builtin_ia32_vcvtsd2usi32:
4696  case X86::BI__builtin_ia32_vcvtsd2usi64:
4697  case X86::BI__builtin_ia32_vcvtss2si32:
4698  case X86::BI__builtin_ia32_vcvtss2si64:
4699  case X86::BI__builtin_ia32_vcvtss2usi32:
4700  case X86::BI__builtin_ia32_vcvtss2usi64:
4701  case X86::BI__builtin_ia32_vcvtsh2si32:
4702  case X86::BI__builtin_ia32_vcvtsh2si64:
4703  case X86::BI__builtin_ia32_vcvtsh2usi32:
4704  case X86::BI__builtin_ia32_vcvtsh2usi64:
4705  case X86::BI__builtin_ia32_sqrtpd512:
4706  case X86::BI__builtin_ia32_sqrtps512:
4707  case X86::BI__builtin_ia32_sqrtph512:
4708  ArgNum = 1;
4709  HasRC = true;
4710  break;
4711  case X86::BI__builtin_ia32_addph512:
4712  case X86::BI__builtin_ia32_divph512:
4713  case X86::BI__builtin_ia32_mulph512:
4714  case X86::BI__builtin_ia32_subph512:
4715  case X86::BI__builtin_ia32_addpd512:
4716  case X86::BI__builtin_ia32_addps512:
4717  case X86::BI__builtin_ia32_divpd512:
4718  case X86::BI__builtin_ia32_divps512:
4719  case X86::BI__builtin_ia32_mulpd512:
4720  case X86::BI__builtin_ia32_mulps512:
4721  case X86::BI__builtin_ia32_subpd512:
4722  case X86::BI__builtin_ia32_subps512:
4723  case X86::BI__builtin_ia32_cvtsi2sd64:
4724  case X86::BI__builtin_ia32_cvtsi2ss32:
4725  case X86::BI__builtin_ia32_cvtsi2ss64:
4726  case X86::BI__builtin_ia32_cvtusi2sd64:
4727  case X86::BI__builtin_ia32_cvtusi2ss32:
4728  case X86::BI__builtin_ia32_cvtusi2ss64:
4729  case X86::BI__builtin_ia32_vcvtusi2sh:
4730  case X86::BI__builtin_ia32_vcvtusi642sh:
4731  case X86::BI__builtin_ia32_vcvtsi2sh:
4732  case X86::BI__builtin_ia32_vcvtsi642sh:
4733  ArgNum = 2;
4734  HasRC = true;
4735  break;
4736  case X86::BI__builtin_ia32_cvtdq2ps512_mask:
4737  case X86::BI__builtin_ia32_cvtudq2ps512_mask:
4738  case X86::BI__builtin_ia32_vcvtpd2ph512_mask:
4739  case X86::BI__builtin_ia32_vcvtps2phx512_mask:
4740  case X86::BI__builtin_ia32_cvtpd2ps512_mask:
4741  case X86::BI__builtin_ia32_cvtpd2dq512_mask:
4742  case X86::BI__builtin_ia32_cvtpd2qq512_mask:
4743  case X86::BI__builtin_ia32_cvtpd2udq512_mask:
4744  case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
4745  case X86::BI__builtin_ia32_cvtps2dq512_mask:
4746  case X86::BI__builtin_ia32_cvtps2qq512_mask:
4747  case X86::BI__builtin_ia32_cvtps2udq512_mask:
4748  case X86::BI__builtin_ia32_cvtps2uqq512_mask:
4749  case X86::BI__builtin_ia32_cvtqq2pd512_mask:
4750  case X86::BI__builtin_ia32_cvtqq2ps512_mask:
4751  case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
4752  case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
4753  case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
4754  case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
4755  case X86::BI__builtin_ia32_vcvtw2ph512_mask:
4756  case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
4757  case X86::BI__builtin_ia32_vcvtph2w512_mask:
4758  case X86::BI__builtin_ia32_vcvtph2uw512_mask:
4759  case X86::BI__builtin_ia32_vcvtph2dq512_mask:
4760  case X86::BI__builtin_ia32_vcvtph2udq512_mask:
4761  case X86::BI__builtin_ia32_vcvtph2qq512_mask:
4762  case X86::BI__builtin_ia32_vcvtph2uqq512_mask:
4763  case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
4764  case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
4765  ArgNum = 3;
4766  HasRC = true;
4767  break;
4768  case X86::BI__builtin_ia32_addsh_round_mask:
4769  case X86::BI__builtin_ia32_addss_round_mask:
4770  case X86::BI__builtin_ia32_addsd_round_mask:
4771  case X86::BI__builtin_ia32_divsh_round_mask:
4772  case X86::BI__builtin_ia32_divss_round_mask:
4773  case X86::BI__builtin_ia32_divsd_round_mask:
4774  case X86::BI__builtin_ia32_mulsh_round_mask:
4775  case X86::BI__builtin_ia32_mulss_round_mask:
4776  case X86::BI__builtin_ia32_mulsd_round_mask:
4777  case X86::BI__builtin_ia32_subsh_round_mask:
4778  case X86::BI__builtin_ia32_subss_round_mask:
4779  case X86::BI__builtin_ia32_subsd_round_mask:
4780  case X86::BI__builtin_ia32_scalefph512_mask:
4781  case X86::BI__builtin_ia32_scalefpd512_mask:
4782  case X86::BI__builtin_ia32_scalefps512_mask:
4783  case X86::BI__builtin_ia32_scalefsd_round_mask:
4784  case X86::BI__builtin_ia32_scalefss_round_mask:
4785  case X86::BI__builtin_ia32_scalefsh_round_mask:
4786  case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
4787  case X86::BI__builtin_ia32_vcvtss2sh_round_mask:
4788  case X86::BI__builtin_ia32_vcvtsd2sh_round_mask:
4789  case X86::BI__builtin_ia32_sqrtsd_round_mask:
4790  case X86::BI__builtin_ia32_sqrtss_round_mask:
4791  case X86::BI__builtin_ia32_sqrtsh_round_mask:
4792  case X86::BI__builtin_ia32_vfmaddsd3_mask:
4793  case X86::BI__builtin_ia32_vfmaddsd3_maskz:
4794  case X86::BI__builtin_ia32_vfmaddsd3_mask3:
4795  case X86::BI__builtin_ia32_vfmaddss3_mask:
4796  case X86::BI__builtin_ia32_vfmaddss3_maskz:
4797  case X86::BI__builtin_ia32_vfmaddss3_mask3:
4798  case X86::BI__builtin_ia32_vfmaddsh3_mask:
4799  case X86::BI__builtin_ia32_vfmaddsh3_maskz:
4800  case X86::BI__builtin_ia32_vfmaddsh3_mask3:
4801  case X86::BI__builtin_ia32_vfmaddpd512_mask:
4802  case X86::BI__builtin_ia32_vfmaddpd512_maskz:
4803  case X86::BI__builtin_ia32_vfmaddpd512_mask3:
4804  case X86::BI__builtin_ia32_vfmsubpd512_mask3:
4805  case X86::BI__builtin_ia32_vfmaddps512_mask:
4806  case X86::BI__builtin_ia32_vfmaddps512_maskz:
4807  case X86::BI__builtin_ia32_vfmaddps512_mask3:
4808  case X86::BI__builtin_ia32_vfmsubps512_mask3:
4809  case X86::BI__builtin_ia32_vfmaddph512_mask:
4810  case X86::BI__builtin_ia32_vfmaddph512_maskz:
4811  case X86::BI__builtin_ia32_vfmaddph512_mask3:
4812  case X86::BI__builtin_ia32_vfmsubph512_mask3:
4813  case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
4814  case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
4815  case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
4816  case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
4817  case X86::BI__builtin_ia32_vfmaddsubps512_mask:
4818  case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
4819  case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
4820  case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
4821  case X86::BI__builtin_ia32_vfmaddsubph512_mask:
4822  case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
4823  case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
4824  case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
4825  case X86::BI__builtin_ia32_vfmaddcsh_mask:
4826  case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
4827  case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
4828  case X86::BI__builtin_ia32_vfmaddcph512_mask:
4829  case X86::BI__builtin_ia32_vfmaddcph512_maskz:
4830  case X86::BI__builtin_ia32_vfmaddcph512_mask3:
4831  case X86::BI__builtin_ia32_vfcmaddcsh_mask:
4832  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
4833  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
4834  case X86::BI__builtin_ia32_vfcmaddcph512_mask:
4835  case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
4836  case X86::BI__builtin_ia32_vfcmaddcph512_mask3:
4837  case X86::BI__builtin_ia32_vfmulcsh_mask:
4838  case X86::BI__builtin_ia32_vfmulcph512_mask:
4839  case X86::BI__builtin_ia32_vfcmulcsh_mask:
4840  case X86::BI__builtin_ia32_vfcmulcph512_mask:
4841  ArgNum = 4;
4842  HasRC = true;
4843  break;
4844  }
4845 
4846  llvm::APSInt Result;
4847 
4848  // We can't check the value of a dependent argument.
4849  Expr *Arg = TheCall->getArg(ArgNum);
4850  if (Arg->isTypeDependent() || Arg->isValueDependent())
4851  return false;
4852 
4853  // Check constant-ness first.
4854  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4855  return true;
4856 
4857  // Make sure the rounding mode is either ROUND_CUR_DIRECTION or has the
4858  // ROUND_NO_EXC bit set. If the intrinsic has rounding control (bits 1:0),
4859  // make sure it is only combined with ROUND_NO_EXC. If the intrinsic does not
4860  // have rounding control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
4861  if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
4862  Result == 8/*ROUND_NO_EXC*/ ||
4863  (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
4864  (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
4865  return false;
4866 
4867  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
4868  << Arg->getSourceRange();
4869 }
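// For illustration (immediates taken from the usual x86 rounding-control
// encoding): with rounding control, 8 (_MM_FROUND_TO_NEAREST_INT |
// _MM_FROUND_NO_EXC) and 11 (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) are
// accepted, while 0 (round-to-nearest without suppressing exceptions) is
// rejected with err_x86_builtin_invalid_rounding.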
4870 
4871 // Check if the gather/scatter scale is legal.
4872 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
4873  CallExpr *TheCall) {
4874  unsigned ArgNum = 0;
4875  switch (BuiltinID) {
4876  default:
4877  return false;
4878  case X86::BI__builtin_ia32_gatherpfdpd:
4879  case X86::BI__builtin_ia32_gatherpfdps:
4880  case X86::BI__builtin_ia32_gatherpfqpd:
4881  case X86::BI__builtin_ia32_gatherpfqps:
4882  case X86::BI__builtin_ia32_scatterpfdpd:
4883  case X86::BI__builtin_ia32_scatterpfdps:
4884  case X86::BI__builtin_ia32_scatterpfqpd:
4885  case X86::BI__builtin_ia32_scatterpfqps:
4886  ArgNum = 3;
4887  break;
4888  case X86::BI__builtin_ia32_gatherd_pd:
4889  case X86::BI__builtin_ia32_gatherd_pd256:
4890  case X86::BI__builtin_ia32_gatherq_pd:
4891  case X86::BI__builtin_ia32_gatherq_pd256:
4892  case X86::BI__builtin_ia32_gatherd_ps:
4893  case X86::BI__builtin_ia32_gatherd_ps256:
4894  case X86::BI__builtin_ia32_gatherq_ps:
4895  case X86::BI__builtin_ia32_gatherq_ps256:
4896  case X86::BI__builtin_ia32_gatherd_q:
4897  case X86::BI__builtin_ia32_gatherd_q256:
4898  case X86::BI__builtin_ia32_gatherq_q:
4899  case X86::BI__builtin_ia32_gatherq_q256:
4900  case X86::BI__builtin_ia32_gatherd_d:
4901  case X86::BI__builtin_ia32_gatherd_d256:
4902  case X86::BI__builtin_ia32_gatherq_d:
4903  case X86::BI__builtin_ia32_gatherq_d256:
4904  case X86::BI__builtin_ia32_gather3div2df:
4905  case X86::BI__builtin_ia32_gather3div2di:
4906  case X86::BI__builtin_ia32_gather3div4df:
4907  case X86::BI__builtin_ia32_gather3div4di:
4908  case X86::BI__builtin_ia32_gather3div4sf:
4909  case X86::BI__builtin_ia32_gather3div4si:
4910  case X86::BI__builtin_ia32_gather3div8sf:
4911  case X86::BI__builtin_ia32_gather3div8si:
4912  case X86::BI__builtin_ia32_gather3siv2df:
4913  case X86::BI__builtin_ia32_gather3siv2di:
4914  case X86::BI__builtin_ia32_gather3siv4df:
4915  case X86::BI__builtin_ia32_gather3siv4di:
4916  case X86::BI__builtin_ia32_gather3siv4sf:
4917  case X86::BI__builtin_ia32_gather3siv4si:
4918  case X86::BI__builtin_ia32_gather3siv8sf:
4919  case X86::BI__builtin_ia32_gather3siv8si:
4920  case X86::BI__builtin_ia32_gathersiv8df:
4921  case X86::BI__builtin_ia32_gathersiv16sf:
4922  case X86::BI__builtin_ia32_gatherdiv8df:
4923  case X86::BI__builtin_ia32_gatherdiv16sf:
4924  case X86::BI__builtin_ia32_gathersiv8di:
4925  case X86::BI__builtin_ia32_gathersiv16si:
4926  case X86::BI__builtin_ia32_gatherdiv8di:
4927  case X86::BI__builtin_ia32_gatherdiv16si:
4928  case X86::BI__builtin_ia32_scatterdiv2df:
4929  case X86::BI__builtin_ia32_scatterdiv2di:
4930  case X86::BI__builtin_ia32_scatterdiv4df:
4931  case X86::BI__builtin_ia32_scatterdiv4di:
4932  case X86::BI__builtin_ia32_scatterdiv4sf:
4933  case X86::BI__builtin_ia32_scatterdiv4si:
4934  case X86::BI__builtin_ia32_scatterdiv8sf:
4935  case X86::BI__builtin_ia32_scatterdiv8si:
4936  case X86::BI__builtin_ia32_scattersiv2df:
4937  case X86::BI__builtin_ia32_scattersiv2di:
4938  case X86::BI__builtin_ia32_scattersiv4df:
4939  case X86::BI__builtin_ia32_scattersiv4di:
4940  case X86::BI__builtin_ia32_scattersiv4sf:
4941  case X86::BI__builtin_ia32_scattersiv4si:
4942  case X86::BI__builtin_ia32_scattersiv8sf:
4943  case X86::BI__builtin_ia32_scattersiv8si:
4944  case X86::BI__builtin_ia32_scattersiv8df:
4945  case X86::BI__builtin_ia32_scattersiv16sf:
4946  case X86::BI__builtin_ia32_scatterdiv8df:
4947  case X86::BI__builtin_ia32_scatterdiv16sf:
4948  case X86::BI__builtin_ia32_scattersiv8di:
4949  case X86::BI__builtin_ia32_scattersiv16si:
4950  case X86::BI__builtin_ia32_scatterdiv8di:
4951  case X86::BI__builtin_ia32_scatterdiv16si:
4952  ArgNum = 4;
4953  break;
4954  }
4955 
4956  llvm::APSInt Result;
4957 
4958  // We can't check the value of a dependent argument.
4959  Expr *Arg = TheCall->getArg(ArgNum);
4960  if (Arg->isTypeDependent() || Arg->isValueDependent())
4961  return false;
4962 
4963  // Check constant-ness first.
4964  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4965  return true;
4966 
4967  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
4968  return false;
4969 
4970  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
4971  << Arg->getSourceRange();
4972 }
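// For illustration: the scale immediate is the element stride in bytes, so a
// call like _mm512_i32gather_ps(index, base, 4) is accepted, while a scale of
// 3 is diagnosed with err_x86_builtin_invalid_scale.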
4973 
4974 enum { TileRegLow = 0, TileRegHigh = 7 };
4975 
4976 bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
4977  ArrayRef<int> ArgNums) {
4978  for (int ArgNum : ArgNums) {
4979  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
4980  return true;
4981  }
4982  return false;
4983 }
4984 
4985 bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
4986  ArrayRef<int> ArgNums) {
4987  // The maximum number of tile registers is TileRegHigh + 1, so use one bit
4988  // per register in the bitset to track which registers have been used.
4989  std::bitset<TileRegHigh + 1> ArgValues;
4990  for (int ArgNum : ArgNums) {
4991  Expr *Arg = TheCall->getArg(ArgNum);
4992  if (Arg->isTypeDependent() || Arg->isValueDependent())
4993  continue;
4994 
4995  llvm::APSInt Result;
4996  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4997  return true;
4998  int ArgExtValue = Result.getExtValue();
4999  assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
5000  "Incorrect tile register num.");
5001  if (ArgValues.test(ArgExtValue))
5002  return Diag(TheCall->getBeginLoc(),
5003  diag::err_x86_builtin_tile_arg_duplicate)
5004  << TheCall->getArg(ArgNum)->getSourceRange();
5005  ArgValues.set(ArgExtValue);
5006  }
5007  return false;
5008 }
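// For illustration: __builtin_ia32_tdpbssd(1, 1, 2) would be diagnosed here,
// since tile register 1 is used for two of the operands.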
5009 
5010 bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
5011  ArrayRef<int> ArgNums) {
5012  return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
5013  CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
5014 }
5015 
5016 bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
5017  switch (BuiltinID) {
5018  default:
5019  return false;
5020  case X86::BI__builtin_ia32_tileloadd64:
5021  case X86::BI__builtin_ia32_tileloaddt164:
5022  case X86::BI__builtin_ia32_tilestored64:
5023  case X86::BI__builtin_ia32_tilezero:
5024  return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
5025  case X86::BI__builtin_ia32_tdpbssd:
5026  case X86::BI__builtin_ia32_tdpbsud:
5027  case X86::BI__builtin_ia32_tdpbusd:
5028  case X86::BI__builtin_ia32_tdpbuud:
5029  case X86::BI__builtin_ia32_tdpbf16ps:
5030  return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
5031  }
5032 }
5033 static bool isX86_32Builtin(unsigned BuiltinID) {
5034  // These builtins only work on x86-32 targets.
5035  switch (BuiltinID) {
5036  case X86::BI__builtin_ia32_readeflags_u32:
5037  case X86::BI__builtin_ia32_writeeflags_u32:
5038  return true;
5039  }
5040 
5041  return false;
5042 }
5043 
5044 bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
5045  CallExpr *TheCall) {
5046  if (BuiltinID == X86::BI__builtin_cpu_supports)
5047  return SemaBuiltinCpuSupports(*this, TI, TheCall);
5048 
5049  if (BuiltinID == X86::BI__builtin_cpu_is)
5050  return SemaBuiltinCpuIs(*this, TI, TheCall);
5051 
5052  // Check for 32-bit only builtins on a 64-bit target.
5053  const llvm::Triple &TT = TI.getTriple();
5054  if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
5055  return Diag(TheCall->getCallee()->getBeginLoc(),
5056  diag::err_32_bit_builtin_64_bit_tgt);
5057 
5058  // If the intrinsic has rounding or SAE, make sure it is valid.
5059  if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
5060  return true;
5061 
5062  // If the intrinsic has a gather/scatter scale immediate, make sure it is valid.
5063  if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
5064  return true;
5065 
5066  // If the intrinsic has tile arguments, make sure they are valid.
5067  if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
5068  return true;
5069 
5070  // For intrinsics which take an immediate value as part of the instruction,
5071  // range check them here.
5072  int i = 0, l = 0, u = 0;
5073  switch (BuiltinID) {
5074  default:
5075  return false;
5076  case X86::BI__builtin_ia32_vec_ext_v2si:
5077  case X86::BI__builtin_ia32_vec_ext_v2di:
5078  case X86::BI__builtin_ia32_vextractf128_pd256:
5079  case X86::BI__builtin_ia32_vextractf128_ps256:
5080  case X86::BI__builtin_ia32_vextractf128_si256:
5081  case X86::BI__builtin_ia32_extract128i256:
5082  case X86::BI__builtin_ia32_extractf64x4_mask:
5083  case X86::BI__builtin_ia32_extracti64x4_mask:
5084  case X86::BI__builtin_ia32_extractf32x8_mask:
5085  case X86::BI__builtin_ia32_extracti32x8_mask:
5086  case X86::BI__builtin_ia32_extractf64x2_256_mask:
5087  case X86::BI__builtin_ia32_extracti64x2_256_mask:
5088  case X86::BI__builtin_ia32_extractf32x4_256_mask:
5089  case X86::BI__builtin_ia32_extracti32x4_256_mask:
5090  i = 1; l = 0; u = 1;
5091  break;
5092  case X86::BI__builtin_ia32_vec_set_v2di:
5093  case X86::BI__builtin_ia32_vinsertf128_pd256:
5094  case X86::BI__builtin_ia32_vinsertf128_ps256:
5095  case X86::BI__builtin_ia32_vinsertf128_si256:
5096  case X86::BI__builtin_ia32_insert128i256:
5097  case X86::BI__builtin_ia32_insertf32x8:
5098  case X86::BI__builtin_ia32_inserti32x8:
5099  case X86::BI__builtin_ia32_insertf64x4:
5100  case X86::BI__builtin_ia32_inserti64x4:
5101  case X86::BI__builtin_ia32_insertf64x2_256:
5102  case X86::BI__builtin_ia32_inserti64x2_256:
5103  case X86::BI__builtin_ia32_insertf32x4_256:
5104  case X86::BI__builtin_ia32_inserti32x4_256:
5105  i = 2; l = 0; u = 1;
5106  break;
5107  case X86::BI__builtin_ia32_vpermilpd:
5108  case X86::BI__builtin_ia32_vec_ext_v4hi:
5109  case X86::BI__builtin_ia32_vec_ext_v4si:
5110  case X86::BI__builtin_ia32_vec_ext_v4sf:
5111  case X86::BI__builtin_ia32_vec_ext_v4di:
5112  case X86::BI__builtin_ia32_extractf32x4_mask:
5113  case X86::BI__builtin_ia32_extracti32x4_mask:
5114  case X86::BI__builtin_ia32_extractf64x2_512_mask:
5115  case X86::BI__builtin_ia32_extracti64x2_512_mask:
5116  i = 1; l = 0; u = 3;
5117  break;
5118  case X86::BI_mm_prefetch:
5119  case X86::BI__builtin_ia32_vec_ext_v8hi:
5120  case X86::BI__builtin_ia32_vec_ext_v8si:
5121  i = 1; l = 0; u = 7;
5122  break;
5123  case X86::BI__builtin_ia32_sha1rnds4:
5124  case X86::BI__builtin_ia32_blendpd:
5125  case X86::BI__builtin_ia32_shufpd:
5126  case X86::BI__builtin_ia32_vec_set_v4hi:
5127  case X86::BI__builtin_ia32_vec_set_v4si:
5128  case X86::BI__builtin_ia32_vec_set_v4di:
5129  case X86::BI__builtin_ia32_shuf_f32x4_256:
5130  case X86::BI__builtin_ia32_shuf_f64x2_256:
5131  case X86::BI__builtin_ia32_shuf_i32x4_256:
5132  case X86::BI__builtin_ia32_shuf_i64x2_256:
5133  case X86::BI__builtin_ia32_insertf64x2_512:
5134  case X86::BI__builtin_ia32_inserti64x2_512:
5135  case X86::BI__builtin_ia32_insertf32x4:
5136  case X86::BI__builtin_ia32_inserti32x4:
5137  i = 2; l = 0; u = 3;
5138  break;
5139  case X86::BI__builtin_ia32_vpermil2pd:
5140  case X86::BI__builtin_ia32_vpermil2pd256:
5141  case X86::BI__builtin_ia32_vpermil2ps:
5142  case X86::BI__builtin_ia32_vpermil2ps256:
5143  i = 3; l = 0; u = 3;
5144  break;
5145  case X86::BI__builtin_ia32_cmpb128_mask:
5146  case X86::BI__builtin_ia32_cmpw128_mask:
5147  case X86::BI__builtin_ia32_cmpd128_mask:
5148  case X86::BI__builtin_ia32_cmpq128_mask:
5149  case X86::BI__builtin_ia32_cmpb256_mask:
5150  case X86::BI__builtin_ia32_cmpw256_mask:
5151  case X86::BI__builtin_ia32_cmpd256_mask:
5152  case X86::BI__builtin_ia32_cmpq256_mask:
5153  case X86::BI__builtin_ia32_cmpb512_mask:
5154  case X86::BI__builtin_ia32_cmpw512_mask:
5155  case X86::BI__builtin_ia32_cmpd512_mask:
5156  case X86::BI__builtin_ia32_cmpq512_mask:
5157  case X86::BI__builtin_ia32_ucmpb128_mask:
5158  case X86::BI__builtin_ia32_ucmpw128_mask:
5159  case X86::BI__builtin_ia32_ucmpd128_mask:
5160  case X86::BI__builtin_ia32_ucmpq128_mask:
5161  case X86::BI__builtin_ia32_ucmpb256_mask:
5162  case X86::BI__builtin_ia32_ucmpw256_mask:
5163  case X86::BI__builtin_ia32_ucmpd256_mask:
5164  case X86::BI__builtin_ia32_ucmpq256_mask:
5165  case X86::BI__builtin_ia32_ucmpb512_mask:
5166  case X86::BI__builtin_ia32_ucmpw512_mask:
5167  case X86::BI__builtin_ia32_ucmpd512_mask:
5168  case X86::BI__builtin_ia32_ucmpq512_mask:
5169  case X86::BI__builtin_ia32_vpcomub:
5170  case X86::BI__builtin_ia32_vpcomuw:
5171  case X86::BI__builtin_ia32_vpcomud:
5172  case X86::BI__builtin_ia32_vpcomuq:
5173  case X86::BI__builtin_ia32_vpcomb:
5174  case X86::BI__builtin_ia32_vpcomw:
5175  case X86::BI__builtin_ia32_vpcomd:
5176  case X86::BI__builtin_ia32_vpcomq:
5177  case X86::BI__builtin_ia32_vec_set_v8hi:
5178  case X86::BI__builtin_ia32_vec_set_v8si:
5179  i = 2; l = 0; u = 7;
5180  break;
5181  case X86::BI__builtin_ia32_vpermilpd256:
5182  case X86::BI__builtin_ia32_roundps:
5183  case X86::BI__builtin_ia32_roundpd:
5184  case X86::BI__builtin_ia32_roundps256:
5185  case X86::BI__builtin_ia32_roundpd256:
5186  case X86::BI__builtin_ia32_getmantpd128_mask:
5187  case X86::BI__builtin_ia32_getmantpd256_mask:
5188  case X86::BI__builtin_ia32_getmantps128_mask:
5189  case X86::BI__builtin_ia32_getmantps256_mask:
5190  case X86::BI__builtin_ia32_getmantpd512_mask:
5191  case X86::BI__builtin_ia32_getmantps512_mask:
5192  case X86::BI__builtin_ia32_getmantph128_mask:
5193  case X86::BI__builtin_ia32_getmantph256_mask:
5194  case X86::BI__builtin_ia32_getmantph512_mask:
5195  case X86::BI__builtin_ia32_vec_ext_v16qi:
5196  case X86::BI__builtin_ia32_vec_ext_v16hi:
5197  i = 1; l = 0; u = 15;
5198  break;
5199  case X86::BI__builtin_ia32_pblendd128:
5200  case X86::BI__builtin_ia32_blendps:
5201  case X86::BI__builtin_ia32_blendpd256:
5202  case X86::BI__builtin_ia32_shufpd256:
5203  case X86::BI__builtin_ia32_roundss:
5204  case X86::BI__builtin_ia32_roundsd:
5205  case X86::BI__builtin_ia32_rangepd128_mask:
5206  case X86::BI__builtin_ia32_rangepd256_mask:
5207  case X86::BI__builtin_ia32_rangepd512_mask:
5208  case X86::BI__builtin_ia32_rangeps128_mask:
5209  case X86::BI__builtin_ia32_rangeps256_mask:
5210  case X86::BI__builtin_ia32_rangeps512_mask:
5211  case X86::BI__builtin_ia32_getmantsd_round_mask:
5212  case X86::BI__builtin_ia32_getmantss_round_mask:
5213  case X86::BI__builtin_ia32_getmantsh_round_mask:
5214  case X86::BI__builtin_ia32_vec_set_v16qi:
5215  case X86::BI__builtin_ia32_vec_set_v16hi:
5216  i = 2; l = 0; u = 15;
5217  break;
5218  case X86::BI__builtin_ia32_vec_ext_v32qi:
5219  i = 1; l = 0; u = 31;
5220  break;
5221  case X86::BI__builtin_ia32_cmpps:
5222  case X86::BI__builtin_ia32_cmpss:
5223  case X86::BI__builtin_ia32_cmppd:
5224  case X86::BI__builtin_ia32_cmpsd:
5225  case X86::BI__builtin_ia32_cmpps256:
5226  case X86::BI__builtin_ia32_cmppd256:
5227  case X86::BI__builtin_ia32_cmpps128_mask:
5228  case X86::BI__builtin_ia32_cmppd128_mask:
5229  case X86::BI__builtin_ia32_cmpps256_mask:
5230  case X86::BI__builtin_ia32_cmppd256_mask:
5231  case X86::BI__builtin_ia32_cmpps512_mask:
5232  case X86::BI__builtin_ia32_cmppd512_mask:
5233  case X86::BI__builtin_ia32_cmpsd_mask:
5234  case X86::BI__builtin_ia32_cmpss_mask:
5235  case X86::BI__builtin_ia32_vec_set_v32qi:
5236  i = 2; l = 0; u = 31;
5237  break;
5238  case X86::BI__builtin_ia32_permdf256:
5239  case X86::BI__builtin_ia32_permdi256:
5240  case X86::BI__builtin_ia32_permdf512:
5241  case X86::BI__builtin_ia32_permdi512:
5242  case X86::BI__builtin_ia32_vpermilps:
5243  case X86::BI__builtin_ia32_vpermilps256:
5244  case X86::BI__builtin_ia32_vpermilpd512:
5245  case X86::BI__builtin_ia32_vpermilps512:
5246  case X86::BI__builtin_ia32_pshufd:
5247  case X86::BI__builtin_ia32_pshufd256:
5248  case X86::BI__builtin_ia32_pshufd512:
5249  case X86::BI__builtin_ia32_pshufhw:
5250  case X86::BI__builtin_ia32_pshufhw256:
5251  case X86::BI__builtin_ia32_pshufhw512:
5252  case X86::BI__builtin_ia32_pshuflw:
5253  case X86::BI__builtin_ia32_pshuflw256:
5254  case X86::BI__builtin_ia32_pshuflw512:
5255  case X86::BI__builtin_ia32_vcvtps2ph:
5256  case X86::BI__builtin_ia32_vcvtps2ph_mask:
5257  case X86::BI__builtin_ia32_vcvtps2ph256:
5258  case X86::BI__builtin_ia32_vcvtps2ph256_mask:
5259  case X86::BI__builtin_ia32_vcvtps2ph512_mask:
5260  case X86::BI__builtin_ia32_rndscaleps_128_mask:
5261  case X86::BI__builtin_ia32_rndscalepd_128_mask:
5262  case X86::BI__builtin_ia32_rndscaleps_256_mask:
5263  case X86::BI__builtin_ia32_rndscalepd_256_mask:
5264  case X86::BI__builtin_ia32_rndscaleps_mask:
5265  case X86::BI__builtin_ia32_rndscalepd_mask:
5266  case X86::BI__builtin_ia32_rndscaleph_mask:
5267  case X86::BI__builtin_ia32_reducepd128_mask:
5268  case X86::BI__builtin_ia32_reducepd256_mask:
5269  case X86::BI__builtin_ia32_reducepd512_mask:
5270  case X86::BI__builtin_ia32_reduceps128_mask:
5271  case X86::BI__builtin_ia32_reduceps256_mask:
5272  case X86::BI__builtin_ia32_reduceps512_mask:
5273  case X86::BI__builtin_ia32_reduceph128_mask:
5274  case X86::BI__builtin_ia32_reduceph256_mask:
5275  case X86::BI__builtin_ia32_reduceph512_mask:
5276  case X86::BI__builtin_ia32_prold512:
5277  case X86::BI__builtin_ia32_prolq512:
5278  case X86::BI__builtin_ia32_prold128:
5279  case X86::BI__builtin_ia32_prold256:
5280  case X86::BI__builtin_ia32_prolq128:
5281  case X86::BI__builtin_ia32_prolq256:
5282  case X86::BI__builtin_ia32_prord512:
5283  case X86::BI__builtin_ia32_prorq512:
5284  case X86::BI__builtin_ia32_prord128:
5285  case X86::BI__builtin_ia32_prord256:
5286  case X86::BI__builtin_ia32_prorq128:
5287  case X86::BI__builtin_ia32_prorq256:
5288  case X86::BI__builtin_ia32_fpclasspd128_mask:
5289  case X86::BI__builtin_ia32_fpclasspd256_mask:
5290  case X86::BI__builtin_ia32_fpclassps128_mask:
5291  case X86::BI__builtin_ia32_fpclassps256_mask:
5292  case X86::BI__builtin_ia32_fpclassps512_mask:
5293  case X86::BI__builtin_ia32_fpclasspd512_mask:
5294  case X86::BI__builtin_ia32_fpclassph128_mask:
5295  case X86::BI__builtin_ia32_fpclassph256_mask:
5296  case X86::BI__builtin_ia32_fpclassph512_mask:
5297  case X86::BI__builtin_ia32_fpclasssd_mask:
5298  case X86::BI__builtin_ia32_fpclassss_mask:
5299  case X86::BI__builtin_ia32_fpclasssh_mask:
5300  case X86::BI__builtin_ia32_pslldqi128_byteshift:
5301  case X86::BI__builtin_ia32_pslldqi256_byteshift:
5302  case X86::BI__builtin_ia32_pslldqi512_byteshift:
5303  case X86::BI__builtin_ia32_psrldqi128_byteshift:
5304  case X86::BI__builtin_ia32_psrldqi256_byteshift:
5305  case X86::BI__builtin_ia32_psrldqi512_byteshift:
5306  case X86::BI__builtin_ia32_kshiftliqi:
5307  case X86::BI__builtin_ia32_kshiftlihi:
5308  case X86::BI__builtin_ia32_kshiftlisi:
5309  case X86::BI__builtin_ia32_kshiftlidi:
5310  case X86::BI__builtin_ia32_kshiftriqi:
5311  case X86::BI__builtin_ia32_kshiftrihi:
5312  case X86::BI__builtin_ia32_kshiftrisi:
5313  case X86::BI__builtin_ia32_kshiftridi:
5314  i = 1; l = 0; u = 255;
5315  break;
5316  case X86::BI__builtin_ia32_vperm2f128_pd256:
5317  case X86::BI__builtin_ia32_vperm2f128_ps256:
5318  case X86::BI__builtin_ia32_vperm2f128_si256:
5319  case X86::BI__builtin_ia32_permti256:
5320  case X86::BI__builtin_ia32_pblendw128:
5321  case X86::BI__builtin_ia32_pblendw256:
5322  case X86::BI__builtin_ia32_blendps256:
5323  case X86::BI__builtin_ia32_pblendd256:
5324  case X86::BI__builtin_ia32_palignr128:
5325  case X86::BI__builtin_ia32_palignr256:
5326  case X86::BI__builtin_ia32_palignr512:
5327  case X86::BI__builtin_ia32_alignq512:
5328  case X86::BI__builtin_ia32_alignd512:
5329  case X86::BI__builtin_ia32_alignd128:
5330  case X86::BI__builtin_ia32_alignd256:
5331  case X86::BI__builtin_ia32_alignq128:
5332  case X86::BI__builtin_ia32_alignq256:
5333  case X86::BI__builtin_ia32_vcomisd:
5334  case X86::BI__builtin_ia32_vcomiss:
5335  case X86::BI__builtin_ia32_shuf_f32x4:
5336  case X86::BI__builtin_ia32_shuf_f64x2:
5337  case X86::BI__builtin_ia32_shuf_i32x4:
5338  case X86::BI__builtin_ia32_shuf_i64x2:
5339  case X86::BI__builtin_ia32_shufpd512:
5340  case X86::BI__builtin_ia32_shufps:
5341  case X86::BI__builtin_ia32_shufps256:
5342  case X86::BI__builtin_ia32_shufps512:
5343  case X86::BI__builtin_ia32_dbpsadbw128:
5344  case X86::BI__builtin_ia32_dbpsadbw256:
5345  case X86::BI__builtin_ia32_dbpsadbw512:
5346  case X86::BI__builtin_ia32_vpshldd128:
5347  case X86::BI__builtin_ia32_vpshldd256:
5348  case X86::BI__builtin_ia32_vpshldd512:
5349  case X86::BI__builtin_ia32_vpshldq128:
5350  case X86::BI__builtin_ia32_vpshldq256:
5351  case X86::BI__builtin_ia32_vpshldq512:
5352  case X86::BI__builtin_ia32_vpshldw128:
5353  case X86::BI__builtin_ia32_vpshldw256:
5354  case X86::BI__builtin_ia32_vpshldw512:
5355  case X86::BI__builtin_ia32_vpshrdd128:
5356  case X86::BI__builtin_ia32_vpshrdd256:
5357  case X86::BI__builtin_ia32_vpshrdd512:
5358  case X86::BI__builtin_ia32_vpshrdq128:
5359  case X86::BI__builtin_ia32_vpshrdq256:
5360  case X86::BI__builtin_ia32_vpshrdq512:
5361  case X86::BI__builtin_ia32_vpshrdw128:
5362  case X86::BI__builtin_ia32_vpshrdw256:
5363  case X86::BI__builtin_ia32_vpshrdw512:
5364  i = 2; l = 0; u = 255;
5365  break;
5366  case X86::BI__builtin_ia32_fixupimmpd512_mask:
5367  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
5368  case X86::BI__builtin_ia32_fixupimmps512_mask:
5369  case X86::BI__builtin_ia32_fixupimmps512_maskz:
5370  case X86::BI__builtin_ia32_fixupimmsd_mask:
5371  case X86::BI__builtin_ia32_fixupimmsd_maskz:
5372  case X86::BI__builtin_ia32_fixupimmss_mask:
5373  case X86::BI__builtin_ia32_fixupimmss_maskz:
5374  case X86::BI__builtin_ia32_fixupimmpd128_mask:
5375  case X86::BI__builtin_ia32_fixupimmpd128_maskz:
5376  case X86::BI__builtin_ia32_fixupimmpd256_mask:
5377  case X86::BI__builtin_ia32_fixupimmpd256_maskz:
5378  case X86::BI__builtin_ia32_fixupimmps128_mask:
5379  case X86::BI__builtin_ia32_fixupimmps128_maskz:
5380  case X86::BI__builtin_ia32_fixupimmps256_mask:
5381  case X86::BI__builtin_ia32_fixupimmps256_maskz:
5382  case X86::BI__builtin_ia32_pternlogd512_mask:
5383  case X86::BI__builtin_ia32_pternlogd512_maskz:
5384  case X86::BI__builtin_ia32_pternlogq512_mask:
5385  case X86::BI__builtin_ia32_pternlogq512_maskz:
5386  case X86::BI__builtin_ia32_pternlogd128_mask:
5387  case X86::BI__builtin_ia32_pternlogd128_maskz:
5388  case X86::BI__builtin_ia32_pternlogd256_mask:
5389  case X86::BI__builtin_ia32_pternlogd256_maskz:
5390  case X86::BI__builtin_ia32_pternlogq128_mask:
5391  case X86::BI__builtin_ia32_pternlogq128_maskz:
5392  case X86::BI__builtin_ia32_pternlogq256_mask:
5393  case X86::BI__builtin_ia32_pternlogq256_maskz:
5394  i = 3; l = 0; u = 255;
5395  break;
5396  case X86::BI__builtin_ia32_gatherpfdpd:
5397  case X86::BI__builtin_ia32_gatherpfdps:
5398  case X86::BI__builtin_ia32_gatherpfqpd:
5399  case X86::BI__builtin_ia32_gatherpfqps:
5400  case X86::BI__builtin_ia32_scatterpfdpd:
5401  case X86::BI__builtin_ia32_scatterpfdps:
5402  case X86::BI__builtin_ia32_scatterpfqpd:
5403  case X86::BI__builtin_ia32_scatterpfqps:
5404  i = 4; l = 2; u = 3;
5405  break;
5406  case X86::BI__builtin_ia32_reducesd_mask:
5407  case X86::BI__builtin_ia32_reducess_mask:
5408  case X86::BI__builtin_ia32_rndscalesd_round_mask:
5409  case X86::BI__builtin_ia32_rndscaless_round_mask:
5410  case X86::BI__builtin_ia32_rndscalesh_round_mask:
5411  case X86::BI__builtin_ia32_reducesh_mask:
5412  i = 4; l = 0; u = 255;
5413  break;
5414  }
5415 
5416  // Note that we don't force a hard error on the range check here, allowing
5417  // template-generated or macro-generated dead code to potentially have out-of-
5418  // range values. Such code still needs to be code-generated, but doesn't need
5419  // to make any sense. We use a warning that defaults to an error.
5420  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
5421 }
5422 
5423  /// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
5424 /// parameter with the FormatAttr's correct format_idx and firstDataArg.
5425 /// Returns true when the format fits the function and the FormatStringInfo has
5426 /// been populated.
5427 bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
5428  FormatStringInfo *FSI) {
5429  FSI->HasVAListArg = Format->getFirstArg() == 0;
5430  FSI->FormatIdx = Format->getFormatIdx() - 1;
5431  FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;
5432 
5433  // The way the format attribute works in GCC, the implicit this argument
5434  // of member functions is counted. However, it doesn't appear in our own
5435  // lists, so decrement format_idx in that case.
5436  if (IsCXXMember) {
5437  if (FSI->FormatIdx == 0)
5438  return false;
5439  --FSI->FormatIdx;
5440  if (FSI->FirstDataArg != 0)
5441  --FSI->FirstDataArg;
5442  }
5443  return true;
5444 }
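// For illustration: on a member function annotated with
//   __attribute__((format(printf, 2, 3)))
// GCC counts the implicit 'this' as argument 1, so after the adjustment above
// FormatIdx becomes 0 and FirstDataArg becomes 1.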
5445 
5446  /// Checks if the given expression evaluates to null.
5447 ///
5448 /// Returns true if the value evaluates to null.
5449 static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
5450  // If the expression has non-null type, it doesn't evaluate to null.
5451  if (auto nullability
5452  = Expr->IgnoreParenCasts()->getType()->getNullability(S.Context)) {
5453  if (*nullability == NullabilityKind::NonNull)
5454  return false;
5455  }
5456 
5457  // As a special case, transparent unions initialized with zero are
5458  // considered null for the purposes of the nonnull attribute.
5459  if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
5460  if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
5461  if (const CompoundLiteralExpr *CLE =
5462  dyn_cast<CompoundLiteralExpr>(Expr))
5463  if (const InitListExpr *ILE =
5464  dyn_cast<InitListExpr>(CLE->getInitializer()))
5465  Expr = ILE->getInit(0);
5466  }
5467 
5468  bool Result;
5469  return (!Expr->isValueDependent() &&
5470  Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
5471  !Result);
5472 }
5473 
5474 static void CheckNonNullArgument(Sema &S,
5475  const Expr *ArgExpr,
5476  SourceLocation CallSiteLoc) {
5477  if (CheckNonNullExpr(S, ArgExpr))
5478  S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
5479  S.PDiag(diag::warn_null_arg)
5480  << ArgExpr->getSourceRange());
5481 }
5482 
5483 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
5484  FormatStringInfo FSI;
5485  if ((GetFormatStringType(Format) == FST_NSString) &&
5486  getFormatStringInfo(Format, false, &FSI)) {
5487  Idx = FSI.FormatIdx;
5488  return true;
5489  }
5490  return false;
5491 }
5492 
5493 /// Diagnose use of %s directive in an NSString which is being passed
5494 /// as formatting string to formatting method.
5495 static void
5496 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
5497  const NamedDecl *FDecl,
5498  Expr **Args,
5499  unsigned NumArgs) {
5500  unsigned Idx = 0;
5501  bool Format = false;
5502  ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
5503  if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
5504  Idx = 2;
5505  Format = true;
5506  }
5507  else
5508  for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
5509  if (S.GetFormatNSStringIdx(I, Idx)) {
5510  Format = true;
5511  break;
5512  }
5513  }
5514  if (!Format || NumArgs <= Idx)
5515  return;
5516  const Expr *FormatExpr = Args[Idx];
5517  if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
5518  FormatExpr = CSCE->getSubExpr();
5519  const StringLiteral *FormatString;
5520  if (const ObjCStringLiteral *OSL =
5521  dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
5522  FormatString = OSL->getString();
5523  else
5524  FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
5525  if (!FormatString)
5526  return;
5527  if (S.FormatStringHasSArg(FormatString)) {
5528  S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
5529  << "%s" << 1 << 1;
5530  S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
5531  << FDecl->getDeclName();
5532  }
5533 }
5534 
5535 /// Determine whether the given type has a non-null nullability annotation.
5536 static bool isNonNullType(ASTContext &ctx, QualType type) {
5537  if (auto nullability = type->getNullability(ctx))
5538  return *nullability == NullabilityKind::NonNull;
5539 
5540  return false;
5541 }
5542 
5543 static void CheckNonNullArguments(Sema &S,
5544  const NamedDecl *FDecl,
5545  const FunctionProtoType *Proto,
5546  ArrayRef<const Expr *> Args,
5547  SourceLocation CallSiteLoc) {
5548  assert((FDecl || Proto) && "Need a function declaration or prototype");
5549 
5550  // Already checked by the constant evaluator.
5551  if (S.isConstantEvaluated())
5552  return;
5553  // Check the attributes attached to the method/function itself.
5554  llvm::SmallBitVector NonNullArgs;
5555  if (FDecl) {
5556  // Handle the nonnull attribute on the function/method declaration itself.
5557  for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
5558  if (!NonNull->args_size()) {
5559  // Easy case: all pointer arguments are nonnull.
5560  for (const auto *Arg : Args)
5561  if (S.isValidPointerAttrType(Arg->getType()))
5562  CheckNonNullArgument(S, Arg, CallSiteLoc);
5563  return;
5564  }
5565 
5566  for (const ParamIdx &Idx : NonNull->args()) {
5567  unsigned IdxAST = Idx.getASTIndex();
5568  if (IdxAST >= Args.size())
5569  continue;
5570  if (NonNullArgs.empty())
5571  NonNullArgs.resize(Args.size());
5572  NonNullArgs.set(IdxAST);
5573  }
5574  }
5575  }
5576 
5577  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
5578  // Handle the nonnull attribute on the parameters of the
5579  // function/method.
5580  ArrayRef<ParmVarDecl*> parms;
5581  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
5582  parms = FD->parameters();
5583  else
5584  parms = cast<ObjCMethodDecl>(FDecl)->parameters();
5585 
5586  unsigned ParamIndex = 0;
5587  for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
5588  I != E; ++I, ++ParamIndex) {
5589  const ParmVarDecl *PVD = *I;
5590  if (PVD->hasAttr<NonNullAttr>() ||
5591  isNonNullType(S.Context, PVD->getType())) {
5592  if (NonNullArgs.empty())
5593  NonNullArgs.resize(Args.size());
5594 
5595  NonNullArgs.set(ParamIndex);
5596  }
5597  }
5598  } else {
5599  // If we have a non-function, non-method declaration but no
5600  // function prototype, try to dig out the function prototype.
5601  if (!Proto) {
5602  if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
5603  QualType type = VD->getType().getNonReferenceType();
5604  if (auto pointerType = type->getAs<PointerType>())
5605  type = pointerType->getPointeeType();
5606  else if (auto blockType = type->getAs<BlockPointerType>())
5607  type = blockType->getPointeeType();
5608  // FIXME: data member pointers?
5609 
5610  // Dig out the function prototype, if there is one.
5611  Proto = type->getAs<FunctionProtoType>();
5612  }
5613  }
5614 
5615  // Fill in non-null argument information from the nullability
5616  // information on the parameter types (if we have them).
5617  if (Proto) {
5618  unsigned Index = 0;
5619  for (auto paramType : Proto->getParamTypes()) {
5620  if (isNonNullType(S.Context, paramType)) {
5621  if (NonNullArgs.empty())
5622  NonNullArgs.resize(Args.size());
5623 
5624  NonNullArgs.set(Index);
5625  }
5626 
5627  ++Index;
5628  }
5629  }
5630  }
5631 
5632  // Check for non-null arguments.
5633  for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
5634  ArgIndex != ArgIndexEnd; ++ArgIndex) {
5635  if (NonNullArgs[ArgIndex])
5636  CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc);
5637  }
5638 }
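// For illustration: given a declaration such as
//   void f(int *p) __attribute__((nonnull(1)));
// a call f(nullptr) is flagged via CheckNonNullArgument with warn_null_arg.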
5639 
5640 /// Warn if a pointer or reference argument passed to a function points to an
5641 /// object that is less aligned than the parameter. This can happen when
5642 /// creating a typedef with a lower alignment than the original type and then
5643 /// calling functions defined in terms of the original type.
5644 void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
5645  StringRef ParamName, QualType ArgTy,
5646  QualType ParamTy) {
5647 
5648  // This check only applies when the parameter is a pointer or reference type.
5649  if (!ParamTy->isPointerType() && !ParamTy->isReferenceType())
5650  return;
5651 
5652  // If the parameter is a pointer type, get the pointee type for the
5653  // argument too. If the parameter is a reference type, don't try to get
5654  // the pointee type for the argument.
5655  if (ParamTy->isPointerType())
5656  ArgTy = ArgTy->getPointeeType();
5657 
5658  // Remove reference or pointer
5659  ParamTy = ParamTy->getPointeeType();
5660 
5661  // Find the expected alignment and the actual alignment of the passed object.
5662  // getTypeAlignInChars requires complete types.
5663  if (ArgTy.isNull() || ParamTy->isIncompleteType() ||
5664  ArgTy->isIncompleteType() || ParamTy->isUndeducedType() ||
5665  ArgTy->isUndeducedType())
5666  return;
5667 
5668  CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy);
5669  CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy);
5670 
5671  // If the argument is less aligned than the parameter, there is a
5672  // potential alignment issue.
5673  if (ArgAlign < ParamAlign)
5674  Diag(Loc, diag::warn_param_mismatched_alignment)
5675  << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity()
5676  << ParamName << (FDecl != nullptr) << FDecl;
5677 }
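// For illustration (hypothetical types, typical 64-bit target): with
//   typedef double Double2 __attribute__((aligned(2)));
//   void sink(double *);
// passing a Double2 * argument to sink() warns, because the argument's pointee
// alignment (2) is smaller than the parameter's pointee alignment (8).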
5678 
5679 /// Handles the checks for format strings, non-POD arguments to vararg
5680 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if
5681 /// attributes.
5682 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
5683  const Expr *ThisArg, ArrayRef<const Expr *> Args,
5684  bool IsMemberFunction, SourceLocation Loc,
5685  SourceRange Range, VariadicCallType CallType) {
5686  // FIXME: We should check as much as we can in the template definition.
5687  if (CurContext->isDependentContext())
5688  return;
5689 
5690  // Printf and scanf checking.
5691  llvm::SmallBitVector CheckedVarArgs;
5692  if (FDecl) {
5693  for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
5694  // Only create vector if there are format attributes.
5695  CheckedVarArgs.resize(Args.size());
5696 
5697  CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
5698  CheckedVarArgs);
5699  }
5700  }
5701 
5702  // Diagnose non-POD arguments passed through the variadic part of the call
5703  // that weren't already covered by the format string checks above.
5704  auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
5705  if (CallType != VariadicDoesNotApply &&
5706  (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
5707  unsigned NumParams = Proto ? Proto->getNumParams()
5708  : FDecl && isa<FunctionDecl>(FDecl)
5709  ? cast<FunctionDecl>(FDecl)->getNumParams()
5710  : FDecl && isa<ObjCMethodDecl>(FDecl)
5711  ? cast<ObjCMethodDecl>(FDecl)->param_size()
5712  : 0;
5713 
5714  for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
5715  // Args[ArgIdx] can be null in malformed code.
5716  if (const Expr *Arg = Args[ArgIdx]) {
5717  if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
5718  checkVariadicArgument(Arg, CallType);
5719  }
5720  }
5721  }
5722 
5723  if (FDecl || Proto) {
5724  CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);
5725 
5726  // Type safety checking.
5727  if (FDecl) {
5728  for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
5729  CheckArgumentWithTypeTag(I, Args, Loc);
5730  }
5731  }
5732 
5733  // Check that passed arguments match the alignment of original arguments.
5734  // Try to get the missing prototype from the declaration.
5735  if (!Proto && FDecl) {
5736  const auto *FT = FDecl->getFunctionType();
5737  if (isa_and_nonnull<FunctionProtoType>(FT))
5738  Proto = cast<FunctionProtoType>(FT);
5739  }
5740  if (Proto) {
5741  // For variadic functions, we may have more arguments than parameters.
5742  // For some K&R functions, we may have fewer arguments than parameters.
5743  const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size());
5744  for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) {
5745  // Args[ArgIdx] can be null in malformed code.
5746  if (const Expr *Arg = Args[ArgIdx]) {
5747  if (Arg->containsErrors())
5748  continue;
5749 
5750  QualType ParamTy = Proto->getParamType(ArgIdx);
5751  QualType ArgTy = Arg->getType();
5752  CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1),
5753  ArgTy, ParamTy);
5754  }
5755  }
5756  }
5757 
5758  if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
5759  auto *AA = FDecl->getAttr<AllocAlignAttr>();
5760  const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
5761  if (!Arg->isValueDependent()) {
5762  Expr::EvalResult Align;
5763  if (Arg->EvaluateAsInt(Align, Context)) {
5764  const llvm::APSInt &I = Align.Val.getInt();
5765  if (!I.isPowerOf2())
5766  Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two)
5767  << Arg->getSourceRange();
5768 
5769  if (I > Sema::MaximumAlignment)
5770  Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great)
5771  << Arg->getSourceRange() << Sema::MaximumAlignment;
5772  }
5773  }
5774  }
5775 
5776  if (FD)
5777  diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
5778 }
5779 
5780 /// CheckConstructorCall - Check a constructor call for correctness and safety
5781 /// properties not enforced by the C type system.
5782 void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
5783  ArrayRef<const Expr *> Args,
5784  const FunctionProtoType *Proto,
5785