// Extracted from a Doxygen source listing of clang 14.0.0git, SemaChecking.cpp.
// ("Go to the documentation of this file." is leftover page chrome; embedded
// per-line numbers below are the original file's line numbers.)
1 //===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements extra semantic analysis beyond what is enforced
10 // by the C type system.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "clang/AST/APValue.h"
15 #include "clang/AST/ASTContext.h"
16 #include "clang/AST/Attr.h"
17 #include "clang/AST/AttrIterator.h"
18 #include "clang/AST/CharUnits.h"
19 #include "clang/AST/Decl.h"
20 #include "clang/AST/DeclBase.h"
21 #include "clang/AST/DeclCXX.h"
22 #include "clang/AST/DeclObjC.h"
25 #include "clang/AST/Expr.h"
26 #include "clang/AST/ExprCXX.h"
27 #include "clang/AST/ExprObjC.h"
28 #include "clang/AST/ExprOpenMP.h"
29 #include "clang/AST/FormatString.h"
30 #include "clang/AST/NSAPI.h"
33 #include "clang/AST/RecordLayout.h"
34 #include "clang/AST/Stmt.h"
35 #include "clang/AST/TemplateBase.h"
36 #include "clang/AST/Type.h"
37 #include "clang/AST/TypeLoc.h"
40 #include "clang/Basic/CharInfo.h"
41 #include "clang/Basic/Diagnostic.h"
43 #include "clang/Basic/LLVM.h"
50 #include "clang/Basic/Specifiers.h"
51 #include "clang/Basic/SyncScope.h"
54 #include "clang/Basic/TargetInfo.h"
55 #include "clang/Basic/TypeTraits.h"
56 #include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
58 #include "clang/Sema/Lookup.h"
59 #include "clang/Sema/Ownership.h"
60 #include "clang/Sema/Scope.h"
61 #include "clang/Sema/ScopeInfo.h"
62 #include "clang/Sema/Sema.h"
64 #include "llvm/ADT/APFloat.h"
65 #include "llvm/ADT/APInt.h"
66 #include "llvm/ADT/APSInt.h"
67 #include "llvm/ADT/ArrayRef.h"
68 #include "llvm/ADT/DenseMap.h"
69 #include "llvm/ADT/FoldingSet.h"
70 #include "llvm/ADT/None.h"
71 #include "llvm/ADT/Optional.h"
72 #include "llvm/ADT/STLExtras.h"
73 #include "llvm/ADT/SmallBitVector.h"
74 #include "llvm/ADT/SmallPtrSet.h"
75 #include "llvm/ADT/SmallString.h"
76 #include "llvm/ADT/SmallVector.h"
77 #include "llvm/ADT/StringRef.h"
78 #include "llvm/ADT/StringSet.h"
79 #include "llvm/ADT/StringSwitch.h"
80 #include "llvm/ADT/Triple.h"
81 #include "llvm/Support/AtomicOrdering.h"
82 #include "llvm/Support/Casting.h"
83 #include "llvm/Support/Compiler.h"
84 #include "llvm/Support/ConvertUTF.h"
85 #include "llvm/Support/ErrorHandling.h"
86 #include "llvm/Support/Format.h"
87 #include "llvm/Support/Locale.h"
88 #include "llvm/Support/MathExtras.h"
89 #include "llvm/Support/SaveAndRestore.h"
90 #include "llvm/Support/raw_ostream.h"
91 #include <algorithm>
92 #include <bitset>
93 #include <cassert>
94 #include <cctype>
95 #include <cstddef>
96 #include <cstdint>
97 #include <functional>
98 #include <limits>
99 #include <string>
100 #include <tuple>
101 #include <utility>
102 
103 using namespace clang;
104 using namespace sema;
105 
107  unsigned ByteNo) const {
108  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
109  Context.getTargetInfo());
110 }
111 
112 /// Checks that a call expression's argument count is the desired number.
113 /// This is useful when doing custom type-checking. Returns true on error.
114 static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
115  unsigned argCount = call->getNumArgs();
116  if (argCount == desiredArgCount) return false;
117 
118  if (argCount < desiredArgCount)
119  return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
120  << 0 /*function call*/ << desiredArgCount << argCount
121  << call->getSourceRange();
122 
123  // Highlight all the excess arguments.
124  SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
125  call->getArg(argCount - 1)->getEndLoc());
126 
127  return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
128  << 0 /*function call*/ << desiredArgCount << argCount
129  << call->getArg(1)->getSourceRange();
130 }
131 
132 /// Check that the first argument to __builtin_annotation is an integer
133 /// and the second argument is a non-wide string literal.
134 static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
135  if (checkArgCount(S, TheCall, 2))
136  return true;
137 
138  // First argument should be an integer.
139  Expr *ValArg = TheCall->getArg(0);
140  QualType Ty = ValArg->getType();
141  if (!Ty->isIntegerType()) {
142  S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
143  << ValArg->getSourceRange();
144  return true;
145  }
146 
147  // Second argument should be a constant string.
148  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
149  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
150  if (!Literal || !Literal->isAscii()) {
151  S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
152  << StrArg->getSourceRange();
153  return true;
154  }
155 
156  TheCall->setType(Ty);
157  return false;
158 }
159 
160 static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
161  // We need at least one argument.
162  if (TheCall->getNumArgs() < 1) {
163  S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
164  << 0 << 1 << TheCall->getNumArgs()
165  << TheCall->getCallee()->getSourceRange();
166  return true;
167  }
168 
169  // All arguments should be wide string literals.
170  for (Expr *Arg : TheCall->arguments()) {
171  auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
172  if (!Literal || !Literal->isWide()) {
173  S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
174  << Arg->getSourceRange();
175  return true;
176  }
177  }
178 
179  return false;
180 }
181 
182 /// Check that the argument to __builtin_addressof is a glvalue, and set the
183 /// result type to the corresponding pointer type.
184 static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
185  if (checkArgCount(S, TheCall, 1))
186  return true;
187 
188  ExprResult Arg(TheCall->getArg(0));
189  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
190  if (ResultType.isNull())
191  return true;
192 
193  TheCall->setArg(0, Arg.get());
194  TheCall->setType(ResultType);
195  return false;
196 }
197 
198 /// Check the number of arguments and set the result type to
199 /// the argument type.
200 static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
201  if (checkArgCount(S, TheCall, 1))
202  return true;
203 
204  TheCall->setType(TheCall->getArg(0)->getType());
205  return false;
206 }
207 
208 /// Check that the value argument for __builtin_is_aligned(value, alignment) and
209 /// __builtin_aligned_{up,down}(value, alignment) is an integer or a pointer
210 /// type (but not a function pointer) and that the alignment is a power-of-two.
211 static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
212  if (checkArgCount(S, TheCall, 2))
213  return true;
214 
// NOTE(review): this extract is missing several original source lines here
// (the EvaluateAsInt continuation at orig line 246 and the two
// PerformCopyInitialization calls at orig lines 269-270 / 275-276); the
// surviving lines are kept verbatim.
215  clang::Expr *Source = TheCall->getArg(0);
216  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;
217 
// A valid value/alignment operand is an integer that is neither an
// enumeration nor bool.
218  auto IsValidIntegerType = [](QualType Ty) {
219  return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
220  };
221  QualType SrcTy = Source->getType();
222  // We should also be able to use it with arrays (but not functions!).
223  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
224  SrcTy = S.Context.getDecayedType(SrcTy);
225  }
226  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
227  SrcTy->isFunctionPointerType()) {
228  // FIXME: this is not quite the right error message since we don't allow
229  // floating point types, or member pointers.
230  S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
231  << SrcTy;
232  return true;
233  }
234 
// The alignment operand (argument 1) must also be a plain integer.
235  clang::Expr *AlignOp = TheCall->getArg(1);
236  if (!IsValidIntegerType(AlignOp->getType())) {
237  S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
238  << AlignOp->getType();
239  return true;
240  }
// When the alignment evaluates to a constant, require it to be >= 1, a power
// of two, and no larger than one bit below the source type's width; an
// alignment of exactly 1 gets the "useless builtin" warning.
241  Expr::EvalResult AlignResult;
242  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
243  // We can't check validity of alignment if it is value dependent.
244  if (!AlignOp->isValueDependent() &&
245  AlignOp->EvaluateAsInt(AlignResult, S.Context,
247  llvm::APSInt AlignValue = AlignResult.Val.getInt();
248  llvm::APSInt MaxValue(
249  llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
250  if (AlignValue < 1) {
251  S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
252  return true;
253  }
254  if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
255  S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
256  << toString(MaxValue, 10);
257  return true;
258  }
259  if (!AlignValue.isPowerOf2()) {
260  S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
261  return true;
262  }
263  if (AlignValue == 1) {
264  S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
265  << IsBooleanAlignBuiltin;
266  }
267  }
268 
// Run the (partially missing) copy-initialization of both operands and store
// the converted expressions back on the call.
271  SourceLocation(), Source);
272  if (SrcArg.isInvalid())
273  return true;
274  TheCall->setArg(0, SrcArg.get());
275  ExprResult AlignArg =
277  S.Context, AlignOp->getType(), false),
278  SourceLocation(), AlignOp);
279  if (AlignArg.isInvalid())
280  return true;
281  TheCall->setArg(1, AlignArg.get());
282  // For align_up/align_down, the return type is the same as the (potentially
283  // decayed) argument type including qualifiers. For is_aligned(), the result
284  // is always bool.
285  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
286  return false;
287 }
288 
/// Type-checks __builtin_{add,sub,mul}_overflow-style calls: two integer
/// operands plus a pointer to a non-const integer result slot. Returns true
/// on error.
289 static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
290  unsigned BuiltinID) {
291  if (checkArgCount(S, TheCall, 3))
292  return true;
293 
294  // First two arguments should be integers.
295  for (unsigned I = 0; I < 2; ++I) {
// NOTE(review): the declaration/initialization of 'Arg' (orig line 296, an
// ExprResult produced from TheCall->getArg(I)) is missing from this extract,
// here and in the block below (orig line 312).
297  if (Arg.isInvalid()) return true;
298  TheCall->setArg(I, Arg.get());
299 
300  QualType Ty = Arg.get()->getType();
301  if (!Ty->isIntegerType()) {
302  S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
303  << Ty << Arg.get()->getSourceRange();
304  return true;
305  }
306  }
307 
308  // Third argument should be a pointer to a non-const integer.
309  // IRGen correctly handles volatile, restrict, and address spaces, and
310  // the other qualifiers aren't possible.
311  {
313  if (Arg.isInvalid()) return true;
314  TheCall->setArg(2, Arg.get());
315 
316  QualType Ty = Arg.get()->getType();
317  const auto *PtrTy = Ty->getAs<PointerType>();
318  if (!PtrTy ||
319  !PtrTy->getPointeeType()->isIntegerType() ||
320  PtrTy->getPointeeType().isConstQualified()) {
321  S.Diag(Arg.get()->getBeginLoc(),
322  diag::err_overflow_builtin_must_be_ptr_int)
323  << Ty << Arg.get()->getSourceRange();
324  return true;
325  }
326  }
327 
328  // Disallow signed ExtIntType args larger than 128 bits to mul function until
329  // we improve backend support.
330  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
331  for (unsigned I = 0; I < 3; ++I) {
332  const auto Arg = TheCall->getArg(I);
333  // Third argument will be a pointer.
334  auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
335  if (Ty->isExtIntType() && Ty->isSignedIntegerType() &&
336  S.getASTContext().getIntWidth(Ty) > 128)
337  return S.Diag(Arg->getBeginLoc(),
338  diag::err_overflow_builtin_ext_int_max_size)
339  << 128;
340  }
341  }
342 
343  return false;
344 }
345 
/// Type-checks __builtin_call_with_static_chain(call, chain): argument 0 must
/// be a plain call (not a block call, not a builtin, not a pseudo-destructor)
/// and argument 1 must be a pointer. On success, this call expression is
/// rewritten in place to carry the wrapped call's type/value/object kind.
346 static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
347  if (checkArgCount(S, BuiltinCall, 2))
348  return true;
349 
350  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
351  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
352  Expr *Call = BuiltinCall->getArg(0);
353  Expr *Chain = BuiltinCall->getArg(1);
354 
// Argument 0 must be exactly a CallExpr (checked by statement class, so
// derived call-like expressions are rejected too).
355  if (Call->getStmtClass() != Stmt::CallExprClass) {
356  S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
357  << Call->getSourceRange();
358  return true;
359  }
360 
// Reject block calls, calls to builtins, and pseudo-destructor calls.
361  auto CE = cast<CallExpr>(Call);
362  if (CE->getCallee()->getType()->isBlockPointerType()) {
363  S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
364  << Call->getSourceRange();
365  return true;
366  }
367 
368  const Decl *TargetDecl = CE->getCalleeDecl();
369  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
370  if (FD->getBuiltinID()) {
371  S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
372  << Call->getSourceRange();
373  return true;
374  }
375 
376  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
377  S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
378  << Call->getSourceRange();
379  return true;
380  }
381 
// Argument 1 (the static chain) must convert to some pointer type under the
// usual unary conversions.
382  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
383  if (ChainResult.isInvalid())
384  return true;
385  if (!ChainResult.get()->getType()->isPointerType()) {
386  S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
387  << Chain->getSourceRange();
388  return true;
389  }
390 
// Synthesize a concrete function-pointer type for the builtin callee (return
// type of the wrapped call; parameter list built from ReturnTy plus the
// converted chain's type) and cast the callee to it.
391  QualType ReturnTy = CE->getCallReturnType(S.Context);
392  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
393  QualType BuiltinTy = S.Context.getFunctionType(
394  ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
395  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);
396 
397  Builtin =
398  S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();
399 
// Forward the wrapped call's result characteristics onto this builtin call
// and store back the converted callee/chain.
400  BuiltinCall->setType(CE->getType());
401  BuiltinCall->setValueKind(CE->getValueKind());
402  BuiltinCall->setObjectKind(CE->getObjectKind());
403  BuiltinCall->setCallee(Builtin);
404  BuiltinCall->setArg(1, ChainResult.get());
405 
406  return false;
407 }
408 
409 namespace {
410 
// Accumulates a lower-bound estimate (in bytes, including the trailing NUL)
// of what a sprintf of a given format string will write; consumed by
// Sema::checkFortifiedBuiltinMemoryFunction below via getSizeLowerBound().
// NOTE(review): this extract is missing lines — the class's base-class clause
// (orig line 412, presumably a FormatStringHandler, given the override
// below), every `case` label of the switches, and several `if`/`case` heads
// in the private helpers. The surviving lines are kept verbatim.
411 class EstimateSizeFormatHandler
413  size_t Size;
414 
415 public:
416  EstimateSizeFormatHandler(StringRef Format)
417  : Size(std::min(Format.find(0), Format.size()) +
418  1 /* null byte always written by sprintf */) {}
419 
// Called once per %-specifier: adds that specifier's minimum printed width
// to Size, then subtracts the specifier's own length in the format string
// (which the constructor counted).
420  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
421  const char *, unsigned SpecifierLen) override {
422 
423  const size_t FieldWidth = computeFieldWidth(FS);
424  const size_t Precision = computePrecision(FS);
425 
426  // The actual format.
427  switch (FS.getConversionSpecifier().getKind()) {
428  // Just a char.
431  Size += std::max(FieldWidth, (size_t)1);
432  break;
433  // Just an integer.
443  Size += std::max(FieldWidth, Precision);
444  break;
445 
446  // %g style conversion switches between %f or %e style dynamically.
447  // %f always takes less space, so default to it.
450 
451  // Floating point number in the form '[+]ddd.ddd'.
454  Size += std::max(FieldWidth, 1 /* integer part */ +
455  (Precision ? 1 + Precision
456  : 0) /* period + decimal */);
457  break;
458 
459  // Floating point number in the form '[-]d.ddde[+-]dd'.
462  Size +=
463  std::max(FieldWidth,
464  1 /* integer part */ +
465  (Precision ? 1 + Precision : 0) /* period + decimal */ +
466  1 /* e or E letter */ + 2 /* exponent */);
467  break;
468 
469  // Floating point number in the form '[-]0xh.hhhhp±dd'.
472  Size +=
473  std::max(FieldWidth,
474  2 /* 0x */ + 1 /* integer part */ +
475  (Precision ? 1 + Precision : 0) /* period + decimal */ +
476  1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
477  break;
478 
479  // Just a string.
482  Size += FieldWidth;
483  break;
484 
485  // Just a pointer in the form '0xddd'.
487  Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
488  break;
489 
490  // A plain percent.
492  Size += 1;
493  break;
494 
495  default:
496  break;
497  }
498 
// '+' or ' ' flags cost one extra byte on signed conversions.
499  Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();
500 
501  if (FS.hasAlternativeForm()) {
502  switch (FS.getConversionSpecifier().getKind()) {
503  default:
504  break;
505  // Force a leading '0'.
507  Size += 1;
508  break;
509  // Force a leading '0x'.
512  Size += 2;
513  break;
514  // Force a period '.' before decimal, even if precision is 0.
523  Size += (Precision ? 0 : 1);
524  break;
525  }
526  }
527  assert(SpecifierLen <= Size && "no underflow");
528  Size -= SpecifierLen;
529  return true;
530  }
531 
532  size_t getSizeLowerBound() const { return Size; }
533 
534 private:
// Returns the constant field width, or 0 when absent/non-constant (the `if`
// guarding getConstantAmount, orig line 538, is missing from this extract).
535  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
536  const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
537  size_t FieldWidth = 0;
539  FieldWidth = FW.getConstantAmount();
540  return FieldWidth;
541  }
542 
// Returns the explicit constant precision, or the per-specifier default when
// none was given (case labels are missing from this extract).
543  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
544  const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
545  size_t Precision = 0;
546 
547  // See man 3 printf for default precision value based on the specifier.
548  switch (FW.getHowSpecified()) {
550  switch (FS.getConversionSpecifier().getKind()) {
551  default:
552  break;
556  Precision = 1;
557  break;
564  Precision = 1;
565  break;
572  Precision = 6;
573  break;
575  Precision = 1;
576  break;
577  }
578  break;
580  Precision = FW.getConstantAmount();
581  break;
582  default:
583  break;
584  }
585  return Precision;
586  }
587 };
588 
589 } // namespace
590 
/// Emits _FORTIFY_SOURCE-style warnings for calls to memory/string builtins
/// (memcpy, strcpy, sprintf, their __builtin_ and _chk variants, ...) when a
/// statically-computable source size exceeds the destination object's size.
591 void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
592  CallExpr *TheCall) {
// Nothing to check for dependent calls or inside constant evaluation.
593  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
594  isConstantEvaluated())
595  return;
596 
597  unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
598  if (!BuiltinID)
599  return;
600 
601  const TargetInfo &TI = getASTContext().getTargetInfo();
602  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
603 
// Evaluates argument Index as an integer constant (used for the explicit
// size operands of the _chk variants and snprintf-style bounds).
604  auto ComputeExplicitObjectSizeArgument =
605  [&](unsigned Index) -> Optional<llvm::APSInt> {
606  Expr::EvalResult Result;
607  Expr *SizeArg = TheCall->getArg(Index);
608  if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
609  return llvm::None;
610  return Result.Val.getInt();
611  };
612 
// Computes the pointed-to object's size (__builtin_object_size style) for
// argument Index, honoring any pass_object_size attribute on the parameter.
613  auto ComputeSizeArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
614  // If the parameter has a pass_object_size attribute, then we should use its
615  // (potentially) more strict checking mode. Otherwise, conservatively assume
616  // type 0.
617  int BOSType = 0;
618  if (const auto *POS =
619  FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
620  BOSType = POS->getType();
621 
622  const Expr *ObjArg = TheCall->getArg(Index);
623  uint64_t Result;
624  if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
625  return llvm::None;
626 
627  // Get the object size in the target's size_t width.
628  return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
629  };
630 
// Computes strlen(argument Index) + 1 when it is statically evaluable.
631  auto ComputeStrLenArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
632  Expr *ObjArg = TheCall->getArg(Index);
633  uint64_t Result;
634  if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
635  return llvm::None;
636  // Add 1 for null byte.
637  return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
638  };
639 
// Each case below selects a diagnostic and how to compute the two sizes.
640  Optional<llvm::APSInt> SourceSize;
641  Optional<llvm::APSInt> DestinationSize;
642  unsigned DiagID = 0;
643  bool IsChkVariant = false;
644 
645  switch (BuiltinID) {
646  default:
647  return;
648  case Builtin::BI__builtin_strcpy:
649  case Builtin::BIstrcpy: {
650  DiagID = diag::warn_fortify_strlen_overflow;
651  SourceSize = ComputeStrLenArgument(1);
652  DestinationSize = ComputeSizeArgument(0);
653  break;
654  }
655 
656  case Builtin::BI__builtin___strcpy_chk: {
657  DiagID = diag::warn_fortify_strlen_overflow;
658  SourceSize = ComputeStrLenArgument(1);
659  DestinationSize = ComputeExplicitObjectSizeArgument(2);
660  IsChkVariant = true;
661  break;
662  }
663 
664  case Builtin::BIsprintf:
665  case Builtin::BI__builtin___sprintf_chk: {
666  size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
667  auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();
668 
669  if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {
670 
671  if (!Format->isAscii() && !Format->isUTF8())
672  return;
673 
// Estimate the minimum output size by walking the format string with
// EstimateSizeFormatHandler (defined above).
674  StringRef FormatStrRef = Format->getString();
675  EstimateSizeFormatHandler H(FormatStrRef);
676  const char *FormatBytes = FormatStrRef.data();
677  const ConstantArrayType *T =
678  Context.getAsConstantArrayType(Format->getType());
679  assert(T && "String literal not of constant array type!");
680  size_t TypeSize = T->getSize().getZExtValue();
681 
682  // In case there's a null byte somewhere.
683  size_t StrLen =
684  std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
// NOTE(review): a line is missing from this extract here (orig line 685, the
// format-string parsing call whose arguments follow on the next two lines).
686  H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
687  Context.getTargetInfo(), false)) {
688  DiagID = diag::warn_fortify_source_format_overflow;
689  SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
690  .extOrTrunc(SizeTypeWidth);
691  if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
692  DestinationSize = ComputeExplicitObjectSizeArgument(2);
693  IsChkVariant = true;
694  } else {
695  DestinationSize = ComputeSizeArgument(0);
696  }
697  break;
698  }
699  }
700  return;
701  }
702  case Builtin::BI__builtin___memcpy_chk:
703  case Builtin::BI__builtin___memmove_chk:
704  case Builtin::BI__builtin___memset_chk:
705  case Builtin::BI__builtin___strlcat_chk:
706  case Builtin::BI__builtin___strlcpy_chk:
707  case Builtin::BI__builtin___strncat_chk:
708  case Builtin::BI__builtin___strncpy_chk:
709  case Builtin::BI__builtin___stpncpy_chk:
710  case Builtin::BI__builtin___memccpy_chk:
711  case Builtin::BI__builtin___mempcpy_chk: {
712  DiagID = diag::warn_builtin_chk_overflow;
713  SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
714  DestinationSize =
715  ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
716  IsChkVariant = true;
717  break;
718  }
719 
720  case Builtin::BI__builtin___snprintf_chk:
721  case Builtin::BI__builtin___vsnprintf_chk: {
722  DiagID = diag::warn_builtin_chk_overflow;
723  SourceSize = ComputeExplicitObjectSizeArgument(1);
724  DestinationSize = ComputeExplicitObjectSizeArgument(3);
725  IsChkVariant = true;
726  break;
727  }
728 
729  case Builtin::BIstrncat:
730  case Builtin::BI__builtin_strncat:
731  case Builtin::BIstrncpy:
732  case Builtin::BI__builtin_strncpy:
733  case Builtin::BIstpncpy:
734  case Builtin::BI__builtin_stpncpy: {
735  // Whether these functions overflow depends on the runtime strlen of the
736  // string, not just the buffer size, so emitting the "always overflow"
737  // diagnostic isn't quite right. We should still diagnose passing a buffer
738  // size larger than the destination buffer though; this is a runtime abort
739  // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
740  DiagID = diag::warn_fortify_source_size_mismatch;
741  SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
742  DestinationSize = ComputeSizeArgument(0);
743  break;
744  }
745 
746  case Builtin::BImemcpy:
747  case Builtin::BI__builtin_memcpy:
748  case Builtin::BImemmove:
749  case Builtin::BI__builtin_memmove:
750  case Builtin::BImemset:
751  case Builtin::BI__builtin_memset:
752  case Builtin::BImempcpy:
753  case Builtin::BI__builtin_mempcpy: {
754  DiagID = diag::warn_fortify_source_overflow;
755  SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
756  DestinationSize = ComputeSizeArgument(0);
757  break;
758  }
759  case Builtin::BIsnprintf:
760  case Builtin::BI__builtin_snprintf:
761  case Builtin::BIvsnprintf:
762  case Builtin::BI__builtin_vsnprintf: {
763  DiagID = diag::warn_fortify_source_size_mismatch;
764  SourceSize = ComputeExplicitObjectSizeArgument(1);
765  DestinationSize = ComputeSizeArgument(0);
766  break;
767  }
768  }
769 
// Only warn when both sizes are known and the source strictly exceeds the
// destination.
770  if (!SourceSize || !DestinationSize ||
771  SourceSize.getValue().ule(DestinationSize.getValue()))
772  return;
773 
774  StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
775  // Skim off the details of whichever builtin was called to produce a better
776  // diagnostic, as it's unlikely that the user wrote the __builtin explicitly.
777  if (IsChkVariant) {
778  FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
779  FunctionName = FunctionName.drop_back(std::strlen("_chk"));
780  } else if (FunctionName.startswith("__builtin_")) {
781  FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
782  }
783 
784  SmallString<16> DestinationStr;
785  SmallString<16> SourceStr;
786  DestinationSize->toString(DestinationStr, /*Radix=*/10);
787  SourceSize->toString(SourceStr, /*Radix=*/10);
788  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
789  PDiag(DiagID)
790  << FunctionName << DestinationStr << SourceStr);
791 }
792 
793 static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
794  Scope::ScopeFlags NeededScopeFlags,
795  unsigned DiagID) {
796  // Scopes aren't available during instantiation. Fortunately, builtin
797  // functions cannot be template args so they cannot be formed through template
798  // instantiation. Therefore checking once during the parse is sufficient.
799  if (SemaRef.inTemplateInstantiation())
800  return false;
801 
802  Scope *S = SemaRef.getCurScope();
803  while (S && !S->isSEHExceptScope())
804  S = S->getParent();
805  if (!S || !(S->getFlags() & NeededScopeFlags)) {
806  auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
807  SemaRef.Diag(TheCall->getExprLoc(), DiagID)
808  << DRE->getDecl()->getIdentifier();
809  return true;
810  }
811 
812  return false;
813 }
814 
815 static inline bool isBlockPointer(Expr *Arg) {
816  return Arg->getType()->isBlockPointerType();
817 }
818 
819 /// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
820 /// void*, which is a requirement of device side enqueue.
821 static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
822  const BlockPointerType *BPT =
823  cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
824  ArrayRef<QualType> Params =
825  BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
826  unsigned ArgCounter = 0;
827  bool IllegalParams = false;
// NOTE(review): the right-hand side of the address-space comparison below
// (orig line 834 — the expected local address space) is missing from this
// extract.
828  // Iterate through the block parameters until either one is found that is not
829  // a local void*, or the block is valid.
830  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
831  I != E; ++I, ++ArgCounter) {
832  if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
833  (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
835  // Get the location of the error. If a block literal has been passed
836  // (BlockExpr) then we can point straight to the offending argument,
837  // else we just point to the variable reference.
838  SourceLocation ErrorLoc;
839  if (isa<BlockExpr>(BlockArg)) {
840  BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
841  ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
842  } else if (isa<DeclRefExpr>(BlockArg)) {
843  ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
844  }
845  S.Diag(ErrorLoc,
846  diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
847  IllegalParams = true;
848  }
849  }
850 
// True when at least one block parameter was diagnosed.
851  return IllegalParams;
852 }
853 
854 static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
855  if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts())) {
856  S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
857  << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
858  return true;
859  }
860  return false;
861 }
862 
863 static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
864  if (checkArgCount(S, TheCall, 2))
865  return true;
866 
867  if (checkOpenCLSubgroupExt(S, TheCall))
868  return true;
869 
870  // First argument is an ndrange_t type.
871  Expr *NDRangeArg = TheCall->getArg(0);
872  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
873  S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
874  << TheCall->getDirectCallee() << "'ndrange_t'";
875  return true;
876  }
877 
878  Expr *BlockArg = TheCall->getArg(1);
879  if (!isBlockPointer(BlockArg)) {
880  S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
881  << TheCall->getDirectCallee() << "block";
882  return true;
883  }
884  return checkOpenCLBlockArgs(S, BlockArg);
885 }
886 
887 /// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
888 /// get_kernel_work_group_size
889 /// and get_kernel_preferred_work_group_size_multiple builtin functions.
// NOTE(review): the function's signature line (orig line 890) is missing from
// this extract; the body below uses the (Sema &S, CallExpr *TheCall)
// parameters common to the other checkers in this file.
891  if (checkArgCount(S, TheCall, 1))
892  return true;
893 
// The single argument must be a block; checkOpenCLBlockArgs then validates
// its parameter list.
894  Expr *BlockArg = TheCall->getArg(0);
895  if (!isBlockPointer(BlockArg)) {
896  S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
897  << TheCall->getDirectCallee() << "block";
898  return true;
899  }
900  return checkOpenCLBlockArgs(S, BlockArg);
901 }
902 
903 /// Diagnose integer type and any valid implicit conversion to it.
904 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
905  const QualType &IntType);
906 
// Checks that each call argument in [Start, End] is (convertible to) size_t —
// the trailing local-size arguments of enqueue_kernel.
// NOTE(review): the signature head (orig line 907) is missing from this
// extract; the name checkOpenCLEnqueueLocalSizeArgs is confirmed by the call
// at orig line 936 below.
908  unsigned Start, unsigned End) {
909  bool IllegalParams = false;
910  for (unsigned I = Start; I <= End; ++I)
911  IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
912  S.Context.getSizeType());
913  return IllegalParams;
914 }
915 
916 /// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
917 /// 'local void*' parameter of passed block.
// NOTE(review): the signature head (orig line 918) is missing from this
// extract; the call at orig line 1027 names this function
// checkOpenCLEnqueueVariadicArgs(S, TheCall, BlockArg, NumNonVarArgs).
919  Expr *BlockArg,
920  unsigned NumNonVarArgs) {
921  const BlockPointerType *BPT =
922  cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
923  unsigned NumBlockParams =
924  BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
925  unsigned TotalNumArgs = TheCall->getNumArgs();
926 
927  // For each argument passed to the block, a corresponding uint needs to
928  // be passed to describe the size of the local memory.
929  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
930  S.Diag(TheCall->getBeginLoc(),
931  diag::err_opencl_enqueue_kernel_local_size_args);
932  return true;
933  }
934 
935  // Check that the sizes of the local memory are specified by integers.
936  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
937  TotalNumArgs - 1);
938 }
939 
940 /// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
941 /// overload formats specified in Table 6.13.17.1.
942 /// int enqueue_kernel(queue_t queue,
943 /// kernel_enqueue_flags_t flags,
944 /// const ndrange_t ndrange,
945 /// void (^block)(void))
946 /// int enqueue_kernel(queue_t queue,
947 /// kernel_enqueue_flags_t flags,
948 /// const ndrange_t ndrange,
949 /// uint num_events_in_wait_list,
950 /// clk_event_t *event_wait_list,
951 /// clk_event_t *event_ret,
952 /// void (^block)(void))
953 /// int enqueue_kernel(queue_t queue,
954 /// kernel_enqueue_flags_t flags,
955 /// const ndrange_t ndrange,
956 /// void (^block)(local void*, ...),
957 /// uint size0, ...)
958 /// int enqueue_kernel(queue_t queue,
959 /// kernel_enqueue_flags_t flags,
960 /// const ndrange_t ndrange,
961 /// uint num_events_in_wait_list,
962 /// clk_event_t *event_wait_list,
963 /// clk_event_t *event_ret,
964 /// void (^block)(local void*, ...),
965 /// uint size0, ...)
966 static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
967  unsigned NumArgs = TheCall->getNumArgs();
968 
969  if (NumArgs < 4) {
970  S.Diag(TheCall->getBeginLoc(),
971  diag::err_typecheck_call_too_few_args_at_least)
972  << 0 << 4 << NumArgs;
973  return true;
974  }
975 
976  Expr *Arg0 = TheCall->getArg(0);
977  Expr *Arg1 = TheCall->getArg(1);
978  Expr *Arg2 = TheCall->getArg(2);
979  Expr *Arg3 = TheCall->getArg(3);
980 
981  // First argument always needs to be a queue_t type.
982  if (!Arg0->getType()->isQueueT()) {
983  S.Diag(TheCall->getArg(0)->getBeginLoc(),
984  diag::err_opencl_builtin_expected_type)
985  << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
986  return true;
987  }
988 
989  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
990  if (!Arg1->getType()->isIntegerType()) {
991  S.Diag(TheCall->getArg(1)->getBeginLoc(),
992  diag::err_opencl_builtin_expected_type)
993  << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
994  return true;
995  }
996 
997  // Third argument is always an ndrange_t type.
998  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
999  S.Diag(TheCall->getArg(2)->getBeginLoc(),
1000  diag::err_opencl_builtin_expected_type)
1001  << TheCall->getDirectCallee() << "'ndrange_t'";
1002  return true;
1003  }
1004 
1005  // With four arguments, there is only one form that the function could be
1006  // called in: no events and no variable arguments.
1007  if (NumArgs == 4) {
1008  // check that the last argument is the right block type.
1009  if (!isBlockPointer(Arg3)) {
1010  S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1011  << TheCall->getDirectCallee() << "block";
1012  return true;
1013  }
1014  // we have a block type, check the prototype
1015  const BlockPointerType *BPT =
1016  cast<BlockPointerType>(Arg3->getType().getCanonicalType());
1017  if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
1018  S.Diag(Arg3->getBeginLoc(),
1019  diag::err_opencl_enqueue_kernel_blocks_no_args);
1020  return true;
1021  }
1022  return false;
1023  }
1024  // we can have block + varargs.
1025  if (isBlockPointer(Arg3))
1026  return (checkOpenCLBlockArgs(S, Arg3) ||
1027  checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
1028  // last two cases with either exactly 7 args or 7 args and varargs.
1029  if (NumArgs >= 7) {
1030  // check common block argument.
1031  Expr *Arg6 = TheCall->getArg(6);
1032  if (!isBlockPointer(Arg6)) {
1033  S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1034  << TheCall->getDirectCallee() << "block";
1035  return true;
1036  }
1037  if (checkOpenCLBlockArgs(S, Arg6))
1038  return true;
1039 
1040  // Forth argument has to be any integer type.
1041  if (!Arg3->getType()->isIntegerType()) {
1042  S.Diag(TheCall->getArg(3)->getBeginLoc(),
1043  diag::err_opencl_builtin_expected_type)
1044  << TheCall->getDirectCallee() << "integer";
1045  return true;
1046  }
1047  // check remaining common arguments.
1048  Expr *Arg4 = TheCall->getArg(4);
1049  Expr *Arg5 = TheCall->getArg(5);
1050 
1051  // Fifth argument is always passed as a pointer to clk_event_t.
1052  if (!Arg4->isNullPointerConstant(S.Context,
1055  S.Diag(TheCall->getArg(4)->getBeginLoc(),
1056  diag::err_opencl_builtin_expected_type)
1057  << TheCall->getDirectCallee()
1059  return true;
1060  }
1061 
1062  // Sixth argument is always passed as a pointer to clk_event_t.
1063  if (!Arg5->isNullPointerConstant(S.Context,
1065  !(Arg5->getType()->isPointerType() &&
1066  Arg5->getType()->getPointeeType()->isClkEventT())) {
1067  S.Diag(TheCall->getArg(5)->getBeginLoc(),
1068  diag::err_opencl_builtin_expected_type)
1069  << TheCall->getDirectCallee()
1071  return true;
1072  }
1073 
1074  if (NumArgs == 7)
1075  return false;
1076 
1077  return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
1078  }
1079 
1080  // None of the specific case has been detected, give generic error
1081  S.Diag(TheCall->getBeginLoc(),
1082  diag::err_opencl_enqueue_kernel_incorrect_args);
1083  return true;
1084 }
1085 
1086 /// Returns OpenCL access qual.
1087 static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
1088  return D->getAttr<OpenCLAccessAttr>();
1089 }
1090 
1091 /// Returns true if pipe element type is different from the pointer.
1092 static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
1093  const Expr *Arg0 = Call->getArg(0);
1094  // First argument type should always be pipe.
1095  if (!Arg0->getType()->isPipeType()) {
1096  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1097  << Call->getDirectCallee() << Arg0->getSourceRange();
1098  return true;
1099  }
1100  OpenCLAccessAttr *AccessQual =
1101  getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
1102  // Validates the access qualifier is compatible with the call.
1103  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
1104  // read_only and write_only, and assumed to be read_only if no qualifier is
1105  // specified.
1106  switch (Call->getDirectCallee()->getBuiltinID()) {
1107  case Builtin::BIread_pipe:
1108  case Builtin::BIreserve_read_pipe:
1109  case Builtin::BIcommit_read_pipe:
1110  case Builtin::BIwork_group_reserve_read_pipe:
1111  case Builtin::BIsub_group_reserve_read_pipe:
1112  case Builtin::BIwork_group_commit_read_pipe:
1113  case Builtin::BIsub_group_commit_read_pipe:
1114  if (!(!AccessQual || AccessQual->isReadOnly())) {
1115  S.Diag(Arg0->getBeginLoc(),
1116  diag::err_opencl_builtin_pipe_invalid_access_modifier)
1117  << "read_only" << Arg0->getSourceRange();
1118  return true;
1119  }
1120  break;
1121  case Builtin::BIwrite_pipe:
1122  case Builtin::BIreserve_write_pipe:
1123  case Builtin::BIcommit_write_pipe:
1124  case Builtin::BIwork_group_reserve_write_pipe:
1125  case Builtin::BIsub_group_reserve_write_pipe:
1126  case Builtin::BIwork_group_commit_write_pipe:
1127  case Builtin::BIsub_group_commit_write_pipe:
1128  if (!(AccessQual && AccessQual->isWriteOnly())) {
1129  S.Diag(Arg0->getBeginLoc(),
1130  diag::err_opencl_builtin_pipe_invalid_access_modifier)
1131  << "write_only" << Arg0->getSourceRange();
1132  return true;
1133  }
1134  break;
1135  default:
1136  break;
1137  }
1138  return false;
1139 }
1140 
1141 /// Returns true if pipe element type is different from the pointer.
1142 static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
1143  const Expr *Arg0 = Call->getArg(0);
1144  const Expr *ArgIdx = Call->getArg(Idx);
1145  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
1146  const QualType EltTy = PipeTy->getElementType();
1147  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
1148  // The Idx argument should be a pointer and the type of the pointer and
1149  // the type of pipe element should also be the same.
1150  if (!ArgTy ||
1151  !S.Context.hasSameType(
1152  EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
1153  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1154  << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
1155  << ArgIdx->getType() << ArgIdx->getSourceRange();
1156  return true;
1157  }
1158  return false;
1159 }
1160 
1161 // Performs semantic analysis for the read/write_pipe call.
1162 // \param S Reference to the semantic analyzer.
1163 // \param Call A pointer to the builtin call.
1164 // \return True if a semantic error has been found, false otherwise.
1165 static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
1166  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
1167  // functions have two forms.
1168  switch (Call->getNumArgs()) {
1169  case 2:
1170  if (checkOpenCLPipeArg(S, Call))
1171  return true;
1172  // The call with 2 arguments should be
1173  // read/write_pipe(pipe T, T*).
1174  // Check packet type T.
1175  if (checkOpenCLPipePacketType(S, Call, 1))
1176  return true;
1177  break;
1178 
1179  case 4: {
1180  if (checkOpenCLPipeArg(S, Call))
1181  return true;
1182  // The call with 4 arguments should be
1183  // read/write_pipe(pipe T, reserve_id_t, uint, T*).
1184  // Check reserve_id_t.
1185  if (!Call->getArg(1)->getType()->isReserveIDT()) {
1186  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1187  << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1188  << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1189  return true;
1190  }
1191 
1192  // Check the index.
1193  const Expr *Arg2 = Call->getArg(2);
1194  if (!Arg2->getType()->isIntegerType() &&
1195  !Arg2->getType()->isUnsignedIntegerType()) {
1196  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1197  << Call->getDirectCallee() << S.Context.UnsignedIntTy
1198  << Arg2->getType() << Arg2->getSourceRange();
1199  return true;
1200  }
1201 
1202  // Check packet type T.
1203  if (checkOpenCLPipePacketType(S, Call, 3))
1204  return true;
1205  } break;
1206  default:
1207  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
1208  << Call->getDirectCallee() << Call->getSourceRange();
1209  return true;
1210  }
1211 
1212  return false;
1213 }
1214 
1215 // Performs a semantic analysis on the {work_group_/sub_group_
1216 // /_}reserve_{read/write}_pipe
1217 // \param S Reference to the semantic analyzer.
1218 // \param Call The call to the builtin function to be analyzed.
1219 // \return True if a semantic error was found, false otherwise.
1220 static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
1221  if (checkArgCount(S, Call, 2))
1222  return true;
1223 
1224  if (checkOpenCLPipeArg(S, Call))
1225  return true;
1226 
1227  // Check the reserve size.
1228  if (!Call->getArg(1)->getType()->isIntegerType() &&
1229  !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
1230  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1231  << Call->getDirectCallee() << S.Context.UnsignedIntTy
1232  << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1233  return true;
1234  }
1235 
1236  // Since return type of reserve_read/write_pipe built-in function is
1237  // reserve_id_t, which is not defined in the builtin def file , we used int
1238  // as return type and need to override the return type of these functions.
1239  Call->setType(S.Context.OCLReserveIDTy);
1240 
1241  return false;
1242 }
1243 
1244 // Performs a semantic analysis on {work_group_/sub_group_
1245 // /_}commit_{read/write}_pipe
1246 // \param S Reference to the semantic analyzer.
1247 // \param Call The call to the builtin function to be analyzed.
1248 // \return True if a semantic error was found, false otherwise.
1249 static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
1250  if (checkArgCount(S, Call, 2))
1251  return true;
1252 
1253  if (checkOpenCLPipeArg(S, Call))
1254  return true;
1255 
1256  // Check reserve_id_t.
1257  if (!Call->getArg(1)->getType()->isReserveIDT()) {
1258  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1259  << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1260  << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1261  return true;
1262  }
1263 
1264  return false;
1265 }
1266 
1267 // Performs a semantic analysis on the call to built-in Pipe
1268 // Query Functions.
1269 // \param S Reference to the semantic analyzer.
1270 // \param Call The call to the builtin function to be analyzed.
1271 // \return True if a semantic error was found, false otherwise.
1272 static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
1273  if (checkArgCount(S, Call, 1))
1274  return true;
1275 
1276  if (!Call->getArg(0)->getType()->isPipeType()) {
1277  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1278  << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
1279  return true;
1280  }
1281 
1282  return false;
1283 }
1284 
1285 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
1286 // Performs semantic analysis for the to_global/local/private call.
1287 // \param S Reference to the semantic analyzer.
1288 // \param BuiltinID ID of the builtin function.
1289 // \param Call A pointer to the builtin call.
1290 // \return True if a semantic error has been found, false otherwise.
1291 static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
1292  CallExpr *Call) {
1293  if (checkArgCount(S, Call, 1))
1294  return true;
1295 
1296  auto RT = Call->getArg(0)->getType();
1297  if (!RT->isPointerType() || RT->getPointeeType()
1298  .getAddressSpace() == LangAS::opencl_constant) {
1299  S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
1300  << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
1301  return true;
1302  }
1303 
1304  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
1305  S.Diag(Call->getArg(0)->getBeginLoc(),
1306  diag::warn_opencl_generic_address_space_arg)
1307  << Call->getDirectCallee()->getNameInfo().getAsString()
1308  << Call->getArg(0)->getSourceRange();
1309  }
1310 
1311  RT = RT->getPointeeType();
1312  auto Qual = RT.getQualifiers();
1313  switch (BuiltinID) {
1314  case Builtin::BIto_global:
1315  Qual.setAddressSpace(LangAS::opencl_global);
1316  break;
1317  case Builtin::BIto_local:
1318  Qual.setAddressSpace(LangAS::opencl_local);
1319  break;
1320  case Builtin::BIto_private:
1321  Qual.setAddressSpace(LangAS::opencl_private);
1322  break;
1323  default:
1324  llvm_unreachable("Invalid builtin function");
1325  }
1326  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
1327  RT.getUnqualifiedType(), Qual)));
1328 
1329  return false;
1330 }
1331 
1333  if (checkArgCount(S, TheCall, 1))
1334  return ExprError();
1335 
1336  // Compute __builtin_launder's parameter type from the argument.
1337  // The parameter type is:
1338  // * The type of the argument if it's not an array or function type,
1339  // Otherwise,
1340  // * The decayed argument type.
1341  QualType ParamTy = [&]() {
1342  QualType ArgTy = TheCall->getArg(0)->getType();
1343  if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
1344  return S.Context.getPointerType(Ty->getElementType());
1345  if (ArgTy->isFunctionType()) {
1346  return S.Context.getPointerType(ArgTy);
1347  }
1348  return ArgTy;
1349  }();
1350 
1351  TheCall->setType(ParamTy);
1352 
1353  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
1354  if (!ParamTy->isPointerType())
1355  return 0;
1356  if (ParamTy->isFunctionPointerType())
1357  return 1;
1358  if (ParamTy->isVoidPointerType())
1359  return 2;
1360  return llvm::Optional<unsigned>{};
1361  }();
1362  if (DiagSelect.hasValue()) {
1363  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
1364  << DiagSelect.getValue() << TheCall->getSourceRange();
1365  return ExprError();
1366  }
1367 
1368  // We either have an incomplete class type, or we have a class template
1369  // whose instantiation has not been forced. Example:
1370  //
1371  // template <class T> struct Foo { T value; };
1372  // Foo<int> *p = nullptr;
1373  // auto *d = __builtin_launder(p);
1374  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
1375  diag::err_incomplete_type))
1376  return ExprError();
1377 
1378  assert(ParamTy->getPointeeType()->isObjectType() &&
1379  "Unhandled non-object pointer case");
1380 
1381  InitializedEntity Entity =
1383  ExprResult Arg =
1384  S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
1385  if (Arg.isInvalid())
1386  return ExprError();
1387  TheCall->setArg(0, Arg.get());
1388 
1389  return TheCall;
1390 }
1391 
1392 // Emit an error and return true if the current architecture is not in the list
1393 // of supported architectures.
1394 static bool
1395 CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
1396  ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
1397  llvm::Triple::ArchType CurArch =
1398  S.getASTContext().getTargetInfo().getTriple().getArch();
1399  if (llvm::is_contained(SupportedArchs, CurArch))
1400  return false;
1401  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
1402  << TheCall->getSourceRange();
1403  return true;
1404 }
1405 
1406 static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
1407  SourceLocation CallSiteLoc);
1408 
1409 bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
1410  CallExpr *TheCall) {
1411  switch (TI.getTriple().getArch()) {
1412  default:
1413  // Some builtins don't require additional checking, so just consider these
1414  // acceptable.
1415  return false;
1416  case llvm::Triple::arm:
1417  case llvm::Triple::armeb:
1418  case llvm::Triple::thumb:
1419  case llvm::Triple::thumbeb:
1420  return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
1421  case llvm::Triple::aarch64:
1422  case llvm::Triple::aarch64_32:
1423  case llvm::Triple::aarch64_be:
1424  return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
1425  case llvm::Triple::bpfeb:
1426  case llvm::Triple::bpfel:
1427  return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
1428  case llvm::Triple::hexagon:
1429  return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
1430  case llvm::Triple::mips:
1431  case llvm::Triple::mipsel:
1432  case llvm::Triple::mips64:
1433  case llvm::Triple::mips64el:
1434  return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
1435  case llvm::Triple::systemz:
1436  return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
1437  case llvm::Triple::x86:
1438  case llvm::Triple::x86_64:
1439  return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
1440  case llvm::Triple::ppc:
1441  case llvm::Triple::ppcle:
1442  case llvm::Triple::ppc64:
1443  case llvm::Triple::ppc64le:
1444  return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
1445  case llvm::Triple::amdgcn:
1446  return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
1447  case llvm::Triple::riscv32:
1448  case llvm::Triple::riscv64:
1449  return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
1450  }
1451 }
1452 
1453 ExprResult
1454 Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
1455  CallExpr *TheCall) {
1456  ExprResult TheCallResult(TheCall);
1457 
1458  // Find out if any arguments are required to be integer constant expressions.
1459  unsigned ICEArguments = 0;
1461  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
1462  if (Error != ASTContext::GE_None)
1463  ICEArguments = 0; // Don't diagnose previously diagnosed errors.
1464 
1465  // If any arguments are required to be ICE's, check and diagnose.
1466  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
1467  // Skip arguments not required to be ICE's.
1468  if ((ICEArguments & (1 << ArgNo)) == 0) continue;
1469 
1470  llvm::APSInt Result;
1471  if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
1472  return true;
1473  ICEArguments &= ~(1 << ArgNo);
1474  }
1475 
1476  switch (BuiltinID) {
1477  case Builtin::BI__builtin___CFStringMakeConstantString:
1478  assert(TheCall->getNumArgs() == 1 &&
1479  "Wrong # arguments to builtin CFStringMakeConstantString");
1480  if (CheckObjCString(TheCall->getArg(0)))
1481  return ExprError();
1482  break;
1483  case Builtin::BI__builtin_ms_va_start:
1484  case Builtin::BI__builtin_stdarg_start:
1485  case Builtin::BI__builtin_va_start:
1486  if (SemaBuiltinVAStart(BuiltinID, TheCall))
1487  return ExprError();
1488  break;
1489  case Builtin::BI__va_start: {
1490  switch (Context.getTargetInfo().getTriple().getArch()) {
1491  case llvm::Triple::aarch64:
1492  case llvm::Triple::arm:
1493  case llvm::Triple::thumb:
1494  if (SemaBuiltinVAStartARMMicrosoft(TheCall))
1495  return ExprError();
1496  break;
1497  default:
1498  if (SemaBuiltinVAStart(BuiltinID, TheCall))
1499  return ExprError();
1500  break;
1501  }
1502  break;
1503  }
1504 
1505  // The acquire, release, and no fence variants are ARM and AArch64 only.
1506  case Builtin::BI_interlockedbittestandset_acq:
1507  case Builtin::BI_interlockedbittestandset_rel:
1508  case Builtin::BI_interlockedbittestandset_nf:
1509  case Builtin::BI_interlockedbittestandreset_acq:
1510  case Builtin::BI_interlockedbittestandreset_rel:
1511  case Builtin::BI_interlockedbittestandreset_nf:
1513  *this, BuiltinID, TheCall,
1514  {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
1515  return ExprError();
1516  break;
1517 
1518  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
1519  case Builtin::BI_bittest64:
1520  case Builtin::BI_bittestandcomplement64:
1521  case Builtin::BI_bittestandreset64:
1522  case Builtin::BI_bittestandset64:
1523  case Builtin::BI_interlockedbittestandreset64:
1524  case Builtin::BI_interlockedbittestandset64:
1525  if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
1526  {llvm::Triple::x86_64, llvm::Triple::arm,
1527  llvm::Triple::thumb, llvm::Triple::aarch64}))
1528  return ExprError();
1529  break;
1530 
1531  case Builtin::BI__builtin_isgreater:
1532  case Builtin::BI__builtin_isgreaterequal:
1533  case Builtin::BI__builtin_isless:
1534  case Builtin::BI__builtin_islessequal:
1535  case Builtin::BI__builtin_islessgreater:
1536  case Builtin::BI__builtin_isunordered:
1537  if (SemaBuiltinUnorderedCompare(TheCall))
1538  return ExprError();
1539  break;
1540  case Builtin::BI__builtin_fpclassify:
1541  if (SemaBuiltinFPClassification(TheCall, 6))
1542  return ExprError();
1543  break;
1544  case Builtin::BI__builtin_isfinite:
1545  case Builtin::BI__builtin_isinf:
1546  case Builtin::BI__builtin_isinf_sign:
1547  case Builtin::BI__builtin_isnan:
1548  case Builtin::BI__builtin_isnormal:
1549  case Builtin::BI__builtin_signbit:
1550  case Builtin::BI__builtin_signbitf:
1551  case Builtin::BI__builtin_signbitl:
1552  if (SemaBuiltinFPClassification(TheCall, 1))
1553  return ExprError();
1554  break;
1555  case Builtin::BI__builtin_shufflevector:
1556  return SemaBuiltinShuffleVector(TheCall);
1557  // TheCall will be freed by the smart pointer here, but that's fine, since
1558  // SemaBuiltinShuffleVector guts it, but then doesn't release it.
1559  case Builtin::BI__builtin_prefetch:
1560  if (SemaBuiltinPrefetch(TheCall))
1561  return ExprError();
1562  break;
1563  case Builtin::BI__builtin_alloca_with_align:
1564  if (SemaBuiltinAllocaWithAlign(TheCall))
1565  return ExprError();
1566  LLVM_FALLTHROUGH;
1567  case Builtin::BI__builtin_alloca:
1568  Diag(TheCall->getBeginLoc(), diag::warn_alloca)
1569  << TheCall->getDirectCallee();
1570  break;
1571  case Builtin::BI__arithmetic_fence:
1572  if (SemaBuiltinArithmeticFence(TheCall))
1573  return ExprError();
1574  break;
1575  case Builtin::BI__assume:
1576  case Builtin::BI__builtin_assume:
1577  if (SemaBuiltinAssume(TheCall))
1578  return ExprError();
1579  break;
1580  case Builtin::BI__builtin_assume_aligned:
1581  if (SemaBuiltinAssumeAligned(TheCall))
1582  return ExprError();
1583  break;
1584  case Builtin::BI__builtin_dynamic_object_size:
1585  case Builtin::BI__builtin_object_size:
1586  if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
1587  return ExprError();
1588  break;
1589  case Builtin::BI__builtin_longjmp:
1590  if (SemaBuiltinLongjmp(TheCall))
1591  return ExprError();
1592  break;
1593  case Builtin::BI__builtin_setjmp:
1594  if (SemaBuiltinSetjmp(TheCall))
1595  return ExprError();
1596  break;
1597  case Builtin::BI__builtin_classify_type:
1598  if (checkArgCount(*this, TheCall, 1)) return true;
1599  TheCall->setType(Context.IntTy);
1600  break;
1601  case Builtin::BI__builtin_complex:
1602  if (SemaBuiltinComplex(TheCall))
1603  return ExprError();
1604  break;
1605  case Builtin::BI__builtin_constant_p: {
1606  if (checkArgCount(*this, TheCall, 1)) return true;
1607  ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
1608  if (Arg.isInvalid()) return true;
1609  TheCall->setArg(0, Arg.get());
1610  TheCall->setType(Context.IntTy);
1611  break;
1612  }
1613  case Builtin::BI__builtin_launder:
1614  return SemaBuiltinLaunder(*this, TheCall);
1615  case Builtin::BI__sync_fetch_and_add:
1616  case Builtin::BI__sync_fetch_and_add_1:
1617  case Builtin::BI__sync_fetch_and_add_2:
1618  case Builtin::BI__sync_fetch_and_add_4:
1619  case Builtin::BI__sync_fetch_and_add_8:
1620  case Builtin::BI__sync_fetch_and_add_16:
1621  case Builtin::BI__sync_fetch_and_sub:
1622  case Builtin::BI__sync_fetch_and_sub_1:
1623  case Builtin::BI__sync_fetch_and_sub_2:
1624  case Builtin::BI__sync_fetch_and_sub_4:
1625  case Builtin::BI__sync_fetch_and_sub_8:
1626  case Builtin::BI__sync_fetch_and_sub_16:
1627  case Builtin::BI__sync_fetch_and_or:
1628  case Builtin::BI__sync_fetch_and_or_1:
1629  case Builtin::BI__sync_fetch_and_or_2:
1630  case Builtin::BI__sync_fetch_and_or_4:
1631  case Builtin::BI__sync_fetch_and_or_8:
1632  case Builtin::BI__sync_fetch_and_or_16:
1633  case Builtin::BI__sync_fetch_and_and:
1634  case Builtin::BI__sync_fetch_and_and_1:
1635  case Builtin::BI__sync_fetch_and_and_2:
1636  case Builtin::BI__sync_fetch_and_and_4:
1637  case Builtin::BI__sync_fetch_and_and_8:
1638  case Builtin::BI__sync_fetch_and_and_16:
1639  case Builtin::BI__sync_fetch_and_xor:
1640  case Builtin::BI__sync_fetch_and_xor_1:
1641  case Builtin::BI__sync_fetch_and_xor_2:
1642  case Builtin::BI__sync_fetch_and_xor_4:
1643  case Builtin::BI__sync_fetch_and_xor_8:
1644  case Builtin::BI__sync_fetch_and_xor_16:
1645  case Builtin::BI__sync_fetch_and_nand:
1646  case Builtin::BI__sync_fetch_and_nand_1:
1647  case Builtin::BI__sync_fetch_and_nand_2:
1648  case Builtin::BI__sync_fetch_and_nand_4:
1649  case Builtin::BI__sync_fetch_and_nand_8:
1650  case Builtin::BI__sync_fetch_and_nand_16:
1651  case Builtin::BI__sync_add_and_fetch:
1652  case Builtin::BI__sync_add_and_fetch_1:
1653  case Builtin::BI__sync_add_and_fetch_2:
1654  case Builtin::BI__sync_add_and_fetch_4:
1655  case Builtin::BI__sync_add_and_fetch_8:
1656  case Builtin::BI__sync_add_and_fetch_16:
1657  case Builtin::BI__sync_sub_and_fetch:
1658  case Builtin::BI__sync_sub_and_fetch_1:
1659  case Builtin::BI__sync_sub_and_fetch_2:
1660  case Builtin::BI__sync_sub_and_fetch_4:
1661  case Builtin::BI__sync_sub_and_fetch_8:
1662  case Builtin::BI__sync_sub_and_fetch_16:
1663  case Builtin::BI__sync_and_and_fetch:
1664  case Builtin::BI__sync_and_and_fetch_1:
1665  case Builtin::BI__sync_and_and_fetch_2:
1666  case Builtin::BI__sync_and_and_fetch_4:
1667  case Builtin::BI__sync_and_and_fetch_8:
1668  case Builtin::BI__sync_and_and_fetch_16:
1669  case Builtin::BI__sync_or_and_fetch:
1670  case Builtin::BI__sync_or_and_fetch_1:
1671  case Builtin::BI__sync_or_and_fetch_2:
1672  case Builtin::BI__sync_or_and_fetch_4:
1673  case Builtin::BI__sync_or_and_fetch_8:
1674  case Builtin::BI__sync_or_and_fetch_16:
1675  case Builtin::BI__sync_xor_and_fetch:
1676  case Builtin::BI__sync_xor_and_fetch_1:
1677  case Builtin::BI__sync_xor_and_fetch_2:
1678  case Builtin::BI__sync_xor_and_fetch_4:
1679  case Builtin::BI__sync_xor_and_fetch_8:
1680  case Builtin::BI__sync_xor_and_fetch_16:
1681  case Builtin::BI__sync_nand_and_fetch:
1682  case Builtin::BI__sync_nand_and_fetch_1:
1683  case Builtin::BI__sync_nand_and_fetch_2:
1684  case Builtin::BI__sync_nand_and_fetch_4:
1685  case Builtin::BI__sync_nand_and_fetch_8:
1686  case Builtin::BI__sync_nand_and_fetch_16:
1687  case Builtin::BI__sync_val_compare_and_swap:
1688  case Builtin::BI__sync_val_compare_and_swap_1:
1689  case Builtin::BI__sync_val_compare_and_swap_2:
1690  case Builtin::BI__sync_val_compare_and_swap_4:
1691  case Builtin::BI__sync_val_compare_and_swap_8:
1692  case Builtin::BI__sync_val_compare_and_swap_16:
1693  case Builtin::BI__sync_bool_compare_and_swap:
1694  case Builtin::BI__sync_bool_compare_and_swap_1:
1695  case Builtin::BI__sync_bool_compare_and_swap_2:
1696  case Builtin::BI__sync_bool_compare_and_swap_4:
1697  case Builtin::BI__sync_bool_compare_and_swap_8:
1698  case Builtin::BI__sync_bool_compare_and_swap_16:
1699  case Builtin::BI__sync_lock_test_and_set:
1700  case Builtin::BI__sync_lock_test_and_set_1:
1701  case Builtin::BI__sync_lock_test_and_set_2:
1702  case Builtin::BI__sync_lock_test_and_set_4:
1703  case Builtin::BI__sync_lock_test_and_set_8:
1704  case Builtin::BI__sync_lock_test_and_set_16:
1705  case Builtin::BI__sync_lock_release:
1706  case Builtin::BI__sync_lock_release_1:
1707  case Builtin::BI__sync_lock_release_2:
1708  case Builtin::BI__sync_lock_release_4:
1709  case Builtin::BI__sync_lock_release_8:
1710  case Builtin::BI__sync_lock_release_16:
1711  case Builtin::BI__sync_swap:
1712  case Builtin::BI__sync_swap_1:
1713  case Builtin::BI__sync_swap_2:
1714  case Builtin::BI__sync_swap_4:
1715  case Builtin::BI__sync_swap_8:
1716  case Builtin::BI__sync_swap_16:
1717  return SemaBuiltinAtomicOverloaded(TheCallResult);
1718  case Builtin::BI__sync_synchronize:
1719  Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
1720  << TheCall->getCallee()->getSourceRange();
1721  break;
1722  case Builtin::BI__builtin_nontemporal_load:
1723  case Builtin::BI__builtin_nontemporal_store:
1724  return SemaBuiltinNontemporalOverloaded(TheCallResult);
1725  case Builtin::BI__builtin_memcpy_inline: {
1726  clang::Expr *SizeOp = TheCall->getArg(2);
1727  // We warn about copying to or from `nullptr` pointers when `size` is
1728  // greater than 0. When `size` is value dependent we cannot evaluate its
1729  // value so we bail out.
1730  if (SizeOp->isValueDependent())
1731  break;
1732  if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) {
1733  CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
1734  CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc());
1735  }
1736  break;
1737  }
1738 #define BUILTIN(ID, TYPE, ATTRS)
1739 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
1740  case Builtin::BI##ID: \
1741  return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
1742 #include "clang/Basic/Builtins.def"
1743  case Builtin::BI__annotation:
1744  if (SemaBuiltinMSVCAnnotation(*this, TheCall))
1745  return ExprError();
1746  break;
1747  case Builtin::BI__builtin_annotation:
1748  if (SemaBuiltinAnnotation(*this, TheCall))
1749  return ExprError();
1750  break;
1751  case Builtin::BI__builtin_addressof:
1752  if (SemaBuiltinAddressof(*this, TheCall))
1753  return ExprError();
1754  break;
1755  case Builtin::BI__builtin_is_aligned:
1756  case Builtin::BI__builtin_align_up:
1757  case Builtin::BI__builtin_align_down:
1758  if (SemaBuiltinAlignment(*this, TheCall, BuiltinID))
1759  return ExprError();
1760  break;
1761  case Builtin::BI__builtin_add_overflow:
1762  case Builtin::BI__builtin_sub_overflow:
1763  case Builtin::BI__builtin_mul_overflow:
1764  if (SemaBuiltinOverflow(*this, TheCall, BuiltinID))
1765  return ExprError();
1766  break;
1767  case Builtin::BI__builtin_operator_new:
1768  case Builtin::BI__builtin_operator_delete: {
1769  bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
1770  ExprResult Res =
1771  SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
1772  if (Res.isInvalid())
1773  CorrectDelayedTyposInExpr(TheCallResult.get());
1774  return Res;
1775  }
1776  case Builtin::BI__builtin_dump_struct: {
1777  // We first want to ensure we are called with 2 arguments
1778  if (checkArgCount(*this, TheCall, 2))
1779  return ExprError();
1780  // Ensure that the first argument is of type 'struct XX *'
1781  const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts();
1782  const QualType PtrArgType = PtrArg->getType();
1783  if (!PtrArgType->isPointerType() ||
1784  !PtrArgType->getPointeeType()->isRecordType()) {
1785  Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1786  << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
1787  << "structure pointer";
1788  return ExprError();
1789  }
1790 
1791  // Ensure that the second argument is of type 'FunctionType'
1792  const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
1793  const QualType FnPtrArgType = FnPtrArg->getType();
1794  if (!FnPtrArgType->isPointerType()) {
1795  Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1796  << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
1797  << FnPtrArgType << "'int (*)(const char *, ...)'";
1798  return ExprError();
1799  }
1800 
1801  const auto *FuncType =
1802  FnPtrArgType->getPointeeType()->getAs<FunctionType>();
1803 
1804  if (!FuncType) {
1805  Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1806  << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
1807  << FnPtrArgType << "'int (*)(const char *, ...)'";
1808  return ExprError();
1809  }
1810 
1811  if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
1812  if (!FT->getNumParams()) {
1813  Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1814  << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1815  << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1816  return ExprError();
1817  }
1818  QualType PT = FT->getParamType(0);
1819  if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
1820  !PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
1821  !PT->getPointeeType().isConstQualified()) {
1822  Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1823  << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1824  << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1825  return ExprError();
1826  }
1827  }
1828 
1829  TheCall->setType(Context.IntTy);
1830  break;
1831  }
1832  case Builtin::BI__builtin_expect_with_probability: {
1833  // We first want to ensure we are called with 3 arguments
1834  if (checkArgCount(*this, TheCall, 3))
1835  return ExprError();
1836  // then check probability is constant float in range [0.0, 1.0]
1837  const Expr *ProbArg = TheCall->getArg(2);
1839  Expr::EvalResult Eval;
1840  Eval.Diag = &Notes;
1841  if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) ||
1842  !Eval.Val.isFloat()) {
1843  Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float)
1844  << ProbArg->getSourceRange();
1845  for (const PartialDiagnosticAt &PDiag : Notes)
1846  Diag(PDiag.first, PDiag.second);
1847  return ExprError();
1848  }
1849  llvm::APFloat Probability = Eval.Val.getFloat();
1850  bool LoseInfo = false;
1851  Probability.convert(llvm::APFloat::IEEEdouble(),
1852  llvm::RoundingMode::Dynamic, &LoseInfo);
1853  if (!(Probability >= llvm::APFloat(0.0) &&
1854  Probability <= llvm::APFloat(1.0))) {
1855  Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range)
1856  << ProbArg->getSourceRange();
1857  return ExprError();
1858  }
1859  break;
1860  }
1861  case Builtin::BI__builtin_preserve_access_index:
1862  if (SemaBuiltinPreserveAI(*this, TheCall))
1863  return ExprError();
1864  break;
1865  case Builtin::BI__builtin_call_with_static_chain:
1866  if (SemaBuiltinCallWithStaticChain(*this, TheCall))
1867  return ExprError();
1868  break;
1869  case Builtin::BI__exception_code:
1870  case Builtin::BI_exception_code:
1871  if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
1872  diag::err_seh___except_block))
1873  return ExprError();
1874  break;
1875  case Builtin::BI__exception_info:
1876  case Builtin::BI_exception_info:
1877  if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
1878  diag::err_seh___except_filter))
1879  return ExprError();
1880  break;
1881  case Builtin::BI__GetExceptionInfo:
1882  if (checkArgCount(*this, TheCall, 1))
1883  return ExprError();
1884 
1885  if (CheckCXXThrowOperand(
1886  TheCall->getBeginLoc(),
1887  Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
1888  TheCall))
1889  return ExprError();
1890 
1891  TheCall->setType(Context.VoidPtrTy);
1892  break;
1893  // OpenCL v2.0, s6.13.16 - Pipe functions
1894  case Builtin::BIread_pipe:
1895  case Builtin::BIwrite_pipe:
1896  // Since those two functions are declared with var args, we need a semantic
1897  // check for the argument.
1898  if (SemaBuiltinRWPipe(*this, TheCall))
1899  return ExprError();
1900  break;
1901  case Builtin::BIreserve_read_pipe:
1902  case Builtin::BIreserve_write_pipe:
1903  case Builtin::BIwork_group_reserve_read_pipe:
1904  case Builtin::BIwork_group_reserve_write_pipe:
1905  if (SemaBuiltinReserveRWPipe(*this, TheCall))
1906  return ExprError();
1907  break;
1908  case Builtin::BIsub_group_reserve_read_pipe:
1909  case Builtin::BIsub_group_reserve_write_pipe:
1910  if (checkOpenCLSubgroupExt(*this, TheCall) ||
1911  SemaBuiltinReserveRWPipe(*this, TheCall))
1912  return ExprError();
1913  break;
1914  case Builtin::BIcommit_read_pipe:
1915  case Builtin::BIcommit_write_pipe:
1916  case Builtin::BIwork_group_commit_read_pipe:
1917  case Builtin::BIwork_group_commit_write_pipe:
1918  if (SemaBuiltinCommitRWPipe(*this, TheCall))
1919  return ExprError();
1920  break;
1921  case Builtin::BIsub_group_commit_read_pipe:
1922  case Builtin::BIsub_group_commit_write_pipe:
1923  if (checkOpenCLSubgroupExt(*this, TheCall) ||
1924  SemaBuiltinCommitRWPipe(*this, TheCall))
1925  return ExprError();
1926  break;
1927  case Builtin::BIget_pipe_num_packets:
1928  case Builtin::BIget_pipe_max_packets:
1929  if (SemaBuiltinPipePackets(*this, TheCall))
1930  return ExprError();
1931  break;
1932  case Builtin::BIto_global:
1933  case Builtin::BIto_local:
1934  case Builtin::BIto_private:
1935  if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
1936  return ExprError();
1937  break;
1938  // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
1939  case Builtin::BIenqueue_kernel:
1940  if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
1941  return ExprError();
1942  break;
1943  case Builtin::BIget_kernel_work_group_size:
1944  case Builtin::BIget_kernel_preferred_work_group_size_multiple:
1945  if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
1946  return ExprError();
1947  break;
1948  case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
1949  case Builtin::BIget_kernel_sub_group_count_for_ndrange:
1950  if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
1951  return ExprError();
1952  break;
1953  case Builtin::BI__builtin_os_log_format:
1954  Cleanup.setExprNeedsCleanups(true);
1955  LLVM_FALLTHROUGH;
1956  case Builtin::BI__builtin_os_log_format_buffer_size:
1957  if (SemaBuiltinOSLogFormat(TheCall))
1958  return ExprError();
1959  break;
1960  case Builtin::BI__builtin_frame_address:
1961  case Builtin::BI__builtin_return_address: {
1962  if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF))
1963  return ExprError();
1964 
1965  // -Wframe-address warning if non-zero passed to builtin
1966  // return/frame address.
1967  Expr::EvalResult Result;
1968  if (!TheCall->getArg(0)->isValueDependent() &&
1969  TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
1970  Result.Val.getInt() != 0)
1971  Diag(TheCall->getBeginLoc(), diag::warn_frame_address)
1972  << ((BuiltinID == Builtin::BI__builtin_return_address)
1973  ? "__builtin_return_address"
1974  : "__builtin_frame_address")
1975  << TheCall->getSourceRange();
1976  break;
1977  }
1978 
1979  case Builtin::BI__builtin_matrix_transpose:
1980  return SemaBuiltinMatrixTranspose(TheCall, TheCallResult);
1981 
1982  case Builtin::BI__builtin_matrix_column_major_load:
1983  return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult);
1984 
1985  case Builtin::BI__builtin_matrix_column_major_store:
1986  return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult);
1987 
1988  case Builtin::BI__builtin_get_device_side_mangled_name: {
1989  auto Check = [](CallExpr *TheCall) {
1990  if (TheCall->getNumArgs() != 1)
1991  return false;
1992  auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts());
1993  if (!DRE)
1994  return false;
1995  auto *D = DRE->getDecl();
1996  if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D))
1997  return false;
1998  return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() ||
1999  D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>();
2000  };
2001  if (!Check(TheCall)) {
2002  Diag(TheCall->getBeginLoc(),
2003  diag::err_hip_invalid_args_builtin_mangled_name);
2004  return ExprError();
2005  }
2006  }
2007  }
2008 
2009  // Since the target specific builtins for each arch overlap, only check those
2010  // of the arch we are compiling for.
2011  if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
2012  if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
2013  assert(Context.getAuxTargetInfo() &&
2014  "Aux Target Builtin, but not an aux target?");
2015 
2016  if (CheckTSBuiltinFunctionCall(
2017  *Context.getAuxTargetInfo(),
2018  Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
2019  return ExprError();
2020  } else {
2021  if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
2022  TheCall))
2023  return ExprError();
2024  }
2025  }
2026 
2027  return TheCallResult;
2028 }
2029 
2030 // Get the valid immediate range for the specified NEON type code.
2031 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
2032  NeonTypeFlags Type(t);
2033  int IsQuad = ForceQuad ? true : Type.isQuad();
2034  switch (Type.getEltType()) {
2035  case NeonTypeFlags::Int8:
2036  case NeonTypeFlags::Poly8:
2037  return shift ? 7 : (8 << IsQuad) - 1;
2038  case NeonTypeFlags::Int16:
2039  case NeonTypeFlags::Poly16:
2040  return shift ? 15 : (4 << IsQuad) - 1;
2041  case NeonTypeFlags::Int32:
2042  return shift ? 31 : (2 << IsQuad) - 1;
2043  case NeonTypeFlags::Int64:
2044  case NeonTypeFlags::Poly64:
2045  return shift ? 63 : (1 << IsQuad) - 1;
2047  return shift ? 127 : (1 << IsQuad) - 1;
2049  assert(!shift && "cannot shift float types!");
2050  return (4 << IsQuad) - 1;
2052  assert(!shift && "cannot shift float types!");
2053  return (2 << IsQuad) - 1;
2055  assert(!shift && "cannot shift float types!");
2056  return (1 << IsQuad) - 1;
2058  assert(!shift && "cannot shift float types!");
2059  return (4 << IsQuad) - 1;
2060  }
2061  llvm_unreachable("Invalid NeonTypeFlag!");
2062 }
2063 
2064 /// getNeonEltType - Return the QualType corresponding to the elements of
2065 /// the vector type specified by the NeonTypeFlags. This is used to check
2066 /// the pointer arguments for Neon load/store intrinsics.
2068  bool IsPolyUnsigned, bool IsInt64Long) {
2069  switch (Flags.getEltType()) {
2070  case NeonTypeFlags::Int8:
2071  return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
2072  case NeonTypeFlags::Int16:
2073  return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
2074  case NeonTypeFlags::Int32:
2075  return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
2076  case NeonTypeFlags::Int64:
2077  if (IsInt64Long)
2078  return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
2079  else
2080  return Flags.isUnsigned() ? Context.UnsignedLongLongTy
2081  : Context.LongLongTy;
2082  case NeonTypeFlags::Poly8:
2083  return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
2084  case NeonTypeFlags::Poly16:
2085  return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
2086  case NeonTypeFlags::Poly64:
2087  if (IsInt64Long)
2088  return Context.UnsignedLongTy;
2089  else
2090  return Context.UnsignedLongLongTy;
2092  break;
2094  return Context.HalfTy;
2096  return Context.FloatTy;
2098  return Context.DoubleTy;
2100  return Context.BFloat16Ty;
2101  }
2102  llvm_unreachable("Invalid NeonTypeFlag!");
2103 }
2104 
2105 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
2106  // Range check SVE intrinsics that take immediate values.
2108 
2109  switch (BuiltinID) {
2110  default:
2111  return false;
2112 #define GET_SVE_IMMEDIATE_CHECK
2113 #include "clang/Basic/arm_sve_sema_rangechecks.inc"
2114 #undef GET_SVE_IMMEDIATE_CHECK
2115  }
2116 
2117  // Perform all the immediate checks for this builtin call.
2118  bool HasError = false;
2119  for (auto &I : ImmChecks) {
2120  int ArgNum, CheckTy, ElementSizeInBits;
2121  std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;
2122 
2123  typedef bool(*OptionSetCheckFnTy)(int64_t Value);
2124 
2125  // Function that checks whether the operand (ArgNum) is an immediate
2126  // that is one of the predefined values.
2127  auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
2128  int ErrDiag) -> bool {
2129  // We can't check the value of a dependent argument.
2130  Expr *Arg = TheCall->getArg(ArgNum);
2131  if (Arg->isTypeDependent() || Arg->isValueDependent())
2132  return false;
2133 
2134  // Check constant-ness first.
2135  llvm::APSInt Imm;
2136  if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm))
2137  return true;
2138 
2139  if (!CheckImm(Imm.getSExtValue()))
2140  return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
2141  return false;
2142  };
2143 
2144  switch ((SVETypeFlags::ImmCheckType)CheckTy) {
2145  case SVETypeFlags::ImmCheck0_31:
2146  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
2147  HasError = true;
2148  break;
2149  case SVETypeFlags::ImmCheck0_13:
2150  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
2151  HasError = true;
2152  break;
2153  case SVETypeFlags::ImmCheck1_16:
2154  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
2155  HasError = true;
2156  break;
2157  case SVETypeFlags::ImmCheck0_7:
2158  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
2159  HasError = true;
2160  break;
2161  case SVETypeFlags::ImmCheckExtract:
2162  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2163  (2048 / ElementSizeInBits) - 1))
2164  HasError = true;
2165  break;
2166  case SVETypeFlags::ImmCheckShiftRight:
2167  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
2168  HasError = true;
2169  break;
2170  case SVETypeFlags::ImmCheckShiftRightNarrow:
2171  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1,
2172  ElementSizeInBits / 2))
2173  HasError = true;
2174  break;
2175  case SVETypeFlags::ImmCheckShiftLeft:
2176  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2177  ElementSizeInBits - 1))
2178  HasError = true;
2179  break;
2180  case SVETypeFlags::ImmCheckLaneIndex:
2181  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2182  (128 / (1 * ElementSizeInBits)) - 1))
2183  HasError = true;
2184  break;
2185  case SVETypeFlags::ImmCheckLaneIndexCompRotate:
2186  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2187  (128 / (2 * ElementSizeInBits)) - 1))
2188  HasError = true;
2189  break;
2190  case SVETypeFlags::ImmCheckLaneIndexDot:
2191  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2192  (128 / (4 * ElementSizeInBits)) - 1))
2193  HasError = true;
2194  break;
2195  case SVETypeFlags::ImmCheckComplexRot90_270:
2196  if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
2197  diag::err_rotation_argument_to_cadd))
2198  HasError = true;
2199  break;
2200  case SVETypeFlags::ImmCheckComplexRotAll90:
2201  if (CheckImmediateInSet(
2202  [](int64_t V) {
2203  return V == 0 || V == 90 || V == 180 || V == 270;
2204  },
2205  diag::err_rotation_argument_to_cmla))
2206  HasError = true;
2207  break;
2208  case SVETypeFlags::ImmCheck0_1:
2209  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
2210  HasError = true;
2211  break;
2212  case SVETypeFlags::ImmCheck0_2:
2213  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
2214  HasError = true;
2215  break;
2216  case SVETypeFlags::ImmCheck0_3:
2217  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
2218  HasError = true;
2219  break;
2220  }
2221  }
2222 
2223  return HasError;
2224 }
2225 
/// Perform semantic checking for a NEON builtin call: validate the
/// type-overload immediate, check pointer-argument types for load/store
/// intrinsics, and range-check any instruction immediate.
/// Returns true on error (a diagnostic has been emitted).
bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
                                        unsigned BuiltinID, CallExpr *TheCall) {
  llvm::APSInt Result;
  // 'mask', 'PtrArgNum' and 'HasConstPtr' are populated per-builtin by the
  // TableGen-generated cases included below; builtins without an overload
  // check leave them at their defaults and skip the blocks that use them.
  uint64_t mask = 0;
  unsigned TV = 0;
  int PtrArgNum = -1;
  bool HasConstPtr = false;
  switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_OVERLOAD_CHECK
  }

  // For NEON intrinsics which are overloaded on vector element type, validate
  // the immediate which specifies which variant to emit.
  // The type code is always the last argument of the call.
  unsigned ImmArg = TheCall->getNumArgs()-1;
  if (mask) {
    if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
      return true;

    // 'mask' is a bitset of the type codes this builtin accepts.
    TV = Result.getLimitedValue(64);
    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
      return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
             << TheCall->getArg(ImmArg)->getSourceRange();
  }

  if (PtrArgNum >= 0) {
    // Check that pointer arguments have the specified type.
    Expr *Arg = TheCall->getArg(PtrArgNum);
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
      Arg = ICE->getSubExpr();
    ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
    QualType RHSTy = RHS.get()->getType();

    llvm::Triple::ArchType Arch = TI.getTriple().getArch();
    bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
                          Arch == llvm::Triple::aarch64_32 ||
                          Arch == llvm::Triple::aarch64_be;
    bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
    QualType EltTy =
        getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
    if (HasConstPtr)
      EltTy = EltTy.withConst();
    QualType LHSTy = Context.getPointerType(EltTy);
    // Diagnose as if assigning the actual pointer to one of the expected type.
    AssignConvertType ConvTy;
    ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
    if (RHS.isInvalid())
      return true;
    if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
                                 RHS.get(), AA_Assigning))
      return true;
  }

  // For NEON intrinsics which take an immediate value as part of the
  // instruction, range check them here. The generated cases set the argument
  // index 'i' and the inclusive bounds 'l' and 'l + u'.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
  #define GET_NEON_IMMEDIATE_CHECK
  #include "clang/Basic/arm_neon.inc"
  #include "clang/Basic/arm_fp16.inc"
  #undef GET_NEON_IMMEDIATE_CHECK
  }

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}
2294 
/// Perform semantic checking for an MVE builtin call. All per-builtin checks
/// (including any 'return' statements) are TableGen-generated into the
/// included .inc file; builtins with no checks fall through to 'return false'.
bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
  #include "clang/Basic/arm_mve_builtin_sema.inc"
  }
}
2302 
/// Perform semantic checking for an ARM CDE (Custom Datapath Extension)
/// builtin call. Immediate-operand checks are TableGen-generated into the
/// included .inc file, which sets 'Err' on failure; afterwards the
/// coprocessor-number argument is validated against the target's CDE mask.
bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  bool Err = false;
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_cde_builtin_sema.inc"
  }

  if (Err)
    return true;

  // All CDE builtins take the coprocessor number as their first argument.
  return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
}
2317 
2318 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
2319  const Expr *CoprocArg, bool WantCDE) {
2320  if (isConstantEvaluated())
2321  return false;
2322 
2323  // We can't check the value of a dependent argument.
2324  if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
2325  return false;
2326 
2327  llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
2328  int64_t CoprocNo = CoprocNoAP.getExtValue();
2329  assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
2330 
2331  uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
2332  bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
2333 
2334  if (IsCDECoproc != WantCDE)
2335  return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
2336  << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
2337 
2338  return false;
2339 }
2340 
2341 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
2342  unsigned MaxWidth) {
2343  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
2344  BuiltinID == ARM::BI__builtin_arm_ldaex ||
2345  BuiltinID == ARM::BI__builtin_arm_strex ||
2346  BuiltinID == ARM::BI__builtin_arm_stlex ||
2347  BuiltinID == AArch64::BI__builtin_arm_ldrex ||
2348  BuiltinID == AArch64::BI__builtin_arm_ldaex ||
2349  BuiltinID == AArch64::BI__builtin_arm_strex ||
2350  BuiltinID == AArch64::BI__builtin_arm_stlex) &&
2351  "unexpected ARM builtin");
2352  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
2353  BuiltinID == ARM::BI__builtin_arm_ldaex ||
2354  BuiltinID == AArch64::BI__builtin_arm_ldrex ||
2355  BuiltinID == AArch64::BI__builtin_arm_ldaex;
2356 
2357  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
2358 
2359  // Ensure that we have the proper number of arguments.
2360  if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
2361  return true;
2362 
2363  // Inspect the pointer argument of the atomic builtin. This should always be
2364  // a pointer type, whose element is an integral scalar or pointer type.
2365  // Because it is a pointer type, we don't have to worry about any implicit
2366  // casts here.
2367  Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
2368  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
2369  if (PointerArgRes.isInvalid())
2370  return true;
2371  PointerArg = PointerArgRes.get();
2372 
2373  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
2374  if (!pointerType) {
2375  Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
2376  << PointerArg->getType() << PointerArg->getSourceRange();
2377  return true;
2378  }
2379 
2380  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
2381  // task is to insert the appropriate casts into the AST. First work out just
2382  // what the appropriate type is.
2383  QualType ValType = pointerType->getPointeeType();
2384  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
2385  if (IsLdrex)
2386  AddrType.addConst();
2387 
2388  // Issue a warning if the cast is dodgy.
2389  CastKind CastNeeded = CK_NoOp;
2390  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
2391  CastNeeded = CK_BitCast;
2392  Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
2393  << PointerArg->getType() << Context.getPointerType(AddrType)
2394  << AA_Passing << PointerArg->getSourceRange();
2395  }
2396 
2397  // Finally, do the cast and replace the argument with the corrected version.
2398  AddrType = Context.getPointerType(AddrType);
2399  PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
2400  if (PointerArgRes.isInvalid())
2401  return true;
2402  PointerArg = PointerArgRes.get();
2403 
2404  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);
2405 
2406  // In general, we allow ints, floats and pointers to be loaded and stored.
2407  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
2408  !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
2409  Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
2410  << PointerArg->getType() << PointerArg->getSourceRange();
2411  return true;
2412  }
2413 
2414  // But ARM doesn't have instructions to deal with 128-bit versions.
2415  if (Context.getTypeSize(ValType) > MaxWidth) {
2416  assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
2417  Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
2418  << PointerArg->getType() << PointerArg->getSourceRange();
2419  return true;
2420  }
2421 
2422  switch (ValType.getObjCLifetime()) {
2423  case Qualifiers::OCL_None:
2425  // okay
2426  break;
2427 
2428  case Qualifiers::OCL_Weak:
2431  Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
2432  << ValType << PointerArg->getSourceRange();
2433  return true;
2434  }
2435 
2436  if (IsLdrex) {
2437  TheCall->setType(ValType);
2438  return false;
2439  }
2440 
2441  // Initialize the argument to be stored.
2442  ExprResult ValArg = TheCall->getArg(0);
2444  Context, ValType, /*consume*/ false);
2445  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
2446  if (ValArg.isInvalid())
2447  return true;
2448  TheCall->setArg(0, ValArg.get());
2449 
2450  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
2451  // but the custom checker bypasses all default analysis.
2452  TheCall->setType(Context.IntTy);
2453  return false;
2454 }
2455 
/// Perform semantic checking for a 32-bit ARM target builtin call.
/// Dispatches to the NEON/MVE/CDE checkers and range-checks instruction
/// immediates. Returns true on error (a diagnostic has been emitted).
bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  // Exclusive load/store builtins: 64 bits is the widest access on ARM.
  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
      BuiltinID == ARM::BI__builtin_arm_ldaex ||
      BuiltinID == ARM::BI__builtin_arm_strex ||
      BuiltinID == ARM::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
  }

  // __builtin_arm_prefetch(addr, rw, cache): rw and cache are 0/1 immediates.
  if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
  }

  // System-register read/write builtins; the last two arguments select how
  // many fields the register string may have and whether string names are
  // accepted (see SemaBuiltinARMSpecialReg).
  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == ARM::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);

  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
      BuiltinID == ARM::BI__builtin_arm_rsrp ||
      BuiltinID == ARM::BI__builtin_arm_wsr ||
      BuiltinID == ARM::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;
  if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;
  if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // FIXME: VFP Intrinsics should error if VFP not present.
  switch (BuiltinID) {
  default: return false;
  case ARM::BI__builtin_arm_ssat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
  case ARM::BI__builtin_arm_usat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
  case ARM::BI__builtin_arm_ssat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
  case ARM::BI__builtin_arm_usat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case ARM::BI__builtin_arm_vcvtr_f:
  case ARM::BI__builtin_arm_vcvtr_d:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case ARM::BI__builtin_arm_dmb:
  case ARM::BI__builtin_arm_dsb:
  case ARM::BI__builtin_arm_isb:
  case ARM::BI__builtin_arm_dbg:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
  // Legacy coprocessor builtins: the coprocessor number (argument 0) must be
  // a 4-bit immediate naming a coprocessor NOT reserved for CDE.
  case ARM::BI__builtin_arm_cdp:
  case ARM::BI__builtin_arm_cdp2:
  case ARM::BI__builtin_arm_mcr:
  case ARM::BI__builtin_arm_mcr2:
  case ARM::BI__builtin_arm_mrc:
  case ARM::BI__builtin_arm_mrc2:
  case ARM::BI__builtin_arm_mcrr:
  case ARM::BI__builtin_arm_mcrr2:
  case ARM::BI__builtin_arm_mrrc:
  case ARM::BI__builtin_arm_mrrc2:
  case ARM::BI__builtin_arm_ldc:
  case ARM::BI__builtin_arm_ldcl:
  case ARM::BI__builtin_arm_ldc2:
  case ARM::BI__builtin_arm_ldc2l:
  case ARM::BI__builtin_arm_stc:
  case ARM::BI__builtin_arm_stcl:
  case ARM::BI__builtin_arm_stc2:
  case ARM::BI__builtin_arm_stc2l:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) ||
           CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
                                        /*WantCDE*/ false);
  }
}
2531 
/// Perform semantic checking for an AArch64 target builtin call.
/// Dispatches to the NEON/SVE/MTE checkers and range-checks instruction
/// immediates. Returns true on error (a diagnostic has been emitted).
bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
                                           unsigned BuiltinID,
                                           CallExpr *TheCall) {
  // Exclusive load/store builtins: AArch64 supports 128-bit-wide access.
  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
      BuiltinID == AArch64::BI__builtin_arm_ldaex ||
      BuiltinID == AArch64::BI__builtin_arm_strex ||
      BuiltinID == AArch64::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
  }

  // __builtin_arm_prefetch(addr, rw, target, policy, stream): all four
  // trailing arguments are small immediates.
  if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) ||
           SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Memory Tagging Extensions (MTE) Intrinsics
  if (BuiltinID == AArch64::BI__builtin_arm_irg ||
      BuiltinID == AArch64::BI__builtin_arm_addg ||
      BuiltinID == AArch64::BI__builtin_arm_gmi ||
      BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg ||
      BuiltinID == AArch64::BI__builtin_arm_subp) {
    return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
      BuiltinID == AArch64::BI__builtin_arm_wsr ||
      BuiltinID == AArch64::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Only check the valid encoding range. Any constant in this range would be
  // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
  // an exception for incorrect registers. This matches MSVC behavior.
  if (BuiltinID == AArch64::BI_ReadStatusReg ||
      BuiltinID == AArch64::BI_WriteStatusReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);

  if (BuiltinID == AArch64::BI__getReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);

  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here. 'i' is the argument index; the valid range is
  // [l, l + u] inclusive.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case AArch64::BI__builtin_arm_dmb:
  case AArch64::BI__builtin_arm_dsb:
  case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
  case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
  }

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}
2598 
2600  if (Arg->getType()->getAsPlaceholderType())
2601  return false;
2602 
2603  // The first argument needs to be a record field access.
2604  // If it is an array element access, we delay decision
2605  // to BPF backend to check whether the access is a
2606  // field access or not.
2607  return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
2608  dyn_cast<MemberExpr>(Arg->IgnoreParens()) ||
2609  dyn_cast<ArraySubscriptExpr>(Arg->IgnoreParens()));
2610 }
2611 
2612 static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S,
2613  QualType VectorTy, QualType EltTy) {
2614  QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType();
2615  if (!Context.hasSameType(VectorEltTy, EltTy)) {
2616  S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types)
2617  << Call->getSourceRange() << VectorEltTy << EltTy;
2618  return false;
2619  }
2620  return true;
2621 }
2622 
2624  QualType ArgType = Arg->getType();
2625  if (ArgType->getAsPlaceholderType())
2626  return false;
2627 
2628  // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type
2629  // format:
2630  // 1. __builtin_preserve_type_info(*(<type> *)0, flag);
2631  // 2. <type> var;
2632  // __builtin_preserve_type_info(var, flag);
2633  if (!dyn_cast<DeclRefExpr>(Arg->IgnoreParens()) &&
2634  !dyn_cast<UnaryOperator>(Arg->IgnoreParens()))
2635  return false;
2636 
2637  // Typedef type.
2638  if (ArgType->getAs<TypedefType>())
2639  return true;
2640 
2641  // Record type or Enum type.
2642  const Type *Ty = ArgType->getUnqualifiedDesugaredType();
2643  if (const auto *RT = Ty->getAs<RecordType>()) {
2644  if (!RT->getDecl()->getDeclName().isEmpty())
2645  return true;
2646  } else if (const auto *ET = Ty->getAs<EnumType>()) {
2647  if (!ET->getDecl()->getDeclName().isEmpty())
2648  return true;
2649  }
2650 
2651  return false;
2652 }
2653 
  QualType ArgType = Arg->getType();
  // Arguments with an unresolved placeholder type cannot be classified.
  if (ArgType->getAsPlaceholderType())
    return false;

  // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type
  // format:
  //   __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>,
  //                                 flag);
  // The checks below walk that exact shape from the outside in.

  // Outermost node: the dereference.
  const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
  if (!UO)
    return false;

  // Beneath it: a C-style cast converting the enumerator to a pointer.
  const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
  if (!CE)
    return false;
  if (CE->getCastKind() != CK_IntegralToPointer &&
      CE->getCastKind() != CK_NullToPointer)
    return false;

  // The integer must be from an EnumConstantDecl.
  const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
  if (!DR)
    return false;

  const EnumConstantDecl *Enumerator =
      dyn_cast<EnumConstantDecl>(DR->getDecl());
  if (!Enumerator)
    return false;

  // The type must be EnumType.
  const Type *Ty = ArgType->getUnqualifiedDesugaredType();
  const auto *ET = Ty->getAs<EnumType>();
  if (!ET)
    return false;

  // The enum value must be supported, i.e. the enumerator must actually be
  // a member of the enum named in the cast.
  return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator);
}
2693 
/// Perform BPF-specific checking on one of the four CO-RE builtins: validate
/// the argument count, that the second argument is an integer constant, that
/// the first argument has the shape the builtin requires, and finally set the
/// call's result type. Returns true on error (after diagnosing), false on
/// success.
bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
                                       CallExpr *TheCall) {
  assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
          BuiltinID == BPF::BI__builtin_btf_type_id ||
          BuiltinID == BPF::BI__builtin_preserve_type_info ||
          BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
         "unexpected BPF builtin");

  // All four builtins take exactly two arguments.
  if (checkArgCount(*this, TheCall, 2))
    return true;

  // The second argument needs to be a constant int
  Expr *Arg = TheCall->getArg(1);
  diag::kind kind;
  if (!Value) {
    // Not an integer constant: pick the builtin-specific diagnostic and
    // report against argument 2.
    if (BuiltinID == BPF::BI__builtin_preserve_field_info)
      kind = diag::err_preserve_field_info_not_const;
    else if (BuiltinID == BPF::BI__builtin_btf_type_id)
      kind = diag::err_btf_type_id_not_const;
    else if (BuiltinID == BPF::BI__builtin_preserve_type_info)
      kind = diag::err_preserve_type_info_not_const;
    else
      kind = diag::err_preserve_enum_value_not_const;
    Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange();
    return true;
  }

  // The first argument
  Arg = TheCall->getArg(0);
  bool InvalidArg = false;
  bool ReturnUnsignedInt = true;
  if (BuiltinID == BPF::BI__builtin_preserve_field_info) {
    // Must be a field access (or array element, resolved later by the
    // BPF backend).
    if (!isValidBPFPreserveFieldInfoArg(Arg)) {
      InvalidArg = true;
      kind = diag::err_preserve_field_info_not_field;
    }
  } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) {
    // Must name a typedef, or a named record/enum type.
    if (!isValidBPFPreserveTypeInfoArg(Arg)) {
      InvalidArg = true;
      kind = diag::err_preserve_type_info_invalid;
    }
  } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) {
    // Must be the *(<enum_type> *)<enumerator> pattern.
    if (!isValidBPFPreserveEnumValueArg(Arg)) {
      InvalidArg = true;
      kind = diag::err_preserve_enum_value_invalid;
    }
    ReturnUnsignedInt = false;
  } else if (BuiltinID == BPF::BI__builtin_btf_type_id) {
    ReturnUnsignedInt = false;
  }

  if (InvalidArg) {
    Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange();
    return true;
  }

  // preserve_field_info / preserve_type_info yield 'unsigned int';
  // preserve_enum_value / btf_type_id yield 'unsigned long'.
  if (ReturnUnsignedInt)
    TheCall->setType(Context.UnsignedIntTy);
  else
    TheCall->setType(Context.UnsignedLongTy);
  return false;
}
2757 
/// Validate the immediate operand(s) of a Hexagon builtin against a static
/// per-builtin table of constraints. Returns true if any immediate is out of
/// range or misaligned (after diagnosing), false otherwise — including for
/// builtins with no table entry, which need no checking.
bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  // One immediate-operand constraint:
  //   OpNum    - zero-based index of the call argument to check.
  //   IsSigned - whether the immediate is interpreted as signed.
  //   BitWidth - width in bits of the immediate field; 0 marks an unused
  //              slot in the Infos array below.
  //   Align    - log2 of the required alignment: the value range is scaled
  //              by 1 << Align and the value must be a multiple of it.
  struct ArgInfo {
    uint8_t OpNum;
    bool IsSigned;
    uint8_t BitWidth;
    uint8_t Align;
  };
  // Constraints for one builtin; at most two constrained immediates each.
  struct BuiltinInfo {
    unsigned BuiltinID;
    ArgInfo Infos[2];
  };

  static BuiltinInfo Infos[] = {
    { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} },
    { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} },
    { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} },
    { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} },
    { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} },
    { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} },
    { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} },

    { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} },

    { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
      {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 },
                                                 { 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 },
                                                  { 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 },
                                               { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 },
                                                { 3, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
      {{ 2, false, 4, 0 },
       { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
      {{ 2, false, 4, 0 },
       { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
      {{ 2, false, 4, 0 },
       { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
      {{ 2, false, 4, 0 },
       { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 },
                                                { 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 },
                                                 { 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
      {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
      {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
      {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
      {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
      {{ 3, false, 1, 0 }} },
  };

  // Use a dynamically initialized static to sort the table exactly once on
  // first run.
  static const bool SortOnce =
      (llvm::sort(Infos,
                  [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
                    return LHS.BuiltinID < RHS.BuiltinID;
                  }),
       true);
  (void)SortOnce;

  // Binary-search the sorted table for this builtin's entry.
  const BuiltinInfo *F = llvm::partition_point(
      Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; });
  if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
    return false; // No constrained immediates for this builtin.

  bool Error = false;

  for (const ArgInfo &A : F->Infos) {
    // Ignore empty ArgInfo elements.
    if (A.BitWidth == 0)
      continue;

    // Base range of a BitWidth-bit (signed or unsigned) immediate.
    int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
    int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
    if (!A.Align) {
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
    } else {
      // Aligned immediate: scale the range by 2^Align and additionally
      // require the value to be a multiple of that alignment.
      unsigned M = 1 << A.Align;
      Min *= M;
      Max *= M;
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
      Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
    }
  }
  return Error;
}
2992 
2993 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
2994  CallExpr *TheCall) {
2995  return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
2996 }
2997 
2998 bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
2999  unsigned BuiltinID, CallExpr *TheCall) {
3000  return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
3001  CheckMipsBuiltinArgument(BuiltinID, TheCall);
3002 }
3003 
3004 bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
3005  CallExpr *TheCall) {
3006 
3007  if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
3008  BuiltinID <= Mips::BI__builtin_mips_lwx) {
3009  if (!TI.hasFeature("dsp"))
3010  return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
3011  }
3012 
3013  if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
3014  BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
3015  if (!TI.hasFeature("dspr2"))
3016  return Diag(TheCall->getBeginLoc(),
3017  diag::err_mips_builtin_requires_dspr2);
3018  }
3019 
3020  if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
3021  BuiltinID <= Mips::BI__builtin_msa_xori_b) {
3022  if (!TI.hasFeature("msa"))
3023  return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
3024  }
3025 
3026  return false;
3027 }
3028 
// CheckMipsBuiltinArgument - Checks the constant value passed to the
// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
// ordering for DSP is unspecified. MSA is ordered by the data format used
// by the underlying instruction i.e., df/m, df/n and then by size.
//
// FIXME: The size tests here should instead be tablegen'd along with the
// definitions from include/clang/Basic/BuiltinsMips.def.
// FIXME: GCC is strict on signedness for some of these intrinsics, we should
// be too.
bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  // i = index of the argument to check, [l, u] = accepted inclusive range,
  // m = required multiple of the value (0 means no multiple constraint).
  unsigned i = 0, l = 0, u = 0, m = 0;
  switch (BuiltinID) {
  default: return false;
  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
  // MSA intrinsics. Instructions (which the intrinsics maps to) which use the
  // df/m field.
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_bclri_b:
  case Mips::BI__builtin_msa_bnegi_b:
  case Mips::BI__builtin_msa_bseti_b:
  case Mips::BI__builtin_msa_sat_s_b:
  case Mips::BI__builtin_msa_sat_u_b:
  case Mips::BI__builtin_msa_slli_b:
  case Mips::BI__builtin_msa_srai_b:
  case Mips::BI__builtin_msa_srari_b:
  case Mips::BI__builtin_msa_srli_b:
  case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_binsli_b:
  case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_bclri_h:
  case Mips::BI__builtin_msa_bnegi_h:
  case Mips::BI__builtin_msa_bseti_h:
  case Mips::BI__builtin_msa_sat_s_h:
  case Mips::BI__builtin_msa_sat_u_h:
  case Mips::BI__builtin_msa_slli_h:
  case Mips::BI__builtin_msa_srai_h:
  case Mips::BI__builtin_msa_srari_h:
  case Mips::BI__builtin_msa_srli_h:
  case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_binsli_h:
  case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 5 bit immediate.
  // The first block of intrinsics actually have an unsigned 5 bit field,
  // not a df/n field.
  case Mips::BI__builtin_msa_cfcmsa:
  case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_clei_u_b:
  case Mips::BI__builtin_msa_clei_u_h:
  case Mips::BI__builtin_msa_clei_u_w:
  case Mips::BI__builtin_msa_clei_u_d:
  case Mips::BI__builtin_msa_clti_u_b:
  case Mips::BI__builtin_msa_clti_u_h:
  case Mips::BI__builtin_msa_clti_u_w:
  case Mips::BI__builtin_msa_clti_u_d:
  case Mips::BI__builtin_msa_maxi_u_b:
  case Mips::BI__builtin_msa_maxi_u_h:
  case Mips::BI__builtin_msa_maxi_u_w:
  case Mips::BI__builtin_msa_maxi_u_d:
  case Mips::BI__builtin_msa_mini_u_b:
  case Mips::BI__builtin_msa_mini_u_h:
  case Mips::BI__builtin_msa_mini_u_w:
  case Mips::BI__builtin_msa_mini_u_d:
  case Mips::BI__builtin_msa_addvi_b:
  case Mips::BI__builtin_msa_addvi_h:
  case Mips::BI__builtin_msa_addvi_w:
  case Mips::BI__builtin_msa_addvi_d:
  case Mips::BI__builtin_msa_bclri_w:
  case Mips::BI__builtin_msa_bnegi_w:
  case Mips::BI__builtin_msa_bseti_w:
  case Mips::BI__builtin_msa_sat_s_w:
  case Mips::BI__builtin_msa_sat_u_w:
  case Mips::BI__builtin_msa_slli_w:
  case Mips::BI__builtin_msa_srai_w:
  case Mips::BI__builtin_msa_srari_w:
  case Mips::BI__builtin_msa_srli_w:
  case Mips::BI__builtin_msa_srlri_w:
  case Mips::BI__builtin_msa_subvi_b:
  case Mips::BI__builtin_msa_subvi_h:
  case Mips::BI__builtin_msa_subvi_w:
  case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_binsli_w:
  case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
  // These intrinsics take an unsigned 6 bit immediate.
  case Mips::BI__builtin_msa_bclri_d:
  case Mips::BI__builtin_msa_bnegi_d:
  case Mips::BI__builtin_msa_bseti_d:
  case Mips::BI__builtin_msa_sat_s_d:
  case Mips::BI__builtin_msa_sat_u_d:
  case Mips::BI__builtin_msa_slli_d:
  case Mips::BI__builtin_msa_srai_d:
  case Mips::BI__builtin_msa_srari_d:
  case Mips::BI__builtin_msa_srli_d:
  case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_msa_binsli_d:
  case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
  // These intrinsics take a signed 5 bit immediate.
  case Mips::BI__builtin_msa_ceqi_b:
  case Mips::BI__builtin_msa_ceqi_h:
  case Mips::BI__builtin_msa_ceqi_w:
  case Mips::BI__builtin_msa_ceqi_d:
  case Mips::BI__builtin_msa_clti_s_b:
  case Mips::BI__builtin_msa_clti_s_h:
  case Mips::BI__builtin_msa_clti_s_w:
  case Mips::BI__builtin_msa_clti_s_d:
  case Mips::BI__builtin_msa_clei_s_b:
  case Mips::BI__builtin_msa_clei_s_h:
  case Mips::BI__builtin_msa_clei_s_w:
  case Mips::BI__builtin_msa_clei_s_d:
  case Mips::BI__builtin_msa_maxi_s_b:
  case Mips::BI__builtin_msa_maxi_s_h:
  case Mips::BI__builtin_msa_maxi_s_w:
  case Mips::BI__builtin_msa_maxi_s_d:
  case Mips::BI__builtin_msa_mini_s_b:
  case Mips::BI__builtin_msa_mini_s_h:
  case Mips::BI__builtin_msa_mini_s_w:
  case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
  // These intrinsics take an unsigned 8 bit immediate.
  case Mips::BI__builtin_msa_andi_b:
  case Mips::BI__builtin_msa_nori_b:
  case Mips::BI__builtin_msa_ori_b:
  case Mips::BI__builtin_msa_shf_b:
  case Mips::BI__builtin_msa_shf_h:
  case Mips::BI__builtin_msa_shf_w:
  case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
  case Mips::BI__builtin_msa_bseli_b:
  case Mips::BI__builtin_msa_bmnzi_b:
  case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
  // df/n format
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_copy_s_b:
  case Mips::BI__builtin_msa_copy_u_b:
  case Mips::BI__builtin_msa_insve_b:
  case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_copy_s_h:
  case Mips::BI__builtin_msa_copy_u_h:
  case Mips::BI__builtin_msa_insve_h:
  case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 2 bit immediate.
  case Mips::BI__builtin_msa_copy_s_w:
  case Mips::BI__builtin_msa_copy_u_w:
  case Mips::BI__builtin_msa_insve_w:
  case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
  case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
  // These intrinsics take an unsigned 1 bit immediate.
  case Mips::BI__builtin_msa_copy_s_d:
  case Mips::BI__builtin_msa_copy_u_d:
  case Mips::BI__builtin_msa_insve_d:
  case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
  case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
  // Memory offsets and immediate loads.
  // These intrinsics take a signed 10 bit immediate.
  // NOTE(review): ldi_b's range [-128, 255] is wider than a signed 8-bit
  // range, apparently to accept both signed and unsigned byte spellings —
  // confirm against the MSA ISA reference before changing.
  case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
  case Mips::BI__builtin_msa_ldi_h:
  case Mips::BI__builtin_msa_ldi_w:
  case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
  // Load/store offsets must also be a multiple of the element size (m).
  case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
  }

  // Range check only, or range check plus multiple-of check.
  if (!m)
    return SemaBuiltinConstantArgRange(TheCall, i, l, u);

  return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
         SemaBuiltinConstantArgMultiple(TheCall, i, m);
}
3214 
/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
/// advancing the pointer over the consumed characters. The decoded type is
/// returned. If the decoded type represents a constant integer with a
/// constraint on its value then Mask is set to that value. The type descriptors
/// used in Str are specific to PPC MMA builtins and are documented in the file
/// defining the PPC builtins.
static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
                                        unsigned &Mask) {
  bool RequireICE = false;
  switch (*Str++) {
  case 'V':
    // 'V': a 16-element AltiVec vector of unsigned char.
    return Context.getVectorType(Context.UnsignedCharTy, 16,
                                 VectorType::VectorKind::AltiVecVector);
  case 'i': {
    // 'i<n>': an int parameter whose value constraint <n> is reported back
    // to the caller through Mask.
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing constant parameter constraint");
    Str = End;
    Mask = size;
    return Context.IntTy;
  }
  case 'W': {
    // 'W<n>': a PPC MMA vector type of <n> bits, optionally followed by
    // '*' (pointer) and/or 'C' (const) modifiers.
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing PowerPC MMA type size");
    Str = End;
    QualType Type;
    // Map the bit size onto the corresponding builtin PPC vector type.
    switch (size) {
  #define PPC_VECTOR_TYPE(typeName, Id, size) \
    case size: Type = Context.Id##Ty; break;
  #include "clang/Basic/PPCTypes.def"
    default: llvm_unreachable("Invalid PowerPC MMA vector type");
    }
    // Consume any trailing '*' / 'C' modifiers, innermost first.
    bool CheckVectorArgs = false;
    while (!CheckVectorArgs) {
      switch (*Str++) {
      case '*':
        Type = Context.getPointerType(Type);
        break;
      case 'C':
        Type = Type.withConst();
        break;
      default:
        // Not a modifier: back up one character and stop.
        CheckVectorArgs = true;
        --Str;
        break;
      }
    }
    return Type;
  }
  default:
    // Anything else is handled by the generic builtin type decoder.
    return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true);
  }
}
3270 
3271 static bool isPPC_64Builtin(unsigned BuiltinID) {
3272  // These builtins only work on PPC 64bit targets.
3273  switch (BuiltinID) {
3274  case PPC::BI__builtin_divde:
3275  case PPC::BI__builtin_divdeu:
3276  case PPC::BI__builtin_bpermd:
3277  case PPC::BI__builtin_ppc_ldarx:
3278  case PPC::BI__builtin_ppc_stdcx:
3279  case PPC::BI__builtin_ppc_tdw:
3280  case PPC::BI__builtin_ppc_trapd:
3281  case PPC::BI__builtin_ppc_cmpeqb:
3282  case PPC::BI__builtin_ppc_setb:
3283  case PPC::BI__builtin_ppc_mulhd:
3284  case PPC::BI__builtin_ppc_mulhdu:
3285  case PPC::BI__builtin_ppc_maddhd:
3286  case PPC::BI__builtin_ppc_maddhdu:
3287  case PPC::BI__builtin_ppc_maddld:
3288  case PPC::BI__builtin_ppc_load8r:
3289  case PPC::BI__builtin_ppc_store8r:
3290  case PPC::BI__builtin_ppc_insert_exp:
3291  case PPC::BI__builtin_ppc_extract_sig:
3292  case PPC::BI__builtin_ppc_addex:
3293  case PPC::BI__builtin_darn:
3294  case PPC::BI__builtin_darn_raw:
3295  case PPC::BI__builtin_ppc_compare_and_swaplp:
3296  case PPC::BI__builtin_ppc_fetch_and_addlp:
3297  case PPC::BI__builtin_ppc_fetch_and_andlp:
3298  case PPC::BI__builtin_ppc_fetch_and_orlp:
3299  case PPC::BI__builtin_ppc_fetch_and_swaplp:
3300  return true;
3301  }
3302  return false;
3303 }
3304 
3305 static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall,
3306  StringRef FeatureToCheck, unsigned DiagID,
3307  StringRef DiagArg = "") {
3308  if (S.Context.getTargetInfo().hasFeature(FeatureToCheck))
3309  return false;
3310 
3311  if (DiagArg.empty())
3312  S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange();
3313  else
3314  S.Diag(TheCall->getBeginLoc(), DiagID)
3315  << DiagArg << TheCall->getSourceRange();
3316 
3317  return true;
3318 }
3319 
3320 /// Returns true if the argument consists of one contiguous run of 1s with any
3321 /// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so
3322 /// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not,
3323 /// since all 1s are not contiguous.
3324 bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
3325  llvm::APSInt Result;
3326  // We can't check the value of a dependent argument.
3327  Expr *Arg = TheCall->getArg(ArgNum);
3328  if (Arg->isTypeDependent() || Arg->isValueDependent())
3329  return false;
3330 
3331  // Check constant-ness first.
3332  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
3333  return true;
3334 
3335  // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s.
3336  if (Result.isShiftedMask() || (~Result).isShiftedMask())
3337  return false;
3338 
3339  return Diag(TheCall->getBeginLoc(),
3340  diag::err_argument_not_contiguous_bit_field)
3341  << ArgNum << Arg->getSourceRange();
3342 }
3343 
/// Perform semantic checks on a call to a PPC target builtin: reject 64-bit
/// only builtins on 32-bit targets, verify required target features, and
/// validate immediate (constant) operands.
/// Returns true when a diagnostic was emitted, false when the call is OK.
bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  // i/l/u describe a deferred range check performed after the switch:
  // argument i must be a constant in [l, u]. Cases that only need such a
  // check set these and break; all others return directly.
  unsigned i = 0, l = 0, u = 0;
  bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
  llvm::APSInt Result;

  // Builtins that only exist on 64-bit PPC are rejected on 32-bit targets.
  if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
    return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
           << TheCall->getSourceRange();

  switch (BuiltinID) {
  default: return false;
  case PPC::BI__builtin_altivec_crypto_vshasigmaw:
  case PPC::BI__builtin_altivec_crypto_vshasigmad:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case PPC::BI__builtin_altivec_dss:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
  case PPC::BI__builtin_tbegin:
  case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break;
  case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break;
  case PPC::BI__builtin_tabortwc:
  case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break;
  case PPC::BI__builtin_tabortwci:
  case PPC::BI__builtin_tabortdci:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
  case PPC::BI__builtin_altivec_dst:
  case PPC::BI__builtin_altivec_dstt:
  case PPC::BI__builtin_altivec_dstst:
  case PPC::BI__builtin_altivec_dststt:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
  case PPC::BI__builtin_vsx_xxpermdi:
  case PPC::BI__builtin_vsx_xxsldwi:
    return SemaBuiltinVSX(TheCall);
  case PPC::BI__builtin_divwe:
  case PPC::BI__builtin_divweu:
  case PPC::BI__builtin_divde:
  case PPC::BI__builtin_divdeu:
    return SemaFeatureCheck(*this, TheCall, "extdiv",
                            diag::err_ppc_builtin_only_on_arch, "7");
  case PPC::BI__builtin_bpermd:
    return SemaFeatureCheck(*this, TheCall, "bpermd",
                            diag::err_ppc_builtin_only_on_arch, "7");
  case PPC::BI__builtin_unpack_vector_int128:
    return SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_only_on_arch, "7") ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case PPC::BI__builtin_pack_vector_int128:
    return SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_only_on_arch, "7");
  case PPC::BI__builtin_altivec_vgnb:
    return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
  case PPC::BI__builtin_altivec_vec_replace_elt:
  case PPC::BI__builtin_altivec_vec_replace_unaligned: {
    QualType VecTy = TheCall->getArg(0)->getType();
    QualType EltTy = TheCall->getArg(1)->getType();
    unsigned Width = Context.getIntWidth(EltTy);
    // The byte-offset immediate bound depends on the element width: 32-bit
    // elements allow offsets 0-12, 64-bit elements 0-8. The element type
    // must also match the vector's element type.
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 12 : 8) ||
           !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy);
  }
  case PPC::BI__builtin_vsx_xxeval:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
  case PPC::BI__builtin_altivec_vsldbi:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
  case PPC::BI__builtin_altivec_vsrdbi:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
  case PPC::BI__builtin_vsx_xxpermx:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
  case PPC::BI__builtin_ppc_tw:
  case PPC::BI__builtin_ppc_tdw:
    return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31);
  case PPC::BI__builtin_ppc_cmpeqb:
  case PPC::BI__builtin_ppc_setb:
  case PPC::BI__builtin_ppc_maddhd:
  case PPC::BI__builtin_ppc_maddhdu:
  case PPC::BI__builtin_ppc_maddld:
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9");
  case PPC::BI__builtin_ppc_cmprb:
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9") ||
           SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
  // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
  // be a constant that represents a contiguous bit field.
  case PPC::BI__builtin_ppc_rlwnm:
    return SemaValueIsRunOfOnes(TheCall, 2);
  case PPC::BI__builtin_ppc_rlwimi:
  case PPC::BI__builtin_ppc_rldimi:
    return SemaBuiltinConstantArg(TheCall, 2, Result) ||
           SemaValueIsRunOfOnes(TheCall, 3);
  case PPC::BI__builtin_ppc_extract_exp:
  case PPC::BI__builtin_ppc_extract_sig:
  case PPC::BI__builtin_ppc_insert_exp:
    return SemaFeatureCheck(*this, TheCall, "power9-vector",
                            diag::err_ppc_builtin_only_on_arch, "9");
  case PPC::BI__builtin_ppc_addex: {
    if (SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                         diag::err_ppc_builtin_only_on_arch, "9") ||
        SemaBuiltinConstantArgRange(TheCall, 2, 0, 3))
      return true;
    // Output warning for reserved values 1 to 3.
    int ArgValue =
        TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue();
    if (ArgValue != 0)
      Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour)
          << ArgValue;
    return false;
  }
  case PPC::BI__builtin_ppc_mtfsb0:
  case PPC::BI__builtin_ppc_mtfsb1:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
  case PPC::BI__builtin_ppc_mtfsf:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255);
  case PPC::BI__builtin_ppc_mtfsfi:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case PPC::BI__builtin_ppc_alignx:
    return SemaBuiltinConstantArgPower2(TheCall, 0);
  case PPC::BI__builtin_ppc_rdlam:
    return SemaValueIsRunOfOnes(TheCall, 2);
  case PPC::BI__builtin_ppc_icbt:
  case PPC::BI__builtin_ppc_sthcx:
  case PPC::BI__builtin_ppc_stbcx:
  case PPC::BI__builtin_ppc_lharx:
  case PPC::BI__builtin_ppc_lbarx:
    return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
                            diag::err_ppc_builtin_only_on_arch, "8");
  case PPC::BI__builtin_vsx_ldrmb:
  case PPC::BI__builtin_vsx_strmb:
    return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
                            diag::err_ppc_builtin_only_on_arch, "8") ||
           SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
  case PPC::BI__builtin_altivec_vcntmbb:
  case PPC::BI__builtin_altivec_vcntmbh:
  case PPC::BI__builtin_altivec_vcntmbw:
  case PPC::BI__builtin_altivec_vcntmbd:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case PPC::BI__builtin_darn:
  case PPC::BI__builtin_darn_raw:
  case PPC::BI__builtin_darn_32:
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9");
  case PPC::BI__builtin_vsx_xxgenpcvbm:
  case PPC::BI__builtin_vsx_xxgenpcvhm:
  case PPC::BI__builtin_vsx_xxgenpcvwm:
  case PPC::BI__builtin_vsx_xxgenpcvdm:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
  case PPC::BI__builtin_ppc_compare_exp_uo:
  case PPC::BI__builtin_ppc_compare_exp_lt:
  case PPC::BI__builtin_ppc_compare_exp_gt:
  case PPC::BI__builtin_ppc_compare_exp_eq:
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9") ||
           SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_requires_vsx);
  case PPC::BI__builtin_ppc_test_data_class: {
    // Check if the first argument of the __builtin_ppc_test_data_class call is
    // valid. The argument must be either a 'float' or a 'double'.
    QualType ArgType = TheCall->getArg(0)->getType();
    if (ArgType != QualType(Context.FloatTy) &&
        ArgType != QualType(Context.DoubleTy))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_ppc_invalid_test_data_class_type);
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9") ||
           SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_requires_vsx) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 127);
  }
  case PPC::BI__builtin_ppc_load8r:
  case PPC::BI__builtin_ppc_store8r:
    return SemaFeatureCheck(*this, TheCall, "isa-v206-instructions",
                            diag::err_ppc_builtin_only_on_arch, "7");
  // Expands to one case per MMA builtin listed in BuiltinsPPC.def, each
  // dispatching to the dedicated MMA checker with its type signature string.
#define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
  case PPC::BI__builtin_##Name: \
    return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types);
#include "clang/Basic/BuiltinsPPC.def"
  }
  // Deferred range check for the cases that broke out of the switch above.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
3525 
// Check if the given type is a non-pointer PPC MMA type. This function is used
// in Sema to prevent invalid uses of restricted PPC MMA types.
// Returns true (and diagnoses at TypeLoc) when Type is a direct use of such a
// type; pointers to and arrays of these types are allowed and return false.
bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
  // Indirect uses (pointer/array of an MMA type) are permitted.
  if (Type->isPointerType() || Type->isArrayType())
    return false;

  // Compare against the canonical, unqualified type so sugar/qualifiers
  // don't defeat the check.
  QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
  // The macro expands to "|| CoreType == Context.<Id>Ty" for every PPC
  // builtin vector type in PPCTypes.def, turning the "if (false ...)" below
  // into one disjunction over all restricted types.
#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
  if (false
#include "clang/Basic/PPCTypes.def"
     ) {
    Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
    return true;
  }
  return false;
}
3542 
/// Perform semantic checks on AMDGCN builtins that carry C11-style memory
/// order and synchronization scope operands (atomic inc/dec and fence).
/// Returns true when a diagnostic was emitted, false otherwise.
bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  // position of memory order and scope arguments in the builtin
  unsigned OrderIndex, ScopeIndex;
  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
    OrderIndex = 2;
    ScopeIndex = 3;
    break;
  case AMDGPU::BI__builtin_amdgcn_fence:
    OrderIndex = 0;
    ScopeIndex = 1;
    break;
  default:
    // All other builtins have no order/scope operands to validate.
    return false;
  }

  // The memory order operand must be an integer constant we can evaluate.
  ExprResult Arg = TheCall->getArg(OrderIndex);
  auto ArgExpr = Arg.get();
  Expr::EvalResult ArgResult;

  if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
           << ArgExpr->getType();
  auto Ord = ArgResult.Val.getInt().getZExtValue();

  // Check validity of memory ordering as per C11 / C++11's memory model.
  // Only fence needs check. Atomic dec/inc allow all memory orders.
  if (!llvm::isValidAtomicOrderingCABI(Ord))
    return Diag(ArgExpr->getBeginLoc(),
                diag::warn_atomic_op_has_invalid_memory_order)
           << ArgExpr->getSourceRange();
  switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
  case llvm::AtomicOrderingCABI::relaxed:
  case llvm::AtomicOrderingCABI::consume:
    // relaxed/consume are meaningless for a fence; reject them there only.
    if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
      return Diag(ArgExpr->getBeginLoc(),
                  diag::warn_atomic_op_has_invalid_memory_order)
             << ArgExpr->getSourceRange();
    break;
  case llvm::AtomicOrderingCABI::acquire:
  case llvm::AtomicOrderingCABI::release:
  case llvm::AtomicOrderingCABI::acq_rel:
  case llvm::AtomicOrderingCABI::seq_cst:
    break;
  }

  Arg = TheCall->getArg(ScopeIndex);
  ArgExpr = Arg.get();
  Expr::EvalResult ArgResult1;
  // Check that sync scope is a constant literal
  if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
           << ArgExpr->getType();

  return false;
}
3603 
3604 bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
3605  llvm::APSInt Result;
3606 
3607  // We can't check the value of a dependent argument.
3608  Expr *Arg = TheCall->getArg(ArgNum);
3609  if (Arg->isTypeDependent() || Arg->isValueDependent())
3610  return false;
3611 
3612  // Check constant-ness first.
3613  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
3614  return true;
3615 
3616  int64_t Val = Result.getSExtValue();
3617  if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
3618  return false;
3619 
3620  return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
3621  << Arg->getSourceRange();
3622 }
3623 
3624 bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
3625  unsigned BuiltinID,
3626  CallExpr *TheCall) {
3627  // CodeGenFunction can also detect this, but this gives a better error
3628  // message.
3629  bool FeatureMissing = false;
3630  SmallVector<StringRef> ReqFeatures;
3631  StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID);
3632  Features.split(ReqFeatures, ',');
3633 
3634  // Check if each required feature is included
3635  for (StringRef F : ReqFeatures) {
3636  if (TI.hasFeature(F))
3637  continue;
3638 
3639  // If the feature is 64bit, alter the string so it will print better in
3640  // the diagnostic.
3641  if (F == "64bit")
3642  F = "RV64";
3643 
3644  // Convert features like "zbr" and "experimental-zbr" to "Zbr".
3645  F.consume_front("experimental-");
3646  std::string FeatureStr = F.str();
3647  FeatureStr[0] = std::toupper(FeatureStr[0]);
3648 
3649  // Error message
3650  FeatureMissing = true;
3651  Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension)
3652  << TheCall->getSourceRange() << StringRef(FeatureStr);
3653  }
3654 
3655  if (FeatureMissing)
3656  return true;
3657 
3658  switch (BuiltinID) {
3659  case RISCVVector::BI__builtin_rvv_vsetvli:
3660  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) ||
3661  CheckRISCVLMUL(TheCall, 2);
3662  case RISCVVector::BI__builtin_rvv_vsetvlimax:
3663  return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
3664  CheckRISCVLMUL(TheCall, 1);
3665  case RISCVVector::BI__builtin_rvv_vget_v_i8m2_i8m1:
3666  case RISCVVector::BI__builtin_rvv_vget_v_i16m2_i16m1:
3667  case RISCVVector::BI__builtin_rvv_vget_v_i32m2_i32m1:
3668  case RISCVVector::BI__builtin_rvv_vget_v_i64m2_i64m1:
3669  case RISCVVector::BI__builtin_rvv_vget_v_f32m2_f32m1:
3670  case RISCVVector::BI__builtin_rvv_vget_v_f64m2_f64m1:
3671  case RISCVVector::BI__builtin_rvv_vget_v_u8m2_u8m1:
3672  case RISCVVector::BI__builtin_rvv_vget_v_u16m2_u16m1:
3673  case RISCVVector::BI__builtin_rvv_vget_v_u32m2_u32m1:
3674  case RISCVVector::BI__builtin_rvv_vget_v_u64m2_u64m1:
3675  case RISCVVector::BI__builtin_rvv_vget_v_i8m4_i8m2:
3676  case RISCVVector::BI__builtin_rvv_vget_v_i16m4_i16m2:
3677  case RISCVVector::BI__builtin_rvv_vget_v_i32m4_i32m2:
3678  case RISCVVector::BI__builtin_rvv_vget_v_i64m4_i64m2:
3679  case RISCVVector::BI__builtin_rvv_vget_v_f32m4_f32m2:
3680  case RISCVVector::BI__builtin_rvv_vget_v_f64m4_f64m2:
3681  case RISCVVector::BI__builtin_rvv_vget_v_u8m4_u8m2:
3682  case RISCVVector::BI__builtin_rvv_vget_v_u16m4_u16m2:
3683  case RISCVVector::BI__builtin_rvv_vget_v_u32m4_u32m2:
3684  case RISCVVector::BI__builtin_rvv_vget_v_u64m4_u64m2:
3685  case RISCVVector::BI__builtin_rvv_vget_v_i8m8_i8m4:
3686  case RISCVVector::BI__builtin_rvv_vget_v_i16m8_i16m4:
3687  case RISCVVector::BI__builtin_rvv_vget_v_i32m8_i32m4:
3688  case RISCVVector::BI__builtin_rvv_vget_v_i64m8_i64m4:
3689  case RISCVVector::BI__builtin_rvv_vget_v_f32m8_f32m4:
3690  case RISCVVector::BI__builtin_rvv_vget_v_f64m8_f64m4:
3691  case RISCVVector::BI__builtin_rvv_vget_v_u8m8_u8m4:
3692  case RISCVVector::BI__builtin_rvv_vget_v_u16m8_u16m4:
3693  case RISCVVector::BI__builtin_rvv_vget_v_u32m8_u32m4:
3694  case RISCVVector::BI__builtin_rvv_vget_v_u64m8_u64m4:
3695  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
3696  case RISCVVector::BI__builtin_rvv_vget_v_i8m4_i8m1:
3697  case RISCVVector::BI__builtin_rvv_vget_v_i16m4_i16m1:
3698  case RISCVVector::BI__builtin_rvv_vget_v_i32m4_i32m1:
3699  case RISCVVector::BI__builtin_rvv_vget_v_i64m4_i64m1:
3700  case RISCVVector::BI__builtin_rvv_vget_v_f32m4_f32m1:
3701  case RISCVVector::BI__builtin_rvv_vget_v_f64m4_f64m1:
3702  case RISCVVector::BI__builtin_rvv_vget_v_u8m4_u8m1:
3703  case RISCVVector::BI__builtin_rvv_vget_v_u16m4_u16m1:
3704  case RISCVVector::BI__builtin_rvv_vget_v_u32m4_u32m1:
3705  case RISCVVector::BI__builtin_rvv_vget_v_u64m4_u64m1:
3706  case RISCVVector::BI__builtin_rvv_vget_v_i8m8_i8m2:
3707  case RISCVVector::BI__builtin_rvv_vget_v_i16m8_i16m2:
3708  case RISCVVector::BI__builtin_rvv_vget_v_i32m8_i32m2:
3709  case RISCVVector::BI__builtin_rvv_vget_v_i64m8_i64m2:
3710  case RISCVVector::BI__builtin_rvv_vget_v_f32m8_f32m2:
3711  case RISCVVector::BI__builtin_rvv_vget_v_f64m8_f64m2:
3712  case RISCVVector::BI__builtin_rvv_vget_v_u8m8_u8m2:
3713  case RISCVVector::BI__builtin_rvv_vget_v_u16m8_u16m2:
3714  case RISCVVector::BI__builtin_rvv_vget_v_u32m8_u32m2:
3715  case RISCVVector::BI__builtin_rvv_vget_v_u64m8_u64m2:
3716  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
3717  case RISCVVector::BI__builtin_rvv_vget_v_i8m8_i8m1:
3718  case RISCVVector::BI__builtin_rvv_vget_v_i16m8_i16m1:
3719  case RISCVVector::BI__builtin_rvv_vget_v_i32m8_i32m1:
3720  case RISCVVector::BI__builtin_rvv_vget_v_i64m8_i64m1:
3721  case RISCVVector::BI__builtin_rvv_vget_v_f32m8_f32m1:
3722  case RISCVVector::BI__builtin_rvv_vget_v_f64m8_f64m1:
3723  case RISCVVector::BI__builtin_rvv_vget_v_u8m8_u8m1:
3724  case RISCVVector::BI__builtin_rvv_vget_v_u16m8_u16m1:
3725  case RISCVVector::BI__builtin_rvv_vget_v_u32m8_u32m1:
3726  case RISCVVector::BI__builtin_rvv_vget_v_u64m8_u64m1:
3727  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
3728  case RISCVVector::BI__builtin_rvv_vset_v_i8m1_i8m2:
3729  case RISCVVector::BI__builtin_rvv_vset_v_i16m1_i16m2:
3730  case RISCVVector::BI__builtin_rvv_vset_v_i32m1_i32m2:
3731  case RISCVVector::BI__builtin_rvv_vset_v_i64m1_i64m2:
3732  case RISCVVector::BI__builtin_rvv_vset_v_f32m1_f32m2:
3733  case RISCVVector::BI__builtin_rvv_vset_v_f64m1_f64m2:
3734  case RISCVVector::BI__builtin_rvv_vset_v_u8m1_u8m2:
3735  case RISCVVector::BI__builtin_rvv_vset_v_u16m1_u16m2:
3736  case RISCVVector::BI__builtin_rvv_vset_v_u32m1_u32m2:
3737  case RISCVVector::BI__builtin_rvv_vset_v_u64m1_u64m2:
3738  case RISCVVector::BI__builtin_rvv_vset_v_i8m2_i8m4:
3739  case RISCVVector::BI__builtin_rvv_vset_v_i16m2_i16m4:
3740  case RISCVVector::BI__builtin_rvv_vset_v_i32m2_i32m4:
3741  case RISCVVector::BI__builtin_rvv_vset_v_i64m2_i64m4:
3742  case RISCVVector::BI__builtin_rvv_vset_v_f32m2_f32m4:
3743  case RISCVVector::BI__builtin_rvv_vset_v_f64m2_f64m4:
3744  case RISCVVector::BI__builtin_rvv_vset_v_u8m2_u8m4:
3745  case RISCVVector::BI__builtin_rvv_vset_v_u16m2_u16m4:
3746  case RISCVVector::BI__builtin_rvv_vset_v_u32m2_u32m4:
3747  case RISCVVector::BI__builtin_rvv_vset_v_u64m2_u64m4:
3748  case RISCVVector::BI__builtin_rvv_vset_v_i8m4_i8m8:
3749  case RISCVVector::BI__builtin_rvv_vset_v_i16m4_i16m8:
3750  case RISCVVector::BI__builtin_rvv_vset_v_i32m4_i32m8:
3751  case RISCVVector::BI__builtin_rvv_vset_v_i64m4_i64m8:
3752  case RISCVVector::BI__builtin_rvv_vset_v_f32m4_f32m8:
3753  case RISCVVector::BI__builtin_rvv_vset_v_f64m4_f64m8:
3754  case RISCVVector::BI__builtin_rvv_vset_v_u8m4_u8m8:
3755  case RISCVVector::BI__builtin_rvv_vset_v_u16m4_u16m8:
3756  case RISCVVector::BI__builtin_rvv_vset_v_u32m4_u32m8:
3757  case RISCVVector::BI__builtin_rvv_vset_v_u64m4_u64m8:
3758  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
3759  case RISCVVector::BI__builtin_rvv_vset_v_i8m1_i8m4:
3760  case RISCVVector::BI__builtin_rvv_vset_v_i16m1_i16m4:
3761  case RISCVVector::BI__builtin_rvv_vset_v_i32m1_i32m4:
3762  case RISCVVector::BI__builtin_rvv_vset_v_i64m1_i64m4:
3763  case RISCVVector::BI__builtin_rvv_vset_v_f32m1_f32m4:
3764  case RISCVVector::BI__builtin_rvv_vset_v_f64m1_f64m4:
3765  case RISCVVector::BI__builtin_rvv_vset_v_u8m1_u8m4:
3766  case RISCVVector::BI__builtin_rvv_vset_v_u16m1_u16m4:
3767  case RISCVVector::BI__builtin_rvv_vset_v_u32m1_u32m4:
3768  case RISCVVector::BI__builtin_rvv_vset_v_u64m1_u64m4:
3769  case RISCVVector::BI__builtin_rvv_vset_v_i8m2_i8m8:
3770  case RISCVVector::BI__builtin_rvv_vset_v_i16m2_i16m8:
3771  case RISCVVector::BI__builtin_rvv_vset_v_i32m2_i32m8:
3772  case RISCVVector::BI__builtin_rvv_vset_v_i64m2_i64m8:
3773  case RISCVVector::BI__builtin_rvv_vset_v_f32m2_f32m8:
3774  case RISCVVector::BI__builtin_rvv_vset_v_f64m2_f64m8:
3775  case RISCVVector::BI__builtin_rvv_vset_v_u8m2_u8m8:
3776  case RISCVVector::BI__builtin_rvv_vset_v_u16m2_u16m8:
3777  case RISCVVector::BI__builtin_rvv_vset_v_u32m2_u32m8:
3778  case RISCVVector::BI__builtin_rvv_vset_v_u64m2_u64m8:
3779  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
3780  case RISCVVector::BI__builtin_rvv_vset_v_i8m1_i8m8:
3781  case RISCVVector::BI__builtin_rvv_vset_v_i16m1_i16m8:
3782  case RISCVVector::BI__builtin_rvv_vset_v_i32m1_i32m8:
3783  case RISCVVector::BI__builtin_rvv_vset_v_i64m1_i64m8:
3784  case RISCVVector::BI__builtin_rvv_vset_v_f32m1_f32m8:
3785  case RISCVVector::BI__builtin_rvv_vset_v_f64m1_f64m8:
3786  case RISCVVector::BI__builtin_rvv_vset_v_u8m1_u8m8:
3787  case RISCVVector::BI__builtin_rvv_vset_v_u16m1_u16m8:
3788  case RISCVVector::BI__builtin_rvv_vset_v_u32m1_u32m8:
3789  case RISCVVector::BI__builtin_rvv_vset_v_u64m1_u64m8:
3790  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
3791  }
3792 
3793  return false;
3794 }
3795 
/// Perform semantic checks on SystemZ builtins: reject constant
/// __builtin_tabort codes below 256, and range-check the immediate
/// operands of the s390 vector builtins.
/// Returns true when a diagnostic was emitted, false otherwise.
bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  if (BuiltinID == SystemZ::BI__builtin_tabort) {
    Expr *Arg = TheCall->getArg(0);
    // When the abort code is a compile-time constant, values in [0, 255]
    // are diagnosed as invalid. Non-constant arguments are not checked here.
    if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context))
      if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
        return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
               << Arg->getSourceRange();
  }

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // i/l/u: argument index i must be a constant in [l, u] (checked after the
  // switch). Cases needing two range checks return directly.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_verimb:
  case SystemZ::BI__builtin_s390_verimh:
  case SystemZ::BI__builtin_s390_verimf:
  case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
  case SystemZ::BI__builtin_s390_vfaeb:
  case SystemZ::BI__builtin_s390_vfaeh:
  case SystemZ::BI__builtin_s390_vfaef:
  case SystemZ::BI__builtin_s390_vfaebs:
  case SystemZ::BI__builtin_s390_vfaehs:
  case SystemZ::BI__builtin_s390_vfaefs:
  case SystemZ::BI__builtin_s390_vfaezb:
  case SystemZ::BI__builtin_s390_vfaezh:
  case SystemZ::BI__builtin_s390_vfaezf:
  case SystemZ::BI__builtin_s390_vfaezbs:
  case SystemZ::BI__builtin_s390_vfaezhs:
  case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vfisb:
  case SystemZ::BI__builtin_s390_vfidb:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case SystemZ::BI__builtin_s390_vftcisb:
  case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
  case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vstrcb:
  case SystemZ::BI__builtin_s390_vstrch:
  case SystemZ::BI__builtin_s390_vstrcf:
  case SystemZ::BI__builtin_s390_vstrczb:
  case SystemZ::BI__builtin_s390_vstrczh:
  case SystemZ::BI__builtin_s390_vstrczf:
  case SystemZ::BI__builtin_s390_vstrcbs:
  case SystemZ::BI__builtin_s390_vstrchs:
  case SystemZ::BI__builtin_s390_vstrcfs:
  case SystemZ::BI__builtin_s390_vstrczbs:
  case SystemZ::BI__builtin_s390_vstrczhs:
  case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vfminsb:
  case SystemZ::BI__builtin_s390_vfmaxsb:
  case SystemZ::BI__builtin_s390_vfmindb:
  case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
  case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
  case SystemZ::BI__builtin_s390_vclfnhs:
  case SystemZ::BI__builtin_s390_vclfnls:
  case SystemZ::BI__builtin_s390_vcfn:
  case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break;
  }
  // Deferred single range check for the cases that broke out above.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
3864 
3865 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
3866 /// This checks that the target supports __builtin_cpu_supports and
3867 /// that the string argument is constant and valid.
3868 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI,
3869  CallExpr *TheCall) {
3870  Expr *Arg = TheCall->getArg(0);
3871 
3872  // Check if the argument is a string literal.
3873  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
3874  return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
3875  << Arg->getSourceRange();
3876 
3877  // Check the contents of the string.
3878  StringRef Feature =
3879  cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
3880  if (!TI.validateCpuSupports(Feature))
3881  return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
3882  << Arg->getSourceRange();
3883  return false;
3884 }
3885 
3886 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
3887 /// This checks that the target supports __builtin_cpu_is and
3888 /// that the string argument is constant and valid.
3889 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) {
3890  Expr *Arg = TheCall->getArg(0);
3891 
3892  // Check if the argument is a string literal.
3893  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
3894  return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
3895  << Arg->getSourceRange();
3896 
3897  // Check the contents of the string.
3898  StringRef Feature =
3899  cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
3900  if (!TI.validateCpuIs(Feature))
3901  return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
3902  << Arg->getSourceRange();
3903  return false;
3904 }
3905 
3906 // Check if the rounding mode is legal.
3907 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
3908  // Indicates if this instruction has rounding control or just SAE.
3909  bool HasRC = false;
3910 
3911  unsigned ArgNum = 0;
3912  switch (BuiltinID) {
3913  default:
3914  return false;
3915  case X86::BI__builtin_ia32_vcvttsd2si32:
3916  case X86::BI__builtin_ia32_vcvttsd2si64:
3917  case X86::BI__builtin_ia32_vcvttsd2usi32:
3918  case X86::BI__builtin_ia32_vcvttsd2usi64:
3919  case X86::BI__builtin_ia32_vcvttss2si32:
3920  case X86::BI__builtin_ia32_vcvttss2si64:
3921  case X86::BI__builtin_ia32_vcvttss2usi32:
3922  case X86::BI__builtin_ia32_vcvttss2usi64:
3923  case X86::BI__builtin_ia32_vcvttsh2si32:
3924  case X86::BI__builtin_ia32_vcvttsh2si64:
3925  case X86::BI__builtin_ia32_vcvttsh2usi32:
3926  case X86::BI__builtin_ia32_vcvttsh2usi64:
3927  ArgNum = 1;
3928  break;
3929  case X86::BI__builtin_ia32_maxpd512:
3930  case X86::BI__builtin_ia32_maxps512:
3931  case X86::BI__builtin_ia32_minpd512:
3932  case X86::BI__builtin_ia32_minps512:
3933  case X86::BI__builtin_ia32_maxph512:
3934  case X86::BI__builtin_ia32_minph512:
3935  ArgNum = 2;
3936  break;
3937  case X86::BI__builtin_ia32_vcvtph2pd512_mask:
3938  case X86::BI__builtin_ia32_vcvtph2psx512_mask:
3939  case X86::BI__builtin_ia32_cvtps2pd512_mask:
3940  case X86::BI__builtin_ia32_cvttpd2dq512_mask:
3941  case X86::BI__builtin_ia32_cvttpd2qq512_mask:
3942  case X86::BI__builtin_ia32_cvttpd2udq512_mask:
3943  case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
3944  case X86::BI__builtin_ia32_cvttps2dq512_mask:
3945  case X86::BI__builtin_ia32_cvttps2qq512_mask:
3946  case X86::BI__builtin_ia32_cvttps2udq512_mask:
3947  case X86::BI__builtin_ia32_cvttps2uqq512_mask:
3948  case X86::BI__builtin_ia32_vcvttph2w512_mask:
3949  case X86::BI__builtin_ia32_vcvttph2uw512_mask:
3950  case X86::BI__builtin_ia32_vcvttph2dq512_mask:
3951  case X86::BI__builtin_ia32_vcvttph2udq512_mask:
3952  case X86::BI__builtin_ia32_vcvttph2qq512_mask:
3953  case X86::BI__builtin_ia32_vcvttph2uqq512_mask:
3954  case X86::BI__builtin_ia32_exp2pd_mask:
3955  case X86::BI__builtin_ia32_exp2ps_mask:
3956  case X86::BI__builtin_ia32_getexppd512_mask:
3957  case X86::BI__builtin_ia32_getexpps512_mask:
3958  case X86::BI__builtin_ia32_getexpph512_mask:
3959  case X86::BI__builtin_ia32_rcp28pd_mask:
3960  case X86::BI__builtin_ia32_rcp28ps_mask:
3961  case X86::BI__builtin_ia32_rsqrt28pd_mask:
3962  case X86::BI__builtin_ia32_rsqrt28ps_mask:
3963  case X86::BI__builtin_ia32_vcomisd:
3964  case X86::BI__builtin_ia32_vcomiss:
3965  case X86::BI__builtin_ia32_vcomish:
3966  case X86::BI__builtin_ia32_vcvtph2ps512_mask:
3967  ArgNum = 3;
3968  break;
3969  case X86::BI__builtin_ia32_cmppd512_mask:
3970  case X86::BI__builtin_ia32_cmpps512_mask:
3971  case X86::BI__builtin_ia32_cmpsd_mask:
3972  case X86::BI__builtin_ia32_cmpss_mask:
3973  case X86::BI__builtin_ia32_cmpsh_mask:
3974  case X86::BI__builtin_ia32_vcvtsh2sd_round_mask:
3975  case X86::BI__builtin_ia32_vcvtsh2ss_round_mask:
3976  case X86::BI__builtin_ia32_cvtss2sd_round_mask:
3977  case X86::BI__builtin_ia32_getexpsd128_round_mask:
3978  case X86::BI__builtin_ia32_getexpss128_round_mask:
3979  case X86::BI__builtin_ia32_getexpsh128_round_mask:
3980  case X86::BI__builtin_ia32_getmantpd512_mask:
3981  case X86::BI__builtin_ia32_getmantps512_mask:
3982  case X86::BI__builtin_ia32_getmantph512_mask:
3983  case X86::BI__builtin_ia32_maxsd_round_mask:
3984  case X86::BI__builtin_ia32_maxss_round_mask:
3985  case X86::BI__builtin_ia32_maxsh_round_mask:
3986  case X86::BI__builtin_ia32_minsd_round_mask:
3987  case X86::BI__builtin_ia32_minss_round_mask:
3988  case X86::BI__builtin_ia32_minsh_round_mask:
3989  case X86::BI__builtin_ia32_rcp28sd_round_mask:
3990  case X86::BI__builtin_ia32_rcp28ss_round_mask:
3991  case X86::BI__builtin_ia32_reducepd512_mask:
3992  case X86::BI__builtin_ia32_reduceps512_mask:
3993  case X86::BI__builtin_ia32_reduceph512_mask:
3994  case X86::BI__builtin_ia32_rndscalepd_mask:
3995  case X86::BI__builtin_ia32_rndscaleps_mask:
3996  case X86::BI__builtin_ia32_rndscaleph_mask:
3997  case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
3998  case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
3999  ArgNum = 4;
4000  break;
4001  case X86::BI__builtin_ia32_fixupimmpd512_mask:
4002  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
4003  case X86::BI__builtin_ia32_fixupimmps512_mask:
4004  case X86::BI__builtin_ia32_fixupimmps512_maskz:
4005  case X86::BI__builtin_ia32_fixupimmsd_mask:
4006  case X86::BI__builtin_ia32_fixupimmsd_maskz:
4007  case X86::BI__builtin_ia32_fixupimmss_mask:
4008  case X86::BI__builtin_ia32_fixupimmss_maskz:
4009  case X86::BI__builtin_ia32_getmantsd_round_mask:
4010  case X86::BI__builtin_ia32_getmantss_round_mask:
4011  case X86::BI__builtin_ia32_getmantsh_round_mask:
4012  case X86::BI__builtin_ia32_rangepd512_mask:
4013  case X86::BI__builtin_ia32_rangeps512_mask:
4014  case X86::BI__builtin_ia32_rangesd128_round_mask:
4015  case X86::BI__builtin_ia32_rangess128_round_mask:
4016  case X86::BI__builtin_ia32_reducesd_mask:
4017  case X86::BI__builtin_ia32_reducess_mask:
4018  case X86::BI__builtin_ia32_reducesh_mask:
4019  case X86::BI__builtin_ia32_rndscalesd_round_mask:
4020  case X86::BI__builtin_ia32_rndscaless_round_mask:
4021  case X86::BI__builtin_ia32_rndscalesh_round_mask:
4022  ArgNum = 5;
4023  break;
4024  case X86::BI__builtin_ia32_vcvtsd2si64:
4025  case X86::BI__builtin_ia32_vcvtsd2si32:
4026  case X86::BI__builtin_ia32_vcvtsd2usi32:
4027  case X86::BI__builtin_ia32_vcvtsd2usi64:
4028  case X86::BI__builtin_ia32_vcvtss2si32:
4029  case X86::BI__builtin_ia32_vcvtss2si64:
4030  case X86::BI__builtin_ia32_vcvtss2usi32:
4031  case X86::BI__builtin_ia32_vcvtss2usi64:
4032  case X86::BI__builtin_ia32_vcvtsh2si32:
4033  case X86::BI__builtin_ia32_vcvtsh2si64:
4034  case X86::BI__builtin_ia32_vcvtsh2usi32:
4035  case X86::BI__builtin_ia32_vcvtsh2usi64:
4036  case X86::BI__builtin_ia32_sqrtpd512:
4037  case X86::BI__builtin_ia32_sqrtps512:
4038  case X86::BI__builtin_ia32_sqrtph512:
4039  ArgNum = 1;
4040  HasRC = true;
4041  break;
4042  case X86::BI__builtin_ia32_addph512:
4043  case X86::BI__builtin_ia32_divph512:
4044  case X86::BI__builtin_ia32_mulph512:
4045  case X86::BI__builtin_ia32_subph512:
4046  case X86::BI__builtin_ia32_addpd512:
4047  case X86::BI__builtin_ia32_addps512:
4048  case X86::BI__builtin_ia32_divpd512:
4049  case X86::BI__builtin_ia32_divps512:
4050  case X86::BI__builtin_ia32_mulpd512:
4051  case X86::BI__builtin_ia32_mulps512:
4052  case X86::BI__builtin_ia32_subpd512:
4053  case X86::BI__builtin_ia32_subps512:
4054  case X86::BI__builtin_ia32_cvtsi2sd64:
4055  case X86::BI__builtin_ia32_cvtsi2ss32:
4056  case X86::BI__builtin_ia32_cvtsi2ss64:
4057  case X86::BI__builtin_ia32_cvtusi2sd64:
4058  case X86::BI__builtin_ia32_cvtusi2ss32:
4059  case X86::BI__builtin_ia32_cvtusi2ss64:
4060  case X86::BI__builtin_ia32_vcvtusi2sh:
4061  case X86::BI__builtin_ia32_vcvtusi642sh:
4062  case X86::BI__builtin_ia32_vcvtsi2sh:
4063  case X86::BI__builtin_ia32_vcvtsi642sh:
4064  ArgNum = 2;
4065  HasRC = true;
4066  break;
4067  case X86::BI__builtin_ia32_cvtdq2ps512_mask:
4068  case X86::BI__builtin_ia32_cvtudq2ps512_mask:
4069  case X86::BI__builtin_ia32_vcvtpd2ph512_mask:
4070  case X86::BI__builtin_ia32_vcvtps2phx512_mask:
4071  case X86::BI__builtin_ia32_cvtpd2ps512_mask:
4072  case X86::BI__builtin_ia32_cvtpd2dq512_mask:
4073  case X86::BI__builtin_ia32_cvtpd2qq512_mask:
4074  case X86::BI__builtin_ia32_cvtpd2udq512_mask:
4075  case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
4076  case X86::BI__builtin_ia32_cvtps2dq512_mask:
4077  case X86::BI__builtin_ia32_cvtps2qq512_mask:
4078  case X86::BI__builtin_ia32_cvtps2udq512_mask:
4079  case X86::BI__builtin_ia32_cvtps2uqq512_mask:
4080  case X86::BI__builtin_ia32_cvtqq2pd512_mask:
4081  case X86::BI__builtin_ia32_cvtqq2ps512_mask:
4082  case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
4083  case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
4084  case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
4085  case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
4086  case X86::BI__builtin_ia32_vcvtw2ph512_mask:
4087  case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
4088  case X86::BI__builtin_ia32_vcvtph2w512_mask:
4089  case X86::BI__builtin_ia32_vcvtph2uw512_mask:
4090  case X86::BI__builtin_ia32_vcvtph2dq512_mask:
4091  case X86::BI__builtin_ia32_vcvtph2udq512_mask:
4092  case X86::BI__builtin_ia32_vcvtph2qq512_mask:
4093  case X86::BI__builtin_ia32_vcvtph2uqq512_mask:
4094  case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
4095  case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
4096  ArgNum = 3;
4097  HasRC = true;
4098  break;
4099  case X86::BI__builtin_ia32_addsh_round_mask:
4100  case X86::BI__builtin_ia32_addss_round_mask:
4101  case X86::BI__builtin_ia32_addsd_round_mask:
4102  case X86::BI__builtin_ia32_divsh_round_mask:
4103  case X86::BI__builtin_ia32_divss_round_mask:
4104  case X86::BI__builtin_ia32_divsd_round_mask:
4105  case X86::BI__builtin_ia32_mulsh_round_mask:
4106  case X86::BI__builtin_ia32_mulss_round_mask:
4107  case X86::BI__builtin_ia32_mulsd_round_mask:
4108  case X86::BI__builtin_ia32_subsh_round_mask:
4109  case X86::BI__builtin_ia32_subss_round_mask:
4110  case X86::BI__builtin_ia32_subsd_round_mask:
4111  case X86::BI__builtin_ia32_scalefph512_mask:
4112  case X86::BI__builtin_ia32_scalefpd512_mask:
4113  case X86::BI__builtin_ia32_scalefps512_mask:
4114  case X86::BI__builtin_ia32_scalefsd_round_mask:
4115  case X86::BI__builtin_ia32_scalefss_round_mask:
4116  case X86::BI__builtin_ia32_scalefsh_round_mask:
4117  case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
4118  case X86::BI__builtin_ia32_vcvtss2sh_round_mask:
4119  case X86::BI__builtin_ia32_vcvtsd2sh_round_mask:
4120  case X86::BI__builtin_ia32_sqrtsd_round_mask:
4121  case X86::BI__builtin_ia32_sqrtss_round_mask:
4122  case X86::BI__builtin_ia32_sqrtsh_round_mask:
4123  case X86::BI__builtin_ia32_vfmaddsd3_mask:
4124  case X86::BI__builtin_ia32_vfmaddsd3_maskz:
4125  case X86::BI__builtin_ia32_vfmaddsd3_mask3:
4126  case X86::BI__builtin_ia32_vfmaddss3_mask:
4127  case X86::BI__builtin_ia32_vfmaddss3_maskz:
4128  case X86::BI__builtin_ia32_vfmaddss3_mask3:
4129  case X86::BI__builtin_ia32_vfmaddsh3_mask:
4130  case X86::BI__builtin_ia32_vfmaddsh3_maskz:
4131  case X86::BI__builtin_ia32_vfmaddsh3_mask3:
4132  case X86::BI__builtin_ia32_vfmaddpd512_mask:
4133  case X86::BI__builtin_ia32_vfmaddpd512_maskz:
4134  case X86::BI__builtin_ia32_vfmaddpd512_mask3:
4135  case X86::BI__builtin_ia32_vfmsubpd512_mask3:
4136  case X86::BI__builtin_ia32_vfmaddps512_mask:
4137  case X86::BI__builtin_ia32_vfmaddps512_maskz:
4138  case X86::BI__builtin_ia32_vfmaddps512_mask3:
4139  case X86::BI__builtin_ia32_vfmsubps512_mask3:
4140  case X86::BI__builtin_ia32_vfmaddph512_mask:
4141  case X86::BI__builtin_ia32_vfmaddph512_maskz:
4142  case X86::BI__builtin_ia32_vfmaddph512_mask3:
4143  case X86::BI__builtin_ia32_vfmsubph512_mask3:
4144  case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
4145  case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
4146  case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
4147  case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
4148  case X86::BI__builtin_ia32_vfmaddsubps512_mask:
4149  case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
4150  case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
4151  case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
4152  case X86::BI__builtin_ia32_vfmaddsubph512_mask:
4153  case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
4154  case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
4155  case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
4156  case X86::BI__builtin_ia32_vfmaddcsh_mask:
4157  case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
4158  case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
4159  case X86::BI__builtin_ia32_vfmaddcph512_mask:
4160  case X86::BI__builtin_ia32_vfmaddcph512_maskz:
4161  case X86::BI__builtin_ia32_vfmaddcph512_mask3:
4162  case X86::BI__builtin_ia32_vfcmaddcsh_mask:
4163  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
4164  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
4165  case X86::BI__builtin_ia32_vfcmaddcph512_mask:
4166  case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
4167  case X86::BI__builtin_ia32_vfcmaddcph512_mask3:
4168  case X86::BI__builtin_ia32_vfmulcsh_mask:
4169  case X86::BI__builtin_ia32_vfmulcph512_mask:
4170  case X86::BI__builtin_ia32_vfcmulcsh_mask:
4171  case X86::BI__builtin_ia32_vfcmulcph512_mask:
4172  ArgNum = 4;
4173  HasRC = true;
4174  break;
4175  }
4176 
4177  llvm::APSInt Result;
4178 
4179  // We can't check the value of a dependent argument.
4180  Expr *Arg = TheCall->getArg(ArgNum);
4181  if (Arg->isTypeDependent() || Arg->isValueDependent())
4182  return false;
4183 
4184  // Check constant-ness first.
4185  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4186  return true;
4187 
4188  // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit
4189  // is set. If the intrinsic has rounding control(bits 1:0), make sure its only
4190  // combined with ROUND_NO_EXC. If the intrinsic does not have rounding
4191  // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
4192  if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
4193  Result == 8/*ROUND_NO_EXC*/ ||
4194  (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
4195  (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
4196  return false;
4197 
4198  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
4199  << Arg->getSourceRange();
4200 }
4201 
/// Check if the gather/scatter scale immediate is legal.
///
/// The hardware only accepts element scale factors of 1, 2, 4 or 8 bytes.
/// Returns true (an error diagnostic has been emitted) when the scale
/// argument is a constant outside that set; returns false when the builtin
/// has no scale operand, the argument is dependent, or the value is valid.
bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
                                             CallExpr *TheCall) {
  // Index of the scale immediate within the call's argument list; which
  // argument it is depends on the builtin's signature.
  unsigned ArgNum = 0;
  switch (BuiltinID) {
  default:
    return false; // Not a gather/scatter builtin; nothing to check.
  // Gather/scatter prefetch builtins: scale is argument 3.
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    ArgNum = 3;
    break;
  // Regular gather/scatter builtins: scale is argument 4.
  case X86::BI__builtin_ia32_gatherd_pd:
  case X86::BI__builtin_ia32_gatherd_pd256:
  case X86::BI__builtin_ia32_gatherq_pd:
  case X86::BI__builtin_ia32_gatherq_pd256:
  case X86::BI__builtin_ia32_gatherd_ps:
  case X86::BI__builtin_ia32_gatherd_ps256:
  case X86::BI__builtin_ia32_gatherq_ps:
  case X86::BI__builtin_ia32_gatherq_ps256:
  case X86::BI__builtin_ia32_gatherd_q:
  case X86::BI__builtin_ia32_gatherd_q256:
  case X86::BI__builtin_ia32_gatherq_q:
  case X86::BI__builtin_ia32_gatherq_q256:
  case X86::BI__builtin_ia32_gatherd_d:
  case X86::BI__builtin_ia32_gatherd_d256:
  case X86::BI__builtin_ia32_gatherq_d:
  case X86::BI__builtin_ia32_gatherq_d256:
  case X86::BI__builtin_ia32_gather3div2df:
  case X86::BI__builtin_ia32_gather3div2di:
  case X86::BI__builtin_ia32_gather3div4df:
  case X86::BI__builtin_ia32_gather3div4di:
  case X86::BI__builtin_ia32_gather3div4sf:
  case X86::BI__builtin_ia32_gather3div4si:
  case X86::BI__builtin_ia32_gather3div8sf:
  case X86::BI__builtin_ia32_gather3div8si:
  case X86::BI__builtin_ia32_gather3siv2df:
  case X86::BI__builtin_ia32_gather3siv2di:
  case X86::BI__builtin_ia32_gather3siv4df:
  case X86::BI__builtin_ia32_gather3siv4di:
  case X86::BI__builtin_ia32_gather3siv4sf:
  case X86::BI__builtin_ia32_gather3siv4si:
  case X86::BI__builtin_ia32_gather3siv8sf:
  case X86::BI__builtin_ia32_gather3siv8si:
  case X86::BI__builtin_ia32_gathersiv8df:
  case X86::BI__builtin_ia32_gathersiv16sf:
  case X86::BI__builtin_ia32_gatherdiv8df:
  case X86::BI__builtin_ia32_gatherdiv16sf:
  case X86::BI__builtin_ia32_gathersiv8di:
  case X86::BI__builtin_ia32_gathersiv16si:
  case X86::BI__builtin_ia32_gatherdiv8di:
  case X86::BI__builtin_ia32_gatherdiv16si:
  case X86::BI__builtin_ia32_scatterdiv2df:
  case X86::BI__builtin_ia32_scatterdiv2di:
  case X86::BI__builtin_ia32_scatterdiv4df:
  case X86::BI__builtin_ia32_scatterdiv4di:
  case X86::BI__builtin_ia32_scatterdiv4sf:
  case X86::BI__builtin_ia32_scatterdiv4si:
  case X86::BI__builtin_ia32_scatterdiv8sf:
  case X86::BI__builtin_ia32_scatterdiv8si:
  case X86::BI__builtin_ia32_scattersiv2df:
  case X86::BI__builtin_ia32_scattersiv2di:
  case X86::BI__builtin_ia32_scattersiv4df:
  case X86::BI__builtin_ia32_scattersiv4di:
  case X86::BI__builtin_ia32_scattersiv4sf:
  case X86::BI__builtin_ia32_scattersiv4si:
  case X86::BI__builtin_ia32_scattersiv8sf:
  case X86::BI__builtin_ia32_scattersiv8si:
  case X86::BI__builtin_ia32_scattersiv8df:
  case X86::BI__builtin_ia32_scattersiv16sf:
  case X86::BI__builtin_ia32_scatterdiv8df:
  case X86::BI__builtin_ia32_scatterdiv16sf:
  case X86::BI__builtin_ia32_scattersiv8di:
  case X86::BI__builtin_ia32_scattersiv16si:
  case X86::BI__builtin_ia32_scatterdiv8di:
  case X86::BI__builtin_ia32_scatterdiv16si:
    ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Only the byte scales the instruction encodes are accepted.
  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
}
4304 
4305 enum { TileRegLow = 0, TileRegHigh = 7 };
4306 
4307 bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
4308  ArrayRef<int> ArgNums) {
4309  for (int ArgNum : ArgNums) {
4310  if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
4311  return true;
4312  }
4313  return false;
4314 }
4315 
4316 bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
4317  ArrayRef<int> ArgNums) {
4318  // Because the max number of tile register is TileRegHigh + 1, so here we use
4319  // each bit to represent the usage of them in bitset.
4320  std::bitset<TileRegHigh + 1> ArgValues;
4321  for (int ArgNum : ArgNums) {
4322  Expr *Arg = TheCall->getArg(ArgNum);
4323  if (Arg->isTypeDependent() || Arg->isValueDependent())
4324  continue;
4325 
4326  llvm::APSInt Result;
4327  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4328  return true;
4329  int ArgExtValue = Result.getExtValue();
4330  assert((ArgExtValue >= TileRegLow || ArgExtValue <= TileRegHigh) &&
4331  "Incorrect tile register num.");
4332  if (ArgValues.test(ArgExtValue))
4333  return Diag(TheCall->getBeginLoc(),
4334  diag::err_x86_builtin_tile_arg_duplicate)
4335  << TheCall->getArg(ArgNum)->getSourceRange();
4336  ArgValues.set(ArgExtValue);
4337  }
4338  return false;
4339 }
4340 
4341 bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
4342  ArrayRef<int> ArgNums) {
4343  return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
4344  CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
4345 }
4346 
/// Dispatch the appropriate tile-argument checks for an AMX builtin.
/// Returns true on error; false for builtins that take no tile arguments.
bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
  // Load/store/zero builtins: a single tile operand at argument 0; only a
  // range check is needed.
  case X86::BI__builtin_ia32_tileloadd64:
  case X86::BI__builtin_ia32_tileloaddt164:
  case X86::BI__builtin_ia32_tilestored64:
  case X86::BI__builtin_ia32_tilezero:
    return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
  // Dot-product builtins: three tile operands (dst, src1, src2) that must be
  // in range and pairwise distinct.
  case X86::BI__builtin_ia32_tdpbssd:
  case X86::BI__builtin_ia32_tdpbsud:
  case X86::BI__builtin_ia32_tdpbusd:
  case X86::BI__builtin_ia32_tdpbuud:
  case X86::BI__builtin_ia32_tdpbf16ps:
    return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
  }
}
4364 static bool isX86_32Builtin(unsigned BuiltinID) {
4365  // These builtins only work on x86-32 targets.
4366  switch (BuiltinID) {
4367  case X86::BI__builtin_ia32_readeflags_u32:
4368  case X86::BI__builtin_ia32_writeeflags_u32:
4369  return true;
4370  }
4371 
4372  return false;
4373 }
4374 
/// Perform target-specific semantic checks for an X86 builtin call:
/// __builtin_cpu_supports/__builtin_cpu_is argument validation, 32-bit-only
/// builtin misuse on 64-bit targets, rounding/SAE immediates, gather/scatter
/// scale immediates, AMX tile arguments, and finally the generic
/// immediate-range table below.  Returns true on error.
bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TI, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TI, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
  const llvm::Triple &TT = TI.getTriple();
  if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
    return Diag(TheCall->getCallee()->getBeginLoc(),
                diag::err_32_bit_builtin_64_bit_tgt);

  // If the intrinsic has rounding or SAE make sure its valid.
  if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a gather/scatter scale immediate make sure its valid.
  if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a tile arguments, make sure they are valid.
  if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.  'i' is the argument index of the immediate,
  // [l, u] its allowed inclusive range.
  int i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
  // Immediate is argument 1, range [0, 1].
  case X86::BI__builtin_ia32_vec_ext_v2si:
  case X86::BI__builtin_ia32_vec_ext_v2di:
  case X86::BI__builtin_ia32_vextractf128_pd256:
  case X86::BI__builtin_ia32_vextractf128_ps256:
  case X86::BI__builtin_ia32_vextractf128_si256:
  case X86::BI__builtin_ia32_extract128i256:
  case X86::BI__builtin_ia32_extractf64x4_mask:
  case X86::BI__builtin_ia32_extracti64x4_mask:
  case X86::BI__builtin_ia32_extractf32x8_mask:
  case X86::BI__builtin_ia32_extracti32x8_mask:
  case X86::BI__builtin_ia32_extractf64x2_256_mask:
  case X86::BI__builtin_ia32_extracti64x2_256_mask:
  case X86::BI__builtin_ia32_extractf32x4_256_mask:
  case X86::BI__builtin_ia32_extracti32x4_256_mask:
    i = 1; l = 0; u = 1;
    break;
  // Immediate is argument 2, range [0, 1].
  case X86::BI__builtin_ia32_vec_set_v2di:
  case X86::BI__builtin_ia32_vinsertf128_pd256:
  case X86::BI__builtin_ia32_vinsertf128_ps256:
  case X86::BI__builtin_ia32_vinsertf128_si256:
  case X86::BI__builtin_ia32_insert128i256:
  case X86::BI__builtin_ia32_insertf32x8:
  case X86::BI__builtin_ia32_inserti32x8:
  case X86::BI__builtin_ia32_insertf64x4:
  case X86::BI__builtin_ia32_inserti64x4:
  case X86::BI__builtin_ia32_insertf64x2_256:
  case X86::BI__builtin_ia32_inserti64x2_256:
  case X86::BI__builtin_ia32_insertf32x4_256:
  case X86::BI__builtin_ia32_inserti32x4_256:
    i = 2; l = 0; u = 1;
    break;
  // Immediate is argument 1, range [0, 3].
  case X86::BI__builtin_ia32_vpermilpd:
  case X86::BI__builtin_ia32_vec_ext_v4hi:
  case X86::BI__builtin_ia32_vec_ext_v4si:
  case X86::BI__builtin_ia32_vec_ext_v4sf:
  case X86::BI__builtin_ia32_vec_ext_v4di:
  case X86::BI__builtin_ia32_extractf32x4_mask:
  case X86::BI__builtin_ia32_extracti32x4_mask:
  case X86::BI__builtin_ia32_extractf64x2_512_mask:
  case X86::BI__builtin_ia32_extracti64x2_512_mask:
    i = 1; l = 0; u = 3;
    break;
  // Immediate is argument 1, range [0, 7].
  case X86::BI_mm_prefetch:
  case X86::BI__builtin_ia32_vec_ext_v8hi:
  case X86::BI__builtin_ia32_vec_ext_v8si:
    i = 1; l = 0; u = 7;
    break;
  // Immediate is argument 2, range [0, 3].
  case X86::BI__builtin_ia32_sha1rnds4:
  case X86::BI__builtin_ia32_blendpd:
  case X86::BI__builtin_ia32_shufpd:
  case X86::BI__builtin_ia32_vec_set_v4hi:
  case X86::BI__builtin_ia32_vec_set_v4si:
  case X86::BI__builtin_ia32_vec_set_v4di:
  case X86::BI__builtin_ia32_shuf_f32x4_256:
  case X86::BI__builtin_ia32_shuf_f64x2_256:
  case X86::BI__builtin_ia32_shuf_i32x4_256:
  case X86::BI__builtin_ia32_shuf_i64x2_256:
  case X86::BI__builtin_ia32_insertf64x2_512:
  case X86::BI__builtin_ia32_inserti64x2_512:
  case X86::BI__builtin_ia32_insertf32x4:
  case X86::BI__builtin_ia32_inserti32x4:
    i = 2; l = 0; u = 3;
    break;
  // Immediate is argument 3, range [0, 3].
  case X86::BI__builtin_ia32_vpermil2pd:
  case X86::BI__builtin_ia32_vpermil2pd256:
  case X86::BI__builtin_ia32_vpermil2ps:
  case X86::BI__builtin_ia32_vpermil2ps256:
    i = 3; l = 0; u = 3;
    break;
  // Immediate is argument 2, range [0, 7].
  case X86::BI__builtin_ia32_cmpb128_mask:
  case X86::BI__builtin_ia32_cmpw128_mask:
  case X86::BI__builtin_ia32_cmpd128_mask:
  case X86::BI__builtin_ia32_cmpq128_mask:
  case X86::BI__builtin_ia32_cmpb256_mask:
  case X86::BI__builtin_ia32_cmpw256_mask:
  case X86::BI__builtin_ia32_cmpd256_mask:
  case X86::BI__builtin_ia32_cmpq256_mask:
  case X86::BI__builtin_ia32_cmpb512_mask:
  case X86::BI__builtin_ia32_cmpw512_mask:
  case X86::BI__builtin_ia32_cmpd512_mask:
  case X86::BI__builtin_ia32_cmpq512_mask:
  case X86::BI__builtin_ia32_ucmpb128_mask:
  case X86::BI__builtin_ia32_ucmpw128_mask:
  case X86::BI__builtin_ia32_ucmpd128_mask:
  case X86::BI__builtin_ia32_ucmpq128_mask:
  case X86::BI__builtin_ia32_ucmpb256_mask:
  case X86::BI__builtin_ia32_ucmpw256_mask:
  case X86::BI__builtin_ia32_ucmpd256_mask:
  case X86::BI__builtin_ia32_ucmpq256_mask:
  case X86::BI__builtin_ia32_ucmpb512_mask:
  case X86::BI__builtin_ia32_ucmpw512_mask:
  case X86::BI__builtin_ia32_ucmpd512_mask:
  case X86::BI__builtin_ia32_ucmpq512_mask:
  case X86::BI__builtin_ia32_vpcomub:
  case X86::BI__builtin_ia32_vpcomuw:
  case X86::BI__builtin_ia32_vpcomud:
  case X86::BI__builtin_ia32_vpcomuq:
  case X86::BI__builtin_ia32_vpcomb:
  case X86::BI__builtin_ia32_vpcomw:
  case X86::BI__builtin_ia32_vpcomd:
  case X86::BI__builtin_ia32_vpcomq:
  case X86::BI__builtin_ia32_vec_set_v8hi:
  case X86::BI__builtin_ia32_vec_set_v8si:
    i = 2; l = 0; u = 7;
    break;
  // Immediate is argument 1, range [0, 15].
  case X86::BI__builtin_ia32_vpermilpd256:
  case X86::BI__builtin_ia32_roundps:
  case X86::BI__builtin_ia32_roundpd:
  case X86::BI__builtin_ia32_roundps256:
  case X86::BI__builtin_ia32_roundpd256:
  case X86::BI__builtin_ia32_getmantpd128_mask:
  case X86::BI__builtin_ia32_getmantpd256_mask:
  case X86::BI__builtin_ia32_getmantps128_mask:
  case X86::BI__builtin_ia32_getmantps256_mask:
  case X86::BI__builtin_ia32_getmantpd512_mask:
  case X86::BI__builtin_ia32_getmantps512_mask:
  case X86::BI__builtin_ia32_getmantph128_mask:
  case X86::BI__builtin_ia32_getmantph256_mask:
  case X86::BI__builtin_ia32_getmantph512_mask:
  case X86::BI__builtin_ia32_vec_ext_v16qi:
  case X86::BI__builtin_ia32_vec_ext_v16hi:
    i = 1; l = 0; u = 15;
    break;
  // Immediate is argument 2, range [0, 15].
  case X86::BI__builtin_ia32_pblendd128:
  case X86::BI__builtin_ia32_blendps:
  case X86::BI__builtin_ia32_blendpd256:
  case X86::BI__builtin_ia32_shufpd256:
  case X86::BI__builtin_ia32_roundss:
  case X86::BI__builtin_ia32_roundsd:
  case X86::BI__builtin_ia32_rangepd128_mask:
  case X86::BI__builtin_ia32_rangepd256_mask:
  case X86::BI__builtin_ia32_rangepd512_mask:
  case X86::BI__builtin_ia32_rangeps128_mask:
  case X86::BI__builtin_ia32_rangeps256_mask:
  case X86::BI__builtin_ia32_rangeps512_mask:
  case X86::BI__builtin_ia32_getmantsd_round_mask:
  case X86::BI__builtin_ia32_getmantss_round_mask:
  case X86::BI__builtin_ia32_getmantsh_round_mask:
  case X86::BI__builtin_ia32_vec_set_v16qi:
  case X86::BI__builtin_ia32_vec_set_v16hi:
    i = 2; l = 0; u = 15;
    break;
  // Immediate is argument 1, range [0, 31].
  case X86::BI__builtin_ia32_vec_ext_v32qi:
    i = 1; l = 0; u = 31;
    break;
  // Immediate is argument 2, range [0, 31].
  case X86::BI__builtin_ia32_cmpps:
  case X86::BI__builtin_ia32_cmpss:
  case X86::BI__builtin_ia32_cmppd:
  case X86::BI__builtin_ia32_cmpsd:
  case X86::BI__builtin_ia32_cmpps256:
  case X86::BI__builtin_ia32_cmppd256:
  case X86::BI__builtin_ia32_cmpps128_mask:
  case X86::BI__builtin_ia32_cmppd128_mask:
  case X86::BI__builtin_ia32_cmpps256_mask:
  case X86::BI__builtin_ia32_cmppd256_mask:
  case X86::BI__builtin_ia32_cmpps512_mask:
  case X86::BI__builtin_ia32_cmppd512_mask:
  case X86::BI__builtin_ia32_cmpsd_mask:
  case X86::BI__builtin_ia32_cmpss_mask:
  case X86::BI__builtin_ia32_vec_set_v32qi:
    i = 2; l = 0; u = 31;
    break;
  // Immediate is argument 1, full byte range [0, 255].
  case X86::BI__builtin_ia32_permdf256:
  case X86::BI__builtin_ia32_permdi256:
  case X86::BI__builtin_ia32_permdf512:
  case X86::BI__builtin_ia32_permdi512:
  case X86::BI__builtin_ia32_vpermilps:
  case X86::BI__builtin_ia32_vpermilps256:
  case X86::BI__builtin_ia32_vpermilpd512:
  case X86::BI__builtin_ia32_vpermilps512:
  case X86::BI__builtin_ia32_pshufd:
  case X86::BI__builtin_ia32_pshufd256:
  case X86::BI__builtin_ia32_pshufd512:
  case X86::BI__builtin_ia32_pshufhw:
  case X86::BI__builtin_ia32_pshufhw256:
  case X86::BI__builtin_ia32_pshufhw512:
  case X86::BI__builtin_ia32_pshuflw:
  case X86::BI__builtin_ia32_pshuflw256:
  case X86::BI__builtin_ia32_pshuflw512:
  case X86::BI__builtin_ia32_vcvtps2ph:
  case X86::BI__builtin_ia32_vcvtps2ph_mask:
  case X86::BI__builtin_ia32_vcvtps2ph256:
  case X86::BI__builtin_ia32_vcvtps2ph256_mask:
  case X86::BI__builtin_ia32_vcvtps2ph512_mask:
  case X86::BI__builtin_ia32_rndscaleps_128_mask:
  case X86::BI__builtin_ia32_rndscalepd_128_mask:
  case X86::BI__builtin_ia32_rndscaleps_256_mask:
  case X86::BI__builtin_ia32_rndscalepd_256_mask:
  case X86::BI__builtin_ia32_rndscaleps_mask:
  case X86::BI__builtin_ia32_rndscalepd_mask:
  case X86::BI__builtin_ia32_rndscaleph_mask:
  case X86::BI__builtin_ia32_reducepd128_mask:
  case X86::BI__builtin_ia32_reducepd256_mask:
  case X86::BI__builtin_ia32_reducepd512_mask:
  case X86::BI__builtin_ia32_reduceps128_mask:
  case X86::BI__builtin_ia32_reduceps256_mask:
  case X86::BI__builtin_ia32_reduceps512_mask:
  case X86::BI__builtin_ia32_reduceph128_mask:
  case X86::BI__builtin_ia32_reduceph256_mask:
  case X86::BI__builtin_ia32_reduceph512_mask:
  case X86::BI__builtin_ia32_prold512:
  case X86::BI__builtin_ia32_prolq512:
  case X86::BI__builtin_ia32_prold128:
  case X86::BI__builtin_ia32_prold256:
  case X86::BI__builtin_ia32_prolq128:
  case X86::BI__builtin_ia32_prolq256:
  case X86::BI__builtin_ia32_prord512:
  case X86::BI__builtin_ia32_prorq512:
  case X86::BI__builtin_ia32_prord128:
  case X86::BI__builtin_ia32_prord256:
  case X86::BI__builtin_ia32_prorq128:
  case X86::BI__builtin_ia32_prorq256:
  case X86::BI__builtin_ia32_fpclasspd128_mask:
  case X86::BI__builtin_ia32_fpclasspd256_mask:
  case X86::BI__builtin_ia32_fpclassps128_mask:
  case X86::BI__builtin_ia32_fpclassps256_mask:
  case X86::BI__builtin_ia32_fpclassps512_mask:
  case X86::BI__builtin_ia32_fpclasspd512_mask:
  case X86::BI__builtin_ia32_fpclassph128_mask:
  case X86::BI__builtin_ia32_fpclassph256_mask:
  case X86::BI__builtin_ia32_fpclassph512_mask:
  case X86::BI__builtin_ia32_fpclasssd_mask:
  case X86::BI__builtin_ia32_fpclassss_mask:
  case X86::BI__builtin_ia32_fpclasssh_mask:
  case X86::BI__builtin_ia32_pslldqi128_byteshift:
  case X86::BI__builtin_ia32_pslldqi256_byteshift:
  case X86::BI__builtin_ia32_pslldqi512_byteshift:
  case X86::BI__builtin_ia32_psrldqi128_byteshift:
  case X86::BI__builtin_ia32_psrldqi256_byteshift:
  case X86::BI__builtin_ia32_psrldqi512_byteshift:
  case X86::BI__builtin_ia32_kshiftliqi:
  case X86::BI__builtin_ia32_kshiftlihi:
  case X86::BI__builtin_ia32_kshiftlisi:
  case X86::BI__builtin_ia32_kshiftlidi:
  case X86::BI__builtin_ia32_kshiftriqi:
  case X86::BI__builtin_ia32_kshiftrihi:
  case X86::BI__builtin_ia32_kshiftrisi:
  case X86::BI__builtin_ia32_kshiftridi:
    i = 1; l = 0; u = 255;
    break;
  // Immediate is argument 2, full byte range [0, 255].
  case X86::BI__builtin_ia32_vperm2f128_pd256:
  case X86::BI__builtin_ia32_vperm2f128_ps256:
  case X86::BI__builtin_ia32_vperm2f128_si256:
  case X86::BI__builtin_ia32_permti256:
  case X86::BI__builtin_ia32_pblendw128:
  case X86::BI__builtin_ia32_pblendw256:
  case X86::BI__builtin_ia32_blendps256:
  case X86::BI__builtin_ia32_pblendd256:
  case X86::BI__builtin_ia32_palignr128:
  case X86::BI__builtin_ia32_palignr256:
  case X86::BI__builtin_ia32_palignr512:
  case X86::BI__builtin_ia32_alignq512:
  case X86::BI__builtin_ia32_alignd512:
  case X86::BI__builtin_ia32_alignd128:
  case X86::BI__builtin_ia32_alignd256:
  case X86::BI__builtin_ia32_alignq128:
  case X86::BI__builtin_ia32_alignq256:
  case X86::BI__builtin_ia32_vcomisd:
  case X86::BI__builtin_ia32_vcomiss:
  case X86::BI__builtin_ia32_shuf_f32x4:
  case X86::BI__builtin_ia32_shuf_f64x2:
  case X86::BI__builtin_ia32_shuf_i32x4:
  case X86::BI__builtin_ia32_shuf_i64x2:
  case X86::BI__builtin_ia32_shufpd512:
  case X86::BI__builtin_ia32_shufps:
  case X86::BI__builtin_ia32_shufps256:
  case X86::BI__builtin_ia32_shufps512:
  case X86::BI__builtin_ia32_dbpsadbw128:
  case X86::BI__builtin_ia32_dbpsadbw256:
  case X86::BI__builtin_ia32_dbpsadbw512:
  case X86::BI__builtin_ia32_vpshldd128:
  case X86::BI__builtin_ia32_vpshldd256:
  case X86::BI__builtin_ia32_vpshldd512:
  case X86::BI__builtin_ia32_vpshldq128:
  case X86::BI__builtin_ia32_vpshldq256:
  case X86::BI__builtin_ia32_vpshldq512:
  case X86::BI__builtin_ia32_vpshldw128:
  case X86::BI__builtin_ia32_vpshldw256:
  case X86::BI__builtin_ia32_vpshldw512:
  case X86::BI__builtin_ia32_vpshrdd128:
  case X86::BI__builtin_ia32_vpshrdd256:
  case X86::BI__builtin_ia32_vpshrdd512:
  case X86::BI__builtin_ia32_vpshrdq128:
  case X86::BI__builtin_ia32_vpshrdq256:
  case X86::BI__builtin_ia32_vpshrdq512:
  case X86::BI__builtin_ia32_vpshrdw128:
  case X86::BI__builtin_ia32_vpshrdw256:
  case X86::BI__builtin_ia32_vpshrdw512:
    i = 2; l = 0; u = 255;
    break;
  // Immediate is argument 3, full byte range [0, 255].
  case X86::BI__builtin_ia32_fixupimmpd512_mask:
  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
  case X86::BI__builtin_ia32_fixupimmps512_mask:
  case X86::BI__builtin_ia32_fixupimmps512_maskz:
  case X86::BI__builtin_ia32_fixupimmsd_mask:
  case X86::BI__builtin_ia32_fixupimmsd_maskz:
  case X86::BI__builtin_ia32_fixupimmss_mask:
  case X86::BI__builtin_ia32_fixupimmss_maskz:
  case X86::BI__builtin_ia32_fixupimmpd128_mask:
  case X86::BI__builtin_ia32_fixupimmpd128_maskz:
  case X86::BI__builtin_ia32_fixupimmpd256_mask:
  case X86::BI__builtin_ia32_fixupimmpd256_maskz:
  case X86::BI__builtin_ia32_fixupimmps128_mask:
  case X86::BI__builtin_ia32_fixupimmps128_maskz:
  case X86::BI__builtin_ia32_fixupimmps256_mask:
  case X86::BI__builtin_ia32_fixupimmps256_maskz:
  case X86::BI__builtin_ia32_pternlogd512_mask:
  case X86::BI__builtin_ia32_pternlogd512_maskz:
  case X86::BI__builtin_ia32_pternlogq512_mask:
  case X86::BI__builtin_ia32_pternlogq512_maskz:
  case X86::BI__builtin_ia32_pternlogd128_mask:
  case X86::BI__builtin_ia32_pternlogd128_maskz:
  case X86::BI__builtin_ia32_pternlogd256_mask:
  case X86::BI__builtin_ia32_pternlogd256_maskz:
  case X86::BI__builtin_ia32_pternlogq128_mask:
  case X86::BI__builtin_ia32_pternlogq128_maskz:
  case X86::BI__builtin_ia32_pternlogq256_mask:
  case X86::BI__builtin_ia32_pternlogq256_maskz:
    i = 3; l = 0; u = 255;
    break;
  // Prefetch hint: argument 4, range [2, 3].
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    i = 4; l = 2; u = 3;
    break;
  // Immediate is argument 4, full byte range [0, 255].
  case X86::BI__builtin_ia32_reducesd_mask:
  case X86::BI__builtin_ia32_reducess_mask:
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
  case X86::BI__builtin_ia32_rndscalesh_round_mask:
  case X86::BI__builtin_ia32_reducesh_mask:
    i = 4; l = 0; u = 255;
    break;
  }

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have out-of-
  // range values. These need to code generate, but don't need to necessarily
  // make any sense. We use a warning that defaults to an error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}
4753 
4754 /// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo
4755 /// parameter with the FormatAttr's correct format_idx and firstDataArg.
4756 /// Returns true when the format fits the function and the FormatStringInfo has
4757 /// been populated.
4758 bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
4759  FormatStringInfo *FSI) {
4760  FSI->HasVAListArg = Format->getFirstArg() == 0;
4761  FSI->FormatIdx = Format->getFormatIdx() - 1;
4762  FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;
4763 
4764  // The way the format attribute works in GCC, the implicit this argument
4765  // of member functions is counted. However, it doesn't appear in our own
4766  // lists, so decrement format_idx in that case.
4767  if (IsCXXMember) {
4768  if(FSI->FormatIdx == 0)
4769  return false;
4770  --FSI->FormatIdx;
4771  if (FSI->FirstDataArg != 0)
4772  --FSI->FirstDataArg;
4773  }
4774  return true;
4775 }
4776 
/// Checks if the given expression evaluates to null.
///
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
  // If the expression has non-null type, it doesn't evaluate to null.
  // NOTE(review): the continuation of this 'if' (the initializer that obtains
  // the expression type's nullability) appears to have been lost in the
  // extraction of this view — confirm against the original source.
  if (auto nullability
    if (*nullability == NullabilityKind::NonNull)
      return false;
  }

  // As a special case, transparent unions initialized with zero are
  // considered null for the purposes of the nonnull attribute.
  if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
    if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
      if (const CompoundLiteralExpr *CLE =
          dyn_cast<CompoundLiteralExpr>(Expr))
        if (const InitListExpr *ILE =
            dyn_cast<InitListExpr>(CLE->getInitializer()))
          // Evaluate the union's single initializer in place of the union.
          Expr = ILE->getInit(0);
  }

  // Only report null when the expression is non-dependent and constant-folds
  // to a false boolean condition (i.e. a known null value).
  bool Result;
  return (!Expr->isValueDependent() &&
          Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
          !Result);
}
4804 
                                 const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc) {
  // NOTE(review): the opening line of this helper's signature appears to
  // have been lost in the extraction of this view.
  // If the argument provably evaluates to null, warn at the call site; the
  // diagnostic is runtime-behavior based so dead code is not flagged.
  if (CheckNonNullExpr(S, ArgExpr))
    S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
                          S.PDiag(diag::warn_null_arg)
                              << ArgExpr->getSourceRange());
}
4813 
4814 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
4815  FormatStringInfo FSI;
4816  if ((GetFormatStringType(Format) == FST_NSString) &&
4817  getFormatStringInfo(Format, false, &FSI)) {
4818  Idx = FSI.FormatIdx;
4819  return true;
4820  }
4821  return false;
4822 }
4823 
/// Diagnose use of %s directive in an NSString which is being passed
/// as formatting string to formatting method.
///
/// NOTE(review): the line carrying this function's name and first parameter,
/// and the line declaring 'SFFamily', appear to have been lost in the
/// extraction of this view — confirm against the original source.
static void
                                        const NamedDecl *FDecl,
                                        Expr **Args,
                                        unsigned NumArgs) {
  unsigned Idx = 0;
  bool Format = false;
  // CFString-family callees use a fixed format-argument position (index 2);
  // otherwise consult the declaration's format attributes.
  if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
    Idx = 2;
    Format = true;
  }
  else
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      if (S.GetFormatNSStringIdx(I, Idx)) {
        Format = true;
        break;
      }
    }
  // Nothing to check if no format argument exists or the call is too short.
  if (!Format || NumArgs <= Idx)
    return;
  // Look through a C-style cast, then paren/implicit casts, to find the
  // literal format string (ObjC string literal or plain string literal).
  const Expr *FormatExpr = Args[Idx];
  if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
    FormatExpr = CSCE->getSubExpr();
  const StringLiteral *FormatString;
  if (const ObjCStringLiteral *OSL =
          dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
    FormatString = OSL->getString();
  else
    FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
  if (!FormatString)
    return;
  if (S.FormatStringHasSArg(FormatString)) {
    // Warn on the %s directive itself, then point at the callee declaration.
    S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
        << "%s" << 1 << 1;
    S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
        << FDecl->getDeclName();
  }
}
4865 
/// Determine whether the given type has a non-null nullability annotation.
///
/// NOTE(review): the signature line of this helper appears to have been lost
/// in the extraction of this view; the body checks the type's nullability
/// against NullabilityKind::NonNull.
  if (auto nullability = type->getNullability(ctx))
    return *nullability == NullabilityKind::NonNull;

  // No nullability annotation at all: not provably non-null.
  return false;
}
4873 
/// NOTE(review): the opening line of this helper's signature and the line
/// declaring the 'Args' parameter appear to have been lost in the extraction
/// of this view — confirm against the original source.
                                  const NamedDecl *FDecl,
                                  const FunctionProtoType *Proto,
                                  SourceLocation CallSiteLoc) {
  assert((FDecl || Proto) && "Need a function declaration or prototype");

  // Already checked by constant evaluator.
  if (S.isConstantEvaluated())
    return;
  // Check the attributes attached to the method/function itself.
  llvm::SmallBitVector NonNullArgs;
  if (FDecl) {
    // Handle the nonnull attribute on the function/method declaration itself.
    for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
      if (!NonNull->args_size()) {
        // Easy case: all pointer arguments are nonnull.
        for (const auto *Arg : Args)
          if (S.isValidPointerAttrType(Arg->getType()))
            CheckNonNullArgument(S, Arg, CallSiteLoc);
        return;
      }

      // Otherwise record only the explicitly listed argument indices,
      // skipping any index that is out of range for this call.
      for (const ParamIdx &Idx : NonNull->args()) {
        unsigned IdxAST = Idx.getASTIndex();
        if (IdxAST >= Args.size())
          continue;
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());
        NonNullArgs.set(IdxAST);
      }
    }
  }

  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
    // Handle the nonnull attribute on the parameters of the
    // function/method.
    ArrayRef<ParmVarDecl*> parms;
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
      parms = FD->parameters();
    else
      parms = cast<ObjCMethodDecl>(FDecl)->parameters();

    // A parameter is non-null if it carries the attribute directly or its
    // type has a _Nonnull annotation.
    unsigned ParamIndex = 0;
    for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
         I != E; ++I, ++ParamIndex) {
      const ParmVarDecl *PVD = *I;
      if (PVD->hasAttr<NonNullAttr>() ||
          isNonNullType(S.Context, PVD->getType())) {
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());

        NonNullArgs.set(ParamIndex);
      }
    }
  } else {
    // If we have a non-function, non-method declaration but no
    // function prototype, try to dig out the function prototype.
    if (!Proto) {
      if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
        QualType type = VD->getType().getNonReferenceType();
        if (auto pointerType = type->getAs<PointerType>())
          type = pointerType->getPointeeType();
        else if (auto blockType = type->getAs<BlockPointerType>())
          type = blockType->getPointeeType();
        // FIXME: data member pointers?

        // Dig out the function prototype, if there is one.
        Proto = type->getAs<FunctionProtoType>();
      }
    }

    // Fill in non-null argument information from the nullability
    // information on the parameter types (if we have them).
    if (Proto) {
      unsigned Index = 0;
      for (auto paramType : Proto->getParamTypes()) {
        if (isNonNullType(S.Context, paramType)) {
          if (NonNullArgs.empty())
            NonNullArgs.resize(Args.size());

          NonNullArgs.set(Index);
        }

        ++Index;
      }
    }
  }

  // Check for non-null arguments.
  for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
       ArgIndex != ArgIndexEnd; ++ArgIndex) {
    if (NonNullArgs[ArgIndex])
      CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc);
  }
}
4970 
4971 /// Warn if a pointer or reference argument passed to a function points to an
4972 /// object that is less aligned than the parameter. This can happen when
4973 /// creating a typedef with a lower alignment than the original type and then
4974 /// calling functions defined in terms of the original type.
4975 void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
4976  StringRef ParamName, QualType ArgTy,
4977  QualType ParamTy) {
4978 
4979  // If a function accepts a pointer or reference type
4980  if (!ParamTy->isPointerType() && !ParamTy->isReferenceType())
4981  return;
4982 
4983  // If the parameter is a pointer type, get the pointee type for the
4984  // argument too. If the parameter is a reference type, don't try to get
4985  // the pointee type for the argument.
4986  if (ParamTy->isPointerType())
4987  ArgTy = ArgTy->getPointeeType();
4988 
4989  // Remove reference or pointer
4990  ParamTy = ParamTy->getPointeeType();
4991 
4992  // Find expected alignment, and the actual alignment of the passed object.
4993  // getTypeAlignInChars requires complete types
4994  if (ArgTy.isNull() || ParamTy->isIncompleteType() ||
4995  ArgTy->isIncompleteType() || ParamTy->isUndeducedType() ||
4996  ArgTy->isUndeducedType())
4997  return;
4998 
4999  CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy);
5000  CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy);
5001 
5002  // If the argument is less aligned than the parameter, there is a
5003  // potential alignment issue.
5004  if (ArgAlign < ParamAlign)
5005  Diag(Loc, diag::warn_param_mismatched_alignment)
5006  << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity()
5007  << ParamName << FDecl;
5008 }
5009 
/// Handles the checks for format strings, non-POD arguments to vararg
/// functions, NULL arguments passed to non-NULL parameters, and diagnose_if
/// attributes.
///
/// \param FDecl the callee declaration, if known (may be null)
/// \param Proto the callee's prototype, if known (may be null)
/// \param ThisArg the implicit object argument for member calls, or null
/// \param Args the explicit call arguments
/// \param IsMemberFunction true for member-function call syntax
/// \param Loc location used for diagnostics
/// \param Range source range of the callee
/// \param CallType which flavor of variadic call this is, if any
void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
                     const Expr *ThisArg, ArrayRef<const Expr *> Args,
                     bool IsMemberFunction, SourceLocation Loc,
                     SourceRange Range, VariadicCallType CallType) {
  // FIXME: We should check as much as we can in the template definition.
  if (CurContext->isDependentContext())
    return;

  // Printf and scanf checking.
  llvm::SmallBitVector CheckedVarArgs;
  if (FDecl) {
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      // Only create vector if there are format attributes.
      CheckedVarArgs.resize(Args.size());

      CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
                           CheckedVarArgs);
    }
  }

  // Refuse POD arguments that weren't caught by the format string
  // checks above. MSVC's __noop is exempt: its arguments are never evaluated.
  auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
  if (CallType != VariadicDoesNotApply &&
      (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
    // Variadic arguments begin after the declared parameters; take the
    // parameter count from whichever of prototype / function / ObjC method
    // is available.
    unsigned NumParams = Proto ? Proto->getNumParams()
                       : FDecl && isa<FunctionDecl>(FDecl)
                           ? cast<FunctionDecl>(FDecl)->getNumParams()
                       : FDecl && isa<ObjCMethodDecl>(FDecl)
                           ? cast<ObjCMethodDecl>(FDecl)->param_size()
                       : 0;

    for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        // Skip arguments already vetted by the format-string checker.
        if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
          checkVariadicArgument(Arg, CallType);
      }
    }
  }

  if (FDecl || Proto) {
    CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);

    // Type safety checking.
    if (FDecl) {
      for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
        CheckArgumentWithTypeTag(I, Args, Loc);
    }
  }

  // Check that passed arguments match the alignment of original arguments.
  // Try to get the missing prototype from the declaration.
  if (!Proto && FDecl) {
    const auto *FT = FDecl->getFunctionType();
    if (isa_and_nonnull<FunctionProtoType>(FT))
      Proto = cast<FunctionProtoType>(FDecl->getFunctionType());
  }
  if (Proto) {
    // For variadic functions, we may have more args than parameters.
    // For some K&R functions, we may have less args than parameters.
    const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size());
    for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        if (Arg->containsErrors())
          continue;

        QualType ParamTy = Proto->getParamType(ArgIdx);
        QualType ArgTy = Arg->getType();
        // Parameter "names" are reported as 1-based positions.
        CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1),
                          ArgTy, ParamTy);
      }
    }
  }

  // Validate a constant alloc_align argument: it must be a power of two and
  // no greater than the maximum supported alignment.
  if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
    auto *AA = FDecl->getAttr<AllocAlignAttr>();
    const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
    if (!Arg->isValueDependent()) {
      Expr::EvalResult Align;
      if (Arg->EvaluateAsInt(Align, Context)) {
        const llvm::APSInt &I = Align.Val.getInt();
        if (!I.isPowerOf2())
          Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two)
              << Arg->getSourceRange();

        if (I > Sema::MaximumAlignment)
          Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great)
              << Arg->getSourceRange() << Sema::MaximumAlignment;
      }
    }
  }

  if (FD)
    diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
}
5110 
/// CheckConstructorCall - Check a constructor call for correctness and safety
/// properties not enforced by the C type system.
///
/// NOTE(review): the continuation line of this signature declaring the 'Args'
/// parameter appears to have been lost in the extraction of this view —
/// confirm against the original source.
void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
                                const FunctionProtoType *Proto,
                                SourceLocation Loc) {
  VariadicCallType CallType =
      Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;

  // There is no expression for 'this' in a constructor call, so wrap both the
  // constructed object's type and the declared object type in pointers for
  // the alignment comparison.
  auto *Ctor = cast<CXXConstructorDecl>(FDecl);
  CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType),
                    Context.getPointerType(Ctor->getThisObjectType()));

  checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
            Loc, SourceRange(), CallType);
}
5127 
/// CheckFunctionCall - Check a direct function call for various correctness
/// and safety properties not strictly enforced by the C type system.
///
/// Runs the shared checkCall machinery, 'this'-alignment checking for member
/// calls, and a family of per-function checks (TCB, abs, max(x, 0),
/// CF/NSString %s directives, and str*/mem* argument validation).
/// Always returns false (the call is never invalidated here).
bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                             const FunctionProtoType *Proto) {
  bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
                              isa<CXXMethodDecl>(FDecl);
  bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
                          IsMemberOperatorCall;
  VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
                                                  TheCall->getCallee());
  Expr** Args = TheCall->getArgs();
  unsigned NumArgs = TheCall->getNumArgs();

  Expr *ImplicitThis = nullptr;
  if (IsMemberOperatorCall) {
    // If this is a call to a member operator, hide the first argument
    // from checkCall.
    // FIXME: Our choice of AST representation here is less than ideal.
    ImplicitThis = Args[0];
    ++Args;
    --NumArgs;
  } else if (IsMemberFunction)
    ImplicitThis =
        cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();

  if (ImplicitThis) {
    // ImplicitThis may or may not be a pointer, depending on whether . or -> is
    // used.
    QualType ThisType = ImplicitThis->getType();
    if (!ThisType->isPointerType()) {
      assert(!ThisType->isReferenceType());
      ThisType = Context.getPointerType(ThisType);
    }

    QualType ThisTypeFromDecl =
        Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType());

    // Check that the object the method runs on is at least as aligned as the
    // method's declared object type requires.
    CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType,
                      ThisTypeFromDecl);
  }

  checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs),
            IsMemberFunction, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  IdentifierInfo *FnInfo = FDecl->getIdentifier();
  // None of the checks below are needed for functions that don't have
  // simple names (e.g., C++ conversion functions).
  if (!FnInfo)
    return false;

  CheckTCBEnforcement(TheCall, FDecl);

  CheckAbsoluteValueFunction(TheCall, FDecl);
  CheckMaxUnsignedZero(TheCall, FDecl);

  if (getLangOpts().ObjC)
    DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);

  // A nonzero kind identifies a known memory/string function; 0 means the
  // callee is not one of them.
  unsigned CMId = FDecl->getMemoryFunctionKind();

  // Handle memory setting and copying functions.
  switch (CMId) {
  case 0:
    return false;
  case Builtin::BIstrlcpy: // fallthrough
  case Builtin::BIstrlcat:
    CheckStrlcpycatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIstrncat:
    CheckStrncatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIfree:
    CheckFreeArguments(TheCall);
    break;
  default:
    // All remaining kinds are memcpy/memset-style functions.
    CheckMemaccessArguments(TheCall, CMId, FnInfo);
  }

  return false;
}
5209 
5210 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
5211  ArrayRef<const Expr *> Args) {
5212  VariadicCallType CallType =
5213  Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;
5214 
5215  checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
5216  /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
5217  CallType);
5218 
5219  return false;
5220 }
5221 
5222 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
5223  const FunctionProtoType *Proto) {
5224  QualType Ty;
5225  if (const auto *V = dyn_cast<VarDecl>(NDecl))
5226  Ty = V->getType().getNonReferenceType();
5227  else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
5228  Ty = F->getType().getNonReferenceType();
5229  else
5230  return false;
5231 
5232  if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
5233  !Ty->isFunctionProtoType())
5234  return false;
5235 
5236  VariadicCallType CallType;
5237  if (!Proto || !Proto->isVariadic()) {
5238  CallType = VariadicDoesNotApply;
5239  } else if (Ty->isBlockPointerType()) {
5240  CallType = VariadicBlock;
5241  } else { // Ty->isFunctionPointerType()
5242  CallType = VariadicFunction;
5243  }
5244 
5245  checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
5246  llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
5247  /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
5248  TheCall->getCallee()->getSourceRange(), CallType);
5249 
5250  return false;
5251 }
5252 
5253 /// Checks function calls when a FunctionDecl or a NamedDecl is not available,
5254 /// such as function pointers returned from functions.
5255 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
5256  VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
5257  TheCall->getCallee());
5258  checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
5259  llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
5260  /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
5261  TheCall->getCallee()->getSourceRange(), CallType);
5262 
5263  return false;
5264 }
5265 
5266 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
5267  if (!llvm::isValidAtomicOrderingCABI(Ordering))
5268  return false;
5269 
5270  auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
5271  switch (Op) {
5272  case AtomicExpr::AO__c11_atomic_init:
5273  case AtomicExpr::AO__opencl_atomic_init:
5274  llvm_unreachable("There is no ordering argument for an init");
5275 
5276  case AtomicExpr::AO__c11_atomic_load:
5277  case AtomicExpr::AO__opencl_atomic_load:
5278  case AtomicExpr::AO__atomic_load_n:
5279  case AtomicExpr::AO__atomic_load:
5280  return OrderingCABI != llvm::AtomicOrderingCABI::release &&
5281  OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
5282 
5283  case AtomicExpr::AO__c11_atomic_store:
5284  case AtomicExpr::AO__opencl_atomic_store:
5285  case AtomicExpr::AO__atomic_store:
5286  case AtomicExpr::AO__atomic_store_n:
5287  return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
5288  OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
5289  OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
5290 
5291  default:
5292  return true;
5293  }
5294 }
5295 
5296 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
5297  AtomicExpr::AtomicOp Op) {
5298  CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
5299  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
5300  MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()};
5301  return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()},
5302  DRE->getSourceRange(), TheCall->getRParenLoc(), Args,
5303  Op);
5304 }
5305 
5307  SourceLocation RParenLoc, MultiExprArg Args,
5309  AtomicArgumentOrder ArgOrder) {
5310  // All the non-OpenCL operations take one of the following forms.
5311  // The OpenCL operations take the __c11 forms with one extra argument for
5312  // synchronization scope.
5313  enum {
5314  // C __c11_atomic_init(A *, C)
5315  Init,
5316 
5317  // C __c11_atomic_load(A *, int)
5318  Load,
5319 
5320  // void __atomic_load(A *, CP, int)
5321  LoadCopy,
5322 
5323  // void __atomic_store(A *, CP, int)
5324  Copy,
5325 
5326  // C __c11_atomic_add(A *, M, int)
5327  Arithmetic,
5328 
5329  // C __atomic_exchange_n(A *, CP, int)
5330  Xchg,
5331 
5332  // void __atomic_exchange(A *, C *, CP, int)
5333  GNUXchg,
5334 
5335  // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
5336  C11CmpXchg,
5337 
5338  // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
5339  GNUCmpXchg
5340  } Form = Init;
5341 
5342  const unsigned NumForm = GNUCmpXchg + 1;
5343  const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
5344  const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
5345  // where:
5346  // C is an appropriate type,
5347  // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
5348  // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
5349  // M is C if C is an integer, and ptrdiff_t if C is a pointer, and
5350  // the int parameters are for orderings.
5351 
5352  static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm
5353  && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
5354  "need to update code for modified forms");
5355  static_assert(AtomicExpr::AO__c11_atomic_init == 0 &&
5356  AtomicExpr::AO__c11_atomic_fetch_min + 1 ==
5357  AtomicExpr::AO__atomic_load,
5358  "need to update code for modified C11 atomics");
5359  bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
5360  Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
5361  bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
5362  Op <= AtomicExpr::AO__c11_atomic_fetch_min) ||
5363  IsOpenCL;
5364  bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
5365  Op == AtomicExpr::AO__atomic_store_n ||
5366  Op == AtomicExpr::AO__atomic_exchange_n ||
5367  Op == AtomicExpr::AO__atomic_compare_exchange_n;
5368  bool IsAddSub = false;
5369 
5370  switch (Op) {
5371  case AtomicExpr::AO__c11_atomic_init:
5372  case AtomicExpr::AO__opencl_atomic_init:
5373  Form = Init;
5374  break;
5375 
5376  case AtomicExpr::AO__c11_atomic_load:
5377  case AtomicExpr::AO__opencl_atomic_load:
5378  case AtomicExpr::AO__atomic_load_n:
5379  Form = Load;
5380  break;
5381 
5382  case AtomicExpr::AO__atomic_load:
5383  Form = LoadCopy;
5384  break;
5385 
5386  case AtomicExpr::AO__c11_atomic_store:
5387  case AtomicExpr::AO__opencl_atomic_store:
5388  case AtomicExpr::AO__atomic_store:
5389  case AtomicExpr::AO__atomic_store_n:
5390  Form = Copy;
5391  break;
5392 
5393  case AtomicExpr::AO__c11_atomic_fetch_add:
5394  case AtomicExpr::AO__c11_atomic_fetch_sub:
5395  case AtomicExpr::AO__opencl_atomic_fetch_add:
5396  case AtomicExpr::AO__opencl_atomic_fetch_sub:
5397  case AtomicExpr::AO__atomic_fetch_add:
5398  case AtomicExpr::AO__atomic_fetch_sub:
5399  case AtomicExpr::AO__atomic_add_fetch:
5400  case AtomicExpr::AO__atomic_sub_fetch:
5401  IsAddSub = true;
5402  Form = Arithmetic;
5403  break;
5404  case AtomicExpr::AO__c11_atomic_fetch_and:
5405  case AtomicExpr::AO__c11_atomic_fetch_or:
5406  case AtomicExpr::AO__c11_atomic_fetch_xor:
5407  case AtomicExpr::AO__opencl_atomic_fetch_and:
5408  case AtomicExpr::AO__opencl_atomic_fetch_or:
5409  case AtomicExpr::AO__opencl_atomic_fetch_xor:
5410  case AtomicExpr::AO__atomic_fetch_and:
5411  case AtomicExpr::AO__atomic_fetch_or:
5412  case AtomicExpr::AO__atomic_fetch_xor:
5413  case AtomicExpr::AO__atomic_fetch_nand:
5414  case AtomicExpr::AO__atomic_and_fetch:
5415  case AtomicExpr::AO__atomic_or_fetch:
5416  case AtomicExpr::AO__atomic_xor_fetch:
5417  case AtomicExpr::AO__atomic_nand_fetch:
5418  Form = Arithmetic;
5419  break;
5420  case AtomicExpr::AO__c11_atomic_fetch_min:
5421  case AtomicExpr::AO__c11_atomic_fetch_max:
5422  case AtomicExpr::AO__opencl_atomic_fetch_min:
5423  case AtomicExpr::AO__opencl_atomic_fetch_max:
5424  case AtomicExpr::AO__atomic_min_fetch:
5425  case AtomicExpr::AO__atomic_max_fetch:
5426  case AtomicExpr::AO__atomic_fetch_min:
5427  case AtomicExpr::AO__atomic_fetch_max:
5428  Form = Arithmetic;
5429  break;
5430 
5431  case AtomicExpr::AO__c11_atomic_exchange:
5432  case AtomicExpr::AO__opencl_atomic_exchange:
5433  case AtomicExpr::AO__atomic_exchange_n:
5434  Form = Xchg;
5435  break;
5436 
5437  case AtomicExpr::AO__atomic_exchange:
5438  Form = GNUXchg;
5439  break;
5440 
5441  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
5442  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
5443  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
5444  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
5445  Form = C11CmpXchg;
5446  break;
5447 
5448  case AtomicExpr::AO__atomic_compare_exchange:
5449  case AtomicExpr::AO__atomic_compare_exchange_n:
5450  Form = GNUCmpXchg;
5451  break;
5452  }
5453 
5454  unsigned AdjustedNumArgs = NumArgs[Form];
5455  if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init)
5456  ++AdjustedNumArgs;
5457  // Check we have the right number of arguments.
5458  if (Args.size() < AdjustedNumArgs) {
5459  Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args)
5460  << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
5461  << ExprRange;
5462  return ExprError();
5463  } else if (Args.size() > AdjustedNumArgs) {
5464  Diag(Args[AdjustedNumArgs]->getBeginLoc(),
5465  diag::err_typecheck_call_too_many_args)
5466  << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
5467  << ExprRange;
5468  return ExprError();
5469  }
5470 
5471  // Inspect the first argument of the atomic operation.
5472  Expr *Ptr = Args[0];
5473  ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr);
5474  if (ConvertedPtr.isInvalid())
5475  return ExprError();
5476 
5477  Ptr = ConvertedPtr.get();
5478  const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
5479  if (!pointerType) {
5480  Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
5481  << Ptr->getType() << Ptr->getSourceRange();
5482  return ExprError();
5483  }
5484 
5485  // For a __c11 builtin, this should be a pointer to an _Atomic type.
5486  QualType AtomTy = pointerType->getPointeeType(); // 'A'
5487  QualType ValType = AtomTy; // 'C'
5488  if (IsC11) {
5489  if (!AtomTy->isAtomicType()) {
5490  Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic)
5491  << Ptr->getType() << Ptr->getSourceRange();
5492  return ExprError();
5493  }
5494  if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) ||
5496  Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic)
5497  << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType()
5498  << Ptr->getSourceRange();
5499  return ExprError();
5500  }
5501  ValType = AtomTy->castAs<AtomicType>()->getValueType();
5502  } else if (Form != Load && Form != LoadCopy) {
5503  if (ValType.isConstQualified()) {
5504  Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer)
5505  << Ptr->getType() << Ptr->getSourceRange();
5506  return ExprError();
5507  }
5508  }
5509 
5510  // For an arithmetic operation, the implied arithmetic must be well-formed.
5511  if (Form == Arithmetic) {
5512  // gcc does not enforce these rules for GNU atomics, but we do so for
5513  // sanity.
5514  auto IsAllowedValueType = [&](QualType ValType) {
5515  if (ValType->isIntegerType())
5516  return true;
5517  if (ValType->isPointerType())
5518  return true;
5519  if (!ValType->isFloatingType())
5520  return false;
5521  // LLVM Parser does not allow atomicrmw with x86_fp80 type.
5522  if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) &&
5523  &Context.getTargetInfo().getLongDoubleFormat() ==
5524  &llvm::APFloat::x87DoubleExtended())
5525  return false;
5526  return true;
5527  };
5528  if (IsAddSub && !IsAllowedValueType(ValType)) {
5529  Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp)
5530  << IsC11 << Ptr->getType() << Ptr->getSourceRange();
5531  return ExprError();
5532  }
5533  if (!IsAddSub && !ValType->isIntegerType()) {
5534  Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int)
5535  << IsC11 << Ptr->getType() << Ptr->getSourceRange();
5536  return ExprError();
5537  }
5538  if (IsC11 && ValType->isPointerType() &&
5539  RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(),
5540  diag::err_incomplete_type)) {
5541  return ExprError();
5542  }
5543  } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
5544  // For __atomic_*_n operations, the value type must be a scalar integral or
5545  // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
5546  Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr)
5547  << IsC11 << Ptr->getType() << Ptr->getSourceRange();
5548  return ExprError();
5549  }
5550 
5551  if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
5552  !AtomTy->isScalarType()) {
5553  // For GNU atomics, require a trivially-copyable type. This is not part of
5554  // the GNU atomics specification, but we enforce it for sanity.
5555  Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy)
5556  << Ptr->getType() << Ptr->getSourceRange();
5557  return ExprError();
5558  }
5559 
5560  switch (ValType.getObjCLifetime()) {
5561  case Qualifiers::OCL_None:
5563  // okay
5564  break;
5565 
5566  case Qualifiers::OCL_Weak:
5569  // FIXME: Can this happen? By this point, ValType should be known
5570  // to be trivially copyable.
5571  Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership)
5572  << ValType << Ptr->getSourceRange();
5573  return ExprError();
5574  }
5575 
5576  // All atomic operations have an overload which takes a pointer to a volatile
5577  // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself
5578  // into the result or the other operands. Similarly atomic_load takes a
5579  // pointer to a const 'A'.
5580  ValType.removeLocalVolatile();
5581  ValType.removeLocalConst();
5582  QualType ResultType = ValType;
5583  if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
5584  Form == Init)
5585  ResultType = Context.VoidTy;
5586  else if (Form == C11CmpXchg || Form == GNUCmpXchg)
5587  ResultType = Context.BoolTy;
5588 
5589  // The type of a parameter passed 'by value'. In the GNU atomics, such
5590  // arguments are actually passed as pointers.
5591  QualType ByValType = ValType; // 'CP'
5592  bool IsPassedByAddress = false;
5593  if (!IsC11 && !IsN) {
5594  ByValType = Ptr->getType();
5595  IsPassedByAddress = true;
5596  }
5597 
5598  SmallVector<Expr *, 5> APIOrderedArgs;
5599  if (ArgOrder == Sema::AtomicArgumentOrder::AST) {
5600  APIOrderedArgs.push_back(Args[0]);
5601  switch (Form) {
5602  case Init:
5603  case Load:
5604  APIOrderedArgs.push_back(Args[1]); // Val1/Order
5605  break;
5606  case LoadCopy:
5607  case Copy:
5608  case Arithmetic:
5609  case Xchg:
5610  APIOrderedArgs.push_back(Args[2]); // Val1
5611  APIOrderedArgs.push_back(Args[1]); // Order
5612  break;
5613  case GNUXchg:
5614  APIOrderedArgs.push_back(Args[2]); // Val1
5615  APIOrderedArgs.push_back(Args[3]); // Val2
5616  APIOrderedArgs.push_back(Args[1]); // Order
5617  break;
5618  case C11CmpXchg:
5619  APIOrderedArgs.push_back(Args[2]); // Val1
5620  APIOrderedArgs.push_back(Args[4]); // Val2
5621  APIOrderedArgs.push_back(Args[1]); // Order
5622  APIOrderedArgs.push_back(Args[3]); // OrderFail
5623  break;
5624  case GNUCmpXchg:
5625  APIOrderedArgs.push_back(Args[2]); // Val1
5626  APIOrderedArgs.push_back(Args[4]); // Val2
5627  APIOrderedArgs.push_back(Args[5]); // Weak
5628  APIOrderedArgs.push_back(Args[1]); // Order
5629  APIOrderedArgs.push_back(Args[3]); // OrderFail
5630  break;
5631  }
5632  } else
5633  APIOrderedArgs.append(Args.begin(), Args.end());
5634 
5635  // The first argument's non-CV pointer type is used to deduce the type of
5636  // subsequent arguments, except for:
5637  // - weak flag (always converted to bool)
5638  // - memory order (always converted to int)
5639  // - scope (always converted to int)
5640  for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) {
5641  QualType Ty;
5642  if (i < NumVals[Form] + 1) {
5643  switch (i) {
5644  case 0:
5645  // The first argument is always a pointer. It has a fixed type.
5646  // It is always dereferenced, a nullptr is undefined.
5647  CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
5648  // Nothing else to do: we already know all we want about this pointer.
5649  continue;
5650  case 1:
5651  // The second argument is the non-atomic operand. For arithmetic, this
5652  // is always passed by value, and for a compare_exchange it is always
5653  // passed by address. For the rest, GNU uses by-address and C11 uses
5654  // by-value.
5655  assert(Form != Load);
5656  if (Form == Arithmetic && ValType->isPointerType())
5657  Ty = Context.getPointerDiffType();
5658  else if (Form == Init || Form == Arithmetic)
5659  Ty = ValType;
5660  else if (Form == Copy || Form == Xchg) {
5661  if (IsPassedByAddress) {
5662  // The value pointer is always dereferenced, a nullptr is undefined.
5663  CheckNonNullArgument(*this, APIOrderedArgs[i],
5664  ExprRange.getBegin());
5665  }
5666  Ty = ByValType;
5667  } else {
5668  Expr *ValArg = APIOrderedArgs[i];
5669  // The value pointer is always dereferenced, a nullptr is undefined.
5670  CheckNonNullArgument(*this, ValArg, ExprRange.getBegin());
5671  LangAS AS = LangAS::Default;
5672  // Keep address space of non-atomic pointer type.
5673  if (const PointerType *PtrTy =
5674  ValArg->getType()->getAs<PointerType>()) {
5675  AS = PtrTy->getPointeeType().getAddressSpace();
5676  }
5677  Ty = Context.getPointerType(
5678  Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS));
5679  }
5680  break;
5681  case 2:
5682  // The third argument to compare_exchange / GNU exchange is the desired
5683  // value, either by-value (for the C11 and *_n variant) or as a pointer.
5684  if (IsPassedByAddress)
5685  CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
5686  Ty = ByValType;
5687  break;
5688  case 3:
5689  // The fourth argument to GNU compare_exchange is a 'weak' flag.
5690  Ty = Context.BoolTy;
5691  break;
5692  }
5693  } else {
5694  // The order(s) and scope are always converted to int.
5695  Ty = Context.IntTy;
5696  }
5697 
5698  InitializedEntity Entity =
5699  InitializedEntity::InitializeParameter(Context, Ty, false);
5700  ExprResult Arg = APIOrderedArgs[i];
5701  Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
5702  if (Arg.isInvalid())
5703  return true;
5704  APIOrderedArgs[i] = Arg.get();
5705  }
5706 
5707  // Permute the arguments into a 'consistent' order.
5708  SmallVector<Expr*, 5> SubExprs;
5709  SubExprs.push_back(Ptr);
5710  switch (Form) {
5711  case Init:
5712  // Note, AtomicExpr::getVal1() has a special case for this atomic.
5713  SubExprs.push_back(APIOrderedArgs[1]); // Val1
5714  break;
5715  case Load:
5716  SubExprs.push_back(APIOrderedArgs[1]); // Order
5717  break;
5718  case LoadCopy:
5719  case Copy:
5720  case Arithmetic:
5721  case Xchg:
5722  SubExprs.push_back(APIOrderedArgs[2]); // Order
5723  SubExprs.push_back(APIOrderedArgs[1]); // Val1
5724  break;
5725  case GNUXchg:
5726  // Note, AtomicExpr::getVal2() has a special case for this atomic.
5727  SubExprs.push_back(APIOrderedArgs[3]); // Order
5728  SubExprs.push_back(APIOrderedArgs[1]); // Val1
5729  SubExprs.push_back(APIOrderedArgs[2]); // Val2
5730  break;
5731  case C11CmpXchg:
5732  SubExprs.push_back(APIOrderedArgs[3]); // Order
5733  SubExprs.push_back(APIOrderedArgs[1]); // Val1
5734  SubExprs.push_back(APIOrderedArgs[4]); // OrderFail
5735  SubExprs.push_back(APIOrderedArgs[2]); // Val2
5736  break;
5737  case GNUCmpXchg:
5738  SubExprs.push_back(APIOrderedArgs[4]); // Order
5739  SubExprs.push_back(APIOrderedArgs[1]); // Val1
5740  SubExprs.push_back(APIOrderedArgs[5]); // OrderFail
5741  SubExprs.push_back(APIOrderedArgs[2]); // Val2
5742  SubExprs.push_back(APIOrderedArgs[3]); // Weak
5743  break;
5744  }
5745 
5746  if (SubExprs.size() >= 2 && Form != Init) {
5747  if (Optional<llvm::APSInt> Result =
5748  SubExprs[1]->getIntegerConstantExpr(Context))
5749  if (!isValidOrderingForOp(Result->getSExtValue(), Op))
5750  Diag(SubExprs[1]->getBeginLoc(),
5751  diag::warn_atomic_op_has_invalid_memory_order)
5752  << SubExprs[1]->getSourceRange();
5753  }
5754 
5755  if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
5756  auto *Scope = Args[Args.size() - 1];
5757  if (Optional<llvm::APSInt> Result =
5758  Scope->getIntegerConstantExpr(Context)) {
5759  if (!ScopeModel->isValid(Result->getZExtValue()))
5760  Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
5761  << Scope->getSourceRange();
5762  }
5763  SubExprs.push_back(Scope);
5764  }
5765 
5766  AtomicExpr *AE = new (Context)
5767  AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc);
5768 
5769  if ((Op == AtomicExpr::AO__c11_atomic_load ||
5770  Op == AtomicExpr::AO__c11_atomic_store ||
5771  Op == AtomicExpr::AO__opencl_atomic_load ||
5772  Op == AtomicExpr::AO__opencl_atomic_store ) &&
5773  Context.AtomicUsesUnsupportedLibcall(AE))
5774  Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib)
5775  << ((Op == AtomicExpr::AO__c11_atomic_load ||
5776  Op == AtomicExpr::AO__opencl_atomic_load)
5777  ? 0
5778  : 1);
5779 
5780  if (ValType->isExtIntType()) {
5781  Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_ext_int_prohibit);
5782  return ExprError();
5783  }
5784 
5785  return AE;
5786 }
5787 
5788 /// checkBuiltinArgument - Given a call to a builtin function, perform
5789 /// normal type-checking on the given argument, updating the call in
5790 /// place. This is useful when a builtin function requires custom
5791 /// type-checking for some of its arguments but not necessarily all of
5792 /// them.
5793 ///
5794 /// Returns true on error.
5795 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
5796  FunctionDecl *Fn = E->getDirectCallee();
5797  assert(Fn && "builtin call without direct callee!");
5798 
5799  ParmVarDecl *Param = Fn->getParamDecl(ArgIndex);
5800  InitializedEntity Entity =
5802 
5803  ExprResult Arg = E->getArg(0);
5804  Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
5805  if (Arg.isInvalid())
5806  return true;
5807 
5808  E->setArg(ArgIndex, Arg.get());
5809  return false;
5810 }
5811 
5812 /// We have a call to a function like __sync_fetch_and_add, which is an
5813 /// overloaded function based on the pointer type of its first argument.
5814 /// The main BuildCallExpr routines have already promoted the types of
5815 /// arguments because all of these calls are prototyped as void(...).
5816 ///
5817 /// This function goes through and does final semantic checking for these
5818 /// builtins, as well as generating any warnings.
5819 ExprResult
5820 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
5821  CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
5822  Expr *Callee = TheCall->getCallee();
5823  DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts());
5824  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
5825 
5826  // Ensure that we have at least one argument to do type inference from.
5827  if (TheCall->getNumArgs() < 1) {
5828  Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
5829  << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange();
5830  return