SemaChecking.cpp
1 //===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements extra semantic analysis beyond what is enforced
11 // by the C type system.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "clang/AST/APValue.h"
16 #include "clang/AST/ASTContext.h"
17 #include "clang/AST/Attr.h"
18 #include "clang/AST/AttrIterator.h"
19 #include "clang/AST/CharUnits.h"
20 #include "clang/AST/Decl.h"
21 #include "clang/AST/DeclBase.h"
22 #include "clang/AST/DeclCXX.h"
23 #include "clang/AST/DeclObjC.h"
26 #include "clang/AST/Expr.h"
27 #include "clang/AST/ExprCXX.h"
28 #include "clang/AST/ExprObjC.h"
29 #include "clang/AST/ExprOpenMP.h"
30 #include "clang/AST/NSAPI.h"
33 #include "clang/AST/Stmt.h"
34 #include "clang/AST/TemplateBase.h"
35 #include "clang/AST/Type.h"
36 #include "clang/AST/TypeLoc.h"
40 #include "clang/Basic/CharInfo.h"
41 #include "clang/Basic/Diagnostic.h"
43 #include "clang/Basic/LLVM.h"
50 #include "clang/Basic/Specifiers.h"
51 #include "clang/Basic/SyncScope.h"
54 #include "clang/Basic/TargetInfo.h"
55 #include "clang/Basic/TypeTraits.h"
56 #include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
58 #include "clang/Sema/Lookup.h"
59 #include "clang/Sema/Ownership.h"
60 #include "clang/Sema/Scope.h"
61 #include "clang/Sema/ScopeInfo.h"
62 #include "clang/Sema/Sema.h"
64 #include "llvm/ADT/APFloat.h"
65 #include "llvm/ADT/APInt.h"
66 #include "llvm/ADT/APSInt.h"
67 #include "llvm/ADT/ArrayRef.h"
68 #include "llvm/ADT/DenseMap.h"
69 #include "llvm/ADT/FoldingSet.h"
70 #include "llvm/ADT/None.h"
71 #include "llvm/ADT/Optional.h"
72 #include "llvm/ADT/STLExtras.h"
73 #include "llvm/ADT/SmallBitVector.h"
74 #include "llvm/ADT/SmallPtrSet.h"
75 #include "llvm/ADT/SmallString.h"
76 #include "llvm/ADT/SmallVector.h"
77 #include "llvm/ADT/StringRef.h"
78 #include "llvm/ADT/StringSwitch.h"
79 #include "llvm/ADT/Triple.h"
80 #include "llvm/Support/AtomicOrdering.h"
81 #include "llvm/Support/Casting.h"
82 #include "llvm/Support/Compiler.h"
83 #include "llvm/Support/ConvertUTF.h"
84 #include "llvm/Support/ErrorHandling.h"
85 #include "llvm/Support/Format.h"
86 #include "llvm/Support/Locale.h"
87 #include "llvm/Support/MathExtras.h"
88 #include "llvm/Support/raw_ostream.h"
89 #include <algorithm>
90 #include <cassert>
91 #include <cstddef>
92 #include <cstdint>
93 #include <functional>
94 #include <limits>
95 #include <string>
96 #include <tuple>
97 #include <utility>
98 
99 using namespace clang;
100 using namespace sema;
101 
102 SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
103  unsigned ByteNo) const {
104  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
105  Context.getTargetInfo());
106 }
107 
108 /// Checks that a call expression's argument count is the desired number.
109 /// This is useful when doing custom type-checking. Returns true on error.
110 static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
111  unsigned argCount = call->getNumArgs();
112  if (argCount == desiredArgCount) return false;
113 
114  if (argCount < desiredArgCount)
115  return S.Diag(call->getLocEnd(), diag::err_typecheck_call_too_few_args)
116  << 0 /*function call*/ << desiredArgCount << argCount
117  << call->getSourceRange();
118 
119  // Highlight all the excess arguments.
120  SourceRange range(call->getArg(desiredArgCount)->getLocStart(),
121  call->getArg(argCount - 1)->getLocEnd());
122 
123  return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
124  << 0 /*function call*/ << desiredArgCount << argCount
125  << call->getArg(1)->getSourceRange();
126 }
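// Illustrative usage sketch (added for exposition, not part of the original
// source): a custom builtin checker typically calls checkArgCount() first and
// bails out before inspecting individual arguments. The checker name below is
// hypothetical.
//
//   static bool SemaBuiltinExampleCheck(Sema &S, CallExpr *TheCall) {
//     if (checkArgCount(S, TheCall, 2)) // diagnoses too few/too many arguments
//       return true;                    // true == "error already reported"
//     // ... inspect TheCall->getArg(0) and TheCall->getArg(1) ...
//     return false;
//   }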
127 
128 /// Check that the first argument to __builtin_annotation is an integer
129 /// and the second argument is a non-wide string literal.
130 static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
131  if (checkArgCount(S, TheCall, 2))
132  return true;
133 
134  // First argument should be an integer.
135  Expr *ValArg = TheCall->getArg(0);
136  QualType Ty = ValArg->getType();
137  if (!Ty->isIntegerType()) {
138  S.Diag(ValArg->getLocStart(), diag::err_builtin_annotation_first_arg)
139  << ValArg->getSourceRange();
140  return true;
141  }
142 
143  // Second argument should be a constant string.
144  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
145  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
146  if (!Literal || !Literal->isAscii()) {
147  S.Diag(StrArg->getLocStart(), diag::err_builtin_annotation_second_arg)
148  << StrArg->getSourceRange();
149  return true;
150  }
151 
152  TheCall->setType(Ty);
153  return false;
154 }
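// Illustrative source-level examples of what this check accepts and rejects
// (added for exposition; the variable names are hypothetical):
//
//   int i = 0;
//   int a = __builtin_annotation(i, "hot loop");  // OK: integer + narrow literal
//   __builtin_annotation(1.0f, "oops");  // error: first argument must be an integer
//   __builtin_annotation(i, L"oops");    // error: second argument must be a
//                                        //        non-wide string literal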
155 
156 static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
157  // We need at least one argument.
158  if (TheCall->getNumArgs() < 1) {
159  S.Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least)
160  << 0 << 1 << TheCall->getNumArgs()
161  << TheCall->getCallee()->getSourceRange();
162  return true;
163  }
164 
165  // All arguments should be wide string literals.
166  for (Expr *Arg : TheCall->arguments()) {
167  auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
168  if (!Literal || !Literal->isWide()) {
169  S.Diag(Arg->getLocStart(), diag::err_msvc_annotation_wide_str)
170  << Arg->getSourceRange();
171  return true;
172  }
173  }
174 
175  return false;
176 }
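// Illustrative examples (added for exposition): MSVC's __annotation takes one
// or more wide string literals.
//
//   __annotation(L"category", L"event text");  // OK: all arguments are wide literals
//   __annotation("narrow");                    // error: err_msvc_annotation_wide_str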
177 
178 /// Check that the argument to __builtin_addressof is a glvalue, and set the
179 /// result type to the corresponding pointer type.
180 static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
181  if (checkArgCount(S, TheCall, 1))
182  return true;
183 
184  ExprResult Arg(TheCall->getArg(0));
185  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getLocStart());
186  if (ResultType.isNull())
187  return true;
188 
189  TheCall->setArg(0, Arg.get());
190  TheCall->setType(ResultType);
191  return false;
192 }
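// Illustrative examples (added for exposition):
//
//   int i = 0;
//   int *p = __builtin_addressof(i);  // OK: glvalue operand, result type is int*
//   __builtin_addressof(42);          // error: cannot take the address of an rvalue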
193 
194 static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
195  if (checkArgCount(S, TheCall, 3))
196  return true;
197 
198  // First two arguments should be integers.
199  for (unsigned I = 0; I < 2; ++I) {
200  ExprResult Arg = TheCall->getArg(I);
201  QualType Ty = Arg.get()->getType();
202  if (!Ty->isIntegerType()) {
203  S.Diag(Arg.get()->getLocStart(), diag::err_overflow_builtin_must_be_int)
204  << Ty << Arg.get()->getSourceRange();
205  return true;
206  }
207  InitializedEntity Entity = InitializedEntity::InitializeParameter(
208  S.getASTContext(), Ty, /*consume*/ false);
209  Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
210  if (Arg.isInvalid())
211  return true;
212  TheCall->setArg(I, Arg.get());
213  }
214 
215  // Third argument should be a pointer to a non-const integer.
216  // IRGen correctly handles volatile, restrict, and address spaces, and
217  // the other qualifiers aren't possible.
218  {
219  ExprResult Arg = TheCall->getArg(2);
220  QualType Ty = Arg.get()->getType();
221  const auto *PtrTy = Ty->getAs<PointerType>();
222  if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() &&
223  !PtrTy->getPointeeType().isConstQualified())) {
224  S.Diag(Arg.get()->getLocStart(),
225  diag::err_overflow_builtin_must_be_ptr_int)
226  << Ty << Arg.get()->getSourceRange();
227  return true;
228  }
229  InitializedEntity Entity = InitializedEntity::InitializeParameter(
230  S.getASTContext(), Ty, /*consume*/ false);
231  Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
232  if (Arg.isInvalid())
233  return true;
234  TheCall->setArg(2, Arg.get());
235  }
236  return false;
237 }
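// Illustrative examples (added for exposition; variable names are hypothetical):
//
//   int a = 1, b = 2, r;
//   bool ovf = __builtin_add_overflow(a, b, &r);  // OK: int, int, int*
//   __builtin_add_overflow(1.0, b, &r);           // error: operands must be integers
//   const int c = 0;
//   __builtin_add_overflow(a, b, &c);             // error: result pointer must point
//                                                 //        to a non-const integer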
238 
239 static void SemaBuiltinMemChkCall(Sema &S, FunctionDecl *FDecl,
240  CallExpr *TheCall, unsigned SizeIdx,
241  unsigned DstSizeIdx) {
242  if (TheCall->getNumArgs() <= SizeIdx ||
243  TheCall->getNumArgs() <= DstSizeIdx)
244  return;
245 
246  const Expr *SizeArg = TheCall->getArg(SizeIdx);
247  const Expr *DstSizeArg = TheCall->getArg(DstSizeIdx);
248 
249  llvm::APSInt Size, DstSize;
250 
251  // Find out if both sizes are known at compile time.
252  if (!SizeArg->EvaluateAsInt(Size, S.Context) ||
253  !DstSizeArg->EvaluateAsInt(DstSize, S.Context))
254  return;
255 
256  if (Size.ule(DstSize))
257  return;
258 
259  // Confirmed overflow, so generate the diagnostic.
260  IdentifierInfo *FnName = FDecl->getIdentifier();
261  SourceLocation SL = TheCall->getLocStart();
262  SourceRange SR = TheCall->getSourceRange();
263 
264  S.Diag(SL, diag::warn_memcpy_chk_overflow) << SR << FnName;
265 }
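// Illustrative example (added for exposition): the warning only fires when both
// the copy size and the destination size fold to integer constants.
//
//   char buf[4];
//   __builtin___memcpy_chk(buf, src, 10, __builtin_object_size(buf, 0));
//   // warn_memcpy_chk_overflow: copying 10 bytes into a 4-byte destination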
266 
267 static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
268  if (checkArgCount(S, BuiltinCall, 2))
269  return true;
270 
271  SourceLocation BuiltinLoc = BuiltinCall->getLocStart();
272  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
273  Expr *Call = BuiltinCall->getArg(0);
274  Expr *Chain = BuiltinCall->getArg(1);
275 
276  if (Call->getStmtClass() != Stmt::CallExprClass) {
277  S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
278  << Call->getSourceRange();
279  return true;
280  }
281 
282  auto CE = cast<CallExpr>(Call);
283  if (CE->getCallee()->getType()->isBlockPointerType()) {
284  S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
285  << Call->getSourceRange();
286  return true;
287  }
288 
289  const Decl *TargetDecl = CE->getCalleeDecl();
290  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
291  if (FD->getBuiltinID()) {
292  S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
293  << Call->getSourceRange();
294  return true;
295  }
296 
297  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
298  S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
299  << Call->getSourceRange();
300  return true;
301  }
302 
303  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
304  if (ChainResult.isInvalid())
305  return true;
306  if (!ChainResult.get()->getType()->isPointerType()) {
307  S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
308  << Chain->getSourceRange();
309  return true;
310  }
311 
312  QualType ReturnTy = CE->getCallReturnType(S.Context);
313  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
314  QualType BuiltinTy = S.Context.getFunctionType(
315  ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
316  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);
317 
318  Builtin =
319  S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();
320 
321  BuiltinCall->setType(CE->getType());
322  BuiltinCall->setValueKind(CE->getValueKind());
323  BuiltinCall->setObjectKind(CE->getObjectKind());
324  BuiltinCall->setCallee(Builtin);
325  BuiltinCall->setArg(1, ChainResult.get());
326 
327  return false;
328 }
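// Illustrative example (added for exposition; f and chain are hypothetical):
//
//   int f(int, int);
//   void *chain = /* ... */;
//   __builtin_call_with_static_chain(f(1, 2), chain);
//   // first argument must be a plain call (not a block, builtin, or
//   // pseudo-destructor call); second argument must be a pointer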
329 
330 static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
331  Scope::ScopeFlags NeededScopeFlags,
332  unsigned DiagID) {
333  // Scopes aren't available during instantiation. Fortunately, builtin
334  // functions cannot be template args so they cannot be formed through template
335  // instantiation. Therefore checking once during the parse is sufficient.
336  if (SemaRef.inTemplateInstantiation())
337  return false;
338 
339  Scope *S = SemaRef.getCurScope();
340  while (S && !S->isSEHExceptScope())
341  S = S->getParent();
342  if (!S || !(S->getFlags() & NeededScopeFlags)) {
343  auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
344  SemaRef.Diag(TheCall->getExprLoc(), DiagID)
345  << DRE->getDecl()->getIdentifier();
346  return true;
347  }
348 
349  return false;
350 }
351 
352 static inline bool isBlockPointer(Expr *Arg) {
353  return Arg->getType()->isBlockPointerType();
354 }
355 
356 /// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
357 /// void*, which is a requirement of device side enqueue.
358 static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
359  const BlockPointerType *BPT =
360  cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
361  ArrayRef<QualType> Params =
362  BPT->getPointeeType()->getAs<FunctionProtoType>()->getParamTypes();
363  unsigned ArgCounter = 0;
364  bool IllegalParams = false;
365  // Iterate through the block parameters until either one is found that is not
366  // a local void*, or the block is valid.
367  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
368  I != E; ++I, ++ArgCounter) {
369  if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
370  (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
371  LangAS::opencl_local) {
372  // Get the location of the error. If a block literal has been passed
373  // (BlockExpr) then we can point straight to the offending argument,
374  // else we just point to the variable reference.
375  SourceLocation ErrorLoc;
376  if (isa<BlockExpr>(BlockArg)) {
377  BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
378  ErrorLoc = BD->getParamDecl(ArgCounter)->getLocStart();
379  } else if (isa<DeclRefExpr>(BlockArg)) {
380  ErrorLoc = cast<DeclRefExpr>(BlockArg)->getLocStart();
381  }
382  S.Diag(ErrorLoc,
383  diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
384  IllegalParams = true;
385  }
386  }
387 
388  return IllegalParams;
389 }
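// Illustrative OpenCL C 2.0 example (added for exposition): every parameter of
// a block passed to a device-side enqueue builtin must be 'local void *'.
//
//   enqueue_kernel(q, flags, ndr,
//                  ^(local void *a, local void *b){ /* ... */ }, 16, 32);  // OK
//   enqueue_kernel(q, flags, ndr,
//                  ^(global int *p){ /* ... */ }, 16);  // error: non-local-void* param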
390 
391 static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
392  if (!S.getOpenCLOptions().isEnabled("cl_khr_subgroups")) {
393  S.Diag(Call->getLocStart(), diag::err_opencl_requires_extension)
394  << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
395  return true;
396  }
397  return false;
398 }
399 
400 static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
401  if (checkArgCount(S, TheCall, 2))
402  return true;
403 
404  if (checkOpenCLSubgroupExt(S, TheCall))
405  return true;
406 
407  // First argument is an ndrange_t type.
408  Expr *NDRangeArg = TheCall->getArg(0);
409  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
410  S.Diag(NDRangeArg->getLocStart(),
411  diag::err_opencl_builtin_expected_type)
412  << TheCall->getDirectCallee() << "'ndrange_t'";
413  return true;
414  }
415 
416  Expr *BlockArg = TheCall->getArg(1);
417  if (!isBlockPointer(BlockArg)) {
418  S.Diag(BlockArg->getLocStart(),
419  diag::err_opencl_builtin_expected_type)
420  << TheCall->getDirectCallee() << "block";
421  return true;
422  }
423  return checkOpenCLBlockArgs(S, BlockArg);
424 }
425 
426 /// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
427 /// get_kernel_work_group_size
428 /// and get_kernel_preferred_work_group_size_multiple builtin functions.
429 static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
430  if (checkArgCount(S, TheCall, 1))
431  return true;
432 
433  Expr *BlockArg = TheCall->getArg(0);
434  if (!isBlockPointer(BlockArg)) {
435  S.Diag(BlockArg->getLocStart(),
436  diag::err_opencl_builtin_expected_type)
437  << TheCall->getDirectCallee() << "block";
438  return true;
439  }
440  return checkOpenCLBlockArgs(S, BlockArg);
441 }
442 
443 /// Diagnose integer type and any valid implicit conversion to it.
444 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
445  const QualType &IntType);
446 
447 static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
448  unsigned Start, unsigned End) {
449  bool IllegalParams = false;
450  for (unsigned I = Start; I <= End; ++I)
451  IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
452  S.Context.getSizeType());
453  return IllegalParams;
454 }
455 
456 /// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
457 /// 'local void*' parameter of passed block.
458 static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
459  Expr *BlockArg,
460  unsigned NumNonVarArgs) {
461  const BlockPointerType *BPT =
462  cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
463  unsigned NumBlockParams =
464  BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams();
465  unsigned TotalNumArgs = TheCall->getNumArgs();
466 
467  // For each argument passed to the block, a corresponding uint needs to
468  // be passed to describe the size of the local memory.
469  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
470  S.Diag(TheCall->getLocStart(),
471  diag::err_opencl_enqueue_kernel_local_size_args);
472  return true;
473  }
474 
475  // Check that the sizes of the local memory are specified by integers.
476  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
477  TotalNumArgs - 1);
478 }
479 
480 /// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
481 /// overload formats specified in Table 6.13.17.1.
482 /// int enqueue_kernel(queue_t queue,
483 /// kernel_enqueue_flags_t flags,
484 /// const ndrange_t ndrange,
485 /// void (^block)(void))
486 /// int enqueue_kernel(queue_t queue,
487 /// kernel_enqueue_flags_t flags,
488 /// const ndrange_t ndrange,
489 /// uint num_events_in_wait_list,
490 /// clk_event_t *event_wait_list,
491 /// clk_event_t *event_ret,
492 /// void (^block)(void))
493 /// int enqueue_kernel(queue_t queue,
494 /// kernel_enqueue_flags_t flags,
495 /// const ndrange_t ndrange,
496 /// void (^block)(local void*, ...),
497 /// uint size0, ...)
498 /// int enqueue_kernel(queue_t queue,
499 /// kernel_enqueue_flags_t flags,
500 /// const ndrange_t ndrange,
501 /// uint num_events_in_wait_list,
502 /// clk_event_t *event_wait_list,
503 /// clk_event_t *event_ret,
504 /// void (^block)(local void*, ...),
505 /// uint size0, ...)
506 static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
507  unsigned NumArgs = TheCall->getNumArgs();
508 
509  if (NumArgs < 4) {
510  S.Diag(TheCall->getLocStart(), diag::err_typecheck_call_too_few_args);
511  return true;
512  }
513 
514  Expr *Arg0 = TheCall->getArg(0);
515  Expr *Arg1 = TheCall->getArg(1);
516  Expr *Arg2 = TheCall->getArg(2);
517  Expr *Arg3 = TheCall->getArg(3);
518 
519  // First argument always needs to be a queue_t type.
520  if (!Arg0->getType()->isQueueT()) {
521  S.Diag(TheCall->getArg(0)->getLocStart(),
522  diag::err_opencl_builtin_expected_type)
523  << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
524  return true;
525  }
526 
527  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
528  if (!Arg1->getType()->isIntegerType()) {
529  S.Diag(TheCall->getArg(1)->getLocStart(),
530  diag::err_opencl_builtin_expected_type)
531  << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
532  return true;
533  }
534 
535  // Third argument is always an ndrange_t type.
536  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
537  S.Diag(TheCall->getArg(2)->getLocStart(),
538  diag::err_opencl_builtin_expected_type)
539  << TheCall->getDirectCallee() << "'ndrange_t'";
540  return true;
541  }
542 
543  // With four arguments, there is only one form that the function could be
544  // called in: no events and no variable arguments.
545  if (NumArgs == 4) {
546  // check that the last argument is the right block type.
547  if (!isBlockPointer(Arg3)) {
548  S.Diag(Arg3->getLocStart(), diag::err_opencl_builtin_expected_type)
549  << TheCall->getDirectCallee() << "block";
550  return true;
551  }
552  // we have a block type, check the prototype
553  const BlockPointerType *BPT =
554  cast<BlockPointerType>(Arg3->getType().getCanonicalType());
555  if (BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams() > 0) {
556  S.Diag(Arg3->getLocStart(),
557  diag::err_opencl_enqueue_kernel_blocks_no_args);
558  return true;
559  }
560  return false;
561  }
562  // we can have block + varargs.
563  if (isBlockPointer(Arg3))
564  return (checkOpenCLBlockArgs(S, Arg3) ||
565  checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
566  // last two cases with either exactly 7 args or 7 args and varargs.
567  if (NumArgs >= 7) {
568  // check common block argument.
569  Expr *Arg6 = TheCall->getArg(6);
570  if (!isBlockPointer(Arg6)) {
571  S.Diag(Arg6->getLocStart(), diag::err_opencl_builtin_expected_type)
572  << TheCall->getDirectCallee() << "block";
573  return true;
574  }
575  if (checkOpenCLBlockArgs(S, Arg6))
576  return true;
577 
578  // Fourth argument has to be any integer type.
579  if (!Arg3->getType()->isIntegerType()) {
580  S.Diag(TheCall->getArg(3)->getLocStart(),
581  diag::err_opencl_builtin_expected_type)
582  << TheCall->getDirectCallee() << "integer";
583  return true;
584  }
585  // check remaining common arguments.
586  Expr *Arg4 = TheCall->getArg(4);
587  Expr *Arg5 = TheCall->getArg(5);
588 
589  // Fifth argument is always passed as a pointer to clk_event_t.
590  if (!Arg4->isNullPointerConstant(S.Context,
591  Expr::NPC_ValueDependentIsNotNull) &&
592  !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
593  S.Diag(TheCall->getArg(4)->getLocStart(),
594  diag::err_opencl_builtin_expected_type)
595  << TheCall->getDirectCallee()
596  << S.Context.getPointerType(S.Context.OCLClkEventTy);
597  return true;
598  }
599 
600  // Sixth argument is always passed as a pointer to clk_event_t.
601  if (!Arg5->isNullPointerConstant(S.Context,
602  Expr::NPC_ValueDependentIsNotNull) &&
603  !(Arg5->getType()->isPointerType() &&
604  Arg5->getType()->getPointeeType()->isClkEventT())) {
605  S.Diag(TheCall->getArg(5)->getLocStart(),
606  diag::err_opencl_builtin_expected_type)
607  << TheCall->getDirectCallee()
608  << S.Context.getPointerType(S.Context.OCLClkEventTy);
609  return true;
610  }
611 
612  if (NumArgs == 7)
613  return false;
614 
615  return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
616  }
617 
618  // None of the specific cases was detected; give a generic error.
619  S.Diag(TheCall->getLocStart(),
620  diag::err_opencl_enqueue_kernel_incorrect_args);
621  return true;
622 }
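// Illustrative OpenCL C 2.0 calls matching the overloads documented above
// (added for exposition):
//
//   ndrange_t ndr = ndrange_1D(64);
//   enqueue_kernel(get_default_queue(), CLK_ENQUEUE_FLAGS_WAIT_KERNEL, ndr,
//                  ^{ /* kernel body */ });                    // 4-argument form
//   enqueue_kernel(get_default_queue(), CLK_ENQUEUE_FLAGS_WAIT_KERNEL, ndr,
//                  ^(local void *scratch){ /* ... */ }, 128);  // block + size list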
623 
624 /// Returns OpenCL access qual.
625 static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
626  return D->getAttr<OpenCLAccessAttr>();
627 }
628 
629 /// Returns true if the first argument is not a pipe, or if its access
/// qualifier is incompatible with the builtin being called.
630 static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
631  const Expr *Arg0 = Call->getArg(0);
632  // First argument type should always be pipe.
633  if (!Arg0->getType()->isPipeType()) {
634  S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_first_arg)
635  << Call->getDirectCallee() << Arg0->getSourceRange();
636  return true;
637  }
638  OpenCLAccessAttr *AccessQual =
639  getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
640  // Validates the access qualifier is compatible with the call.
641  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
642  // read_only and write_only, and assumed to be read_only if no qualifier is
643  // specified.
644  switch (Call->getDirectCallee()->getBuiltinID()) {
645  case Builtin::BIread_pipe:
646  case Builtin::BIreserve_read_pipe:
647  case Builtin::BIcommit_read_pipe:
648  case Builtin::BIwork_group_reserve_read_pipe:
649  case Builtin::BIsub_group_reserve_read_pipe:
650  case Builtin::BIwork_group_commit_read_pipe:
651  case Builtin::BIsub_group_commit_read_pipe:
652  if (!(!AccessQual || AccessQual->isReadOnly())) {
653  S.Diag(Arg0->getLocStart(),
654  diag::err_opencl_builtin_pipe_invalid_access_modifier)
655  << "read_only" << Arg0->getSourceRange();
656  return true;
657  }
658  break;
659  case Builtin::BIwrite_pipe:
660  case Builtin::BIreserve_write_pipe:
661  case Builtin::BIcommit_write_pipe:
662  case Builtin::BIwork_group_reserve_write_pipe:
663  case Builtin::BIsub_group_reserve_write_pipe:
664  case Builtin::BIwork_group_commit_write_pipe:
665  case Builtin::BIsub_group_commit_write_pipe:
666  if (!(AccessQual && AccessQual->isWriteOnly())) {
667  S.Diag(Arg0->getLocStart(),
668  diag::err_opencl_builtin_pipe_invalid_access_modifier)
669  << "write_only" << Arg0->getSourceRange();
670  return true;
671  }
672  break;
673  default:
674  break;
675  }
676  return false;
677 }
678 
679 /// Returns true if pipe element type is different from the pointer.
680 static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
681  const Expr *Arg0 = Call->getArg(0);
682  const Expr *ArgIdx = Call->getArg(Idx);
683  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
684  const QualType EltTy = PipeTy->getElementType();
685  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
686  // The argument at index Idx should be a pointer, and the type it points to
687  // should be the same as the pipe element type.
688  if (!ArgTy ||
689  !S.Context.hasSameType(
690  EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
691  S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg)
692  << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
693  << ArgIdx->getType() << ArgIdx->getSourceRange();
694  return true;
695  }
696  return false;
697 }
698 
699 // Performs semantic analysis for the read/write_pipe call.
700 // \param S Reference to the semantic analyzer.
701 // \param Call A pointer to the builtin call.
702 // \return True if a semantic error has been found, false otherwise.
703 static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
704  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
705  // functions have two forms.
706  switch (Call->getNumArgs()) {
707  case 2:
708  if (checkOpenCLPipeArg(S, Call))
709  return true;
710  // The call with 2 arguments should be
711  // read/write_pipe(pipe T, T*).
712  // Check packet type T.
713  if (checkOpenCLPipePacketType(S, Call, 1))
714  return true;
715  break;
716 
717  case 4: {
718  if (checkOpenCLPipeArg(S, Call))
719  return true;
720  // The call with 4 arguments should be
721  // read/write_pipe(pipe T, reserve_id_t, uint, T*).
722  // Check reserve_id_t.
723  if (!Call->getArg(1)->getType()->isReserveIDT()) {
724  S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg)
725  << Call->getDirectCallee() << S.Context.OCLReserveIDTy
726  << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
727  return true;
728  }
729 
730  // Check the index.
731  const Expr *Arg2 = Call->getArg(2);
732  if (!Arg2->getType()->isIntegerType() &&
733  !Arg2->getType()->isUnsignedIntegerType()) {
734  S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg)
735  << Call->getDirectCallee() << S.Context.UnsignedIntTy
736  << Arg2->getType() << Arg2->getSourceRange();
737  return true;
738  }
739 
740  // Check packet type T.
741  if (checkOpenCLPipePacketType(S, Call, 3))
742  return true;
743  } break;
744  default:
745  S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_arg_num)
746  << Call->getDirectCallee() << Call->getSourceRange();
747  return true;
748  }
749 
750  return false;
751 }
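// Illustrative forms checked above (added for exposition), for a pipe of
// element type T:
//
//   int read_pipe(pipe T p, T *ptr);                               // 2 arguments
//   int read_pipe(pipe T p, reserve_id_t id, uint index, T *ptr);  // 4 arguments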
752 
753 // Performs a semantic analysis on the {work_group_/sub_group_
754 // /_}reserve_{read/write}_pipe
755 // \param S Reference to the semantic analyzer.
756 // \param Call The call to the builtin function to be analyzed.
757 // \return True if a semantic error was found, false otherwise.
758 static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
759  if (checkArgCount(S, Call, 2))
760  return true;
761 
762  if (checkOpenCLPipeArg(S, Call))
763  return true;
764 
765  // Check the reserve size.
766  if (!Call->getArg(1)->getType()->isIntegerType() &&
767  !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
768  S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg)
769  << Call->getDirectCallee() << S.Context.UnsignedIntTy
770  << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
771  return true;
772  }
773 
774  // Since the return type of the reserve_read/write_pipe built-in functions is
775  // reserve_id_t, which is not defined in the builtin def file, we use int as
776  // the return type there and need to override it for these functions here.
777  Call->setType(S.Context.OCLReserveIDTy);
778 
779  return false;
780 }
781 
782 // Performs a semantic analysis on {work_group_/sub_group_
783 // /_}commit_{read/write}_pipe
784 // \param S Reference to the semantic analyzer.
785 // \param Call The call to the builtin function to be analyzed.
786 // \return True if a semantic error was found, false otherwise.
787 static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
788  if (checkArgCount(S, Call, 2))
789  return true;
790 
791  if (checkOpenCLPipeArg(S, Call))
792  return true;
793 
794  // Check reserve_id_t.
795  if (!Call->getArg(1)->getType()->isReserveIDT()) {
796  S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg)
797  << Call->getDirectCallee() << S.Context.OCLReserveIDTy
798  << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
799  return true;
800  }
801 
802  return false;
803 }
804 
805 // Performs a semantic analysis on the call to built-in Pipe
806 // Query Functions.
807 // \param S Reference to the semantic analyzer.
808 // \param Call The call to the builtin function to be analyzed.
809 // \return True if a semantic error was found, false otherwise.
810 static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
811  if (checkArgCount(S, Call, 1))
812  return true;
813 
814  if (!Call->getArg(0)->getType()->isPipeType()) {
815  S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_first_arg)
816  << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
817  return true;
818  }
819 
820  return false;
821 }
822 
823 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
824 // Performs semantic analysis for the to_global/local/private call.
825 // \param S Reference to the semantic analyzer.
826 // \param BuiltinID ID of the builtin function.
827 // \param Call A pointer to the builtin call.
828 // \return True if a semantic error has been found, false otherwise.
829 static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
830  CallExpr *Call) {
831  if (Call->getNumArgs() != 1) {
832  S.Diag(Call->getLocStart(), diag::err_opencl_builtin_to_addr_arg_num)
833  << Call->getDirectCallee() << Call->getSourceRange();
834  return true;
835  }
836 
837  auto RT = Call->getArg(0)->getType();
838  if (!RT->isPointerType() || RT->getPointeeType()
839  .getAddressSpace() == LangAS::opencl_constant) {
840  S.Diag(Call->getLocStart(), diag::err_opencl_builtin_to_addr_invalid_arg)
841  << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
842  return true;
843  }
844 
845  RT = RT->getPointeeType();
846  auto Qual = RT.getQualifiers();
847  switch (BuiltinID) {
848  case Builtin::BIto_global:
849  Qual.setAddressSpace(LangAS::opencl_global);
850  break;
851  case Builtin::BIto_local:
852  Qual.setAddressSpace(LangAS::opencl_local);
853  break;
854  case Builtin::BIto_private:
855  Qual.setAddressSpace(LangAS::opencl_private);
856  break;
857  default:
858  llvm_unreachable("Invalid builtin function");
859  }
860  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
861  RT.getUnqualifiedType(), Qual)));
862 
863  return false;
864 }
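// Illustrative OpenCL C 2.0 examples (added for exposition; variables are
// hypothetical):
//
//   generic int *p = /* ... */;
//   global int *g = to_global(p);  // OK: result picks up the global address space
//   constant int *c = /* ... */;
//   to_global(c);                  // error: pointers into the constant address
//                                  //        space are rejected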
865 
866 // Emit an error and return true if the current architecture is not in the list
867 // of supported architectures.
868 static bool
869 CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
870  ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
871  llvm::Triple::ArchType CurArch =
872  S.getASTContext().getTargetInfo().getTriple().getArch();
873  if (llvm::is_contained(SupportedArchs, CurArch))
874  return false;
875  S.Diag(TheCall->getLocStart(), diag::err_builtin_target_unsupported)
876  << TheCall->getSourceRange();
877  return true;
878 }
879 
880 ExprResult
881 Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
882  CallExpr *TheCall) {
883  ExprResult TheCallResult(TheCall);
884 
885  // Find out if any arguments are required to be integer constant expressions.
886  unsigned ICEArguments = 0;
887  ASTContext::GetBuiltinTypeError Error;
888  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
889  if (Error != ASTContext::GE_None)
890  ICEArguments = 0; // Don't diagnose previously diagnosed errors.
891 
892  // If any arguments are required to be ICE's, check and diagnose.
893  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
894  // Skip arguments not required to be ICE's.
895  if ((ICEArguments & (1 << ArgNo)) == 0) continue;
896 
897  llvm::APSInt Result;
898  if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
899  return true;
900  ICEArguments &= ~(1 << ArgNo);
901  }
902 
903  switch (BuiltinID) {
904  case Builtin::BI__builtin___CFStringMakeConstantString:
905  assert(TheCall->getNumArgs() == 1 &&
906  "Wrong # arguments to builtin CFStringMakeConstantString");
907  if (CheckObjCString(TheCall->getArg(0)))
908  return ExprError();
909  break;
910  case Builtin::BI__builtin_ms_va_start:
911  case Builtin::BI__builtin_stdarg_start:
912  case Builtin::BI__builtin_va_start:
913  if (SemaBuiltinVAStart(BuiltinID, TheCall))
914  return ExprError();
915  break;
916  case Builtin::BI__va_start: {
917  switch (Context.getTargetInfo().getTriple().getArch()) {
918  case llvm::Triple::arm:
919  case llvm::Triple::thumb:
920  if (SemaBuiltinVAStartARMMicrosoft(TheCall))
921  return ExprError();
922  break;
923  default:
924  if (SemaBuiltinVAStart(BuiltinID, TheCall))
925  return ExprError();
926  break;
927  }
928  break;
929  }
930 
931  // The acquire, release, and no fence variants are ARM and AArch64 only.
932  case Builtin::BI_interlockedbittestandset_acq:
933  case Builtin::BI_interlockedbittestandset_rel:
934  case Builtin::BI_interlockedbittestandset_nf:
935  case Builtin::BI_interlockedbittestandreset_acq:
936  case Builtin::BI_interlockedbittestandreset_rel:
937  case Builtin::BI_interlockedbittestandreset_nf:
938  if (CheckBuiltinTargetSupport(
939  *this, BuiltinID, TheCall,
940  {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
941  return ExprError();
942  break;
943 
944  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
945  case Builtin::BI_bittest64:
946  case Builtin::BI_bittestandcomplement64:
947  case Builtin::BI_bittestandreset64:
948  case Builtin::BI_bittestandset64:
949  case Builtin::BI_interlockedbittestandreset64:
950  case Builtin::BI_interlockedbittestandset64:
951  if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
952  {llvm::Triple::x86_64, llvm::Triple::arm,
953  llvm::Triple::thumb, llvm::Triple::aarch64}))
954  return ExprError();
955  break;
956 
957  case Builtin::BI__builtin_isgreater:
958  case Builtin::BI__builtin_isgreaterequal:
959  case Builtin::BI__builtin_isless:
960  case Builtin::BI__builtin_islessequal:
961  case Builtin::BI__builtin_islessgreater:
962  case Builtin::BI__builtin_isunordered:
963  if (SemaBuiltinUnorderedCompare(TheCall))
964  return ExprError();
965  break;
966  case Builtin::BI__builtin_fpclassify:
967  if (SemaBuiltinFPClassification(TheCall, 6))
968  return ExprError();
969  break;
970  case Builtin::BI__builtin_isfinite:
971  case Builtin::BI__builtin_isinf:
972  case Builtin::BI__builtin_isinf_sign:
973  case Builtin::BI__builtin_isnan:
974  case Builtin::BI__builtin_isnormal:
975  if (SemaBuiltinFPClassification(TheCall, 1))
976  return ExprError();
977  break;
978  case Builtin::BI__builtin_shufflevector:
979  return SemaBuiltinShuffleVector(TheCall);
980  // TheCall will be freed by the smart pointer here, but that's fine, since
981  // SemaBuiltinShuffleVector guts it, but then doesn't release it.
982  case Builtin::BI__builtin_prefetch:
983  if (SemaBuiltinPrefetch(TheCall))
984  return ExprError();
985  break;
986  case Builtin::BI__builtin_alloca_with_align:
987  if (SemaBuiltinAllocaWithAlign(TheCall))
988  return ExprError();
989  break;
990  case Builtin::BI__assume:
991  case Builtin::BI__builtin_assume:
992  if (SemaBuiltinAssume(TheCall))
993  return ExprError();
994  break;
995  case Builtin::BI__builtin_assume_aligned:
996  if (SemaBuiltinAssumeAligned(TheCall))
997  return ExprError();
998  break;
999  case Builtin::BI__builtin_object_size:
1000  if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
1001  return ExprError();
1002  break;
1003  case Builtin::BI__builtin_longjmp:
1004  if (SemaBuiltinLongjmp(TheCall))
1005  return ExprError();
1006  break;
1007  case Builtin::BI__builtin_setjmp:
1008  if (SemaBuiltinSetjmp(TheCall))
1009  return ExprError();
1010  break;
1011  case Builtin::BI_setjmp:
1012  case Builtin::BI_setjmpex:
1013  if (checkArgCount(*this, TheCall, 1))
1014  return true;
1015  break;
1016  case Builtin::BI__builtin_classify_type:
1017  if (checkArgCount(*this, TheCall, 1)) return true;
1018  TheCall->setType(Context.IntTy);
1019  break;
1020  case Builtin::BI__builtin_constant_p:
1021  if (checkArgCount(*this, TheCall, 1)) return true;
1022  TheCall->setType(Context.IntTy);
1023  break;
1024  case Builtin::BI__sync_fetch_and_add:
1025  case Builtin::BI__sync_fetch_and_add_1:
1026  case Builtin::BI__sync_fetch_and_add_2:
1027  case Builtin::BI__sync_fetch_and_add_4:
1028  case Builtin::BI__sync_fetch_and_add_8:
1029  case Builtin::BI__sync_fetch_and_add_16:
1030  case Builtin::BI__sync_fetch_and_sub:
1031  case Builtin::BI__sync_fetch_and_sub_1:
1032  case Builtin::BI__sync_fetch_and_sub_2:
1033  case Builtin::BI__sync_fetch_and_sub_4:
1034  case Builtin::BI__sync_fetch_and_sub_8:
1035  case Builtin::BI__sync_fetch_and_sub_16:
1036  case Builtin::BI__sync_fetch_and_or:
1037  case Builtin::BI__sync_fetch_and_or_1:
1038  case Builtin::BI__sync_fetch_and_or_2:
1039  case Builtin::BI__sync_fetch_and_or_4:
1040  case Builtin::BI__sync_fetch_and_or_8:
1041  case Builtin::BI__sync_fetch_and_or_16:
1042  case Builtin::BI__sync_fetch_and_and:
1043  case Builtin::BI__sync_fetch_and_and_1:
1044  case Builtin::BI__sync_fetch_and_and_2:
1045  case Builtin::BI__sync_fetch_and_and_4:
1046  case Builtin::BI__sync_fetch_and_and_8:
1047  case Builtin::BI__sync_fetch_and_and_16:
1048  case Builtin::BI__sync_fetch_and_xor:
1049  case Builtin::BI__sync_fetch_and_xor_1:
1050  case Builtin::BI__sync_fetch_and_xor_2:
1051  case Builtin::BI__sync_fetch_and_xor_4:
1052  case Builtin::BI__sync_fetch_and_xor_8:
1053  case Builtin::BI__sync_fetch_and_xor_16:
1054  case Builtin::BI__sync_fetch_and_nand:
1055  case Builtin::BI__sync_fetch_and_nand_1:
1056  case Builtin::BI__sync_fetch_and_nand_2:
1057  case Builtin::BI__sync_fetch_and_nand_4:
1058  case Builtin::BI__sync_fetch_and_nand_8:
1059  case Builtin::BI__sync_fetch_and_nand_16:
1060  case Builtin::BI__sync_add_and_fetch:
1061  case Builtin::BI__sync_add_and_fetch_1:
1062  case Builtin::BI__sync_add_and_fetch_2:
1063  case Builtin::BI__sync_add_and_fetch_4:
1064  case Builtin::BI__sync_add_and_fetch_8:
1065  case Builtin::BI__sync_add_and_fetch_16:
1066  case Builtin::BI__sync_sub_and_fetch:
1067  case Builtin::BI__sync_sub_and_fetch_1:
1068  case Builtin::BI__sync_sub_and_fetch_2:
1069  case Builtin::BI__sync_sub_and_fetch_4:
1070  case Builtin::BI__sync_sub_and_fetch_8:
1071  case Builtin::BI__sync_sub_and_fetch_16:
1072  case Builtin::BI__sync_and_and_fetch:
1073  case Builtin::BI__sync_and_and_fetch_1:
1074  case Builtin::BI__sync_and_and_fetch_2:
1075  case Builtin::BI__sync_and_and_fetch_4:
1076  case Builtin::BI__sync_and_and_fetch_8:
1077  case Builtin::BI__sync_and_and_fetch_16:
1078  case Builtin::BI__sync_or_and_fetch:
1079  case Builtin::BI__sync_or_and_fetch_1:
1080  case Builtin::BI__sync_or_and_fetch_2:
1081  case Builtin::BI__sync_or_and_fetch_4:
1082  case Builtin::BI__sync_or_and_fetch_8:
1083  case Builtin::BI__sync_or_and_fetch_16:
1084  case Builtin::BI__sync_xor_and_fetch:
1085  case Builtin::BI__sync_xor_and_fetch_1:
1086  case Builtin::BI__sync_xor_and_fetch_2:
1087  case Builtin::BI__sync_xor_and_fetch_4:
1088  case Builtin::BI__sync_xor_and_fetch_8:
1089  case Builtin::BI__sync_xor_and_fetch_16:
1090  case Builtin::BI__sync_nand_and_fetch:
1091  case Builtin::BI__sync_nand_and_fetch_1:
1092  case Builtin::BI__sync_nand_and_fetch_2:
1093  case Builtin::BI__sync_nand_and_fetch_4:
1094  case Builtin::BI__sync_nand_and_fetch_8:
1095  case Builtin::BI__sync_nand_and_fetch_16:
1096  case Builtin::BI__sync_val_compare_and_swap:
1097  case Builtin::BI__sync_val_compare_and_swap_1:
1098  case Builtin::BI__sync_val_compare_and_swap_2:
1099  case Builtin::BI__sync_val_compare_and_swap_4:
1100  case Builtin::BI__sync_val_compare_and_swap_8:
1101  case Builtin::BI__sync_val_compare_and_swap_16:
1102  case Builtin::BI__sync_bool_compare_and_swap:
1103  case Builtin::BI__sync_bool_compare_and_swap_1:
1104  case Builtin::BI__sync_bool_compare_and_swap_2:
1105  case Builtin::BI__sync_bool_compare_and_swap_4:
1106  case Builtin::BI__sync_bool_compare_and_swap_8:
1107  case Builtin::BI__sync_bool_compare_and_swap_16:
1108  case Builtin::BI__sync_lock_test_and_set:
1109  case Builtin::BI__sync_lock_test_and_set_1:
1110  case Builtin::BI__sync_lock_test_and_set_2:
1111  case Builtin::BI__sync_lock_test_and_set_4:
1112  case Builtin::BI__sync_lock_test_and_set_8:
1113  case Builtin::BI__sync_lock_test_and_set_16:
1114  case Builtin::BI__sync_lock_release:
1115  case Builtin::BI__sync_lock_release_1:
1116  case Builtin::BI__sync_lock_release_2:
1117  case Builtin::BI__sync_lock_release_4:
1118  case Builtin::BI__sync_lock_release_8:
1119  case Builtin::BI__sync_lock_release_16:
1120  case Builtin::BI__sync_swap:
1121  case Builtin::BI__sync_swap_1:
1122  case Builtin::BI__sync_swap_2:
1123  case Builtin::BI__sync_swap_4:
1124  case Builtin::BI__sync_swap_8:
1125  case Builtin::BI__sync_swap_16:
1126  return SemaBuiltinAtomicOverloaded(TheCallResult);
1127  case Builtin::BI__builtin_nontemporal_load:
1128  case Builtin::BI__builtin_nontemporal_store:
1129  return SemaBuiltinNontemporalOverloaded(TheCallResult);
1130 #define BUILTIN(ID, TYPE, ATTRS)
1131 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
1132  case Builtin::BI##ID: \
1133  return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
1134 #include "clang/Basic/Builtins.def"
1135  case Builtin::BI__annotation:
1136  if (SemaBuiltinMSVCAnnotation(*this, TheCall))
1137  return ExprError();
1138  break;
1139  case Builtin::BI__builtin_annotation:
1140  if (SemaBuiltinAnnotation(*this, TheCall))
1141  return ExprError();
1142  break;
1143  case Builtin::BI__builtin_addressof:
1144  if (SemaBuiltinAddressof(*this, TheCall))
1145  return ExprError();
1146  break;
1147  case Builtin::BI__builtin_add_overflow:
1148  case Builtin::BI__builtin_sub_overflow:
1149  case Builtin::BI__builtin_mul_overflow:
1150  if (SemaBuiltinOverflow(*this, TheCall))
1151  return ExprError();
1152  break;
1153  case Builtin::BI__builtin_operator_new:
1154  case Builtin::BI__builtin_operator_delete: {
1155  bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
1156  ExprResult Res =
1157  SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
1158  if (Res.isInvalid())
1159  CorrectDelayedTyposInExpr(TheCallResult.get());
1160  return Res;
1161  }
1162  case Builtin::BI__builtin_dump_struct: {
1163  // We first want to ensure we are called with 2 arguments
1164  if (checkArgCount(*this, TheCall, 2))
1165  return ExprError();
1166  // Ensure that the first argument is of type 'struct XX *'
1167  const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts();
1168  const QualType PtrArgType = PtrArg->getType();
1169  if (!PtrArgType->isPointerType() ||
1170  !PtrArgType->getPointeeType()->isRecordType()) {
1171  Diag(PtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
1172  << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
1173  << "structure pointer";
1174  return ExprError();
1175  }
1176 
1177  // Ensure that the second argument is a pointer to a printf-like function.
1178  const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
1179  const QualType FnPtrArgType = FnPtrArg->getType();
1180  if (!FnPtrArgType->isPointerType()) {
1181  Diag(FnPtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
1182  << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1183  << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1184  return ExprError();
1185  }
1186 
1187  const auto *FuncType =
1188  FnPtrArgType->getPointeeType()->getAs<FunctionType>();
1189 
1190  if (!FuncType) {
1191  Diag(FnPtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
1192  << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1193  << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1194  return ExprError();
1195  }
1196 
1197  if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
1198  if (!FT->getNumParams()) {
1199  Diag(FnPtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
1200  << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1201  << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1202  return ExprError();
1203  }
1204  QualType PT = FT->getParamType(0);
1205  if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
1206  !PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
1207  !PT->getPointeeType().isConstQualified()) {
1208  Diag(FnPtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
1209  << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1210  << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1211  return ExprError();
1212  }
1213  }
1214 
1215  TheCall->setType(Context.IntTy);
1216  break;
1217  }
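  // Illustrative usage (added for exposition):
  //
  //   struct S { int x; float y; } s;
  //   __builtin_dump_struct(&s, &printf);  // OK: record pointer + printf-like
  //                                        //     'int (*)(const char *, ...)'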
1218 
1219  // check secure string manipulation functions where overflows
1220  // are detectable at compile time
1221  case Builtin::BI__builtin___memcpy_chk:
1222  case Builtin::BI__builtin___memmove_chk:
1223  case Builtin::BI__builtin___memset_chk:
1224  case Builtin::BI__builtin___strlcat_chk:
1225  case Builtin::BI__builtin___strlcpy_chk:
1226  case Builtin::BI__builtin___strncat_chk:
1227  case Builtin::BI__builtin___strncpy_chk:
1228  case Builtin::BI__builtin___stpncpy_chk:
1229  SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3);
1230  break;
1231  case Builtin::BI__builtin___memccpy_chk:
1232  SemaBuiltinMemChkCall(*this, FDecl, TheCall, 3, 4);
1233  break;
1234  case Builtin::BI__builtin___snprintf_chk:
1235  case Builtin::BI__builtin___vsnprintf_chk:
1236  SemaBuiltinMemChkCall(*this, FDecl, TheCall, 1, 3);
1237  break;
1238  case Builtin::BI__builtin_call_with_static_chain:
1239  if (SemaBuiltinCallWithStaticChain(*this, TheCall))
1240  return ExprError();
1241  break;
1242  case Builtin::BI__exception_code:
1243  case Builtin::BI_exception_code:
1244  if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
1245  diag::err_seh___except_block))
1246  return ExprError();
1247  break;
1248  case Builtin::BI__exception_info:
1249  case Builtin::BI_exception_info:
1250  if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
1251  diag::err_seh___except_filter))
1252  return ExprError();
1253  break;
1254  case Builtin::BI__GetExceptionInfo:
1255  if (checkArgCount(*this, TheCall, 1))
1256  return ExprError();
1257 
1258  if (CheckCXXThrowOperand(
1259  TheCall->getLocStart(),
1260  Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
1261  TheCall))
1262  return ExprError();
1263 
1264  TheCall->setType(Context.VoidPtrTy);
1265  break;
1266  // OpenCL v2.0, s6.13.16 - Pipe functions
1267  case Builtin::BIread_pipe:
1268  case Builtin::BIwrite_pipe:
1269  // Since those two functions are declared with var args, we need a semantic
1270  // check for the argument.
1271  if (SemaBuiltinRWPipe(*this, TheCall))
1272  return ExprError();
1273  TheCall->setType(Context.IntTy);
1274  break;
1275  case Builtin::BIreserve_read_pipe:
1276  case Builtin::BIreserve_write_pipe:
1277  case Builtin::BIwork_group_reserve_read_pipe:
1278  case Builtin::BIwork_group_reserve_write_pipe:
1279  if (SemaBuiltinReserveRWPipe(*this, TheCall))
1280  return ExprError();
1281  break;
1282  case Builtin::BIsub_group_reserve_read_pipe:
1283  case Builtin::BIsub_group_reserve_write_pipe:
1284  if (checkOpenCLSubgroupExt(*this, TheCall) ||
1285  SemaBuiltinReserveRWPipe(*this, TheCall))
1286  return ExprError();
1287  break;
1288  case Builtin::BIcommit_read_pipe:
1289  case Builtin::BIcommit_write_pipe:
1290  case Builtin::BIwork_group_commit_read_pipe:
1291  case Builtin::BIwork_group_commit_write_pipe:
1292  if (SemaBuiltinCommitRWPipe(*this, TheCall))
1293  return ExprError();
1294  break;
1295  case Builtin::BIsub_group_commit_read_pipe:
1296  case Builtin::BIsub_group_commit_write_pipe:
1297  if (checkOpenCLSubgroupExt(*this, TheCall) ||
1298  SemaBuiltinCommitRWPipe(*this, TheCall))
1299  return ExprError();
1300  break;
1301  case Builtin::BIget_pipe_num_packets:
1302  case Builtin::BIget_pipe_max_packets:
1303  if (SemaBuiltinPipePackets(*this, TheCall))
1304  return ExprError();
1305  TheCall->setType(Context.UnsignedIntTy);
1306  break;
1307  case Builtin::BIto_global:
1308  case Builtin::BIto_local:
1309  case Builtin::BIto_private:
1310  if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
1311  return ExprError();
1312  break;
1313  // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
1314  case Builtin::BIenqueue_kernel:
1315  if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
1316  return ExprError();
1317  break;
1318  case Builtin::BIget_kernel_work_group_size:
1319  case Builtin::BIget_kernel_preferred_work_group_size_multiple:
1320  if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
1321  return ExprError();
1322  break;
1323  case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
1324  case Builtin::BIget_kernel_sub_group_count_for_ndrange:
1325  if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
1326  return ExprError();
1327  break;
1328  case Builtin::BI__builtin_os_log_format:
1329  case Builtin::BI__builtin_os_log_format_buffer_size:
1330  if (SemaBuiltinOSLogFormat(TheCall))
1331  return ExprError();
1332  break;
1333  }
1334 
1335  // Since the target specific builtins for each arch overlap, only check those
1336  // of the arch we are compiling for.
1337  if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
1338  switch (Context.getTargetInfo().getTriple().getArch()) {
1339  case llvm::Triple::arm:
1340  case llvm::Triple::armeb:
1341  case llvm::Triple::thumb:
1342  case llvm::Triple::thumbeb:
1343  if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
1344  return ExprError();
1345  break;
1346  case llvm::Triple::aarch64:
1347  case llvm::Triple::aarch64_be:
1348  if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
1349  return ExprError();
1350  break;
1351  case llvm::Triple::hexagon:
1352  if (CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall))
1353  return ExprError();
1354  break;
1355  case llvm::Triple::mips:
1356  case llvm::Triple::mipsel:
1357  case llvm::Triple::mips64:
1358  case llvm::Triple::mips64el:
1359  if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall))
1360  return ExprError();
1361  break;
1362  case llvm::Triple::systemz:
1363  if (CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall))
1364  return ExprError();
1365  break;
1366  case llvm::Triple::x86:
1367  case llvm::Triple::x86_64:
1368  if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
1369  return ExprError();
1370  break;
1371  case llvm::Triple::ppc:
1372  case llvm::Triple::ppc64:
1373  case llvm::Triple::ppc64le:
1374  if (CheckPPCBuiltinFunctionCall(BuiltinID, TheCall))
1375  return ExprError();
1376  break;
1377  default:
1378  break;
1379  }
1380  }
1381 
1382  return TheCallResult;
1383 }
1384 
1385 // Get the valid immediate range for the specified NEON type code.
1386 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
1387  NeonTypeFlags Type(t);
1388  int IsQuad = ForceQuad ? true : Type.isQuad();
1389  switch (Type.getEltType()) {
1390  case NeonTypeFlags::Int8:
1391  case NeonTypeFlags::Poly8:
1392  return shift ? 7 : (8 << IsQuad) - 1;
1393  case NeonTypeFlags::Int16:
1394  case NeonTypeFlags::Poly16:
1395  return shift ? 15 : (4 << IsQuad) - 1;
1396  case NeonTypeFlags::Int32:
1397  return shift ? 31 : (2 << IsQuad) - 1;
1398  case NeonTypeFlags::Int64:
1399  case NeonTypeFlags::Poly64:
1400  return shift ? 63 : (1 << IsQuad) - 1;
1401  case NeonTypeFlags::Poly128:
1402  return shift ? 127 : (1 << IsQuad) - 1;
1403  case NeonTypeFlags::Float16:
1404  assert(!shift && "cannot shift float types!");
1405  return (4 << IsQuad) - 1;
1406  case NeonTypeFlags::Float32:
1407  assert(!shift && "cannot shift float types!");
1408  return (2 << IsQuad) - 1;
1409  case NeonTypeFlags::Float64:
1410  assert(!shift && "cannot shift float types!");
1411  return (1 << IsQuad) - 1;
1412  }
1413  llvm_unreachable("Invalid NeonTypeFlag!");
1414 }
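// For example (added for exposition): for a quad vector of Int8 elements the
// valid lane range is 0..15 ((8 << 1) - 1), while the valid shift-amount range
// is 0..7.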
1415 
1416 /// getNeonEltType - Return the QualType corresponding to the elements of
1417 /// the vector type specified by the NeonTypeFlags. This is used to check
1418 /// the pointer arguments for Neon load/store intrinsics.
1419 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
1420  bool IsPolyUnsigned, bool IsInt64Long) {
1421  switch (Flags.getEltType()) {
1422  case NeonTypeFlags::Int8:
1423  return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
1424  case NeonTypeFlags::Int16:
1425  return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
1426  case NeonTypeFlags::Int32:
1427  return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
1428  case NeonTypeFlags::Int64:
1429  if (IsInt64Long)
1430  return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
1431  else
1432  return Flags.isUnsigned() ? Context.UnsignedLongLongTy
1433  : Context.LongLongTy;
1434  case NeonTypeFlags::Poly8:
1435  return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
1436  case NeonTypeFlags::Poly16:
1437  return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
1438  case NeonTypeFlags::Poly64:
1439  if (IsInt64Long)
1440  return Context.UnsignedLongTy;
1441  else
1442  return Context.UnsignedLongLongTy;
1443  case NeonTypeFlags::Poly128:
1444  break;
1445  case NeonTypeFlags::Float16:
1446  return Context.HalfTy;
1447  case NeonTypeFlags::Float32:
1448  return Context.FloatTy;
1449  case NeonTypeFlags::Float64:
1450  return Context.DoubleTy;
1451  }
1452  llvm_unreachable("Invalid NeonTypeFlag!");
1453 }
1454 
1455 bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
1456  llvm::APSInt Result;
1457  uint64_t mask = 0;
1458  unsigned TV = 0;
1459  int PtrArgNum = -1;
1460  bool HasConstPtr = false;
1461  switch (BuiltinID) {
1462 #define GET_NEON_OVERLOAD_CHECK
1463 #include "clang/Basic/arm_neon.inc"
1464 #include "clang/Basic/arm_fp16.inc"
1465 #undef GET_NEON_OVERLOAD_CHECK
1466  }
1467 
1468  // For NEON intrinsics which are overloaded on vector element type, validate
1469  // the immediate which specifies which variant to emit.
1470  unsigned ImmArg = TheCall->getNumArgs()-1;
1471  if (mask) {
1472  if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
1473  return true;
1474 
1475  TV = Result.getLimitedValue(64);
1476  if ((TV > 63) || (mask & (1ULL << TV)) == 0)
1477  return Diag(TheCall->getLocStart(), diag::err_invalid_neon_type_code)
1478  << TheCall->getArg(ImmArg)->getSourceRange();
1479  }
1480 
1481  if (PtrArgNum >= 0) {
1482  // Check that pointer arguments have the specified type.
1483  Expr *Arg = TheCall->getArg(PtrArgNum);
1484  if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
1485  Arg = ICE->getSubExpr();
1486  ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
1487  QualType RHSTy = RHS.get()->getType();
1488 
1489  llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
1490  bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
1491  Arch == llvm::Triple::aarch64_be;
1492  bool IsInt64Long =
1493  Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong;
1494  QualType EltTy =
1495  getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
1496  if (HasConstPtr)
1497  EltTy = EltTy.withConst();
1498  QualType LHSTy = Context.getPointerType(EltTy);
1499  AssignConvertType ConvTy;
1500  ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
1501  if (RHS.isInvalid())
1502  return true;
1503  if (DiagnoseAssignmentResult(ConvTy, Arg->getLocStart(), LHSTy, RHSTy,
1504  RHS.get(), AA_Assigning))
1505  return true;
1506  }
1507 
1508  // For NEON intrinsics which take an immediate value as part of the
1509  // instruction, range check them here.
1510  unsigned i = 0, l = 0, u = 0;
1511  switch (BuiltinID) {
1512  default:
1513  return false;
1514  #define GET_NEON_IMMEDIATE_CHECK
1515  #include "clang/Basic/arm_neon.inc"
1516  #include "clang/Basic/arm_fp16.inc"
1517  #undef GET_NEON_IMMEDIATE_CHECK
1518  }
1519 
1520  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
1521 }
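// Illustrative NEON examples (added for exposition): lane indices are
// immediates and get range-checked here.
//
//   int32x4_t v = /* ... */;
//   vgetq_lane_s32(v, 3);  // OK: lane index within [0, 3]
//   vgetq_lane_s32(v, 4);  // error: argument value out of valid range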
1522 
1523 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
1524  unsigned MaxWidth) {
1525  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
1526  BuiltinID == ARM::BI__builtin_arm_ldaex ||
1527  BuiltinID == ARM::BI__builtin_arm_strex ||
1528  BuiltinID == ARM::BI__builtin_arm_stlex ||
1529  BuiltinID == AArch64::BI__builtin_arm_ldrex ||
1530  BuiltinID == AArch64::BI__builtin_arm_ldaex ||
1531  BuiltinID == AArch64::BI__builtin_arm_strex ||
1532  BuiltinID == AArch64::BI__builtin_arm_stlex) &&
1533  "unexpected ARM builtin");
1534  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
1535  BuiltinID == ARM::BI__builtin_arm_ldaex ||
1536  BuiltinID == AArch64::BI__builtin_arm_ldrex ||
1537  BuiltinID == AArch64::BI__builtin_arm_ldaex;
1538 
1539  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
1540 
1541  // Ensure that we have the proper number of arguments.
1542  if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
1543  return true;
1544 
1545  // Inspect the pointer argument of the atomic builtin. This should always be
1546  // a pointer type, whose pointee is an integral scalar or pointer type.
1547  // Because it is a pointer type, we don't have to worry about any implicit
1548  // casts here.
1549  Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
1550  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
1551  if (PointerArgRes.isInvalid())
1552  return true;
1553  PointerArg = PointerArgRes.get();
1554 
1555  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
1556  if (!pointerType) {
1557  Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
1558  << PointerArg->getType() << PointerArg->getSourceRange();
1559  return true;
1560  }
1561 
1562  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
1563  // task is to insert the appropriate casts into the AST. First work out just
1564  // what the appropriate type is.
1565  QualType ValType = pointerType->getPointeeType();
1566  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
1567  if (IsLdrex)
1568  AddrType.addConst();
1569 
1570  // Issue a warning if the cast is dodgy.
1571  CastKind CastNeeded = CK_NoOp;
1572  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
1573  CastNeeded = CK_BitCast;
1574  Diag(DRE->getLocStart(), diag::ext_typecheck_convert_discards_qualifiers)
1575  << PointerArg->getType()
1576  << Context.getPointerType(AddrType)
1577  << AA_Passing << PointerArg->getSourceRange();
1578  }
1579 
1580  // Finally, do the cast and replace the argument with the corrected version.
1581  AddrType = Context.getPointerType(AddrType);
1582  PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
1583  if (PointerArgRes.isInvalid())
1584  return true;
1585  PointerArg = PointerArgRes.get();
1586 
1587  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);
1588 
1589  // In general, we allow ints, floats and pointers to be loaded and stored.
1590  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
1591  !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
1592  Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
1593  << PointerArg->getType() << PointerArg->getSourceRange();
1594  return true;
1595  }
1596 
1597  // But ARM doesn't have instructions to deal with 128-bit versions.
1598  if (Context.getTypeSize(ValType) > MaxWidth) {
1599  assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
1600  Diag(DRE->getLocStart(), diag::err_atomic_exclusive_builtin_pointer_size)
1601  << PointerArg->getType() << PointerArg->getSourceRange();
1602  return true;
1603  }
1604 
1605  switch (ValType.getObjCLifetime()) {
1606  case Qualifiers::OCL_None:
1607  case Qualifiers::OCL_ExplicitNone:
1608  // okay
1609  break;
1610 
1611  case Qualifiers::OCL_Weak:
1612  case Qualifiers::OCL_Strong:
1613  case Qualifiers::OCL_Autoreleasing:
1614  Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
1615  << ValType << PointerArg->getSourceRange();
1616  return true;
1617  }
1618 
1619  if (IsLdrex) {
1620  TheCall->setType(ValType);
1621  return false;
1622  }
1623 
1624  // Initialize the argument to be stored.
1625  ExprResult ValArg = TheCall->getArg(0);
1626  InitializedEntity Entity = InitializedEntity::InitializeParameter(
1627  Context, ValType, /*consume*/ false);
1628  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
1629  if (ValArg.isInvalid())
1630  return true;
1631  TheCall->setArg(0, ValArg.get());
1632 
1633  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
1634  // but the custom checker bypasses all default analysis.
1635  TheCall->setType(Context.IntTy);
1636  return false;
1637 }
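// For illustration only, a sketch of the user-level calls this routine
// validates (names and values here are examples, not from the source):
//
//   volatile int *p = ...;
//   int v = __builtin_arm_ldrex(p);          // one argument; the call is given the pointee type (int)
//   int failed = __builtin_arm_strex(v, p);  // two arguments; always typed as int
//
// On 32-bit ARM, where MaxWidth is 64, a pointee wider than 64 bits is
// rejected with err_atomic_exclusive_builtin_pointer_size.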
1638 
1639 bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
1640  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
1641  BuiltinID == ARM::BI__builtin_arm_ldaex ||
1642  BuiltinID == ARM::BI__builtin_arm_strex ||
1643  BuiltinID == ARM::BI__builtin_arm_stlex) {
1644  return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
1645  }
1646 
1647  if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
1648  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
1649  SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
1650  }
1651 
1652  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
1653  BuiltinID == ARM::BI__builtin_arm_wsr64)
1654  return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);
1655 
1656  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
1657  BuiltinID == ARM::BI__builtin_arm_rsrp ||
1658  BuiltinID == ARM::BI__builtin_arm_wsr ||
1659  BuiltinID == ARM::BI__builtin_arm_wsrp)
1660  return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
1661 
1662  if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
1663  return true;
1664 
1665  // For intrinsics which take an immediate value as part of the instruction,
1666  // range check them here.
1667  // FIXME: VFP Intrinsics should error if VFP not present.
1668  switch (BuiltinID) {
1669  default: return false;
1670  case ARM::BI__builtin_arm_ssat:
1671  return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
1672  case ARM::BI__builtin_arm_usat:
1673  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
1674  case ARM::BI__builtin_arm_ssat16:
1675  return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
1676  case ARM::BI__builtin_arm_usat16:
1677  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
1678  case ARM::BI__builtin_arm_vcvtr_f:
1679  case ARM::BI__builtin_arm_vcvtr_d:
1680  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
1681  case ARM::BI__builtin_arm_dmb:
1682  case ARM::BI__builtin_arm_dsb:
1683  case ARM::BI__builtin_arm_isb:
1684  case ARM::BI__builtin_arm_dbg:
1685  return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
1686  }
1687 }
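// For illustration only: the saturation builtins range-checked above take a
// constant bit-position operand, e.g. (example values)
//
//   int r = __builtin_arm_ssat(x, 8);        // accepted: immediate in [1, 32]
//   int s = __builtin_arm_ssat(x, 0);        // rejected: below the allowed range
//   unsigned u = __builtin_arm_usat(x, 31);  // accepted: immediate in [0, 31]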
1688 
1689 bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
1690  CallExpr *TheCall) {
1691  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
1692  BuiltinID == AArch64::BI__builtin_arm_ldaex ||
1693  BuiltinID == AArch64::BI__builtin_arm_strex ||
1694  BuiltinID == AArch64::BI__builtin_arm_stlex) {
1695  return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
1696  }
1697 
1698  if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
1699  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
1700  SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) ||
1701  SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
1702  SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
1703  }
1704 
1705  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
1706  BuiltinID == AArch64::BI__builtin_arm_wsr64)
1707  return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
1708 
1709  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
1710  BuiltinID == AArch64::BI__builtin_arm_rsrp ||
1711  BuiltinID == AArch64::BI__builtin_arm_wsr ||
1712  BuiltinID == AArch64::BI__builtin_arm_wsrp)
1713  return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
1714 
1715  if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
1716  return true;
1717 
1718  // For intrinsics which take an immediate value as part of the instruction,
1719  // range check them here.
1720  unsigned i = 0, l = 0, u = 0;
1721  switch (BuiltinID) {
1722  default: return false;
1723  case AArch64::BI__builtin_arm_dmb:
1724  case AArch64::BI__builtin_arm_dsb:
1725  case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
1726  }
1727 
1728  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
1729 }
1730 
1731 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
1732  CallExpr *TheCall) {
1733  struct ArgInfo {
1734  ArgInfo(unsigned O, bool S, unsigned W, unsigned A)
1735  : OpNum(O), IsSigned(S), BitWidth(W), Align(A) {}
1736  unsigned OpNum = 0;
1737  bool IsSigned = false;
1738  unsigned BitWidth = 0;
1739  unsigned Align = 0;
1740  };
1741 
1742  static const std::map<unsigned, std::vector<ArgInfo>> Infos = {
1743  { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} },
1744  { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} },
1745  { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} },
1746  { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 0 }} },
1747  { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} },
1748  { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} },
1749  { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} },
1750  { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} },
1751  { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} },
1752  { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} },
1753  { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} },
1754 
1755  { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} },
1756  { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} },
1757  { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} },
1758  { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} },
1759  { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} },
1760  { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} },
1761  { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} },
1762  { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} },
1763  { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} },
1764  { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} },
1765  { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} },
1766 
1767  { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} },
1768  { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} },
1769  { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} },
1770  { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} },
1771  { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} },
1772  { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} },
1773  { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} },
1774  { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} },
1775  { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} },
1776  { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} },
1777  { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} },
1778  { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} },
1779  { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} },
1780  { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} },
1781  { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} },
1782  { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} },
1783  { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} },
1784  { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} },
1785  { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} },
1786  { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} },
1787  { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} },
1788  { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} },
1789  { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} },
1790  { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} },
1791  { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} },
1792  { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} },
1793  { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} },
1794  { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} },
1795  { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} },
1796  { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} },
1797  { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} },
1798  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} },
1799  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} },
1800  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} },
1801  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} },
1802  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} },
1803  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} },
1804  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} },
1805  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} },
1806  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} },
1807  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} },
1808  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} },
1809  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} },
1810  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} },
1811  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} },
1812  { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} },
1813  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} },
1814  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} },
1815  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} },
1816  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} },
1817  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} },
1818  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
1819  {{ 1, false, 6, 0 }} },
1820  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} },
1821  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} },
1822  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} },
1823  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} },
1824  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} },
1825  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} },
1826  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
1827  {{ 1, false, 5, 0 }} },
1828  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} },
1829  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} },
1830  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} },
1831  { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} },
1832  { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} },
1833  { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 },
1834  { 2, false, 5, 0 }} },
1835  { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 },
1836  { 2, false, 6, 0 }} },
1837  { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 },
1838  { 3, false, 5, 0 }} },
1839  { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 },
1840  { 3, false, 6, 0 }} },
1841  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} },
1842  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} },
1843  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} },
1844  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} },
1845  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} },
1846  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} },
1847  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} },
1848  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} },
1849  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} },
1850  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} },
1851  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} },
1852  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} },
1853  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} },
1854  { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} },
1855  { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} },
1856  { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
1857  {{ 2, false, 4, 0 },
1858  { 3, false, 5, 0 }} },
1859  { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
1860  {{ 2, false, 4, 0 },
1861  { 3, false, 5, 0 }} },
1862  { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
1863  {{ 2, false, 4, 0 },
1864  { 3, false, 5, 0 }} },
1865  { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
1866  {{ 2, false, 4, 0 },
1867  { 3, false, 5, 0 }} },
1868  { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} },
1869  { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} },
1870  { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} },
1871  { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} },
1872  { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} },
1873  { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} },
1874  { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} },
1875  { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} },
1876  { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} },
1877  { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} },
1878  { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 },
1879  { 2, false, 5, 0 }} },
1880  { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 },
1881  { 2, false, 6, 0 }} },
1882  { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} },
1883  { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} },
1884  { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} },
1885  { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} },
1886  { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} },
1887  { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} },
1888  { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} },
1889  { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} },
1890  { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
1891  {{ 1, false, 4, 0 }} },
1892  { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} },
1893  { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
1894  {{ 1, false, 4, 0 }} },
1895  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} },
1896  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} },
1897  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} },
1898  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} },
1899  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} },
1900  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} },
1901  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} },
1902  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} },
1903  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} },
1904  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} },
1905  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} },
1906  { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} },
1907  { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} },
1908  { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} },
1909  { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} },
1910  { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} },
1911  { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} },
1912  { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} },
1913  { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} },
1914  { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
1915  {{ 3, false, 1, 0 }} },
1916  { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} },
1917  { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} },
1918  { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} },
1919  { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
1920  {{ 3, false, 1, 0 }} },
1921  { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} },
1922  { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} },
1923  { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} },
1924  { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
1925  {{ 3, false, 1, 0 }} },
1926  };
1927 
1928  auto F = Infos.find(BuiltinID);
1929  if (F == Infos.end())
1930  return false;
1931 
1932  bool Error = false;
1933 
1934  for (const ArgInfo &A : F->second) {
1935  int32_t Min = A.IsSigned ? -(1 << (A.BitWidth-1)) : 0;
1936  int32_t Max = (1 << (A.IsSigned ? A.BitWidth-1 : A.BitWidth)) - 1;
1937  if (!A.Align) {
1938  Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
1939  } else {
1940  unsigned M = 1 << A.Align;
1941  Min *= M;
1942  Max *= M;
1943  Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) |
1944  SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
1945  }
1946  }
1947  return Error;
1948 }
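// Worked example of the range computation above (illustrative only): for
// Hexagon::BI__builtin_circ_ldd the table entry is { 3, true, 4, 3 }, i.e. a
// signed 4-bit immediate scaled by a 3-bit alignment. Before scaling,
// Min = -(1 << 3) = -8 and Max = (1 << 3) - 1 = 7; with M = 1 << 3 = 8 the
// accepted values for operand 3 become the multiples of 8 in [-64, 56].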
1949 
1950 // CheckMipsBuiltinFunctionCall - Checks that the constant value passed to the
1951 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The
1952 // ordering for DSP is unspecified. MSA is ordered by the data format used
1953 // by the underlying instruction i.e., df/m, df/n and then by size.
1954 //
1955 // FIXME: The size tests here should instead be tablegen'd along with the
1956 // definitions from include/clang/Basic/BuiltinsMips.def.
1957 // FIXME: GCC is strict on signedness for some of these intrinsics; we should
1958 // be too.
1959 bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
1960  unsigned i = 0, l = 0, u = 0, m = 0;
1961  switch (BuiltinID) {
1962  default: return false;
1963  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
1964  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
1965  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
1966  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
1967  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
1968  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
1969  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
1970  // MSA intrinsics. Instructions (which the intrinsics map to) which use the
1971  // df/m field.
1972  // These intrinsics take an unsigned 3 bit immediate.
1973  case Mips::BI__builtin_msa_bclri_b:
1974  case Mips::BI__builtin_msa_bnegi_b:
1975  case Mips::BI__builtin_msa_bseti_b:
1976  case Mips::BI__builtin_msa_sat_s_b:
1977  case Mips::BI__builtin_msa_sat_u_b:
1978  case Mips::BI__builtin_msa_slli_b:
1979  case Mips::BI__builtin_msa_srai_b:
1980  case Mips::BI__builtin_msa_srari_b:
1981  case Mips::BI__builtin_msa_srli_b:
1982  case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
1983  case Mips::BI__builtin_msa_binsli_b:
1984  case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
1985  // These intrinsics take an unsigned 4 bit immediate.
1986  case Mips::BI__builtin_msa_bclri_h:
1987  case Mips::BI__builtin_msa_bnegi_h:
1988  case Mips::BI__builtin_msa_bseti_h:
1989  case Mips::BI__builtin_msa_sat_s_h:
1990  case Mips::BI__builtin_msa_sat_u_h:
1991  case Mips::BI__builtin_msa_slli_h:
1992  case Mips::BI__builtin_msa_srai_h:
1993  case Mips::BI__builtin_msa_srari_h:
1994  case Mips::BI__builtin_msa_srli_h:
1995  case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
1996  case Mips::BI__builtin_msa_binsli_h:
1997  case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
1998  // These intrinsics take an unsigned 5 bit immediate.
1999  // The first block of intrinsics actually has an unsigned 5 bit field,
2000  // not a df/n field.
2001  case Mips::BI__builtin_msa_clei_u_b:
2002  case Mips::BI__builtin_msa_clei_u_h:
2003  case Mips::BI__builtin_msa_clei_u_w:
2004  case Mips::BI__builtin_msa_clei_u_d:
2005  case Mips::BI__builtin_msa_clti_u_b:
2006  case Mips::BI__builtin_msa_clti_u_h:
2007  case Mips::BI__builtin_msa_clti_u_w:
2008  case Mips::BI__builtin_msa_clti_u_d:
2009  case Mips::BI__builtin_msa_maxi_u_b:
2010  case Mips::BI__builtin_msa_maxi_u_h:
2011  case Mips::BI__builtin_msa_maxi_u_w:
2012  case Mips::BI__builtin_msa_maxi_u_d:
2013  case Mips::BI__builtin_msa_mini_u_b:
2014  case Mips::BI__builtin_msa_mini_u_h:
2015  case Mips::BI__builtin_msa_mini_u_w:
2016  case Mips::BI__builtin_msa_mini_u_d:
2017  case Mips::BI__builtin_msa_addvi_b:
2018  case Mips::BI__builtin_msa_addvi_h:
2019  case Mips::BI__builtin_msa_addvi_w:
2020  case Mips::BI__builtin_msa_addvi_d:
2021  case Mips::BI__builtin_msa_bclri_w:
2022  case Mips::BI__builtin_msa_bnegi_w:
2023  case Mips::BI__builtin_msa_bseti_w:
2024  case Mips::BI__builtin_msa_sat_s_w:
2025  case Mips::BI__builtin_msa_sat_u_w:
2026  case Mips::BI__builtin_msa_slli_w:
2027  case Mips::BI__builtin_msa_srai_w:
2028  case Mips::BI__builtin_msa_srari_w:
2029  case Mips::BI__builtin_msa_srli_w:
2030  case Mips::BI__builtin_msa_srlri_w:
2031  case Mips::BI__builtin_msa_subvi_b:
2032  case Mips::BI__builtin_msa_subvi_h:
2033  case Mips::BI__builtin_msa_subvi_w:
2034  case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
2035  case Mips::BI__builtin_msa_binsli_w:
2036  case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
2037  // These intrinsics take an unsigned 6 bit immediate.
2038  case Mips::BI__builtin_msa_bclri_d:
2039  case Mips::BI__builtin_msa_bnegi_d:
2040  case Mips::BI__builtin_msa_bseti_d:
2041  case Mips::BI__builtin_msa_sat_s_d:
2042  case Mips::BI__builtin_msa_sat_u_d:
2043  case Mips::BI__builtin_msa_slli_d:
2044  case Mips::BI__builtin_msa_srai_d:
2045  case Mips::BI__builtin_msa_srari_d:
2046  case Mips::BI__builtin_msa_srli_d:
2047  case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
2048  case Mips::BI__builtin_msa_binsli_d:
2049  case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
2050  // These intrinsics take a signed 5 bit immediate.
2051  case Mips::BI__builtin_msa_ceqi_b:
2052  case Mips::BI__builtin_msa_ceqi_h:
2053  case Mips::BI__builtin_msa_ceqi_w:
2054  case Mips::BI__builtin_msa_ceqi_d:
2055  case Mips::BI__builtin_msa_clti_s_b:
2056  case Mips::BI__builtin_msa_clti_s_h:
2057  case Mips::BI__builtin_msa_clti_s_w:
2058  case Mips::BI__builtin_msa_clti_s_d:
2059  case Mips::BI__builtin_msa_clei_s_b:
2060  case Mips::BI__builtin_msa_clei_s_h:
2061  case Mips::BI__builtin_msa_clei_s_w:
2062  case Mips::BI__builtin_msa_clei_s_d:
2063  case Mips::BI__builtin_msa_maxi_s_b:
2064  case Mips::BI__builtin_msa_maxi_s_h:
2065  case Mips::BI__builtin_msa_maxi_s_w:
2066  case Mips::BI__builtin_msa_maxi_s_d:
2067  case Mips::BI__builtin_msa_mini_s_b:
2068  case Mips::BI__builtin_msa_mini_s_h:
2069  case Mips::BI__builtin_msa_mini_s_w:
2070  case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
2071  // These intrinsics take an unsigned 8 bit immediate.
2072  case Mips::BI__builtin_msa_andi_b:
2073  case Mips::BI__builtin_msa_nori_b:
2074  case Mips::BI__builtin_msa_ori_b:
2075  case Mips::BI__builtin_msa_shf_b:
2076  case Mips::BI__builtin_msa_shf_h:
2077  case Mips::BI__builtin_msa_shf_w:
2078  case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
2079  case Mips::BI__builtin_msa_bseli_b:
2080  case Mips::BI__builtin_msa_bmnzi_b:
2081  case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
2082  // df/n format
2083  // These intrinsics take an unsigned 4 bit immediate.
2084  case Mips::BI__builtin_msa_copy_s_b:
2085  case Mips::BI__builtin_msa_copy_u_b:
2086  case Mips::BI__builtin_msa_insve_b:
2087  case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
2088  case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
2089  // These intrinsics take an unsigned 3 bit immediate.
2090  case Mips::BI__builtin_msa_copy_s_h:
2091  case Mips::BI__builtin_msa_copy_u_h:
2092  case Mips::BI__builtin_msa_insve_h:
2093  case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
2094  case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
2095  // These intrinsics take an unsigned 2 bit immediate.
2096  case Mips::BI__builtin_msa_copy_s_w:
2097  case Mips::BI__builtin_msa_copy_u_w:
2098  case Mips::BI__builtin_msa_insve_w:
2099  case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
2100  case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
2101  // These intrinsics take an unsigned 1 bit immediate.
2102  case Mips::BI__builtin_msa_copy_s_d:
2103  case Mips::BI__builtin_msa_copy_u_d:
2104  case Mips::BI__builtin_msa_insve_d:
2105  case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
2106  case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
2107  // Memory offsets and immediate loads.
2108  // These intrinsics take a signed 10 bit immediate.
2109  case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
2110  case Mips::BI__builtin_msa_ldi_h:
2111  case Mips::BI__builtin_msa_ldi_w:
2112  case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
2113  case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 16; break;
2114  case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 16; break;
2115  case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 16; break;
2116  case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 16; break;
2117  case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 16; break;
2118  case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 16; break;
2119  case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 16; break;
2120  case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 16; break;
2121  }
2122 
2123  if (!m)
2124  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
2125 
2126  return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
2127  SemaBuiltinConstantArgMultiple(TheCall, i, m);
2128 }
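// For illustration only: for the MSA load/store intrinsics above the offset
// must be a constant multiple of 16 inside the listed range, e.g. (example
// values, assuming the usual v4i32 vector type for ld_w)
//
//   v4i32 a = __builtin_msa_ld_w(ptr, 32);   // accepted: in [-2048, 2044] and a multiple of 16
//   v4i32 b = __builtin_msa_ld_w(ptr, 20);   // rejected: not a multiple of 16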
2129 
2130 bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
2131  unsigned i = 0, l = 0, u = 0;
2132  bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde ||
2133  BuiltinID == PPC::BI__builtin_divdeu ||
2134  BuiltinID == PPC::BI__builtin_bpermd;
2135  bool IsTarget64Bit = Context.getTargetInfo()
2136  .getTypeWidth(Context
2137  .getTargetInfo()
2138  .getIntPtrType()) == 64;
2139  bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe ||
2140  BuiltinID == PPC::BI__builtin_divweu ||
2141  BuiltinID == PPC::BI__builtin_divde ||
2142  BuiltinID == PPC::BI__builtin_divdeu;
2143 
2144  if (Is64BitBltin && !IsTarget64Bit)
2145  return Diag(TheCall->getLocStart(), diag::err_64_bit_builtin_32_bit_tgt)
2146  << TheCall->getSourceRange();
2147 
2148  if ((IsBltinExtDiv && !Context.getTargetInfo().hasFeature("extdiv")) ||
2149  (BuiltinID == PPC::BI__builtin_bpermd &&
2150  !Context.getTargetInfo().hasFeature("bpermd")))
2151  return Diag(TheCall->getLocStart(), diag::err_ppc_builtin_only_on_pwr7)
2152  << TheCall->getSourceRange();
2153 
2154  switch (BuiltinID) {
2155  default: return false;
2156  case PPC::BI__builtin_altivec_crypto_vshasigmaw:
2157  case PPC::BI__builtin_altivec_crypto_vshasigmad:
2158  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
2159  SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
2160  case PPC::BI__builtin_tbegin:
2161  case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break;
2162  case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break;
2163  case PPC::BI__builtin_tabortwc:
2164  case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break;
2165  case PPC::BI__builtin_tabortwci:
2166  case PPC::BI__builtin_tabortdci:
2167  return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
2168  SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
2169  case PPC::BI__builtin_vsx_xxpermdi:
2170  case PPC::BI__builtin_vsx_xxsldwi:
2171  return SemaBuiltinVSX(TheCall);
2172  }
2173  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
2174 }
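// For illustration only: __builtin_tbegin and __builtin_tend take a single
// constant in [0, 1], so __builtin_tbegin(0) is accepted while
// __builtin_tbegin(2) is rejected with the usual constant-range diagnostic.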
2175 
2176 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
2177  CallExpr *TheCall) {
2178  if (BuiltinID == SystemZ::BI__builtin_tabort) {
2179  Expr *Arg = TheCall->getArg(0);
2180  llvm::APSInt AbortCode(32);
2181  if (Arg->isIntegerConstantExpr(AbortCode, Context) &&
2182  AbortCode.getSExtValue() >= 0 && AbortCode.getSExtValue() < 256)
2183  return Diag(Arg->getLocStart(), diag::err_systemz_invalid_tabort_code)
2184  << Arg->getSourceRange();
2185  }
2186 
2187  // For intrinsics which take an immediate value as part of the instruction,
2188  // range check them here.
2189  unsigned i = 0, l = 0, u = 0;
2190  switch (BuiltinID) {
2191  default: return false;
2192  case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
2193  case SystemZ::BI__builtin_s390_verimb:
2194  case SystemZ::BI__builtin_s390_verimh:
2195  case SystemZ::BI__builtin_s390_verimf:
2196  case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
2197  case SystemZ::BI__builtin_s390_vfaeb:
2198  case SystemZ::BI__builtin_s390_vfaeh:
2199  case SystemZ::BI__builtin_s390_vfaef:
2200  case SystemZ::BI__builtin_s390_vfaebs:
2201  case SystemZ::BI__builtin_s390_vfaehs:
2202  case SystemZ::BI__builtin_s390_vfaefs:
2203  case SystemZ::BI__builtin_s390_vfaezb:
2204  case SystemZ::BI__builtin_s390_vfaezh:
2205  case SystemZ::BI__builtin_s390_vfaezf:
2206  case SystemZ::BI__builtin_s390_vfaezbs:
2207  case SystemZ::BI__builtin_s390_vfaezhs:
2208  case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
2209  case SystemZ::BI__builtin_s390_vfisb:
2210  case SystemZ::BI__builtin_s390_vfidb:
2211  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
2212  SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
2213  case SystemZ::BI__builtin_s390_vftcisb:
2214  case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
2215  case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
2216  case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
2217  case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
2218  case SystemZ::BI__builtin_s390_vstrcb:
2219  case SystemZ::BI__builtin_s390_vstrch:
2220  case SystemZ::BI__builtin_s390_vstrcf:
2221  case SystemZ::BI__builtin_s390_vstrczb:
2222  case SystemZ::BI__builtin_s390_vstrczh:
2223  case SystemZ::BI__builtin_s390_vstrczf:
2224  case SystemZ::BI__builtin_s390_vstrcbs:
2225  case SystemZ::BI__builtin_s390_vstrchs:
2226  case SystemZ::BI__builtin_s390_vstrcfs:
2227  case SystemZ::BI__builtin_s390_vstrczbs:
2228  case SystemZ::BI__builtin_s390_vstrczhs:
2229  case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
2230  case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
2231  case SystemZ::BI__builtin_s390_vfminsb:
2232  case SystemZ::BI__builtin_s390_vfmaxsb:
2233  case SystemZ::BI__builtin_s390_vfmindb:
2234  case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
2235  }
2236  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
2237 }
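// For illustration only: __builtin_tabort rejects constant abort codes below
// 256, so __builtin_tabort(256) passes this check while __builtin_tabort(42)
// is diagnosed with err_systemz_invalid_tabort_code.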
2238 
2239 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
2240 /// This checks that the target supports __builtin_cpu_supports and
2241 /// that the string argument is constant and valid.
2242 static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) {
2243  Expr *Arg = TheCall->getArg(0);
2244 
2245  // Check if the argument is a string literal.
2246  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
2247  return S.Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal)
2248  << Arg->getSourceRange();
2249 
2250  // Check the contents of the string.
2251  StringRef Feature =
2252  cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
2253  if (!S.Context.getTargetInfo().validateCpuSupports(Feature))
2254  return S.Diag(TheCall->getLocStart(), diag::err_invalid_cpu_supports)
2255  << Arg->getSourceRange();
2256  return false;
2257 }
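// For illustration only: the argument must be a string literal naming a
// feature the target recognizes, e.g.
//
//   if (__builtin_cpu_supports("avx2")) { /* ... */ }       // accepted on x86
//   if (__builtin_cpu_supports(feature_name)) { /* ... */ } // rejected: not a literal
//
// where feature_name stands for any non-literal expression.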
2258 
2259 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
2260 /// This checks that the target supports __builtin_cpu_is and
2261 /// that the string argument is constant and valid.
2262 static bool SemaBuiltinCpuIs(Sema &S, CallExpr *TheCall) {
2263  Expr *Arg = TheCall->getArg(0);
2264 
2265  // Check if the argument is a string literal.
2266  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
2267  return S.Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal)
2268  << Arg->getSourceRange();
2269 
2270  // Check the contents of the string.
2271  StringRef Feature =
2272  cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
2273  if (!S.Context.getTargetInfo().validateCpuIs(Feature))
2274  return S.Diag(TheCall->getLocStart(), diag::err_invalid_cpu_is)
2275  << Arg->getSourceRange();
2276  return false;
2277 }
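// For illustration only: __builtin_cpu_is is validated the same way, but the
// string must name a CPU the target knows about, e.g. __builtin_cpu_is("amd")
// is accepted on x86 while a misspelled name is diagnosed with
// err_invalid_cpu_is.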
2278 
2279 // Check if the rounding mode is legal.
2280 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
2281  // Indicates if this instruction has rounding control or just SAE.
2282  bool HasRC = false;
2283 
2284  unsigned ArgNum = 0;
2285  switch (BuiltinID) {
2286  default:
2287  return false;
2288  case X86::BI__builtin_ia32_vcvttsd2si32:
2289  case X86::BI__builtin_ia32_vcvttsd2si64:
2290  case X86::BI__builtin_ia32_vcvttsd2usi32:
2291  case X86::BI__builtin_ia32_vcvttsd2usi64:
2292  case X86::BI__builtin_ia32_vcvttss2si32:
2293  case X86::BI__builtin_ia32_vcvttss2si64:
2294  case X86::BI__builtin_ia32_vcvttss2usi32:
2295  case X86::BI__builtin_ia32_vcvttss2usi64:
2296  ArgNum = 1;
2297  break;
2298  case X86::BI__builtin_ia32_cvtps2pd512_mask:
2299  case X86::BI__builtin_ia32_cvttpd2dq512_mask:
2300  case X86::BI__builtin_ia32_cvttpd2qq512_mask:
2301  case X86::BI__builtin_ia32_cvttpd2udq512_mask:
2302  case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
2303  case X86::BI__builtin_ia32_cvttps2dq512_mask:
2304  case X86::BI__builtin_ia32_cvttps2qq512_mask:
2305  case X86::BI__builtin_ia32_cvttps2udq512_mask:
2306  case X86::BI__builtin_ia32_cvttps2uqq512_mask:
2307  case X86::BI__builtin_ia32_exp2pd_mask:
2308  case X86::BI__builtin_ia32_exp2ps_mask:
2309  case X86::BI__builtin_ia32_getexppd512_mask:
2310  case X86::BI__builtin_ia32_getexpps512_mask:
2311  case X86::BI__builtin_ia32_rcp28pd_mask:
2312  case X86::BI__builtin_ia32_rcp28ps_mask:
2313  case X86::BI__builtin_ia32_rsqrt28pd_mask:
2314  case X86::BI__builtin_ia32_rsqrt28ps_mask:
2315  case X86::BI__builtin_ia32_vcomisd:
2316  case X86::BI__builtin_ia32_vcomiss:
2317  case X86::BI__builtin_ia32_vcvtph2ps512_mask:
2318  ArgNum = 3;
2319  break;
2320  case X86::BI__builtin_ia32_cmppd512_mask:
2321  case X86::BI__builtin_ia32_cmpps512_mask:
2322  case X86::BI__builtin_ia32_cmpsd_mask:
2323  case X86::BI__builtin_ia32_cmpss_mask:
2324  case X86::BI__builtin_ia32_cvtss2sd_round_mask:
2325  case X86::BI__builtin_ia32_getexpsd128_round_mask:
2326  case X86::BI__builtin_ia32_getexpss128_round_mask:
2327  case X86::BI__builtin_ia32_maxpd512_mask:
2328  case X86::BI__builtin_ia32_maxps512_mask:
2329  case X86::BI__builtin_ia32_maxsd_round_mask:
2330  case X86::BI__builtin_ia32_maxss_round_mask:
2331  case X86::BI__builtin_ia32_minpd512_mask:
2332  case X86::BI__builtin_ia32_minps512_mask:
2333  case X86::BI__builtin_ia32_minsd_round_mask:
2334  case X86::BI__builtin_ia32_minss_round_mask:
2335  case X86::BI__builtin_ia32_rcp28sd_round_mask:
2336  case X86::BI__builtin_ia32_rcp28ss_round_mask:
2337  case X86::BI__builtin_ia32_reducepd512_mask:
2338  case X86::BI__builtin_ia32_reduceps512_mask:
2339  case X86::BI__builtin_ia32_rndscalepd_mask:
2340  case X86::BI__builtin_ia32_rndscaleps_mask:
2341  case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
2342  case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
2343  ArgNum = 4;
2344  break;
2345  case X86::BI__builtin_ia32_fixupimmpd512_mask:
2346  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
2347  case X86::BI__builtin_ia32_fixupimmps512_mask:
2348  case X86::BI__builtin_ia32_fixupimmps512_maskz:
2349  case X86::BI__builtin_ia32_fixupimmsd_mask:
2350  case X86::BI__builtin_ia32_fixupimmsd_maskz:
2351  case X86::BI__builtin_ia32_fixupimmss_mask:
2352  case X86::BI__builtin_ia32_fixupimmss_maskz:
2353  case X86::BI__builtin_ia32_rangepd512_mask:
2354  case X86::BI__builtin_ia32_rangeps512_mask:
2355  case X86::BI__builtin_ia32_rangesd128_round_mask:
2356  case X86::BI__builtin_ia32_rangess128_round_mask:
2357  case X86::BI__builtin_ia32_reducesd_mask:
2358  case X86::BI__builtin_ia32_reducess_mask:
2359  case X86::BI__builtin_ia32_rndscalesd_round_mask:
2360  case X86::BI__builtin_ia32_rndscaless_round_mask:
2361  ArgNum = 5;
2362  break;
2363  case X86::BI__builtin_ia32_vcvtsd2si64:
2364  case X86::BI__builtin_ia32_vcvtsd2si32:
2365  case X86::BI__builtin_ia32_vcvtsd2usi32:
2366  case X86::BI__builtin_ia32_vcvtsd2usi64:
2367  case X86::BI__builtin_ia32_vcvtss2si32:
2368  case X86::BI__builtin_ia32_vcvtss2si64:
2369  case X86::BI__builtin_ia32_vcvtss2usi32:
2370  case X86::BI__builtin_ia32_vcvtss2usi64:
2371  ArgNum = 1;
2372  HasRC = true;
2373  break;
2374  case X86::BI__builtin_ia32_addpd512:
2375  case X86::BI__builtin_ia32_addps512:
2376  case X86::BI__builtin_ia32_divpd512:
2377  case X86::BI__builtin_ia32_divps512:
2378  case X86::BI__builtin_ia32_mulpd512:
2379  case X86::BI__builtin_ia32_mulps512:
2380  case X86::BI__builtin_ia32_subpd512:
2381  case X86::BI__builtin_ia32_subps512:
2382  case X86::BI__builtin_ia32_cvtsi2sd64:
2383  case X86::BI__builtin_ia32_cvtsi2ss32:
2384  case X86::BI__builtin_ia32_cvtsi2ss64:
2385  case X86::BI__builtin_ia32_cvtusi2sd64:
2386  case X86::BI__builtin_ia32_cvtusi2ss32:
2387  case X86::BI__builtin_ia32_cvtusi2ss64:
2388  ArgNum = 2;
2389  HasRC = true;
2390  break;
2391  case X86::BI__builtin_ia32_cvtdq2ps512_mask:
2392  case X86::BI__builtin_ia32_cvtudq2ps512_mask:
2393  case X86::BI__builtin_ia32_cvtpd2ps512_mask:
2394  case X86::BI__builtin_ia32_cvtpd2qq512_mask:
2395  case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
2396  case X86::BI__builtin_ia32_cvtps2qq512_mask:
2397  case X86::BI__builtin_ia32_cvtps2uqq512_mask:
2398  case X86::BI__builtin_ia32_cvtqq2pd512_mask:
2399  case X86::BI__builtin_ia32_cvtqq2ps512_mask:
2400  case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
2401  case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
2402  case X86::BI__builtin_ia32_sqrtpd512_mask:
2403  case X86::BI__builtin_ia32_sqrtps512_mask:
2404  ArgNum = 3;
2405  HasRC = true;
2406  break;
2407  case X86::BI__builtin_ia32_addss_round_mask:
2408  case X86::BI__builtin_ia32_addsd_round_mask:
2409  case X86::BI__builtin_ia32_divss_round_mask:
2410  case X86::BI__builtin_ia32_divsd_round_mask:
2411  case X86::BI__builtin_ia32_mulss_round_mask:
2412  case X86::BI__builtin_ia32_mulsd_round_mask:
2413  case X86::BI__builtin_ia32_subss_round_mask:
2414  case X86::BI__builtin_ia32_subsd_round_mask:
2415  case X86::BI__builtin_ia32_scalefpd512_mask:
2416  case X86::BI__builtin_ia32_scalefps512_mask:
2417  case X86::BI__builtin_ia32_scalefsd_round_mask:
2418  case X86::BI__builtin_ia32_scalefss_round_mask:
2419  case X86::BI__builtin_ia32_getmantpd512_mask:
2420  case X86::BI__builtin_ia32_getmantps512_mask:
2421  case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
2422  case X86::BI__builtin_ia32_sqrtsd_round_mask:
2423  case X86::BI__builtin_ia32_sqrtss_round_mask:
2424  case X86::BI__builtin_ia32_vfmaddsd3_mask:
2425  case X86::BI__builtin_ia32_vfmaddsd3_maskz:
2426  case X86::BI__builtin_ia32_vfmaddsd3_mask3:
2427  case X86::BI__builtin_ia32_vfmaddss3_mask:
2428  case X86::BI__builtin_ia32_vfmaddss3_maskz:
2429  case X86::BI__builtin_ia32_vfmaddss3_mask3:
2430  case X86::BI__builtin_ia32_vfmaddpd512_mask:
2431  case X86::BI__builtin_ia32_vfmaddpd512_maskz:
2432  case X86::BI__builtin_ia32_vfmaddpd512_mask3:
2433  case X86::BI__builtin_ia32_vfmsubpd512_mask3:
2434  case X86::BI__builtin_ia32_vfmaddps512_mask:
2435  case X86::BI__builtin_ia32_vfmaddps512_maskz:
2436  case X86::BI__builtin_ia32_vfmaddps512_mask3:
2437  case X86::BI__builtin_ia32_vfmsubps512_mask3:
2438  case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
2439  case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
2440  case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
2441  case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
2442  case X86::BI__builtin_ia32_vfmaddsubps512_mask:
2443  case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
2444  case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
2445  case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
2446  ArgNum = 4;
2447  HasRC = true;
2448  break;
2449  case X86::BI__builtin_ia32_getmantsd_round_mask:
2450  case X86::BI__builtin_ia32_getmantss_round_mask:
2451  ArgNum = 5;
2452  HasRC = true;
2453  break;
2454  }
2455 
2456  llvm::APSInt Result;
2457 
2458  // We can't check the value of a dependent argument.
2459  Expr *Arg = TheCall->getArg(ArgNum);
2460  if (Arg->isTypeDependent() || Arg->isValueDependent())
2461  return false;
2462 
2463  // Check constant-ness first.
2464  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
2465  return true;
2466 
2467  // Make sure the rounding mode is either ROUND_CUR_DIRECTION or has the
2468  // ROUND_NO_EXC bit set. If the intrinsic has rounding control (bits 1:0),
2469  // make sure it is only combined with ROUND_NO_EXC.
2470  if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
2471  Result == 8/*ROUND_NO_EXC*/ ||
2472  (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
2473  return false;
2474 
2475  return Diag(TheCall->getLocStart(), diag::err_x86_builtin_invalid_rounding)
2476  << Arg->getSourceRange();
2477 }
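// Worked example for the check above (illustrative, assuming the usual x86
// intrinsic encodings where _MM_FROUND_CUR_DIRECTION == 4 and
// _MM_FROUND_NO_EXC == 8): the accepted immediates are 4, 8, and, when the
// builtin has rounding control, 8|0 through 8|3 (values 8 to 11), i.e. an
// explicit rounding mode combined with _MM_FROUND_NO_EXC.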
2478 
2479 // Check if the gather/scatter scale is legal.
2480 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
2481  CallExpr *TheCall) {
2482  unsigned ArgNum = 0;
2483  switch (BuiltinID) {
2484  default:
2485  return false;
2486  case X86::BI__builtin_ia32_gatherpfdpd:
2487  case X86::BI__builtin_ia32_gatherpfdps:
2488  case X86::BI__builtin_ia32_gatherpfqpd:
2489  case X86::BI__builtin_ia32_gatherpfqps:
2490  case X86::BI__builtin_ia32_scatterpfdpd:
2491  case X86::BI__builtin_ia32_scatterpfdps:
2492  case X86::BI__builtin_ia32_scatterpfqpd:
2493  case X86::BI__builtin_ia32_scatterpfqps:
2494  ArgNum = 3;
2495  break;
2496  case X86::BI__builtin_ia32_gatherd_pd:
2497  case X86::BI__builtin_ia32_gatherd_pd256:
2498  case X86::BI__builtin_ia32_gatherq_pd:
2499  case X86::BI__builtin_ia32_gatherq_pd256:
2500  case X86::BI__builtin_ia32_gatherd_ps:
2501  case X86::BI__builtin_ia32_gatherd_ps256:
2502  case X86::BI__builtin_ia32_gatherq_ps:
2503  case X86::BI__builtin_ia32_gatherq_ps256:
2504  case X86::BI__builtin_ia32_gatherd_q:
2505  case X86::BI__builtin_ia32_gatherd_q256:
2506  case X86::BI__builtin_ia32_gatherq_q:
2507  case X86::BI__builtin_ia32_gatherq_q256:
2508  case X86::BI__builtin_ia32_gatherd_d:
2509  case X86::BI__builtin_ia32_gatherd_d256:
2510  case X86::BI__builtin_ia32_gatherq_d:
2511  case X86::BI__builtin_ia32_gatherq_d256:
2512  case X86::BI__builtin_ia32_gather3div2df:
2513  case X86::BI__builtin_ia32_gather3div2di:
2514  case X86::BI__builtin_ia32_gather3div4df:
2515  case X86::BI__builtin_ia32_gather3div4di:
2516  case X86::BI__builtin_ia32_gather3div4sf:
2517  case X86::BI__builtin_ia32_gather3div4si:
2518  case X86::BI__builtin_ia32_gather3div8sf:
2519  case X86::BI__builtin_ia32_gather3div8si:
2520  case X86::BI__builtin_ia32_gather3siv2df:
2521  case X86::BI__builtin_ia32_gather3siv2di:
2522  case X86::BI__builtin_ia32_gather3siv4df:
2523  case X86::BI__builtin_ia32_gather3siv4di:
2524  case X86::BI__builtin_ia32_gather3siv4sf:
2525  case X86::BI__builtin_ia32_gather3siv4si:
2526  case X86::BI__builtin_ia32_gather3siv8sf:
2527  case X86::BI__builtin_ia32_gather3siv8si:
2528  case X86::BI__builtin_ia32_gathersiv8df:
2529  case X86::BI__builtin_ia32_gathersiv16sf:
2530  case X86::BI__builtin_ia32_gatherdiv8df:
2531  case X86::BI__builtin_ia32_gatherdiv16sf:
2532  case X86::BI__builtin_ia32_gathersiv8di:
2533  case X86::BI__builtin_ia32_gathersiv16si:
2534  case X86::BI__builtin_ia32_gatherdiv8di:
2535  case X86::BI__builtin_ia32_gatherdiv16si:
2536  case X86::BI__builtin_ia32_scatterdiv2df:
2537  case X86::BI__builtin_ia32_scatterdiv2di:
2538  case X86::BI__builtin_ia32_scatterdiv4df:
2539  case X86::BI__builtin_ia32_scatterdiv4di:
2540  case X86::BI__builtin_ia32_scatterdiv4sf:
2541  case X86::BI__builtin_ia32_scatterdiv4si:
2542  case X86::BI__builtin_ia32_scatterdiv8sf:
2543  case X86::BI__builtin_ia32_scatterdiv8si:
2544  case X86::BI__builtin_ia32_scattersiv2df:
2545  case X86::BI__builtin_ia32_scattersiv2di:
2546  case X86::BI__builtin_ia32_scattersiv4df:
2547  case X86::BI__builtin_ia32_scattersiv4di:
2548  case X86::BI__builtin_ia32_scattersiv4sf:
2549  case X86::BI__builtin_ia32_scattersiv4si:
2550  case X86::BI__builtin_ia32_scattersiv8sf:
2551  case X86::BI__builtin_ia32_scattersiv8si:
2552  case X86::BI__builtin_ia32_scattersiv8df:
2553  case X86::BI__builtin_ia32_scattersiv16sf:
2554  case X86::BI__builtin_ia32_scatterdiv8df:
2555  case X86::BI__builtin_ia32_scatterdiv16sf:
2556  case X86::BI__builtin_ia32_scattersiv8di:
2557  case X86::BI__builtin_ia32_scattersiv16si:
2558  case X86::BI__builtin_ia32_scatterdiv8di:
2559  case X86::BI__builtin_ia32_scatterdiv16si:
2560  ArgNum = 4;
2561  break;
2562  }
2563 
2564  llvm::APSInt Result;
2565 
2566  // We can't check the value of a dependent argument.
2567  Expr *Arg = TheCall->getArg(ArgNum);
2568  if (Arg->isTypeDependent() || Arg->isValueDependent())
2569  return false;
2570 
2571  // Check constant-ness first.
2572  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
2573  return true;
2574 
2575  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
2576  return false;
2577 
2578  return Diag(TheCall->getLocStart(), diag::err_x86_builtin_invalid_scale)
2579  << Arg->getSourceRange();
2580 }
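// For illustration only: the scale operand of these gather/scatter builtins
// is the byte multiplier applied to each index and must be a constant 1, 2,
// 4, or 8; anything else (for example 3) is diagnosed with
// err_x86_builtin_invalid_scale.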
2581 
2582 static bool isX86_32Builtin(unsigned BuiltinID) {
2583  // These builtins only work on x86-32 targets.
2584  switch (BuiltinID) {
2585  case X86::BI__builtin_ia32_readeflags_u32:
2586  case X86::BI__builtin_ia32_writeeflags_u32:
2587  return true;
2588  }
2589 
2590  return false;
2591 }
2592 
2593 bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
2594  if (BuiltinID == X86::BI__builtin_cpu_supports)
2595  return SemaBuiltinCpuSupports(*this, TheCall);
2596 
2597  if (BuiltinID == X86::BI__builtin_cpu_is)
2598  return SemaBuiltinCpuIs(*this, TheCall);
2599 
2600  // Check for 32-bit only builtins on a 64-bit target.
2601  const llvm::Triple &TT = Context.getTargetInfo().getTriple();
2602  if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
2603  return Diag(TheCall->getCallee()->getLocStart(),
2604  diag::err_32_bit_builtin_64_bit_tgt);
2605 
2606  // If the intrinsic has rounding or SAE, make sure it's valid.
2607  if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
2608  return true;
2609 
2610  // If the intrinsic has a gather/scatter scale immediate, make sure it's valid.
2611  if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
2612  return true;
2613 
2614  // For intrinsics which take an immediate value as part of the instruction,
2615  // range check them here.
2616  int i = 0, l = 0, u = 0;
2617  switch (BuiltinID) {
2618  default:
2619  return false;
2620  case X86::BI__builtin_ia32_vec_ext_v2si:
2621  case X86::BI__builtin_ia32_vec_ext_v2di:
2622  case X86::BI__builtin_ia32_vextractf128_pd256:
2623  case X86::BI__builtin_ia32_vextractf128_ps256:
2624  case X86::BI__builtin_ia32_vextractf128_si256:
2625  case X86::BI__builtin_ia32_extract128i256:
2626  case X86::BI__builtin_ia32_extractf64x4_mask:
2627  case X86::BI__builtin_ia32_extracti64x4_mask:
2628  case X86::BI__builtin_ia32_extractf32x8_mask:
2629  case X86::BI__builtin_ia32_extracti32x8_mask:
2630  case X86::BI__builtin_ia32_extractf64x2_256_mask:
2631  case X86::BI__builtin_ia32_extracti64x2_256_mask:
2632  case X86::BI__builtin_ia32_extractf32x4_256_mask:
2633  case X86::BI__builtin_ia32_extracti32x4_256_mask:
2634  i = 1; l = 0; u = 1;
2635  break;
2636  case X86::BI__builtin_ia32_vec_set_v2di:
2637  case X86::BI__builtin_ia32_vinsertf128_pd256:
2638  case X86::BI__builtin_ia32_vinsertf128_ps256:
2639  case X86::BI__builtin_ia32_vinsertf128_si256:
2640  case X86::BI__builtin_ia32_insert128i256:
2641  case X86::BI__builtin_ia32_insertf32x8:
2642  case X86::BI__builtin_ia32_inserti32x8:
2643  case X86::BI__builtin_ia32_insertf64x4:
2644  case X86::BI__builtin_ia32_inserti64x4:
2645  case X86::BI__builtin_ia32_insertf64x2_256:
2646  case X86::BI__builtin_ia32_inserti64x2_256:
2647  case X86::BI__builtin_ia32_insertf32x4_256:
2648  case X86::BI__builtin_ia32_inserti32x4_256:
2649  i = 2; l = 0; u = 1;
2650  break;
2651  case X86::BI__builtin_ia32_vpermilpd:
2652  case X86::BI__builtin_ia32_vec_ext_v4hi:
2653  case X86::BI__builtin_ia32_vec_ext_v4si:
2654  case X86::BI__builtin_ia32_vec_ext_v4sf:
2655  case X86::BI__builtin_ia32_vec_ext_v4di:
2656  case X86::BI__builtin_ia32_extractf32x4_mask:
2657  case X86::BI__builtin_ia32_extracti32x4_mask:
2658  case X86::BI__builtin_ia32_extractf64x2_512_mask:
2659  case X86::BI__builtin_ia32_extracti64x2_512_mask:
2660  i = 1; l = 0; u = 3;
2661  break;
2662  case X86::BI_mm_prefetch:
2663  case X86::BI__builtin_ia32_vec_ext_v8hi:
2664  case X86::BI__builtin_ia32_vec_ext_v8si:
2665  i = 1; l = 0; u = 7;
2666  break;
2667  case X86::BI__builtin_ia32_sha1rnds4:
2668  case X86::BI__builtin_ia32_blendpd:
2669  case X86::BI__builtin_ia32_shufpd:
2670  case X86::BI__builtin_ia32_vec_set_v4hi:
2671  case X86::BI__builtin_ia32_vec_set_v4si:
2672  case X86::BI__builtin_ia32_vec_set_v4di:
2673  case X86::BI__builtin_ia32_shuf_f32x4_256:
2674  case X86::BI__builtin_ia32_shuf_f64x2_256:
2675  case X86::BI__builtin_ia32_shuf_i32x4_256:
2676  case X86::BI__builtin_ia32_shuf_i64x2_256:
2677  case X86::BI__builtin_ia32_insertf64x2_512:
2678  case X86::BI__builtin_ia32_inserti64x2_512:
2679  case X86::BI__builtin_ia32_insertf32x4:
2680  case X86::BI__builtin_ia32_inserti32x4:
2681  i = 2; l = 0; u = 3;
2682  break;
2683  case X86::BI__builtin_ia32_vpermil2pd:
2684  case X86::BI__builtin_ia32_vpermil2pd256:
2685  case X86::BI__builtin_ia32_vpermil2ps:
2686  case X86::BI__builtin_ia32_vpermil2ps256:
2687  i = 3; l = 0; u = 3;
2688  break;
2689  case X86::BI__builtin_ia32_cmpb128_mask:
2690  case X86::BI__builtin_ia32_cmpw128_mask:
2691  case X86::BI__builtin_ia32_cmpd128_mask:
2692  case X86::BI__builtin_ia32_cmpq128_mask:
2693  case X86::BI__builtin_ia32_cmpb256_mask:
2694  case X86::BI__builtin_ia32_cmpw256_mask:
2695  case X86::BI__builtin_ia32_cmpd256_mask:
2696  case X86::BI__builtin_ia32_cmpq256_mask:
2697  case X86::BI__builtin_ia32_cmpb512_mask:
2698  case X86::BI__builtin_ia32_cmpw512_mask:
2699  case X86::BI__builtin_ia32_cmpd512_mask:
2700  case X86::BI__builtin_ia32_cmpq512_mask:
2701  case X86::BI__builtin_ia32_ucmpb128_mask:
2702  case X86::BI__builtin_ia32_ucmpw128_mask:
2703  case X86::BI__builtin_ia32_ucmpd128_mask:
2704  case X86::BI__builtin_ia32_ucmpq128_mask:
2705  case X86::BI__builtin_ia32_ucmpb256_mask:
2706  case X86::BI__builtin_ia32_ucmpw256_mask:
2707  case X86::BI__builtin_ia32_ucmpd256_mask:
2708  case X86::BI__builtin_ia32_ucmpq256_mask:
2709  case X86::BI__builtin_ia32_ucmpb512_mask:
2710  case X86::BI__builtin_ia32_ucmpw512_mask:
2711  case X86::BI__builtin_ia32_ucmpd512_mask:
2712  case X86::BI__builtin_ia32_ucmpq512_mask:
2713  case X86::BI__builtin_ia32_vpcomub:
2714  case X86::BI__builtin_ia32_vpcomuw:
2715  case X86::BI__builtin_ia32_vpcomud:
2716  case X86::BI__builtin_ia32_vpcomuq:
2717  case X86::BI__builtin_ia32_vpcomb:
2718  case X86::BI__builtin_ia32_vpcomw:
2719  case X86::BI__builtin_ia32_vpcomd:
2720  case X86::BI__builtin_ia32_vpcomq:
2721  case X86::BI__builtin_ia32_vec_set_v8hi:
2722  case X86::BI__builtin_ia32_vec_set_v8si:
2723  i = 2; l = 0; u = 7;
2724  break;
2725  case X86::BI__builtin_ia32_vpermilpd256:
2726  case X86::BI__builtin_ia32_roundps:
2727  case X86::BI__builtin_ia32_roundpd:
2728  case X86::BI__builtin_ia32_roundps256:
2729  case X86::BI__builtin_ia32_roundpd256:
2730  case X86::BI__builtin_ia32_getmantpd128_mask:
2731  case X86::BI__builtin_ia32_getmantpd256_mask:
2732  case X86::BI__builtin_ia32_getmantps128_mask:
2733  case X86::BI__builtin_ia32_getmantps256_mask:
2734  case X86::BI__builtin_ia32_getmantpd512_mask:
2735  case X86::BI__builtin_ia32_getmantps512_mask:
2736  case X86::BI__builtin_ia32_vec_ext_v16qi:
2737  case X86::BI__builtin_ia32_vec_ext_v16hi:
2738  i = 1; l = 0; u = 15;
2739  break;
2740  case X86::BI__builtin_ia32_pblendd128:
2741  case X86::BI__builtin_ia32_blendps:
2742  case X86::BI__builtin_ia32_blendpd256:
2743  case X86::BI__builtin_ia32_shufpd256:
2744  case X86::BI__builtin_ia32_roundss:
2745  case X86::BI__builtin_ia32_roundsd:
2746  case X86::BI__builtin_ia32_rangepd128_mask:
2747  case X86::BI__builtin_ia32_rangepd256_mask:
2748  case X86::BI__builtin_ia32_rangepd512_mask:
2749  case X86::BI__builtin_ia32_rangeps128_mask:
2750  case X86::BI__builtin_ia32_rangeps256_mask:
2751  case X86::BI__builtin_ia32_rangeps512_mask:
2752  case X86::BI__builtin_ia32_getmantsd_round_mask:
2753  case X86::BI__builtin_ia32_getmantss_round_mask:
2754  case X86::BI__builtin_ia32_vec_set_v16qi:
2755  case X86::BI__builtin_ia32_vec_set_v16hi:
2756  i = 2; l = 0; u = 15;
2757  break;
2758  case X86::BI__builtin_ia32_vec_ext_v32qi:
2759  i = 1; l = 0; u = 31;
2760  break;
2761  case X86::BI__builtin_ia32_cmpps:
2762  case X86::BI__builtin_ia32_cmpss:
2763  case X86::BI__builtin_ia32_cmppd:
2764  case X86::BI__builtin_ia32_cmpsd:
2765  case X86::BI__builtin_ia32_cmpps256:
2766  case X86::BI__builtin_ia32_cmppd256:
2767  case X86::BI__builtin_ia32_cmpps128_mask:
2768  case X86::BI__builtin_ia32_cmppd128_mask:
2769  case X86::BI__builtin_ia32_cmpps256_mask:
2770  case X86::BI__builtin_ia32_cmppd256_mask:
2771  case X86::BI__builtin_ia32_cmpps512_mask:
2772  case X86::BI__builtin_ia32_cmppd512_mask:
2773  case X86::BI__builtin_ia32_cmpsd_mask:
2774  case X86::BI__builtin_ia32_cmpss_mask:
2775  case X86::BI__builtin_ia32_vec_set_v32qi:
2776  i = 2; l = 0; u = 31;
2777  break;
2778  case X86::BI__builtin_ia32_permdf256:
2779  case X86::BI__builtin_ia32_permdi256:
2780  case X86::BI__builtin_ia32_permdf512:
2781  case X86::BI__builtin_ia32_permdi512:
2782  case X86::BI__builtin_ia32_vpermilps:
2783  case X86::BI__builtin_ia32_vpermilps256:
2784  case X86::BI__builtin_ia32_vpermilpd512:
2785  case X86::BI__builtin_ia32_vpermilps512:
2786  case X86::BI__builtin_ia32_pshufd:
2787  case X86::BI__builtin_ia32_pshufd256:
2788  case X86::BI__builtin_ia32_pshufd512:
2789  case X86::BI__builtin_ia32_pshufhw:
2790  case X86::BI__builtin_ia32_pshufhw256:
2791  case X86::BI__builtin_ia32_pshufhw512:
2792  case X86::BI__builtin_ia32_pshuflw:
2793  case X86::BI__builtin_ia32_pshuflw256:
2794  case X86::BI__builtin_ia32_pshuflw512:
2795  case X86::BI__builtin_ia32_vcvtps2ph:
2796  case X86::BI__builtin_ia32_vcvtps2ph_mask:
2797  case X86::BI__builtin_ia32_vcvtps2ph256:
2798  case X86::BI__builtin_ia32_vcvtps2ph256_mask:
2799  case X86::BI__builtin_ia32_vcvtps2ph512_mask:
2800  case X86::BI__builtin_ia32_rndscaleps_128_mask:
2801  case X86::BI__builtin_ia32_rndscalepd_128_mask:
2802  case X86::BI__builtin_ia32_rndscaleps_256_mask:
2803  case X86::BI__builtin_ia32_rndscalepd_256_mask:
2804  case X86::BI__builtin_ia32_rndscaleps_mask:
2805  case X86::BI__builtin_ia32_rndscalepd_mask:
2806  case X86::BI__builtin_ia32_reducepd128_mask:
2807  case X86::BI__builtin_ia32_reducepd256_mask:
2808  case X86::BI__builtin_ia32_reducepd512_mask:
2809  case X86::BI__builtin_ia32_reduceps128_mask:
2810  case X86::BI__builtin_ia32_reduceps256_mask:
2811  case X86::BI__builtin_ia32_reduceps512_mask:
2812  case X86::BI__builtin_ia32_prold512_mask:
2813  case X86::BI__builtin_ia32_prolq512_mask:
2814  case X86::BI__builtin_ia32_prold128_mask:
2815  case X86::BI__builtin_ia32_prold256_mask:
2816  case X86::BI__builtin_ia32_prolq128_mask:
2817  case X86::BI__builtin_ia32_prolq256_mask:
2818  case X86::BI__builtin_ia32_prord512_mask:
2819  case X86::BI__builtin_ia32_prorq512_mask:
2820  case X86::BI__builtin_ia32_prord128_mask:
2821  case X86::BI__builtin_ia32_prord256_mask:
2822  case X86::BI__builtin_ia32_prorq128_mask:
2823  case X86::BI__builtin_ia32_prorq256_mask:
2824  case X86::BI__builtin_ia32_fpclasspd128_mask:
2825  case X86::BI__builtin_ia32_fpclasspd256_mask:
2826  case X86::BI__builtin_ia32_fpclassps128_mask:
2827  case X86::BI__builtin_ia32_fpclassps256_mask:
2828  case X86::BI__builtin_ia32_fpclassps512_mask:
2829  case X86::BI__builtin_ia32_fpclasspd512_mask:
2830  case X86::BI__builtin_ia32_fpclasssd_mask:
2831  case X86::BI__builtin_ia32_fpclassss_mask:
2832  case X86::BI__builtin_ia32_pslldqi128_byteshift:
2833  case X86::BI__builtin_ia32_pslldqi256_byteshift:
2834  case X86::BI__builtin_ia32_pslldqi512_byteshift:
2835  case X86::BI__builtin_ia32_psrldqi128_byteshift:
2836  case X86::BI__builtin_ia32_psrldqi256_byteshift:
2837  case X86::BI__builtin_ia32_psrldqi512_byteshift:
2838  i = 1; l = 0; u = 255;
2839  break;
2840  case X86::BI__builtin_ia32_vperm2f128_pd256:
2841  case X86::BI__builtin_ia32_vperm2f128_ps256:
2842  case X86::BI__builtin_ia32_vperm2f128_si256:
2843  case X86::BI__builtin_ia32_permti256:
2844  case X86::BI__builtin_ia32_pblendw128:
2845  case X86::BI__builtin_ia32_pblendw256:
2846  case X86::BI__builtin_ia32_blendps256:
2847  case X86::BI__builtin_ia32_pblendd256:
2848  case X86::BI__builtin_ia32_palignr128:
2849  case X86::BI__builtin_ia32_palignr256:
2850  case X86::BI__builtin_ia32_palignr512:
2851  case X86::BI__builtin_ia32_alignq512:
2852  case X86::BI__builtin_ia32_alignd512:
2853  case X86::BI__builtin_ia32_alignd128:
2854  case X86::BI__builtin_ia32_alignd256:
2855  case X86::BI__builtin_ia32_alignq128:
2856  case X86::BI__builtin_ia32_alignq256:
2857  case X86::BI__builtin_ia32_vcomisd:
2858  case X86::BI__builtin_ia32_vcomiss:
2859  case X86::BI__builtin_ia32_shuf_f32x4:
2860  case X86::BI__builtin_ia32_shuf_f64x2:
2861  case X86::BI__builtin_ia32_shuf_i32x4:
2862  case X86::BI__builtin_ia32_shuf_i64x2:
2863  case X86::BI__builtin_ia32_shufpd512:
2864  case X86::BI__builtin_ia32_shufps:
2865  case X86::BI__builtin_ia32_shufps256:
2866  case X86::BI__builtin_ia32_shufps512:
2867  case X86::BI__builtin_ia32_dbpsadbw128:
2868  case X86::BI__builtin_ia32_dbpsadbw256:
2869  case X86::BI__builtin_ia32_dbpsadbw512:
2870  case X86::BI__builtin_ia32_vpshldd128:
2871  case X86::BI__builtin_ia32_vpshldd256:
2872  case X86::BI__builtin_ia32_vpshldd512:
2873  case X86::BI__builtin_ia32_vpshldq128:
2874  case X86::BI__builtin_ia32_vpshldq256:
2875  case X86::BI__builtin_ia32_vpshldq512:
2876  case X86::BI__builtin_ia32_vpshldw128:
2877  case X86::BI__builtin_ia32_vpshldw256:
2878  case X86::BI__builtin_ia32_vpshldw512:
2879  case X86::BI__builtin_ia32_vpshrdd128:
2880  case X86::BI__builtin_ia32_vpshrdd256:
2881  case X86::BI__builtin_ia32_vpshrdd512:
2882  case X86::BI__builtin_ia32_vpshrdq128:
2883  case X86::BI__builtin_ia32_vpshrdq256:
2884  case X86::BI__builtin_ia32_vpshrdq512:
2885  case X86::BI__builtin_ia32_vpshrdw128:
2886  case X86::BI__builtin_ia32_vpshrdw256:
2887  case X86::BI__builtin_ia32_vpshrdw512:
2888  i = 2; l = 0; u = 255;
2889  break;
2890  case X86::BI__builtin_ia32_fixupimmpd512_mask:
2891  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
2892  case X86::BI__builtin_ia32_fixupimmps512_mask:
2893  case X86::BI__builtin_ia32_fixupimmps512_maskz:
2894  case X86::BI__builtin_ia32_fixupimmsd_mask:
2895  case X86::BI__builtin_ia32_fixupimmsd_maskz:
2896  case X86::BI__builtin_ia32_fixupimmss_mask:
2897  case X86::BI__builtin_ia32_fixupimmss_maskz:
2898  case X86::BI__builtin_ia32_fixupimmpd128_mask:
2899  case X86::BI__builtin_ia32_fixupimmpd128_maskz:
2900  case X86::BI__builtin_ia32_fixupimmpd256_mask:
2901  case X86::BI__builtin_ia32_fixupimmpd256_maskz:
2902  case X86::BI__builtin_ia32_fixupimmps128_mask:
2903  case X86::BI__builtin_ia32_fixupimmps128_maskz:
2904  case X86::BI__builtin_ia32_fixupimmps256_mask:
2905  case X86::BI__builtin_ia32_fixupimmps256_maskz:
2906  case X86::BI__builtin_ia32_pternlogd512_mask:
2907  case X86::BI__builtin_ia32_pternlogd512_maskz:
2908  case X86::BI__builtin_ia32_pternlogq512_mask:
2909  case X86::BI__builtin_ia32_pternlogq512_maskz:
2910  case X86::BI__builtin_ia32_pternlogd128_mask:
2911  case X86::BI__builtin_ia32_pternlogd128_maskz:
2912  case X86::BI__builtin_ia32_pternlogd256_mask:
2913  case X86::BI__builtin_ia32_pternlogd256_maskz:
2914  case X86::BI__builtin_ia32_pternlogq128_mask:
2915  case X86::BI__builtin_ia32_pternlogq128_maskz:
2916  case X86::BI__builtin_ia32_pternlogq256_mask:
2917  case X86::BI__builtin_ia32_pternlogq256_maskz:
2918  i = 3; l = 0; u = 255;
2919  break;
2920  case X86::BI__builtin_ia32_gatherpfdpd:
2921  case X86::BI__builtin_ia32_gatherpfdps:
2922  case X86::BI__builtin_ia32_gatherpfqpd:
2923  case X86::BI__builtin_ia32_gatherpfqps:
2924  case X86::BI__builtin_ia32_scatterpfdpd:
2925  case X86::BI__builtin_ia32_scatterpfdps:
2926  case X86::BI__builtin_ia32_scatterpfqpd:
2927  case X86::BI__builtin_ia32_scatterpfqps:
2928  i = 4; l = 2; u = 3;
2929  break;
2930  case X86::BI__builtin_ia32_rndscalesd_round_mask:
2931  case X86::BI__builtin_ia32_rndscaless_round_mask:
2932  i = 4; l = 0; u = 255;
2933  break;
2934  }
2935  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
2936 }
2937 
2938 /// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
2939 /// parameter with the FormatAttr's correct format_idx and firstDataArg.
2940 /// Returns true when the format fits the function and the FormatStringInfo has
2941 /// been populated.
2942 bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
2943  FormatStringInfo *FSI) {
2944  FSI->HasVAListArg = Format->getFirstArg() == 0;
2945  FSI->FormatIdx = Format->getFormatIdx() - 1;
2946  FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;
2947 
2948  // The way the format attribute works in GCC, the implicit this argument
2949  // of member functions is counted. However, it doesn't appear in our own
2950  // lists, so decrement format_idx in that case.
2951  if (IsCXXMember) {
2952  if (FSI->FormatIdx == 0)
2953  return false;
2954  --FSI->FormatIdx;
2955  if (FSI->FirstDataArg != 0)
2956  --FSI->FirstDataArg;
2957  }
2958  return true;
2959 }
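// Illustrative example (not from the original source; the names are
// hypothetical): for a C++ member function declared as
//   struct Logger {
//     void log(const char *fmt, ...) __attribute__((format(printf, 2, 3)));
//   };
// GCC counts the implicit 'this' as argument 1, so the attribute names the
// format string as index 2. getFormatStringInfo stores the 0-based index and
// then decrements FormatIdx/FirstDataArg for member functions so they point
// at the first visible parameter, 'fmt'.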
2960 
2961 /// Checks if the given expression evaluates to null.
2962 ///
2963 /// Returns true if the value evaluates to null.
2964 static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
2965  // If the expression has non-null type, it doesn't evaluate to null.
2966  if (auto nullability
2967  = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
2968  if (*nullability == NullabilityKind::NonNull)
2969  return false;
2970  }
2971 
2972  // As a special case, transparent unions initialized with zero are
2973  // considered null for the purposes of the nonnull attribute.
2974  if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
2975  if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
2976  if (const CompoundLiteralExpr *CLE =
2977  dyn_cast<CompoundLiteralExpr>(Expr))
2978  if (const InitListExpr *ILE =
2979  dyn_cast<InitListExpr>(CLE->getInitializer()))
2980  Expr = ILE->getInit(0);
2981  }
2982 
2983  bool Result;
2984  return (!Expr->isValueDependent() &&
2985  Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
2986  !Result);
2987 }
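// Illustrative example (assumed, not part of the original source): with
//   typedef union { int *ptr; } U __attribute__((transparent_union));
//   void take(U u) __attribute__((nonnull));
// a call such as take((U){0}) passes a zero-initialized transparent union;
// CheckNonNullExpr looks through the compound literal to its first
// initializer, which evaluates to null, so the nonnull check fires.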
2988 
2989 static void CheckNonNullArgument(Sema &S,
2990  const Expr *ArgExpr,
2991  SourceLocation CallSiteLoc) {
2992  if (CheckNonNullExpr(S, ArgExpr))
2993  S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
2994  S.PDiag(diag::warn_null_arg) << ArgExpr->getSourceRange());
2995 }
2996 
2997 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
2998  FormatStringInfo FSI;
2999  if ((GetFormatStringType(Format) == FST_NSString) &&
3000  getFormatStringInfo(Format, false, &FSI)) {
3001  Idx = FSI.FormatIdx;
3002  return true;
3003  }
3004  return false;
3005 }
3006 
3007 /// Diagnose use of %s directive in an NSString which is being passed
3008 /// as a format string to a formatting method.
3009 static void
3010 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
3011  const NamedDecl *FDecl,
3012  Expr **Args,
3013  unsigned NumArgs) {
3014  unsigned Idx = 0;
3015  bool Format = false;
3016  ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
3017  if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
3018  Idx = 2;
3019  Format = true;
3020  }
3021  else
3022  for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
3023  if (S.GetFormatNSStringIdx(I, Idx)) {
3024  Format = true;
3025  break;
3026  }
3027  }
3028  if (!Format || NumArgs <= Idx)
3029  return;
3030  const Expr *FormatExpr = Args[Idx];
3031  if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
3032  FormatExpr = CSCE->getSubExpr();
3033  const StringLiteral *FormatString;
3034  if (const ObjCStringLiteral *OSL =
3035  dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
3036  FormatString = OSL->getString();
3037  else
3038  FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
3039  if (!FormatString)
3040  return;
3041  if (S.FormatStringHasSArg(FormatString)) {
3042  S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
3043  << "%s" << 1 << 1;
3044  S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
3045  << FDecl->getDeclName();
3046  }
3047 }
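// Illustrative example (assumed): in Objective-C code such as
//   NSLog(@"name: %s", name);
// the NSString format literal contains a %s directive, so this routine emits
// warn_objc_cdirective_format_string and notes where NSLog was declared.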
3048 
3049 /// Determine whether the given type has a non-null nullability annotation.
3050 static bool isNonNullType(ASTContext &ctx, QualType type) {
3051  if (auto nullability = type->getNullability(ctx))
3052  return *nullability == NullabilityKind::NonNull;
3053 
3054  return false;
3055 }
3056 
3057 static void CheckNonNullArguments(Sema &S,
3058  const NamedDecl *FDecl,
3059  const FunctionProtoType *Proto,
3060  ArrayRef<const Expr *> Args,
3061  SourceLocation CallSiteLoc) {
3062  assert((FDecl || Proto) && "Need a function declaration or prototype");
3063 
3064  // Check the attributes attached to the method/function itself.
3065  llvm::SmallBitVector NonNullArgs;
3066  if (FDecl) {
3067  // Handle the nonnull attribute on the function/method declaration itself.
3068  for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
3069  if (!NonNull->args_size()) {
3070  // Easy case: all pointer arguments are nonnull.
3071  for (const auto *Arg : Args)
3072  if (S.isValidPointerAttrType(Arg->getType()))
3073  CheckNonNullArgument(S, Arg, CallSiteLoc);
3074  return;
3075  }
3076 
3077  for (const ParamIdx &Idx : NonNull->args()) {
3078  unsigned IdxAST = Idx.getASTIndex();
3079  if (IdxAST >= Args.size())
3080  continue;
3081  if (NonNullArgs.empty())
3082  NonNullArgs.resize(Args.size());
3083  NonNullArgs.set(IdxAST);
3084  }
3085  }
3086  }
3087 
3088  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
3089  // Handle the nonnull attribute on the parameters of the
3090  // function/method.
3091  ArrayRef<ParmVarDecl*> parms;
3092  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
3093  parms = FD->parameters();
3094  else
3095  parms = cast<ObjCMethodDecl>(FDecl)->parameters();
3096 
3097  unsigned ParamIndex = 0;
3098  for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
3099  I != E; ++I, ++ParamIndex) {
3100  const ParmVarDecl *PVD = *I;
3101  if (PVD->hasAttr<NonNullAttr>() ||
3102  isNonNullType(S.Context, PVD->getType())) {
3103  if (NonNullArgs.empty())
3104  NonNullArgs.resize(Args.size());
3105 
3106  NonNullArgs.set(ParamIndex);
3107  }
3108  }
3109  } else {
3110  // If we have a non-function, non-method declaration but no
3111  // function prototype, try to dig out the function prototype.
3112  if (!Proto) {
3113  if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
3114  QualType type = VD->getType().getNonReferenceType();
3115  if (auto pointerType = type->getAs<PointerType>())
3116  type = pointerType->getPointeeType();
3117  else if (auto blockType = type->getAs<BlockPointerType>())
3118  type = blockType->getPointeeType();
3119  // FIXME: data member pointers?
3120 
3121  // Dig out the function prototype, if there is one.
3122  Proto = type->getAs<FunctionProtoType>();
3123  }
3124  }
3125 
3126  // Fill in non-null argument information from the nullability
3127  // information on the parameter types (if we have them).
3128  if (Proto) {
3129  unsigned Index = 0;
3130  for (auto paramType : Proto->getParamTypes()) {
3131  if (isNonNullType(S.Context, paramType)) {
3132  if (NonNullArgs.empty())
3133  NonNullArgs.resize(Args.size());
3134 
3135  NonNullArgs.set(Index);
3136  }
3137 
3138  ++Index;
3139  }
3140  }
3141  }
3142 
3143  // Check for non-null arguments.
3144  for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
3145  ArgIndex != ArgIndexEnd; ++ArgIndex) {
3146  if (NonNullArgs[ArgIndex])
3147  CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc);
3148  }
3149 }
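// Illustrative examples (assumed): both spellings below populate NonNullArgs:
//   void g(int *a, int *b) __attribute__((nonnull(2)));  // attribute index
//   void h(int * _Nonnull p);                             // parameter type
// Calls like g(a, NULL) or h(NULL) then reach CheckNonNullArgument and are
// diagnosed with warn_null_arg at the call site.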
3150 
3151 /// Handles the checks for format strings, non-POD arguments to vararg
3152 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if
3153 /// attributes.
3154 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
3155  const Expr *ThisArg, ArrayRef<const Expr *> Args,
3156  bool IsMemberFunction, SourceLocation Loc,
3157  SourceRange Range, VariadicCallType CallType) {
3158  // FIXME: We should check as much as we can in the template definition.
3159  if (CurContext->isDependentContext())
3160  return;
3161 
3162  // Printf and scanf checking.
3163  llvm::SmallBitVector CheckedVarArgs;
3164  if (FDecl) {
3165  for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
3166  // Only create vector if there are format attributes.
3167  CheckedVarArgs.resize(Args.size());
3168 
3169  CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
3170  CheckedVarArgs);
3171  }
3172  }
3173 
3174  // Refuse POD arguments that weren't caught by the format string
3175  // checks above.
3176  auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
3177  if (CallType != VariadicDoesNotApply &&
3178  (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
3179  unsigned NumParams = Proto ? Proto->getNumParams()
3180  : FDecl && isa<FunctionDecl>(FDecl)
3181  ? cast<FunctionDecl>(FDecl)->getNumParams()
3182  : FDecl && isa<ObjCMethodDecl>(FDecl)
3183  ? cast<ObjCMethodDecl>(FDecl)->param_size()
3184  : 0;
3185 
3186  for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
3187  // Args[ArgIdx] can be null in malformed code.
3188  if (const Expr *Arg = Args[ArgIdx]) {
3189  if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
3190  checkVariadicArgument(Arg, CallType);
3191  }
3192  }
3193  }
3194 
3195  if (FDecl || Proto) {
3196  CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);
3197 
3198  // Type safety checking.
3199  if (FDecl) {
3200  for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
3201  CheckArgumentWithTypeTag(I, Args, Loc);
3202  }
3203  }
3204 
3205  if (FD)
3206  diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
3207 }
3208 
3209 /// CheckConstructorCall - Check a constructor call for correctness and safety
3210 /// properties not enforced by the C type system.
3211 void Sema::CheckConstructorCall(FunctionDecl *FDecl,
3212  ArrayRef<const Expr *> Args,
3213  const FunctionProtoType *Proto,
3214  SourceLocation Loc) {
3215  VariadicCallType CallType =
3216  Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
3217  checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
3218  Loc, SourceRange(), CallType);
3219 }
3220 
3221 /// CheckFunctionCall - Check a direct function call for various correctness
3222 /// and safety properties not strictly enforced by the C type system.
3223 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
3224  const FunctionProtoType *Proto) {
3225  bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
3226  isa<CXXMethodDecl>(FDecl);
3227  bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
3228  IsMemberOperatorCall;
3229  VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
3230  TheCall->getCallee());
3231  Expr** Args = TheCall->getArgs();
3232  unsigned NumArgs = TheCall->getNumArgs();
3233 
3234  Expr *ImplicitThis = nullptr;
3235  if (IsMemberOperatorCall) {
3236  // If this is a call to a member operator, hide the first argument
3237  // from checkCall.
3238  // FIXME: Our choice of AST representation here is less than ideal.
3239  ImplicitThis = Args[0];
3240  ++Args;
3241  --NumArgs;
3242  } else if (IsMemberFunction)
3243  ImplicitThis =
3244  cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();
3245 
3246  checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs),
3247  IsMemberFunction, TheCall->getRParenLoc(),
3248  TheCall->getCallee()->getSourceRange(), CallType);
3249 
3250  IdentifierInfo *FnInfo = FDecl->getIdentifier();
3251  // None of the checks below are needed for functions that don't have
3252  // simple names (e.g., C++ conversion functions).
3253  if (!FnInfo)
3254  return false;
3255 
3256  CheckAbsoluteValueFunction(TheCall, FDecl);
3257  CheckMaxUnsignedZero(TheCall, FDecl);
3258 
3259  if (getLangOpts().ObjC1)
3260  DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);
3261 
3262  unsigned CMId = FDecl->getMemoryFunctionKind();
3263  if (CMId == 0)
3264  return false;
3265 
3266  // Handle memory setting and copying functions.
3267  if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat)
3268  CheckStrlcpycatArguments(TheCall, FnInfo);
3269  else if (CMId == Builtin::BIstrncat)
3270  CheckStrncatArguments(TheCall, FnInfo);
3271  else
3272  CheckMemaccessArguments(TheCall, CMId, FnInfo);
3273 
3274  return false;
3275 }
3276 
3277 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
3278  ArrayRef<const Expr *> Args) {
3279  VariadicCallType CallType =
3280  Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;
3281 
3282  checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
3283  /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
3284  CallType);
3285 
3286  return false;
3287 }
3288 
3289 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
3290  const FunctionProtoType *Proto) {
3291  QualType Ty;
3292  if (const auto *V = dyn_cast<VarDecl>(NDecl))
3293  Ty = V->getType().getNonReferenceType();
3294  else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
3295  Ty = F->getType().getNonReferenceType();
3296  else
3297  return false;
3298 
3299  if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
3300  !Ty->isFunctionProtoType())
3301  return false;
3302 
3303  VariadicCallType CallType;
3304  if (!Proto || !Proto->isVariadic()) {
3305  CallType = VariadicDoesNotApply;
3306  } else if (Ty->isBlockPointerType()) {
3307  CallType = VariadicBlock;
3308  } else { // Ty->isFunctionPointerType()
3309  CallType = VariadicFunction;
3310  }
3311 
3312  checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
3313  llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
3314  /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
3315  TheCall->getCallee()->getSourceRange(), CallType);
3316 
3317  return false;
3318 }
3319 
3320 /// Checks function calls when a FunctionDecl or a NamedDecl is not available,
3321 /// such as function pointers returned from functions.
3322 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
3323  VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
3324  TheCall->getCallee());
3325  checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
3326  llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
3327  /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
3328  TheCall->getCallee()->getSourceRange(), CallType);
3329 
3330  return false;
3331 }
3332 
3333 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
3334  if (!llvm::isValidAtomicOrderingCABI(Ordering))
3335  return false;
3336 
3337  auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
3338  switch (Op) {
3339  case AtomicExpr::AO__c11_atomic_init:
3340  case AtomicExpr::AO__opencl_atomic_init:
3341  llvm_unreachable("There is no ordering argument for an init");
3342 
3343  case AtomicExpr::AO__c11_atomic_load:
3344  case AtomicExpr::AO__opencl_atomic_load:
3345  case AtomicExpr::AO__atomic_load_n:
3346  case AtomicExpr::AO__atomic_load:
3347  return OrderingCABI != llvm::AtomicOrderingCABI::release &&
3348  OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
3349 
3350  case AtomicExpr::AO__c11_atomic_store:
3351  case AtomicExpr::AO__opencl_atomic_store:
3352  case AtomicExpr::AO__atomic_store:
3353  case AtomicExpr::AO__atomic_store_n:
3354  return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
3355  OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
3356  OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
3357 
3358  default:
3359  return true;
3360  }
3361 }
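// Illustrative example (assumed): a call such as
//   __c11_atomic_load(&a, __ATOMIC_RELEASE);
// reaches this function with a load operation and a release ordering, which
// is rejected; the caller then emits warn_atomic_op_has_invalid_memory_order.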
3362 
3363 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
3364  AtomicExpr::AtomicOp Op) {
3365  CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
3366  DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
3367 
3368  // All the non-OpenCL operations take one of the following forms.
3369  // The OpenCL operations take the __c11 forms with one extra argument for
3370  // synchronization scope.
3371  enum {
3372  // C __c11_atomic_init(A *, C)
3373  Init,
3374 
3375  // C __c11_atomic_load(A *, int)
3376  Load,
3377 
3378  // void __atomic_load(A *, CP, int)
3379  LoadCopy,
3380 
3381  // void __atomic_store(A *, CP, int)
3382  Copy,
3383 
3384  // C __c11_atomic_add(A *, M, int)
3385  Arithmetic,
3386 
3387  // C __atomic_exchange_n(A *, CP, int)
3388  Xchg,
3389 
3390  // void __atomic_exchange(A *, C *, CP, int)
3391  GNUXchg,
3392 
3393  // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
3394  C11CmpXchg,
3395 
3396  // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
3397  GNUCmpXchg
3398  } Form = Init;
3399 
3400  const unsigned NumForm = GNUCmpXchg + 1;
3401  const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
3402  const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
3403  // where:
3404  // C is an appropriate type,
3405  // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
3406  // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
3407  // M is C if C is an integer, and ptrdiff_t if C is a pointer, and
3408  // the int parameters are for orderings.
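  // Illustrative calls for a few of the forms above (assumed, not part of the
  // original source):
  //   _Atomic(int) a;  int v, expected, desired;
  //   __c11_atomic_init(&a, 0);                                  // Init
  //   v = __c11_atomic_load(&a, __ATOMIC_ACQUIRE);               // Load
  //   __c11_atomic_store(&a, 1, __ATOMIC_RELEASE);               // Copy
  //   v = __c11_atomic_fetch_add(&a, 2, __ATOMIC_SEQ_CST);       // Arithmetic
  //   __atomic_compare_exchange(&v, &expected, &desired, /*weak=*/0,
  //                             __ATOMIC_SEQ_CST, __ATOMIC_RELAXED); // GNUCmpXchg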
3409 
3410  static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm
3411  && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
3412  "need to update code for modified forms");
3413  static_assert(AtomicExpr::AO__c11_atomic_init == 0 &&
3414  AtomicExpr::AO__c11_atomic_fetch_xor + 1 ==
3415  AtomicExpr::AO__atomic_load,
3416  "need to update code for modified C11 atomics");
3417  bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
3418  Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
3419  bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
3420  Op <= AtomicExpr::AO__c11_atomic_fetch_xor) ||
3421  IsOpenCL;
3422  bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
3423  Op == AtomicExpr::AO__atomic_store_n ||
3424  Op == AtomicExpr::AO__atomic_exchange_n ||
3425  Op == AtomicExpr::AO__atomic_compare_exchange_n;
3426  bool IsAddSub = false;
3427  bool IsMinMax = false;
3428 
3429  switch (Op) {
3430  case AtomicExpr::AO__c11_atomic_init:
3431  case AtomicExpr::AO__opencl_atomic_init:
3432  Form = Init;
3433  break;
3434 
3435  case AtomicExpr::AO__c11_atomic_load:
3436  case AtomicExpr::AO__opencl_atomic_load:
3437  case AtomicExpr::AO__atomic_load_n:
3438  Form = Load;
3439  break;
3440 
3441  case AtomicExpr::AO__atomic_load:
3442  Form = LoadCopy;
3443  break;
3444 
3445  case AtomicExpr::AO__c11_atomic_store:
3446  case AtomicExpr::AO__opencl_atomic_store:
3447  case AtomicExpr::AO__atomic_store:
3448  case AtomicExpr::AO__atomic_store_n:
3449  Form = Copy;
3450  break;
3451 
3452  case AtomicExpr::AO__c11_atomic_fetch_add:
3453  case AtomicExpr::AO__c11_atomic_fetch_sub:
3454  case AtomicExpr::AO__opencl_atomic_fetch_add:
3455  case AtomicExpr::AO__opencl_atomic_fetch_sub:
3456  case AtomicExpr::AO__opencl_atomic_fetch_min:
3457  case AtomicExpr::AO__opencl_atomic_fetch_max:
3458  case AtomicExpr::AO__atomic_fetch_add:
3459  case AtomicExpr::AO__atomic_fetch_sub:
3460  case AtomicExpr::AO__atomic_add_fetch:
3461  case AtomicExpr::AO__atomic_sub_fetch:
3462  IsAddSub = true;
3463  LLVM_FALLTHROUGH;
3464  case AtomicExpr::AO__c11_atomic_fetch_and:
3465  case AtomicExpr::AO__c11_atomic_fetch_or:
3466  case AtomicExpr::AO__c11_atomic_fetch_xor:
3467  case AtomicExpr::AO__opencl_atomic_fetch_and:
3468  case AtomicExpr::AO__opencl_atomic_fetch_or:
3469  case AtomicExpr::AO__opencl_atomic_fetch_xor:
3470  case AtomicExpr::AO__atomic_fetch_and:
3471  case AtomicExpr::AO__atomic_fetch_or:
3472  case AtomicExpr::AO__atomic_fetch_xor:
3473  case AtomicExpr::AO__atomic_fetch_nand:
3474  case AtomicExpr::AO__atomic_and_fetch:
3475  case AtomicExpr::AO__atomic_or_fetch:
3476  case AtomicExpr::AO__atomic_xor_fetch:
3477  case AtomicExpr::AO__atomic_nand_fetch:
3478  Form = Arithmetic;
3479  break;
3480 
3481  case AtomicExpr::AO__atomic_fetch_min:
3482  case AtomicExpr::AO__atomic_fetch_max:
3483  IsMinMax = true;
3484  Form = Arithmetic;
3485  break;
3486 
3487  case AtomicExpr::AO__c11_atomic_exchange:
3488  case AtomicExpr::AO__opencl_atomic_exchange:
3489  case AtomicExpr::AO__atomic_exchange_n:
3490  Form = Xchg;
3491  break;
3492 
3493  case AtomicExpr::AO__atomic_exchange:
3494  Form = GNUXchg;
3495  break;
3496 
3497  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
3498  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
3499  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
3500  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
3501  Form = C11CmpXchg;
3502  break;
3503 
3504  case AtomicExpr::AO__atomic_compare_exchange:
3505  case AtomicExpr::AO__atomic_compare_exchange_n:
3506  Form = GNUCmpXchg;
3507  break;
3508  }
3509 
3510  unsigned AdjustedNumArgs = NumArgs[Form];
3511  if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init)
3512  ++AdjustedNumArgs;
3513  // Check we have the right number of arguments.
3514  if (TheCall->getNumArgs() < AdjustedNumArgs) {
3515  Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
3516  << 0 << AdjustedNumArgs << TheCall->getNumArgs()
3517  << TheCall->getCallee()->getSourceRange();
3518  return ExprError();
3519  } else if (TheCall->getNumArgs() > AdjustedNumArgs) {
3520  Diag(TheCall->getArg(AdjustedNumArgs)->getLocStart(),
3521  diag::err_typecheck_call_too_many_args)
3522  << 0 << AdjustedNumArgs << TheCall->getNumArgs()
3523  << TheCall->getCallee()->getSourceRange();
3524  return ExprError();
3525  }
3526 
3527  // Inspect the first argument of the atomic operation.
3528  Expr *Ptr = TheCall->getArg(0);
3529  ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr);
3530  if (ConvertedPtr.isInvalid())
3531  return ExprError();
3532 
3533  Ptr = ConvertedPtr.get();
3534  const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
3535  if (!pointerType) {
3536  Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
3537  << Ptr->getType() << Ptr->getSourceRange();
3538  return ExprError();
3539  }
3540 
3541  // For a __c11 builtin, this should be a pointer to an _Atomic type.
3542  QualType AtomTy = pointerType->getPointeeType(); // 'A'
3543  QualType ValType = AtomTy; // 'C'
3544  if (IsC11) {
3545  if (!AtomTy->isAtomicType()) {
3546  Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic)
3547  << Ptr->getType() << Ptr->getSourceRange();
3548  return ExprError();
3549  }
3550  if (AtomTy.isConstQualified() ||
3551  AtomTy.getAddressSpace() == LangAS::opencl_constant) {
3552  Diag(DRE->getLocStart(), diag::err_atomic_op_needs_non_const_atomic)
3553  << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType()
3554  << Ptr->getSourceRange();
3555  return ExprError();
3556  }
3557  ValType = AtomTy->getAs<AtomicType>()->getValueType();
3558  } else if (Form != Load && Form != LoadCopy) {
3559  if (ValType.isConstQualified()) {
3560  Diag(DRE->getLocStart(), diag::err_atomic_op_needs_non_const_pointer)
3561  << Ptr->getType() << Ptr->getSourceRange();
3562  return ExprError();
3563  }
3564  }
3565 
3566  // For an arithmetic operation, the implied arithmetic must be well-formed.
3567  if (Form == Arithmetic) {
3568  // gcc does not enforce these rules for GNU atomics, but we do so for sanity.
3569  if (IsAddSub && !ValType->isIntegerType()
3570  && !ValType->isPointerType()) {
3571  Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr)
3572  << IsC11 << Ptr->getType() << Ptr->getSourceRange();
3573  return ExprError();
3574  }
3575  if (IsMinMax) {
3576  const BuiltinType *BT = ValType->getAs<BuiltinType>();
3577  if (!BT || (BT->getKind() != BuiltinType::Int &&
3578  BT->getKind() != BuiltinType::UInt)) {
3579  Diag(DRE->getLocStart(), diag::err_atomic_op_needs_int32_or_ptr);
3580  return ExprError();
3581  }
3582  }
3583  if (!IsAddSub && !IsMinMax && !ValType->isIntegerType()) {
3584  Diag(DRE->getLocStart(), diag::err_atomic_op_bitwise_needs_atomic_int)
3585  << IsC11 << Ptr->getType() << Ptr->getSourceRange();
3586  return ExprError();
3587  }
3588  if (IsC11 && ValType->isPointerType() &&
3589  RequireCompleteType(Ptr->getLocStart(), ValType->getPointeeType(),
3590  diag::err_incomplete_type)) {
3591  return ExprError();
3592  }
3593  } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
3594  // For __atomic_*_n operations, the value type must be a scalar integral or
3595  // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
3596  Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr)
3597  << IsC11 << Ptr->getType() << Ptr->getSourceRange();
3598  return ExprError();
3599  }
3600 
3601  if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
3602  !AtomTy->isScalarType()) {
3603  // For GNU atomics, require a trivially-copyable type. This is not part of
3604  // the GNU atomics specification, but we enforce it for sanity.
3605  Diag(DRE->getLocStart(), diag::err_atomic_op_needs_trivial_copy)
3606  << Ptr->getType() << Ptr->getSourceRange();
3607  return ExprError();
3608  }
3609 
3610  switch (ValType.getObjCLifetime()) {
3611  case Qualifiers::OCL_None:
3612  case Qualifiers::OCL_ExplicitNone:
3613  // okay
3614  break;
3615 
3616  case Qualifiers::OCL_Weak:
3617  case Qualifiers::OCL_Strong:
3618  case Qualifiers::OCL_Autoreleasing:
3619  // FIXME: Can this happen? By this point, ValType should be known
3620  // to be trivially copyable.
3621  Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
3622  << ValType << Ptr->getSourceRange();
3623  return ExprError();
3624  }
3625 
3626  // All atomic operations have an overload which takes a pointer to a volatile
3627  // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself
3628  // into the result or the other operands. Similarly atomic_load takes a
3629  // pointer to a const 'A'.
3630  ValType.removeLocalVolatile();
3631  ValType.removeLocalConst();
3632  QualType ResultType = ValType;
3633  if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
3634  Form == Init)
3635  ResultType = Context.VoidTy;
3636  else if (Form == C11CmpXchg || Form == GNUCmpXchg)
3637  ResultType = Context.BoolTy;
3638 
3639  // The type of a parameter passed 'by value'. In the GNU atomics, such
3640  // arguments are actually passed as pointers.
3641  QualType ByValType = ValType; // 'CP'
3642  bool IsPassedByAddress = false;
3643  if (!IsC11 && !IsN) {
3644  ByValType = Ptr->getType();
3645  IsPassedByAddress = true;
3646  }
3647 
3648  // The first argument's non-CV pointer type is used to deduce the type of
3649  // subsequent arguments, except for:
3650  // - weak flag (always converted to bool)
3651  // - memory order (always converted to int)
3652  // - scope (always converted to int)
3653  for (unsigned i = 0; i != TheCall->getNumArgs(); ++i) {
3654  QualType Ty;
3655  if (i < NumVals[Form] + 1) {
3656  switch (i) {
3657  case 0:
3658  // The first argument is always a pointer. It has a fixed type.
3659  // It is always dereferenced, a nullptr is undefined.
3660  CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getLocStart());
3661  // Nothing else to do: we already know all we want about this pointer.
3662  continue;
3663  case 1:
3664  // The second argument is the non-atomic operand. For arithmetic, this
3665  // is always passed by value, and for a compare_exchange it is always
3666  // passed by address. For the rest, GNU uses by-address and C11 uses
3667  // by-value.
3668  assert(Form != Load);
3669  if (Form == Init || (Form == Arithmetic && ValType->isIntegerType()))
3670  Ty = ValType;
3671  else if (Form == Copy || Form == Xchg) {
3672  if (IsPassedByAddress)
3673  // The value pointer is always dereferenced, a nullptr is undefined.
3674  CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getLocStart());
3675  Ty = ByValType;
3676  } else if (Form == Arithmetic)
3677  Ty = Context.getPointerDiffType();
3678  else {
3679  Expr *ValArg = TheCall->getArg(i);
3680  // The value pointer is always dereferenced, a nullptr is undefined.
3681  CheckNonNullArgument(*this, ValArg, DRE->getLocStart());
3682  LangAS AS = LangAS::Default;
3683  // Keep address space of non-atomic pointer type.
3684  if (const PointerType *PtrTy =
3685  ValArg->getType()->getAs<PointerType>()) {
3686  AS = PtrTy->getPointeeType().getAddressSpace();
3687  }
3688  Ty = Context.getPointerType(
3689  Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS));
3690  }
3691  break;
3692  case 2:
3693  // The third argument to compare_exchange / GNU exchange is the desired
3694  // value, either by-value (for the C11 and *_n variant) or as a pointer.
3695  if (IsPassedByAddress)
3696  CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getLocStart());
3697  Ty = ByValType;
3698  break;
3699  case 3:
3700  // The fourth argument to GNU compare_exchange is a 'weak' flag.
3701  Ty = Context.BoolTy;
3702  break;
3703  }
3704  } else {
3705  // The order(s) and scope are always converted to int.
3706  Ty = Context.IntTy;
3707  }
3708 
3709  InitializedEntity Entity =
3710  InitializedEntity::InitializeParameter(Context, Ty, false);
3711  ExprResult Arg = TheCall->getArg(i);
3712  Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
3713  if (Arg.isInvalid())
3714  return true;
3715  TheCall->setArg(i, Arg.get());
3716  }
3717 
3718  // Permute the arguments into a 'consistent' order.
3719  SmallVector<Expr*, 5> SubExprs;
3720  SubExprs.push_back(Ptr);
3721  switch (Form) {
3722  case Init:
3723  // Note, AtomicExpr::getVal1() has a special case for this atomic.
3724  SubExprs.push_back(TheCall->getArg(1)); // Val1
3725  break;
3726  case Load:
3727  SubExprs.push_back(TheCall->getArg(1)); // Order
3728  break;
3729  case LoadCopy:
3730  case Copy:
3731  case Arithmetic:
3732  case Xchg:
3733  SubExprs.push_back(TheCall->getArg(2)); // Order
3734  SubExprs.push_back(TheCall->getArg(1)); // Val1
3735  break;
3736  case GNUXchg:
3737  // Note, AtomicExpr::getVal2() has a special case for this atomic.
3738  SubExprs.push_back(TheCall->getArg(3)); // Order
3739  SubExprs.push_back(TheCall->getArg(1)); // Val1
3740  SubExprs.push_back(TheCall->getArg(2)); // Val2
3741  break;
3742  case C11CmpXchg:
3743  SubExprs.push_back(TheCall->getArg(3)); // Order
3744  SubExprs.push_back(TheCall->getArg(1)); // Val1
3745  SubExprs.push_back(TheCall->getArg(4)); // OrderFail
3746  SubExprs.push_back(TheCall->getArg(2)); // Val2
3747  break;
3748  case GNUCmpXchg:
3749  SubExprs.push_back(TheCall->getArg(4)); // Order
3750  SubExprs.push_back(TheCall->getArg(1)); // Val1
3751  SubExprs.push_back(TheCall->getArg(5)); // OrderFail
3752  SubExprs.push_back(TheCall->getArg(2)); // Val2
3753  SubExprs.push_back(TheCall->getArg(3)); // Weak
3754  break;
3755  }
3756 
3757  if (SubExprs.size() >= 2 && Form != Init) {
3758  llvm::APSInt Result(32);
3759  if (SubExprs[1]->isIntegerConstantExpr(Result, Context) &&
3760  !isValidOrderingForOp(Result.getSExtValue(), Op))
3761  Diag(SubExprs[1]->getLocStart(),
3762  diag::warn_atomic_op_has_invalid_memory_order)
3763  << SubExprs[1]->getSourceRange();
3764  }
3765 
3766  if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
3767  auto *Scope = TheCall->getArg(TheCall->getNumArgs() - 1);
3768  llvm::APSInt Result(32);
3769  if (Scope->isIntegerConstantExpr(Result, Context) &&
3770  !ScopeModel->isValid(Result.getZExtValue())) {
3771  Diag(Scope->getLocStart(), diag::err_atomic_op_has_invalid_synch_scope)
3772  << Scope->getSourceRange();
3773  }
3774  SubExprs.push_back(Scope);
3775  }
3776 
3777  AtomicExpr *AE = new (Context) AtomicExpr(TheCall->getCallee()->getLocStart(),
3778  SubExprs, ResultType, Op,
3779  TheCall->getRParenLoc());
3780 
3781  if ((Op == AtomicExpr::AO__c11_atomic_load ||
3782  Op == AtomicExpr::AO__c11_atomic_store ||
3783  Op == AtomicExpr::AO__opencl_atomic_load ||
3784  Op == AtomicExpr::AO__opencl_atomic_store ) &&
3785  Context.AtomicUsesUnsupportedLibcall(AE))
3786  Diag(AE->getLocStart(), diag::err_atomic_load_store_uses_lib)
3787  << ((Op == AtomicExpr::AO__c11_atomic_load ||
3788  Op == AtomicExpr::AO__opencl_atomic_load)
3789  ? 0 : 1);
3790 
3791  return AE;
3792 }
3793 
3794 /// checkBuiltinArgument - Given a call to a builtin function, perform
3795 /// normal type-checking on the given argument, updating the call in
3796 /// place. This is useful when a builtin function requires custom
3797 /// type-checking for some of its arguments but not necessarily all of
3798 /// them.
3799 ///
3800 /// Returns true on error.
3801 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
3802  FunctionDecl *Fn = E->getDirectCallee();
3803  assert(Fn && "builtin call without direct callee!");
3804 
3805  ParmVarDecl *Param = Fn->getParamDecl(ArgIndex);
3806  InitializedEntity Entity =
3807  InitializedEntity::InitializeParameter(S.Context, Param);
3808 
3809  ExprResult Arg = E->getArg(0);
3810  Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
3811  if (Arg.isInvalid())
3812  return true;
3813 
3814  E->setArg(ArgIndex, Arg.get());
3815  return false;
3816 }
3817 
3818 /// SemaBuiltinAtomicOverloaded - We have a call to a function like
3819 /// __sync_fetch_and_add, which is an overloaded function based on the pointer
3820 /// type of its first argument. The main ActOnCallExpr routines have already
3821 /// promoted the types of arguments because all of these calls are prototyped as
3822 /// void(...).
3823 ///
3824 /// This function goes through and does final semantic checking for these
3825 /// builtins.
3826 ExprResult
3827 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
3828  CallExpr *TheCall = (CallExpr *)TheCallResult.get();
3829  DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
3830  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
3831 
3832  // Ensure that we have at least one argument to do type inference from.
3833  if (TheCall->getNumArgs() < 1) {
3834  Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least)
3835  << 0 << 1 << TheCall->getNumArgs()
3836  << TheCall->getCallee()->getSourceRange();
3837  return ExprError();
3838  }
3839 
3840  // Inspect the first argument of the atomic builtin. This should always be
3841  // a pointer type, whose element is an integral scalar or pointer type.
3842  // Because it is a pointer type, we don't have to worry about any implicit
3843  // casts here.
3844  // FIXME: We don't allow floating point scalars as input.
3845  Expr *FirstArg = TheCall->getArg(0);
3846  ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
3847  if (FirstArgResult.isInvalid())
3848  return ExprError();
3849  FirstArg = FirstArgResult.get();
3850  TheCall->setArg(0, FirstArg);
3851 
3852  const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
3853  if (!pointerType) {
3854  Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
3855  << FirstArg->getType() << FirstArg->getSourceRange();
3856  return ExprError();
3857  }
3858 
3859  QualType ValType = pointerType->getPointeeType();
3860  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
3861  !ValType->isBlockPointerType()) {
3862  Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intptr)
3863  << FirstArg->getType() << FirstArg->getSourceRange();
3864  return ExprError();
3865  }
3866 
3867  if (ValType.isConstQualified()) {
3868  Diag(DRE->getLocStart(), diag::err_atomic_builtin_cannot_be_const)
3869  << FirstArg->getType() << FirstArg->getSourceRange();
3870  return ExprError();
3871  }
3872 
3873  switch (ValType.getObjCLifetime()) {
3874  case Qualifiers::OCL_None:
3875  case Qualifiers::OCL_ExplicitNone:
3876  // okay
3877  break;
3878 
3879  case Qualifiers::OCL_Weak:
3880  case Qualifiers::OCL_Strong:
3881  case Qualifiers::OCL_Autoreleasing:
3882  Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
3883  << ValType << FirstArg->getSourceRange();
3884  return ExprError();
3885  }
3886 
3887  // Strip any qualifiers off ValType.
3888  ValType = ValType.getUnqualifiedType();
3889 
3890  // The majority of builtins return a value, but a few have special return
3891  // types, so allow them to override appropriately below.
3892  QualType ResultType = ValType;
3893 
3894  // We need to figure out which concrete builtin this maps onto. For example,
3895  // __sync_fetch_and_add with a 2 byte object turns into
3896  // __sync_fetch_and_add_2.
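  // Illustrative example (assumed): for
  //   short s;  __sync_fetch_and_add(&s, 1);
  // ValType is 'short' (2 bytes), so SizeIndex selects the _2 column of the
  // table below and the call is retargeted to __sync_fetch_and_add_2.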
3897 #define BUILTIN_ROW(x) \
3898  { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
3899  Builtin::BI##x##_8, Builtin::BI##x##_16 }
3900 
3901  static const unsigned BuiltinIndices[][5] = {
3902  BUILTIN_ROW(__sync_fetch_and_add),
3903  BUILTIN_ROW(__sync_fetch_and_sub),
3904  BUILTIN_ROW(__sync_fetch_and_or),
3905  BUILTIN_ROW(__sync_fetch_and_and),
3906  BUILTIN_ROW(__sync_fetch_and_xor),
3907  BUILTIN_ROW(__sync_fetch_and_nand),
3908 
3909  BUILTIN_ROW(__sync_add_and_fetch),
3910  BUILTIN_ROW(__sync_sub_and_fetch),
3911  BUILTIN_ROW(__sync_and_and_fetch),
3912  BUILTIN_ROW(__sync_or_and_fetch),
3913  BUILTIN_ROW(__sync_xor_and_fetch),
3914  BUILTIN_ROW(__sync_nand_and_fetch),
3915 
3916  BUILTIN_ROW(__sync_val_compare_and_swap),
3917  BUILTIN_ROW(__sync_bool_compare_and_swap),
3918  BUILTIN_ROW(__sync_lock_test_and_set),
3919  BUILTIN_ROW(__sync_lock_release),
3920  BUILTIN_ROW(__sync_swap)
3921  };
3922 #undef BUILTIN_ROW
3923 
3924  // Determine the index of the size.
3925  unsigned SizeIndex;
3926  switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
3927  case 1: SizeIndex = 0; break;
3928  case 2: SizeIndex = 1; break;
3929  case 4: SizeIndex = 2; break;
3930  case 8: SizeIndex = 3; break;
3931  case 16: SizeIndex = 4; break;
3932  default:
3933  Diag(DRE->getLocStart(), diag::err_atomic_builtin_pointer_size)
3934  << FirstArg->getType() << FirstArg->getSourceRange();
3935  return ExprError();
3936  }
3937 
3938  // Each of these builtins has one pointer argument, followed by some number of
3939  // values (0, 1 or 2) followed by a potentially empty varargs list of stuff
3940  // that we ignore. Find out which row of BuiltinIndices to read from as well
3941  // as the number of fixed args.
3942  unsigned BuiltinID = FDecl->getBuiltinID();
3943  unsigned BuiltinIndex, NumFixed = 1;
3944  bool WarnAboutSemanticsChange = false;
3945  switch (BuiltinID) {
3946  default: llvm_unreachable("Unknown overloaded atomic builtin!");
3947  case Builtin::BI__sync_fetch_and_add:
3948  case Builtin::BI__sync_fetch_and_add_1:
3949  case Builtin::BI__sync_fetch_and_add_2:
3950  case Builtin::BI__sync_fetch_and_add_4:
3951  case Builtin::BI__sync_fetch_and_add_8:
3952  case Builtin::BI__sync_fetch_and_add_16:
3953  BuiltinIndex = 0;
3954  break;
3955 
3956  case Builtin::BI__sync_fetch_and_sub:
3957  case Builtin::BI__sync_fetch_and_sub_1:
3958  case Builtin::BI__sync_fetch_and_sub_2:
3959  case Builtin::BI__sync_fetch_and_sub_4:
3960  case Builtin::BI__sync_fetch_and_sub_8:
3961  case Builtin::BI__sync_fetch_and_sub_16:
3962  BuiltinIndex = 1;
3963  break;
3964 
3965  case Builtin::BI__sync_fetch_and_or:
3966  case Builtin::BI__sync_fetch_and_or_1:
3967  case Builtin::BI__sync_fetch_and_or_2:
3968  case Builtin::BI__sync_fetch_and_or_4:
3969  case Builtin::BI__sync_fetch_and_or_8:
3970  case Builtin::BI__sync_fetch_and_or_16:
3971  BuiltinIndex = 2;
3972  break;
3973 
3974  case Builtin::BI__sync_fetch_and_and:
3975  case Builtin::BI__sync_fetch_and_and_1:
3976  case Builtin::BI__sync_fetch_and_and_2:
3977  case Builtin::BI__sync_fetch_and_and_4:
3978  case Builtin::BI__sync_fetch_and_and_8:
3979  case Builtin::BI__sync_fetch_and_and_16:
3980  BuiltinIndex = 3;
3981  break;
3982 
3983  case Builtin::BI__sync_fetch_and_xor:
3984  case Builtin::BI__sync_fetch_and_xor_1:
3985  case Builtin::BI__sync_fetch_and_xor_2:
3986  case Builtin::BI__sync_fetch_and_xor_4:
3987  case Builtin::BI__sync_fetch_and_xor_8:
3988  case Builtin::BI__sync_fetch_and_xor_16:
3989  BuiltinIndex = 4;
3990  break;
3991 
3992  case Builtin::BI__sync_fetch_and_nand:
3993  case Builtin::BI__sync_fetch_and_nand_1:
3994  case Builtin::BI__sync_fetch_and_nand_2:
3995  case Builtin::BI__sync_fetch_and_nand_4:
3996  case Builtin::BI__sync_fetch_and_nand_8:
3997  case Builtin::BI__sync_fetch_and_nand_16:
3998  BuiltinIndex = 5;
3999  WarnAboutSemanticsChange = true;
4000  break;
4001 
4002  case Builtin::BI__sync_add_and_fetch:
4003  case Builtin::BI__sync_add_and_fetch_1:
4004  case Builtin::BI__sync_add_and_fetch_2:
4005  case Builtin::BI__sync_add_and_fetch_4:
4006  case Builtin::BI__sync_add_and_fetch_8:
4007  case Builtin::BI__sync_add_and_fetch_16:
4008  BuiltinIndex = 6;
4009  break;
4010 
4011  case Builtin::BI__sync_sub_and_fetch:
4012  case Builtin::BI__sync_sub_and_fetch_1:
4013  case Builtin::BI__sync_sub_and_fetch_2:
4014  case Builtin::BI__sync_sub_and_fetch_4:
4015  case Builtin::BI__sync_sub_and_fetch_8:
4016  case Builtin::BI__sync_sub_and_fetch_16:
4017  BuiltinIndex = 7;
4018  break;
4019 
4020  case Builtin::BI__sync_and_and_fetch:
4021  case Builtin::BI__sync_and_and_fetch_1:
4022  case Builtin::BI__sync_and_and_fetch_2:
4023  case Builtin::BI__sync_and_and_fetch_4:
4024  case Builtin::BI__sync_and_and_fetch_8:
4025  case Builtin::BI__sync_and_and_fetch_16:
4026  BuiltinIndex = 8;
4027  break;
4028 
4029  case Builtin::BI__sync_or_and_fetch:
4030  case Builtin::BI__sync_or_and_fetch_1:
4031  case Builtin::BI__sync_or_and_fetch_2:
4032  case Builtin::BI__sync_or_and_fetch_4:
4033  case Builtin::BI__sync_or_and_fetch_8:
4034  case Builtin::BI__sync_or_and_fetch_16:
4035  BuiltinIndex = 9;
4036  break;
4037 
4038  case Builtin::BI__sync_xor_and_fetch:
4039  case Builtin::BI__sync_xor_and_fetch_1:
4040  case Builtin::BI__sync_xor_and_fetch_2:
4041  case Builtin::BI__sync_xor_and_fetch_4:
4042  case Builtin::BI__sync_xor_and_fetch_8:
4043  case Builtin::BI__sync_xor_and_fetch_16:
4044  BuiltinIndex = 10;
4045  break;
4046 
4047  case Builtin::BI__sync_nand_and_fetch:
4048  case Builtin::BI__sync_nand_and_fetch_1:
4049  case Builtin::BI__sync_nand_and_fetch_2:
4050  case Builtin::BI__sync_nand_and_fetch_4:
4051  case Builtin::BI__sync_nand_and_fetch_8:
4052  case Builtin::BI__sync_nand_and_fetch_16:
4053  BuiltinIndex = 11;
4054  WarnAboutSemanticsChange = true;
4055  break;
4056 
4057  case Builtin::BI__sync_val_compare_and_swap:
4058  case Builtin::BI__sync_val_compare_and_swap_1:
4059  case Builtin::BI__sync_val_compare_and_swap_2:
4060  case Builtin::BI__sync_val_compare_and_swap_4:
4061  case Builtin::BI__sync_val_compare_and_swap_8:
4062  case Builtin::BI__sync_val_compare_and_swap_16:
4063  BuiltinIndex = 12;
4064  NumFixed = 2;
4065  break;
4066 
4067  case Builtin::BI__sync_bool_compare_and_swap:
4068  case Builtin::BI__sync_bool_compare_and_swap_1:
4069  case Builtin::BI__sync_bool_compare_and_swap_2:
4070  case Builtin::BI__sync_bool_compare_and_swap_4:
4071  case Builtin::BI__sync_bool_compare_and_swap_8:
4072  case Builtin::BI__sync_bool_compare_and_swap_16:
4073  BuiltinIndex = 13;
4074  NumFixed = 2;
4075  ResultType = Context.BoolTy;
4076  break;
4077 
4078  case Builtin::BI__sync_lock_test_and_set:
4079  case Builtin::BI__sync_lock_test_and_set_1:
4080  case Builtin::BI__sync_lock_test_and_set_2:
4081  case Builtin::BI__sync_lock_test_and_set_4:
4082  case Builtin::BI__sync_lock_test_and_set_8:
4083  case Builtin::BI__sync_lock_test_and_set_16:
4084  BuiltinIndex = 14;
4085  break;
4086 
4087  case Builtin::BI__sync_lock_release:
4088  case Builtin::BI__sync_lock_release_1:
4089  case Builtin::BI__sync_lock_release_2:
4090  case Builtin::BI__sync_lock_release_4:
4091  case Builtin::BI__sync_lock_release_8:
4092  case Builtin::BI__sync_lock_release_16:
4093  BuiltinIndex = 15;
4094  NumFixed = 0;
4095  ResultType = Context.VoidTy;
4096  break;
4097 
4098  case Builtin::BI__sync_swap:
4099  case Builtin::BI__sync_swap_1:
4100  case Builtin::BI__sync_swap_2:
4101  case Builtin::BI__sync_swap_4:
4102  case Builtin::BI__sync_swap_8:
4103  case Builtin::BI__sync_swap_16:
4104  BuiltinIndex = 16;
4105  break;
4106  }
4107 
4108  // Now that we know how many fixed arguments we expect, first check that we
4109  // have at least that many.
4110  if (TheCall->getNumArgs() < 1+NumFixed) {
4111  Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least)
4112  << 0 << 1+NumFixed << TheCall->getNumArgs()
4113  << TheCall->getCallee()->getSourceRange();
4114  return ExprError();
4115  }
4116 
4117  if (WarnAboutSemanticsChange) {
4118  Diag(TheCall->getLocEnd(), diag::warn_sync_fetch_and_nand_semantics_change)
4119  << TheCall->getCallee()->getSourceRange();
4120  }
4121 
4122  // Get the decl for the concrete builtin; from this, we can tell what the
4123  // concrete integer type we should convert to is.
4124  unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
4125  const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID);
4126  FunctionDecl *NewBuiltinDecl;
4127  if (NewBuiltinID == BuiltinID)
4128  NewBuiltinDecl = FDecl;
4129  else {
4130  // Perform builtin lookup to avoid redeclaring it.
4131  DeclarationName DN(&Context.Idents.get(NewBuiltinName));
4132  LookupResult Res(*this, DN, DRE->getLocStart(), LookupOrdinaryName);
4133  LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
4134  assert(Res.getFoundDecl());
4135  NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
4136  if (!NewBuiltinDecl)
4137  return ExprError();
4138  }
4139 
4140  // The first argument --- the pointer --- has a fixed type; we
4141  // deduce the types of the rest of the arguments accordingly. Walk
4142  // the remaining arguments, converting them to the deduced value type.
4143  for (unsigned i = 0; i != NumFixed; ++i) {
4144  ExprResult Arg = TheCall->getArg(i+1);
4145 
4146  // GCC does an implicit conversion to the pointer or integer ValType. This
4147  // can fail in some cases (1i -> int**), check for this error case now.
4148  // Initialize the argument.
4149  InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
4150  ValType, /*consume*/ false);
4151  Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
4152  if (Arg.isInvalid())
4153  return ExprError();
4154 
4155  // Okay, we have something that *can* be converted to the right type. Check
4156  // to see if there is a potentially weird extension going on here. This can
4157  // happen when you do an atomic operation on something like a char* and
4158  // pass in 42. The 42 gets converted to char. This is even more strange
4159  // for things like 45.123 -> char, etc.
4160  // FIXME: Do this check.
4161  TheCall->setArg(i+1, Arg.get());
4162  }
4163 
4164  ASTContext& Context = this->getASTContext();
4165 
4166  // Create a new DeclRefExpr to refer to the new decl.
4167  DeclRefExpr* NewDRE = DeclRefExpr::Create(
4168  Context,
4169  DRE->getQualifierLoc(),
4170  SourceLocation(),
4171  NewBuiltinDecl,
4172  /*enclosing*/ false,
4173  DRE->getLocation(),
4174  Context.BuiltinFnTy,
4175  DRE->getValueKind());
4176 
4177  // Set the callee in the CallExpr.
4178  // FIXME: This loses syntactic information.
4179  QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
4180  ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
4181  CK_BuiltinFnToFnPtr);
4182  TheCall->setCallee(PromotedCall.get());
4183 
4184  // Change the result type of the call to match the original value type. This
4185  // is arbitrary, but the codegen for these builtins is designed to handle it
4186  // gracefully.
4187  TheCall->setType(ResultType);
4188 
4189  return TheCallResult;
4190 }
4191 
4192 /// SemaBuiltinNontemporalOverloaded - We have a call to
4193 /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
4194 /// overloaded function based on the pointer type of its last argument.
4195 ///
4196 /// This function goes through and does final semantic checking for these
4197 /// builtins.
4198 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
4199  CallExpr *TheCall = (CallExpr *)TheCallResult.get();
4200  DeclRefExpr *DRE =
4201  cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
4202  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
4203  unsigned BuiltinID = FDecl->getBuiltinID();
4204  assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
4205  BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
4206  "Unexpected nontemporal load/store builtin!");
4207  bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
4208  unsigned numArgs = isStore ? 2 : 1;
4209 
4210  // Ensure that we have the proper number of arguments.
4211  if (checkArgCount(*this, TheCall, numArgs))
4212  return ExprError();
4213 
4214  // Inspect the last argument of the nontemporal builtin. This should always
4215  // be a pointer type, from which we infer the type of the memory access.
4216  // Because it is a pointer type, we don't have to worry about any implicit
4217  // casts here.
4218  Expr *PointerArg = TheCall->getArg(numArgs - 1);
4219  ExprResult PointerArgResult =
4220  DefaultFunctionArrayLvalueConversion(PointerArg);
4221 
4222  if (PointerArgResult.isInvalid())
4223  return ExprError();
4224  PointerArg = PointerArgResult.get();
4225  TheCall->setArg(numArgs - 1, PointerArg);
4226 
4227  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
4228  if (!pointerType) {
4229  Diag(DRE->getLocStart(), diag::err_nontemporal_builtin_must_be_pointer)
4230  << PointerArg->getType() << PointerArg->getSourceRange();
4231  return ExprError();
4232  }
4233 
4234  QualType ValType = pointerType->getPointeeType();
4235 
4236  // Strip any qualifiers off ValType.
4237  ValType = ValType.getUnqualifiedType();
4238  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
4239  !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
4240  !ValType->isVectorType()) {
4241  Diag(DRE->getLocStart(),
4242  diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
4243  << PointerArg->getType() << PointerArg->getSourceRange();
4244  return ExprError();
4245  }
4246 
4247  if (!isStore) {
4248  TheCall->setType(ValType);
4249  return TheCallResult;
4250  }
4251 
4252  ExprResult ValArg = TheCall->getArg(0);
4253  InitializedEntity Entity = InitializedEntity::InitializeParameter(
4254  Context, ValType, /*consume*/ false);
4255  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
4256  if (ValArg.isInvalid())
4257  return ExprError();
4258 
4259  TheCall->setArg(0, ValArg.get());
4260  TheCall->setType(Context.VoidTy);
4261  return TheCallResult;
4262 }
4263 
4264 /// CheckObjCString - Checks that the argument to the builtin
4265 /// CFString constructor is correct.
4266 /// Note: It might also make sense to do the UTF-16 conversion here (would
4267 /// simplify the backend).
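///
/// For illustration, a sketch of what is accepted and rejected here (the builtin
/// is normally reached through the CFSTR()/@"..." constant-string machinery):
///
///   __builtin___CFStringMakeConstantString("hello");     // OK: string literal
///   __builtin___CFStringMakeConstantString(somePointer); // rejected: not a literal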
4268 bool Sema::CheckObjCString(Expr *Arg) {
4269  Arg = Arg->IgnoreParenCasts();
4270  StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
4271 
4272  if (!Literal || !Literal->isAscii()) {
4273  Diag(Arg->getLocStart(), diag::err_cfstring_literal_not_string_constant)
4274  << Arg->getSourceRange();
4275  return true;
4276  }
4277 
4278  if (Literal->containsNonAsciiOrNull()) {
4279  StringRef String = Literal->getString();
4280  unsigned NumBytes = String.size();
4281  SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
4282  const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
4283  llvm::UTF16 *ToPtr = &ToBuf[0];
4284 
4285  llvm::ConversionResult Result =
4286  llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
4287  ToPtr + NumBytes, llvm::strictConversion);
4288  // Check for conversion failure.
4289  if (Result != llvm::conversionOK)
4290  Diag(Arg->getLocStart(),
4291  diag::warn_cfstring_truncated) << Arg->getSourceRange();
4292  }
4293  return false;
4294 }
4295 
4296 /// CheckOSLogFormatStringArg - Checks that the format string argument to the os_log()
4297 /// and os_trace() functions is correct, and converts it to const char *.
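///
/// For illustration, a sketch assuming Apple's os_log() macro, which lowers to
/// __builtin_os_log_format with the format string checked here:
///
///   os_log(OS_LOG_DEFAULT, "user %{public}s logged in", name); // literal: OK
///   os_log(OS_LOG_DEFAULT, fmt);                               // rejected: not a literal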
4298 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
4299  Arg = Arg->IgnoreParenCasts();
4300  auto *Literal = dyn_cast<StringLiteral>(Arg);
4301  if (!Literal) {
4302  if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
4303  Literal = ObjcLiteral->getString();
4304  }
4305  }
4306 
4307  if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) {
4308  return ExprError(
4309  Diag(Arg->getLocStart(), diag::err_os_log_format_not_string_constant)
4310  << Arg->getSourceRange());
4311  }
4312 
4313  ExprResult Result(Literal);
4314  QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
4315  InitializedEntity Entity =
4316  InitializedEntity::InitializeParameter(Context, ResultTy, false);
4317  Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
4318  return Result;
4319 }
4320 
4321 /// Check that the user is calling the appropriate va_start builtin for the
4322 /// target and calling convention.
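///
/// For example (a sketch): on x86-64 Linux with the default System V calling
/// convention, the following is rejected here:
///
///   void f(int x, ...) {
///     __builtin_ms_va_list ap;
///     __builtin_ms_va_start(ap, x); // error: ms_va_start used in a SysV function
///   }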
4323 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
4324  const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
4325  bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
4326  bool IsAArch64 = TT.getArch() == llvm::Triple::aarch64;
4327  bool IsWindows = TT.isOSWindows();
4328  bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
4329  if (IsX64 || IsAArch64) {
4330  CallingConv CC = CC_C;
4331  if (const FunctionDecl *FD = S.getCurFunctionDecl())
4332  CC = FD->getType()->getAs<FunctionType>()->getCallConv();
4333  if (IsMSVAStart) {
4334  // Don't allow this in System V ABI functions.
4335  if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
4336  return S.Diag(Fn->getLocStart(),
4337  diag::err_ms_va_start_used_in_sysv_function);
4338  } else {
4339  // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
4340  // On x64 Windows, don't allow this in System V ABI functions.
4341  // (Yes, that means there's no corresponding way to support variadic
4342  // System V ABI functions on Windows.)
4343  if ((IsWindows && CC == CC_X86_64SysV) ||
4344  (!IsWindows && CC == CC_Win64))
4345  return S.Diag(Fn->getLocStart(),
4346  diag::err_va_start_used_in_wrong_abi_function)
4347  << !IsWindows;
4348  }
4349  return false;
4350  }
4351 
4352  if (IsMSVAStart)
4353  return S.Diag(Fn->getLocStart(), diag::err_builtin_x64_aarch64_only);
4354  return false;
4355 }
4356 
4357 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
4358  ParmVarDecl **LastParam = nullptr) {
4359  // Determine whether the current function, block, or obj-c method is variadic
4360  // and get its parameter list.
4361  bool IsVariadic = false;
4362  ArrayRef<ParmVarDecl *> Params;
4363  DeclContext *Caller = S.CurContext;
4364  if (auto *Block = dyn_cast<BlockDecl>(Caller)) {
4365  IsVariadic = Block->isVariadic();
4366  Params = Block->parameters();
4367  } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) {
4368  IsVariadic = FD->isVariadic();
4369  Params = FD->parameters();
4370  } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) {
4371  IsVariadic = MD->isVariadic();
4372  // FIXME: This isn't correct for methods (results in bogus warning).
4373  Params = MD->parameters();
4374  } else if (isa<CapturedDecl>(Caller)) {
4375  // We don't support va_start in a CapturedDecl.
4376  S.Diag(Fn->getLocStart(), diag::err_va_start_captured_stmt);
4377  return true;
4378  } else {
4379  // This must be some other declcontext that parses exprs.
4380  S.Diag(Fn->getLocStart(), diag::err_va_start_outside_function);
4381  return true;
4382  }
4383 
4384  if (!IsVariadic) {
4385  S.Diag(Fn->getLocStart(), diag::err_va_start_fixed_function);
4386  return true;
4387  }
4388 
4389  if (LastParam)
4390  *LastParam = Params.empty() ? nullptr : Params.back();
4391 
4392  return false;
4393 }
4394 
4395 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
4396 /// for validity. Emit an error and return true on failure; return false
4397 /// on success.
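///
/// A use that passes all of these checks might look like this (sketch):
///
///   #include <stdarg.h>
///   int sum(int count, ...) {
///     va_list ap;
///     va_start(ap, count);            // expands to __builtin_va_start(ap, count)
///     int total = 0;
///     for (int i = 0; i < count; ++i)
///       total += va_arg(ap, int);
///     va_end(ap);
///     return total;
///   }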
4398 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
4399  Expr *Fn = TheCall->getCallee();
4400 
4401  if (checkVAStartABI(*this, BuiltinID, Fn))
4402  return true;
4403 
4404  if (TheCall->getNumArgs() > 2) {
4405  Diag(TheCall->getArg(2)->getLocStart(),
4406  diag::err_typecheck_call_too_many_args)
4407  << 0 /*function call*/ << 2 << TheCall->getNumArgs()
4408  << Fn->getSourceRange()
4409  << SourceRange(TheCall->getArg(2)->getLocStart(),
4410  (*(TheCall->arg_end()-1))->getLocEnd());
4411  return true;
4412  }
4413 
4414  if (TheCall->getNumArgs() < 2) {
4415  return Diag(TheCall->getLocEnd(),
4416  diag::err_typecheck_call_too_few_args_at_least)
4417  << 0 /*function call*/ << 2 << TheCall->getNumArgs();
4418  }
4419 
4420  // Type-check the first argument normally.
4421  if (checkBuiltinArgument(*this, TheCall, 0))
4422  return true;
4423 
4424  // Check that the current function is variadic, and get its last parameter.
4425  ParmVarDecl *LastParam;
4426  if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam))
4427  return true;
4428 
4429  // Verify that the second argument to the builtin is the last argument of the
4430  // current function or method.
4431  bool SecondArgIsLastNamedArgument = false;
4432  const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();
4433 
4434  // These are valid if SecondArgIsLastNamedArgument is false after the next
4435  // block.
4436  QualType Type;
4437  SourceLocation ParamLoc;
4438  bool IsCRegister = false;
4439 
4440  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
4441  if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
4442  SecondArgIsLastNamedArgument = PV == LastParam;
4443 
4444  Type = PV->getType();
4445  ParamLoc = PV->getLocation();
4446  IsCRegister =
4447  PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus;
4448  }
4449  }
4450 
4451  if (!SecondArgIsLastNamedArgument)
4452  Diag(TheCall->getArg(1)->getLocStart(),
4453  diag::warn_second_arg_of_va_start_not_last_named_param);
4454  else if (IsCRegister || Type->isReferenceType() ||
4455  Type->isSpecificBuiltinType(BuiltinType::Float) || [=] {
4456  // Promotable integers are UB, but enumerations need a bit of
4457  // extra checking to see what their promotable type actually is.
4458  if (!Type->isPromotableIntegerType())
4459  return false;
4460  if (!Type->isEnumeralType())
4461  return true;
4462  const EnumDecl *ED = Type->getAs<EnumType>()->getDecl();
4463  return !(ED &&
4464  Context.typesAreCompatible(ED->getPromotionType(), Type));
4465  }()) {
4466  unsigned Reason = 0;
4467  if (Type->isReferenceType()) Reason = 1;
4468  else if (IsCRegister) Reason = 2;
4469  Diag(Arg->getLocStart(), diag::warn_va_start_type_is_undefined) << Reason;
4470  Diag(ParamLoc, diag::note_parameter_type) << Type;
4471  }
4472 
4473  TheCall->setType(Context.VoidTy);
4474  return false;
4475 }
4476 
4477 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
4478  // void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
4479  // const char *named_addr);
4480 
4481  Expr *Func = Call->getCallee();
4482 
4483  if (Call->getNumArgs() < 3)
4484  return Diag(Call->getLocEnd(),
4485  diag::err_typecheck_call_too_few_args_at_least)
4486  << 0 /*function call*/ << 3 << Call->getNumArgs();
4487 
4488  // Type-check the first argument normally.
4489  if (checkBuiltinArgument(*this, Call, 0))
4490  return true;
4491 
4492  // Check that the current function is variadic.
4493  if (checkVAStartIsInVariadicFunction(*this, Func))
4494  return true;
4495 
4496  // __va_start on Windows does not validate the parameter qualifiers
4497 
4498  const Expr *Arg1 = Call->getArg(1)->IgnoreParens();
4499  const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();
4500 
4501  const Expr *Arg2 = Call->getArg(2)->IgnoreParens();
4502  const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();
4503 
4504  const QualType &ConstCharPtrTy =
4505  Context.getPointerType(Context.CharTy.withConst());
4506  if (!Arg1Ty->isPointerType() ||
4507  Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy)
4508  Diag(Arg1->getLocStart(), diag::err_typecheck_convert_incompatible)
4509  << Arg1->getType() << ConstCharPtrTy
4510  << 1 /* different class */
4511  << 0 /* qualifier difference */
4512  << 3 /* parameter mismatch */
4513  << 2 << Arg1->getType() << ConstCharPtrTy;
4514 
4515  const QualType SizeTy = Context.getSizeType();
4516  if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
4517  Diag(Arg2->getLocStart(), diag::err_typecheck_convert_incompatible)
4518  << Arg2->getType() << SizeTy
4519  << 1 /* different class */
4520  << 0 /* qualifier difference */
4521  << 3 /* parameter mismatch */
4522  << 3 << Arg2->getType() << SizeTy;
4523 
4524  return false;
4525 }
4526 
4527 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
4528 /// friends. This is declared to take (...), so we have to check everything.
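///
/// For illustration (sketch): the arguments must promote to a common real
/// floating type, so the first call is accepted and the second rejected:
///
///   int ok(float a, double b) { return __builtin_isgreater(a, b); }
///   int bad(int *p, double b) { return __builtin_isgreater(p, b); } // error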
4529 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
4530  if (TheCall->getNumArgs() < 2)
4531  return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
4532  << 0 /*function call*/ << 2 << TheCall->getNumArgs();
4533  if (TheCall->getNumArgs() > 2)
4534  return Diag(TheCall->getArg(2)->getLocStart(),
4535  diag::err_typecheck_call_too_many_args)
4536  << 0 /*function call*/ << 2 << TheCall->getNumArgs()
4537  << SourceRange(TheCall->getArg(2)->getLocStart(),
4538  (*(TheCall->arg_end()-1))->getLocEnd());
4539 
4540  ExprResult OrigArg0 = TheCall->getArg(0);
4541  ExprResult OrigArg1 = TheCall->getArg(1);
4542 
4543  // Do standard promotions between the two arguments, returning their common
4544  // type.
4545  QualType Res = UsualArithmeticConversions(OrigArg0, OrigArg1, false);
4546  if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
4547  return true;
4548 
4549  // Make sure any conversions are pushed back into the call; this is
4550  // type safe since unordered compare builtins are declared as "_Bool
4551  // foo(...)".
4552  TheCall->setArg(0, OrigArg0.get());
4553  TheCall->setArg(1, OrigArg1.get());
4554 
4555  if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
4556  return false;
4557 
4558  // If the common type isn't a real floating type, then the arguments were
4559  // invalid for this operation.
4560  if (Res.isNull() || !Res->isRealFloatingType())
4561  return Diag(OrigArg0.get()->getLocStart(),
4562  diag::err_typecheck_call_invalid_ordered_compare)
4563  << OrigArg0.get()->getType() << OrigArg1.get()->getType()
4564  << SourceRange(OrigArg0.get()->getLocStart(), OrigArg1.get()->getLocEnd());
4565 
4566  return false;
4567 }
4568 
4569 /// SemaBuiltinFPClassification - Handle functions like
4570 /// __builtin_isnan and friends. This is declared to take (...), so we have
4571 /// to check everything. We expect the last argument to be a floating point
4572 /// value.
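///
/// For illustration (sketch): the last argument must be a real floating-point
/// value:
///
///   int f(double d) { return __builtin_isnan(d); }  // OK
///   int g(float x)  { return __builtin_isinf(x); }  // OK, float->double cast stripped below
///   int h(int i)    { return __builtin_isnan(i); }  // error: not a floating-point value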
4573 bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
4574  if (TheCall->getNumArgs() < NumArgs)
4575  return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
4576  << 0 /*function call*/ << NumArgs << TheCall->getNumArgs();
4577  if (TheCall->getNumArgs() > NumArgs)
4578  return Diag(TheCall->getArg(NumArgs)->getLocStart(),
4579  diag::err_typecheck_call_too_many_args)
4580  << 0 /*function call*/ << NumArgs << TheCall->getNumArgs()
4581  << SourceRange(TheCall->getArg(NumArgs)->getLocStart(),
4582  (*(TheCall->arg_end()-1))->getLocEnd());
4583 
4584  Expr *OrigArg = TheCall->getArg(NumArgs-1);
4585 
4586  if (OrigArg->isTypeDependent())
4587  return false;
4588 
4589  // This operation requires a non-_Complex floating-point number.
4590  if (!OrigArg->getType()->isRealFloatingType())
4591  return Diag(OrigArg->getLocStart(),
4592  diag::err_typecheck_call_invalid_unary_fp)
4593  << OrigArg->getType() << OrigArg->getSourceRange();
4594 
4595  // If this is an implicit conversion from float -> float or double, remove it.
4596  if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(OrigArg)) {
4597  // Only remove standard FloatCasts, leaving other casts in place.
4598  if (Cast->getCastKind() == CK_FloatingCast) {
4599  Expr *CastArg = Cast->getSubExpr();
4600  if (CastArg->getType()->isSpecificBuiltinType(BuiltinType::Float)) {
4601  assert((Cast->getType()->isSpecificBuiltinType(BuiltinType::Double) ||
4602  Cast->getType()->isSpecificBuiltinType(BuiltinType::Float)) &&
4603  "promotion from float to either float or double is the only expected cast here");
4604  Cast->setSubExpr(nullptr);
4605  TheCall->setArg(NumArgs-1, CastArg);
4606  }
4607  }
4608  }
4609 
4610  return false;
4611 }
4612 
4613 // Customized Sema Checking for VSX builtins that have the following signature:
4614 // vector [...] builtinName(vector [...], vector [...], const int);
4615 // These take vectors of the same type (any legal vector type) for the first
4616 // two arguments and a compile-time constant for the third argument.
4617 // Example builtins are:
4618 // vector double vec_xxpermdi(vector double, vector double, int);
4619 // vector short vec_xxsldwi(vector short, vector short, int);
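// For illustration (sketch; assumes a PowerPC target with VSX and <altivec.h>):
//
//   vector double ok(vector double a, vector double b) {
//     return vec_xxpermdi(a, b, 2);      // constant third argument: OK
//   }
//   // vec_xxpermdi(a, b, n) with a runtime 'n' is rejected here.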
4620 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
4621  unsigned ExpectedNumArgs = 3;
4622  if (TheCall->getNumArgs() < ExpectedNumArgs)
4623  return Diag(TheCall->getLocEnd(),
4624  diag::err_typecheck_call_too_few_args_at_least)
4625  << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
4626  << TheCall->getSourceRange();
4627 
4628  if (TheCall->getNumArgs() > ExpectedNumArgs)
4629  return Diag(TheCall->getLocEnd(),
4630  diag::err_typecheck_call_too_many_args_at_most)
4631  << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
4632  << TheCall->getSourceRange();
4633 
4634  // Check that the third argument is a compile-time constant.
4635  llvm::APSInt Value;
4636  if (!TheCall->getArg(2)->isIntegerConstantExpr(Value, Context))
4637  return Diag(TheCall->getLocStart(),
4638  diag::err_vsx_builtin_nonconstant_argument)
4639  << 3 /* argument index */ << TheCall->getDirectCallee()
4640  << SourceRange(TheCall->getArg(2)->getLocStart(),
4641  TheCall->getArg(2)->getLocEnd());
4642 
4643  QualType Arg1Ty = TheCall->getArg(0)->getType();
4644  QualType Arg2Ty = TheCall->getArg(1)->getType();
4645 
4646  // Check that the types of the first two arguments are vectors.
4647  SourceLocation BuiltinLoc = TheCall->getLocStart();
4648  if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
4649  (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
4650  return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
4651  << TheCall->getDirectCallee()
4652  << SourceRange(TheCall->getArg(0)->getLocStart(),
4653  TheCall->getArg(1)->getLocEnd());
4654  }
4655 
4656  // Check the first two arguments are the same type.
4657  if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) {
4658  return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
4659  << TheCall->getDirectCallee()
4660  << SourceRange(TheCall->getArg(0)->getLocStart(),
4661  TheCall->getArg(1)->getLocEnd());
4662  }
4663 
4664  // When default clang type checking is turned off and the customized type
4665  // checking is used, the return type of the call must be set explicitly.
4666  // Otherwise it is _Bool by default.
4667  TheCall->setType(Arg1Ty);
4668 
4669  return false;
4670 }
4671 
4672 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
4673 // This is declared to take (...), so we have to check everything.
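// For illustration (sketch, using the vector_size extension), the two forms
// distinguished below:
//
//   typedef int v4si __attribute__((vector_size(16)));
//   v4si rev(v4si a)          { return __builtin_shufflevector(a, a, 3, 2, 1, 0); }
//   v4si pick(v4si a, v4si m) { return __builtin_shufflevector(a, m); } // vector-mask form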
4674 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
4675  if (TheCall->getNumArgs() < 2)
4676  return ExprError(Diag(TheCall->getLocEnd(),
4677  diag::err_typecheck_call_too_few_args_at_least)
4678  << 0 /*function call*/ << 2 << TheCall->getNumArgs()
4679  << TheCall->getSourceRange());
4680 
4681  // Determine which of the following types of shufflevector we're checking:
4682  // 1) unary, vector mask: (lhs, mask)
4683  // 2) binary, scalar mask: (lhs, rhs, index, ..., index)
4684  QualType resType = TheCall->getArg(0)->getType();
4685  unsigned numElements = 0;
4686 
4687  if (!TheCall->getArg(0)->isTypeDependent() &&
4688  !TheCall->getArg(1)->isTypeDependent()) {
4689  QualType LHSType = TheCall->getArg(0)->getType();
4690  QualType RHSType = TheCall->getArg(1)->getType();
4691 
4692  if (!LHSType->isVectorType() || !RHSType->isVectorType())
4693  return ExprError(Diag(TheCall->getLocStart(),
4694  diag::err_vec_builtin_non_vector)
4695  << TheCall->getDirectCallee()
4696  << SourceRange(TheCall->getArg(0)->getLocStart(),
4697  TheCall->getArg(1)->getLocEnd()));
4698 
4699  numElements = LHSType->getAs<VectorType>()->getNumElements();
4700  unsigned numResElements = TheCall->getNumArgs() - 2;
4701 
4702  // Check to see if we have a call with 2 vector arguments, the unary shuffle
4703  // with mask. If so, verify that RHS is an integer vector type with the
4704  // same number of elts as lhs.
4705  if (TheCall->getNumArgs() == 2) {
4706  if (!RHSType->hasIntegerRepresentation() ||
4707  RHSType->getAs<VectorType>()->getNumElements() != numElements)
4708  return ExprError(Diag(TheCall->getLocStart(),
4709  diag::err_vec_builtin_incompatible_vector)
4710  << TheCall->getDirectCallee()
4711  << SourceRange(TheCall->getArg(1)->getLocStart(),
4712  TheCall->getArg(1)->getLocEnd()));
4713  } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
4714  return ExprError(Diag(TheCall->getLocStart(),
4715  diag::err_vec_builtin_incompatible_vector)
4716  << TheCall->getDirectCallee()
4717  << SourceRange(TheCall->getArg(0)->getLocStart(),
4718  TheCall->getArg(1)->getLocEnd()));
4719  } else if (numElements != numResElements) {
4720  QualType eltType = LHSType->getAs<VectorType>()->getElementType();
4721  resType = Context.getVectorType(eltType, numResElements,
4722  VectorType::GenericVector);
4723  }
4724  }
4725 
4726  for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
4727  if (TheCall->getArg(i)->isTypeDependent() ||
4728  TheCall->getArg(i)->isValueDependent())
4729  continue;
4730 
4731  llvm::APSInt Result(32);
4732  if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context))
4733  return ExprError(Diag(TheCall->getLocStart(),
4734  diag::err_shufflevector_nonconstant_argument)
4735  << TheCall->getArg(i)->getSourceRange());
4736 
4737  // Allow -1 which will be translated to undef in the IR.
4738  if (Result.isSigned() && Result.isAllOnesValue())
4739  continue;
4740 
4741  if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2)
4742  return ExprError(Diag(TheCall->getLocStart(),
4743  diag::err_shufflevector_argument_too_large)
4744  << TheCall->getArg(i)->getSourceRange());
4745  }
4746 
4747  SmallVector<Expr*, 32> exprs;
4748 
4749  for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
4750  exprs.push_back(TheCall->getArg(i));
4751  TheCall->setArg(i, nullptr);
4752  }
4753 
4754  return new (Context) ShuffleVectorExpr(Context, exprs, resType,
4755  TheCall->getCallee()->getLocStart(),
4756  TheCall->getRParenLoc());
4757 }
4758 
4759 /// SemaConvertVectorExpr - Handle __builtin_convertvector
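///
/// For illustration (sketch): source and destination must be vector types with
/// the same number of elements:
///
///   typedef float  v4f __attribute__((vector_size(16)));
///   typedef double v4d __attribute__((vector_size(32)));
///   v4d widen(v4f x) { return __builtin_convertvector(x, v4d); }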
4760 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
4761  SourceLocation BuiltinLoc,
4762  SourceLocation RParenLoc) {
4763  ExprValueKind VK = VK_RValue;
4764  ExprObjectKind OK = OK_Ordinary;
4765  QualType DstTy = TInfo->getType();
4766  QualType SrcTy = E->getType();
4767 
4768  if (!SrcTy->isVectorType() && !SrcTy->isDependentType())
4769  return ExprError(Diag(BuiltinLoc,
4770  diag::err_convertvector_non_vector)
4771  << E->getSourceRange());
4772  if (!DstTy->isVectorType() && !DstTy->isDependentType())
4773  return ExprError(Diag(BuiltinLoc,
4774  diag::err_convertvector_non_vector_type));
4775 
4776  if (!SrcTy->isDependentType() && !DstTy->isDependentType()) {
4777  unsigned SrcElts = SrcTy->getAs<VectorType>()->getNumElements();
4778  unsigned DstElts = DstTy->getAs<VectorType>()->getNumElements();
4779  if (SrcElts != DstElts)
4780  return ExprError(Diag(BuiltinLoc,
4781  diag::err_convertvector_incompatible_vector)
4782  << E->getSourceRange());
4783  }
4784 
4785  return new (Context)
4786  ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc);
4787 }
4788 
4789 /// SemaBuiltinPrefetch - Handle __builtin_prefetch.
4790 // This is declared to take (const void*, ...) and can take two
4791 // optional constant int args.
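// For illustration (sketch): the optional arguments must be integer constants,
// rw in [0,1] and locality in [0,3]:
//
//   void touch(const void *p) { __builtin_prefetch(p, 0, 3); }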
4792 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
4793  unsigned NumArgs = TheCall->getNumArgs();
4794 
4795  if (NumArgs > 3)
4796  return Diag(TheCall->getLocEnd(),
4797  diag::err_typecheck_call_too_many_args_at_most)
4798  << 0 /*function call*/ << 3 << NumArgs
4799  << TheCall->getSourceRange();
4800 
4801  // Argument 0 is checked for us and the remaining arguments must be
4802  // constant integers.
4803  for (unsigned i = 1; i != NumArgs; ++i)
4804  if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3))
4805  return true;
4806 
4807  return false;
4808 }
4809 
4810 /// SemaBuiltinAssume - Handle __assume (MS Extension).
4811 // __assume does not evaluate its argument, so we should warn if the argument
4812 // has side effects.
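// For illustration (sketch, with MS extensions enabled):
//
//   void f(int x) {
//     __assume(x > 0);    // OK: no side effects
//     __assume(x++ > 0);  // warns: the side effect is never evaluated
//   }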
4813 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) {
4814  Expr *Arg = TheCall->getArg(0);
4815  if (Arg->isInstantiationDependent()) return false;
4816 
4817  if (Arg->HasSideEffects(Context))
4818  Diag(Arg->getLocStart(), diag::warn_assume_side_effects)
4819  << Arg->getSourceRange()
4820  << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier();
4821 
4822  return false;
4823 }
4824 
4825 /// Handle __builtin_alloca_with_align. This is declared
4826 /// as (size_t, size_t) where the second size_t must be a constant power of 2,
4827 /// at least the width of char in bits and no larger than INT32_MAX.
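///
/// For illustration (sketch; the alignment operand is expressed in bits):
///
///   void use(unsigned n) {
///     char *buf = __builtin_alloca_with_align(n, 64);  // 64-bit (8-byte) aligned
///   }
///   // __builtin_alloca_with_align(n, 48) is rejected: 48 is not a power of 2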
4828 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) {
4829  // The alignment must be a constant integer.
4830  Expr *Arg = TheCall->getArg(1);
4831 
4832  // We can't check the value of a dependent argument.
4833  if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
4834  if (const auto *UE =
4835  dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts()))
4836  if (UE->getKind() == UETT_AlignOf)
4837  Diag(TheCall->getLocStart(), diag::warn_alloca_align_alignof)
4838  << Arg->getSourceRange();
4839 
4840  llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context);
4841 
4842  if (!Result.isPowerOf2())
4843  return Diag(TheCall->getLocStart(),
4844  diag::err_alignment_not_power_of_two)
4845  << Arg->getSourceRange();
4846 
4847  if (Result < Context.getCharWidth())
4848  return Diag(TheCall->getLocStart(), diag::err_alignment_too_small)
4849  << (unsigned)Context.getCharWidth()
4850  << Arg->getSourceRange();
4851 
4852  if (Result > std::numeric_limits<int32_t>::max())
4853  return Diag(TheCall->getLocStart(), diag::err_alignment_too_big)
4854  << std::numeric_limits<int32_t>::max()
4855  << Arg->getSourceRange();
4856  }
4857 
4858  return false;
4859 }
4860 
4861 /// Handle __builtin_assume_aligned. This is declared
4862 /// as (const void*, size_t, ...) and can take one optional constant int arg.
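///
/// For illustration (sketch):
///
///   void *a32(int *p)           { return __builtin_assume_aligned(p, 32); }
///   void *a32o(int *p, int off) { return __builtin_assume_aligned(p, 32, off); }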
4863 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
4864  unsigned NumArgs = TheCall->getNumArgs();
4865 
4866  if (NumArgs > 3)
4867  return Diag(TheCall->getLocEnd(),
4868  diag::err_typecheck_call_too_many_args_at_most)
4869  << 0 /*function call*/ << 3 << NumArgs
4870  << TheCall->getSourceRange();
4871 
4872  // The alignment must be a constant integer.
4873  Expr *Arg = TheCall->getArg(1);
4874 
4875  // We can't check the value of a dependent argument.
4876  if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
4877  llvm::APSInt Result;
4878  if (SemaBuiltinConstantArg(TheCall, 1, Result))
4879  return true;
4880 
4881  if (!Result.isPowerOf2())
4882  return Diag(TheCall->getLocStart(),
4883  diag::err_alignment_not_power_of_two)
4884  << Arg->getSourceRange();
4885  }
4886 
4887  if (NumArgs > 2) {
4888  ExprResult Arg(TheCall->getArg(2));
4889  InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
4890  Context.getSizeType(), false);
4891  Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
4892  if (Arg.isInvalid()) return true;
4893  TheCall->setArg(2, Arg.get());
4894  }
4895 
4896  return false;
4897 }
4898 
4899 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
4900  unsigned BuiltinID =
4901  cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID();
4902  bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;
4903 
4904  unsigned NumArgs = TheCall->getNumArgs();
4905  unsigned NumRequiredArgs = IsSizeCall ? 1 : 2;
4906  if (NumArgs < NumRequiredArgs) {
4907  return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
4908  << 0 /* function call */ << NumRequiredArgs << NumArgs
4909  << TheCall->getSourceRange();
4910  }
4911  if (NumArgs >= NumRequiredArgs + 0x100) {
4912  return Diag(TheCall->getLocEnd(),
4913  diag::err_typecheck_call_too_many_args_at_most)
4914  << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
4915  << TheCall->getSourceRange();
4916  }
4917  unsigned i = 0;
4918 
4919  // For formatting call, check buffer arg.
4920  if (!IsSizeCall) {
4921  ExprResult Arg(TheCall->getArg(i));
4922  InitializedEntity Entity = InitializedEntity::InitializeParameter(
4923  Context, Context.VoidPtrTy, false);
4924  Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
4925  if (Arg.isInvalid())
4926  return true;
4927  TheCall->setArg(i, Arg.get());
4928  i++;
4929  }
4930 
4931  // Check string literal arg.
4932  unsigned FormatIdx = i;
4933  {
4934  ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i));
4935  if (Arg.isInvalid())
4936  return true;
4937  TheCall->setArg(i, Arg.get());
4938  i++;
4939  }
4940 
4941  // Make sure variadic args are scalar.
4942  unsigned FirstDataArg = i;
4943  while (i < NumArgs) {
4944  ExprResult Arg = DefaultVariadicArgumentPromotion(
4945  TheCall->getArg(i), VariadicFunction, nullptr);
4946  if (Arg.isInvalid())
4947  return true;
4948  CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType());
4949  if (ArgSize.getQuantity() >= 0x100) {
4950  return Diag(Arg.get()->getLocEnd(), diag::err_os_log_argument_too_big)
4951  << i << (int)ArgSize.getQuantity() << 0xff
4952  << TheCall->getSourceRange();
4953  }
4954  TheCall->setArg(i, Arg.get());
4955  i++;
4956  }
4957 
4958  // Check formatting specifiers. NOTE: We're only doing this for the non-size
4959  // call to avoid duplicate diagnostics.
4960  if (!IsSizeCall) {
4961  llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
4962  ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
4963  bool Success = CheckFormatArguments(
4964  Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog,
4965  VariadicFunction, TheCall->getLocStart(), SourceRange(),
4966  CheckedVarArgs);
4967  if (!Success)
4968  return true;
4969  }
4970 
4971  if (IsSizeCall) {
4972  TheCall->setType(Context.getSizeType());
4973  } else {
4974  TheCall->setType(Context.VoidPtrTy);
4975  }
4976  return false;
4977 }
4978 
4979 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
4980 /// TheCall is a constant expression.
4981 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
4982  llvm::APSInt &Result) {
4983  Expr *Arg = TheCall->getArg(ArgNum);
4984  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
4985  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
4986 
4987  if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;
4988 
4989  if (!Arg->isIntegerConstantExpr(Result, Context))
4990  return Diag(TheCall->getLocStart(), diag::err_constant_integer_arg_type)
4991  << FDecl->getDeclName() << Arg->getSourceRange();
4992 
4993  return false;
4994 }
4995 
4996 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
4997 /// TheCall is a constant expression in the range [Low, High].
4998 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
4999  int Low, int High) {
5000  llvm::APSInt Result;
5001 
5002  // We can't check the value of a dependent argument.
5003  Expr *Arg = TheCall->getArg(ArgNum);
5004  if (Arg->isTypeDependent() || Arg->isValueDependent())
5005  return false;
5006 
5007  // Check constant-ness first.
5008  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
5009  return true;
5010 
5011  if (Result.getSExtValue() < Low || Result.getSExtValue() > High)
5012  return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
5013  << Low << High << Arg->getSourceRange();
5014 
5015  return false;
5016 }
5017 
5018 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr
5019 /// TheCall is a constant expression that is a multiple of Num.
5020 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
5021  unsigned Num) {
5022  llvm::APSInt Result;
5023 
5024  // We can't check the value of a dependent argument.
5025  Expr *Arg = TheCall->getArg(ArgNum);
5026  if (Arg->isTypeDependent() || Arg->isValueDependent())
5027  return false;
5028 
5029  // Check constant-ness first.
5030  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
5031  return true;
5032 
5033  if (Result.getSExtValue() % Num != 0)
5034  return Diag(TheCall->getLocStart(), diag::err_argument_not_multiple)
5035  << Num << Arg->getSourceRange();
5036 
5037  return false;
5038 }
5039 
5040 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
5041 /// TheCall is an ARM/AArch64 special register string literal.
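///
/// For illustration (sketch): the register may be named symbolically, e.g.
///
///   unsigned long long tid(void) { return __builtin_arm_rsr64("tpidr_el0"); }
///
/// or given as an encoding such as "cp15:0:c13:c0:3" (ARM) or an
/// "op0:op1:CRn:CRm:op2" form (AArch64), whose numeric ranges are validated below.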
5042 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
5043  int ArgNum, unsigned ExpectedFieldNum,
5044  bool AllowName) {
5045  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
5046  BuiltinID == ARM::BI__builtin_arm_wsr64 ||
5047  BuiltinID == ARM::BI__builtin_arm_rsr ||
5048  BuiltinID == ARM::BI__builtin_arm_rsrp ||
5049  BuiltinID == ARM::BI__builtin_arm_wsr ||
5050  BuiltinID == ARM::BI__builtin_arm_wsrp;
5051  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
5052  BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
5053  BuiltinID == AArch64::BI__builtin_arm_rsr ||
5054  BuiltinID == AArch64::BI__builtin_arm_rsrp ||
5055  BuiltinID == AArch64::BI__builtin_arm_wsr ||
5056  BuiltinID == AArch64::BI__builtin_arm_wsrp;
5057  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");
5058 
5059  // We can't check the value of a dependent argument.
5060  Expr *Arg = TheCall->getArg(ArgNum);
5061  if (Arg->isTypeDependent() || Arg->isValueDependent())
5062  return false;
5063 
5064  // Check if the argument is a string literal.
5065  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
5066  return Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal)
5067  << Arg->getSourceRange();
5068 
5069  // Check the type of special register given.
5070  StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
5071  SmallVector<StringRef, 6> Fields;
5072  Reg.split(Fields, ":");
5073 
5074  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
5075  return Diag(TheCall->getLocStart(), diag::err_arm_invalid_specialreg)
5076  << Arg->getSourceRange();
5077 
5078  // If the string is the name of a register then we cannot check that it is
5079 // valid here but if the string is of one of the forms described in ACLE then we
5080  // can check that the supplied fields are integers and within the valid
5081  // ranges.
5082  if (Fields.size() > 1) {
5083  bool FiveFields = Fields.size() == 5;
5084 
5085  bool ValidString = true;
5086  if (IsARMBuiltin) {
5087  ValidString &= Fields[0].startswith_lower("cp") ||
5088  Fields[0].startswith_lower("p");
5089  if (ValidString)
5090  Fields[0] =
5091  Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1);
5092 
5093  ValidString &= Fields[2].startswith_lower("c");
5094  if (ValidString)
5095  Fields[2] = Fields[2].drop_front(1);
5096 
5097  if (FiveFields) {
5098  ValidString &= Fields[3].startswith_lower("c");
5099  if (ValidString)
5100  Fields[3] = Fields[3].drop_front(1);
5101  }
5102  }
5103 
5104  SmallVector<int, 5> Ranges;
5105  if (FiveFields)
5106  Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
5107  else
5108  Ranges.append({15, 7, 15});
5109 
5110  for (unsigned i=0; i<Fields.size(); ++i) {
5111  int IntField;
5112  ValidString &= !Fields[i].getAsInteger(10, IntField);
5113  ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
5114  }
5115 
5116  if (!ValidString)
5117  return Diag(TheCall->getLocStart(), diag::err_arm_invalid_specialreg)
5118  << Arg->getSourceRange();
5119  } else if (IsAArch64Builtin && Fields.size() == 1) {
5120  // If the register name is one of those that appear in the condition below
5121  // and the special register builtin being used is one of the write builtins,
5122  // then we require that the argument provided for writing to the register
5123  // is an integer constant expression. This is because it will be lowered to
5124  // an MSR (immediate) instruction, so we need to know the immediate at
5125  // compile time.
5126  if (TheCall->getNumArgs() != 2)
5127  return false;
5128 
5129  std::string RegLower = Reg.lower();
5130  if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" &&
5131  RegLower != "pan" && RegLower != "uao")
5132  return false;
5133 
5134  return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
5135  }
5136 
5137  return false;
5138 }
5139 
5140 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
5141 /// This checks that the target supports __builtin_longjmp and
5142 /// that val is a constant 1.
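///
/// For illustration (sketch, on a target with SjLj lowering):
///
///   void *env[5];
///   void jump(void) { __builtin_longjmp(env, 1); }  // OK
///   // __builtin_longjmp(env, 2) is rejected: the value argument must be 1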
5143 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
5144  if (!Context.getTargetInfo().hasSjLjLowering())
5145  return Diag(TheCall->getLocStart(), diag::err_builtin_longjmp_unsupported)
5146  << SourceRange(TheCall->getLocStart(), TheCall->getLocEnd());
5147 
5148  Expr *Arg = TheCall->getArg(1);
5149  llvm::APSInt Result;
5150 
5151  // TODO: This is less than ideal. Overload this to take a value.
5152  if (SemaBuiltinConstantArg(TheCall, 1, Result))
5153  return true;
5154 
5155  if (Result != 1)
5156  return Diag(TheCall->getLocStart(), diag::err_builtin_longjmp_invalid_val)
5157  << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
5158 
5159  return false;
5160 }
5161 
5162 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]).
5163 /// This checks that the target supports __builtin_setjmp.
5164 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) {
5165  if (!Context.getTargetInfo().hasSjLjLowering())
5166  return Diag(TheCall->getLocStart(), diag::err_builtin_setjmp_unsupported)
5167  << SourceRange(TheCall->getLocStart(), TheCall->getLocEnd());
5168  return false;
5169 }
5170 
5171 namespace {
5172 
5173 class UncoveredArgHandler {
5174  enum { Unknown = -1, AllCovered = -2 };
5175 
5176  signed FirstUncoveredArg = Unknown;
5177  SmallVector<const Expr *, 4> DiagnosticExprs;
5178 
5179 public:
5180  UncoveredArgHandler() = default;
5181 
5182  bool hasUncoveredArg() const {
5183  return (FirstUncoveredArg >= 0);
5184  }
5185 
5186  unsigned getUncoveredArg() const {
5187  assert(hasUncoveredArg() && "no uncovered argument");
5188  return FirstUncoveredArg;
5189  }
5190 
5191  void setAllCovered() {
5192  // A string has been found with all arguments covered, so clear out
5193  // the diagnostics.
5194  DiagnosticExprs.clear();
5195  FirstUncoveredArg = AllCovered;
5196  }
5197 
5198  void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) {
5199  assert(NewFirstUncoveredArg >= 0 && "Outside range");
5200 
5201  // Don't update if a previous string covers all arguments.
5202  if (FirstUncoveredArg == AllCovered)
5203  return;
5204 
5205  // UncoveredArgHandler tracks the highest uncovered argument index
5206  // and with it all the strings that match this index.
5207  if (NewFirstUncoveredArg == FirstUncoveredArg)
5208  DiagnosticExprs.push_back(StrExpr);
5209  else if (NewFirstUncoveredArg > FirstUncoveredArg) {
5210  DiagnosticExprs.clear();
5211  DiagnosticExprs.push_back(StrExpr);
5212  FirstUncoveredArg = NewFirstUncoveredArg;
5213  }
5214  }
5215 
5216  void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr);
5217 };
5218 
5219 enum StringLiteralCheckType {
5220  SLCT_NotALiteral,
5221  SLCT_UncheckedLiteral,
5222  SLCT_CheckedLiteral
5223 };
5224 
5225 } // namespace
5226 
5227 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend,
5228  BinaryOperatorKind BinOpKind,
5229  bool AddendIsRight) {
5230  unsigned BitWidth = Offset.getBitWidth();
5231  unsigned AddendBitWidth = Addend.getBitWidth();
5232  // There might be negative interim results.
5233  if (Addend.isUnsigned()) {
5234  Addend = Addend.zext(++AddendBitWidth);
5235  Addend.setIsSigned(true);
5236  }
5237  // Adjust the bit width of the APSInts.
5238  if (AddendBitWidth > BitWidth) {
5239  Offset = Offset.sext(AddendBitWidth);
5240  BitWidth = AddendBitWidth;
5241  } else if (BitWidth > AddendBitWidth) {
5242  Addend = Addend.sext(BitWidth);
5243  }
5244 
5245  bool Ov = false;
5246  llvm::APSInt ResOffset = Offset;
5247  if (BinOpKind == BO_Add)
5248  ResOffset = Offset.sadd_ov(Addend, Ov);
5249  else {
5250  assert(AddendIsRight && BinOpKind == BO_Sub &&
5251  "operator must be add or sub with addend on the right");
5252  ResOffset = Offset.ssub_ov(Addend, Ov);
5253  }
5254 
5255  // We add an offset to a pointer here so we should support an offset as big as
5256  // possible.
5257  if (Ov) {
5258  assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 &&
5259  "index (intermediate) result too big");
5260  Offset = Offset.sext(2 * BitWidth);
5261  sumOffsets(Offset, Addend, BinOpKind, AddendIsRight);
5262  return;
5263  }
5264 
5265  Offset = ResOffset;
5266 }
5267 
5268 namespace {
5269 
5270 // This is a wrapper class around StringLiteral to support offsetted string
5271 // literals as format strings. It takes the offset into account when returning
5272 // the string, its length, and the source locations used to display notes correctly.
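// For example (a sketch): for  printf("dummy %s" + 6, p)  the checker sees the
// literal at offset 6, i.e. it effectively checks the format string "%s".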
5273 class FormatStringLiteral {
5274  const StringLiteral *FExpr;
5275  int64_t Offset;
5276 
5277  public:
5278  FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0)
5279  : FExpr(fexpr), Offset(Offset) {}
5280 
5281  StringRef getString() const {
5282  return FExpr->getString().drop_front(Offset);
5283  }
5284 
5285  unsigned getByteLength() const {
5286  return FExpr->getByteLength() - getCharByteWidth() * Offset;
5287  }
5288 
5289  unsigned getLength() const { return FExpr->getLength() - Offset; }
5290  unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); }
5291 
5292  StringLiteral::StringKind getKind() const { return FExpr->getKind(); }
5293 
5294  QualType getType() const { return FExpr->getType(); }
5295 
5296  bool isAscii() const { return FExpr->isAscii(); }
5297  bool isWide() const { return FExpr->isWide(); }
5298  bool isUTF8() const { return FExpr->isUTF8(); }
5299  bool isUTF16() const { return FExpr->isUTF16(); }
5300  bool isUTF32() const { return FExpr->isUTF32(); }
5301  bool isPascal() const { return FExpr->isPascal(); }
5302 
5303  SourceLocation getLocationOfByte(
5304  unsigned ByteNo, const SourceManager &SM, const LangOptions &Features,
5305  const TargetInfo &Target, unsigned *StartToken = nullptr,
5306  unsigned *StartTokenByteOffset = nullptr) const {
5307  return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target,
5308  StartToken, StartTokenByteOffset);
5309  }
5310 
5311  SourceLocation getLocStart() const LLVM_READONLY {
5312  return FExpr->getLocStart().getLocWithOffset(Offset);
5313  }
5314 
5315  SourceLocation getLocEnd() const LLVM_READONLY { return FExpr->getLocEnd(); }
5316 };
5317 
5318 } // namespace
5319 
5320 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
5321  const Expr *OrigFormatExpr,
5322  ArrayRef<const Expr *> Args,
5323  bool HasVAListArg, unsigned format_idx,
5324  unsigned firstDataArg,
5325  Sema::FormatStringType Type,
5326  bool inFunctionCall,
5327  Sema::VariadicCallType CallType,
5328  llvm::SmallBitVector &CheckedVarArgs,
5329  UncoveredArgHandler &UncoveredArg);
5330 
5331 // Determine if an expression is a string literal or constant string.
5332 // If this function returns SLCT_NotALiteral for an argument to a function
5333 // expecting a format string, we will usually need to emit a warning.
5334 // True string literals are then checked by CheckFormatString.
5335 static StringLiteralCheckType
5336 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
5337  bool HasVAListArg, unsigned format_idx,
5338  unsigned firstDataArg, Sema::FormatStringType Type,
5339  Sema::VariadicCallType CallType, bool InFunctionCall,
5340  llvm::SmallBitVector &CheckedVarArgs,
5341  UncoveredArgHandler &UncoveredArg,
5342  llvm::APSInt Offset) {
5343  tryAgain:
5344  assert(Offset.isSigned() && "invalid offset");
5345 
5346  if (E->isTypeDependent() || E->isValueDependent())
5347  return SLCT_NotALiteral;
5348 
5349  E = E->IgnoreParenCasts();
5350 
5351  if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull))
5352  // Technically -Wformat-nonliteral does not warn about this case.
5353  // The behavior of printf and friends in this case is implementation
5354  // dependent. Ideally if the format string cannot be null then
5355  // it should have a 'nonnull' attribute in the function prototype.
5356  return SLCT_UncheckedLiteral;
5357 
5358  switch (E->getStmtClass()) {
5359  case Stmt::BinaryConditionalOperatorClass:
5360  case Stmt::ConditionalOperatorClass: {
5361  // The expression is a literal if both sub-expressions were, and it was
5362  // completely checked only if both sub-expressions were checked.
5363  const AbstractConditionalOperator *C =
5364  cast<AbstractConditionalOperator>(E);
5365 
5366  // Determine whether it is necessary to check both sub-expressions, for
5367  // example, because the condition expression is a constant that can be
5368  // evaluated at compile time.
5369  bool CheckLeft = true, CheckRight = true;
5370 
5371  bool Cond;
5372  if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext())) {
5373  if (Cond)
5374  CheckRight = false;
5375  else
5376  CheckLeft = false;
5377  }
5378 
5379  // We need to maintain the offsets for the right and the left hand side
5380  // separately to check if every possible indexed expression is a valid
5381  // string literal. They might have different offsets for different string
5382  // literals in the end.
5383  StringLiteralCheckType Left;
5384  if (!CheckLeft)
5385  Left = SLCT_UncheckedLiteral;
5386  else {
5387  Left = checkFormatStringExpr(S, C->getTrueExpr(), Args,
5388  HasVAListArg, format_idx, firstDataArg,
5389  Type, CallType, InFunctionCall,
5390  CheckedVarArgs, UncoveredArg, Offset);
5391  if (Left == SLCT_NotALiteral || !CheckRight) {
5392  return Left;
5393  }
5394  }
5395 
5396  StringLiteralCheckType Right =
5397  checkFormatStringExpr(S, C->getFalseExpr(), Args,
5398  HasVAListArg, format_idx, firstDataArg,
5399  Type, CallType, InFunctionCall, CheckedVarArgs,
5400  UncoveredArg, Offset);
5401 
5402  return (CheckLeft && Left < Right) ? Left : Right;
5403  }
5404 
5405  case Stmt::ImplicitCastExprClass:
5406  E = cast<ImplicitCastExpr>(E)->getSubExpr();
5407  goto tryAgain;
5408 
5409  case Stmt::OpaqueValueExprClass:
5410  if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) {
5411  E = src;
5412  goto tryAgain;
5413  }
5414  return SLCT_NotALiteral;
5415 
5416  case Stmt::PredefinedExprClass:
5417  // While __func__, etc., are technically not string literals, they
5418  // cannot contain format specifiers and thus are not a security
5419  // liability.
5420  return SLCT_UncheckedLiteral;
5421 
5422  case Stmt::DeclRefExprClass: {
5423  const DeclRefExpr *DR = cast<DeclRefExpr>(E);
5424 
5425  // As an exception, do not flag errors for variables binding to
5426  // const string literals.
5427  if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
5428  bool isConstant = false;
5429  QualType T = DR->getType();
5430 
5431  if (const ArrayType *AT = S.Context.getAsArrayType(T)) {
5432  isConstant = AT->getElementType().isConstant(S.Context);
5433  } else if (const PointerType *PT = T->getAs<PointerType>()) {
5434  isConstant = T.isConstant(S.Context) &&
5435  PT->getPointeeType().isConstant(S.Context);
5436  } else if (T->isObjCObjectPointerType()) {
5437  // In ObjC, there is usually no "const ObjectPointer" type,
5438  // so don't check if the pointee type is constant.
5439  isConstant = T.isConstant(S.Context);
5440  }
5441 
5442  if (isConstant) {
5443  if (const Expr *Init = VD->getAnyInitializer()) {
5444  // Look through initializers like const char c[] = { "foo" }
5445  if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) {
5446  if (InitList->isStringLiteralInit())
5447  Init = InitList->getInit(0)->IgnoreParenImpCasts();
5448  }
5449  return checkFormatStringExpr(S, Init, Args,
5450  HasVAListArg, format_idx,
5451  firstDataArg, Type, CallType,
5452  /*InFunctionCall*/ false, CheckedVarArgs,
5453  UncoveredArg, Offset);
5454  }
5455  }
5456 
5457  // For vprintf* functions (i.e., HasVAListArg==true), we add a
5458  // special check to see if the format string is a function parameter
5459  // of the function calling the printf function. If the function
5460  // has an attribute indicating it is a printf-like function, then we
5461  // should suppress warnings concerning non-literals being used in a call
5462  // to a vprintf function. For example:
5463  //
5464  // void
5465  // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){
5466  // va_list ap;
5467  // va_start(ap, fmt);
5468  // vprintf(fmt, ap); // Do NOT emit a warning about "fmt".
5469  // ...
5470  // }
5471  if (HasVAListArg) {
5472  if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) {
5473  if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) {
5474  int PVIndex = PV->getFunctionScopeIndex() + 1;
5475  for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) {
5476  // adjust for implicit parameter
5477  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
5478  if (MD->isInstance())
5479  ++PVIndex;
5480  // We also check if the formats are compatible.
5481  // We can't pass a 'scanf' string to a 'printf' function.
5482  if (PVIndex == PVFormat->getFormatIdx() &&
5483  Type == S.GetFormatStringType(PVFormat))
5484  return SLCT_UncheckedLiteral;
5485  }
5486  }
5487  }
5488  }
5489  }
5490 
5491  return SLCT_NotALiteral;
5492  }
5493 
5494  case Stmt::CallExprClass:
5495  case Stmt::CXXMemberCallExprClass: {
5496  const CallExpr *CE = cast<CallExpr>(E);
5497  if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
5498  if (const FormatArgAttr *FA = ND->getAttr<FormatArgAttr>()) {
5499  const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
5500  return checkFormatStringExpr(S, Arg, Args,
5501  HasVAListArg, format_idx, firstDataArg,
5502  Type, CallType, InFunctionCall,
5503  CheckedVarArgs, UncoveredArg, Offset);
5504  } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
5505  unsigned BuiltinID = FD->getBuiltinID();
5506  if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
5507  BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
5508  const Expr *Arg = CE->getArg(0);
5509  return checkFormatStringExpr(S, Arg, Args,
5510  HasVAListArg, format_idx,
5511  firstDataArg, Type, CallType,
5512  InFunctionCall, CheckedVarArgs,
5513  UncoveredArg, Offset);
5514  }
5515  }
5516  }
5517 
5518  return SLCT_NotALiteral;
5519  }
5520  case Stmt::ObjCMessageExprClass: {
5521  const auto *ME = cast<ObjCMessageExpr>(E);
5522  if (const auto *ND = ME->getMethodDecl()) {
5523  if (const auto *FA = ND->getAttr<FormatArgAttr>()) {
5524  const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
5525  return checkFormatStringExpr(
5526  S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
5527  CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset);
5528  }
5529  }
5530 
5531  return SLCT_NotALiteral;
5532  }
5533  case Stmt::ObjCStringLiteralClass:
5534  case Stmt::StringLiteralClass: {
5535  const StringLiteral *StrE = nullptr;
5536 
5537  if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
5538  StrE = ObjCFExpr->getString();
5539  else
5540  StrE = cast<StringLiteral>(E);
5541 
5542  if (StrE) {
5543  if (Offset.isNegative() || Offset > StrE->getLength()) {
5544  // TODO: It would be better to have an explicit warning for out of
5545  // bounds literals.
5546  return SLCT_NotALiteral;
5547  }
5548  FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
5549  CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx,
5550  firstDataArg, Type, InFunctionCall, CallType,
5551  CheckedVarArgs, UncoveredArg);
5552  return SLCT_CheckedLiteral;
5553  }
5554 
5555  return SLCT_NotALiteral;
5556  }
5557  case Stmt::BinaryOperatorClass: {
5558  llvm::APSInt LResult;
5559  llvm::APSInt RResult;
5560 
5561  const BinaryOperator *BinOp = cast<BinaryOperator>(E);
5562 
5563  // A string literal + an int offset is still a string literal.
5564  if (BinOp->isAdditiveOp()) {
5565  bool LIsInt = BinOp->getLHS()->EvaluateAsInt(LResult, S.Context);
5566  bool RIsInt = BinOp->getRHS()->EvaluateAsInt(RResult, S.Context);
5567 
5568  if (LIsInt != RIsInt) {
5569  BinaryOperatorKind BinOpKind = BinOp->getOpcode();
5570 
5571  if (LIsInt) {
5572  if (BinOpKind == BO_Add) {
5573  sumOffsets(Offset, LResult, BinOpKind, RIsInt);
5574  E = BinOp->getRHS();
5575  goto tryAgain;
5576  }
5577  } else {
5578  sumOffsets(Offset, RResult, BinOpKind, RIsInt);
5579  E = BinOp->getLHS();
5580  goto tryAgain;
5581  }
5582  }
5583  }
5584 
5585  return SLCT_NotALiteral;
5586  }
5587  case Stmt::UnaryOperatorClass: {
5588  const UnaryOperator *UnaOp = cast<UnaryOperator>(E);
5589  auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr());
5590  if (UnaOp->getOpcode() == UO_AddrOf && ASE) {
5591  llvm::APSInt IndexResult;
5592  if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context)) {
5593  sumOffsets(Offset, IndexResult, BO_Add, /*RHS is int*/ true);
5594  E = ASE->getBase();
5595  goto tryAgain;
5596  }
5597  }
5598 
5599  return SLCT_NotALiteral;
5600  }
5601 
5602  default:
5603  return SLCT_NotALiteral;
5604  }
5605 }
5606 
5607 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
5608  return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
5609  .Case("scanf", FST_Scanf)
5610  .Cases("printf", "printf0", FST_Printf)
5611  .Cases("NSString", "CFString", FST_NSString)
5612  .Case("strftime", FST_Strftime)
5613  .Case("strfmon", FST_Strfmon)
5614  .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf)
5615  .Case("freebsd_kprintf", FST_FreeBSDKPrintf)
5616  .Case("os_trace", FST_OSLog)
5617  .Case("os_log", FST_OSLog)
5618  .Default(FST_Unknown);
5619 }
5620 
5621 /// CheckFormatArguments - Check calls to printf and scanf (and similar
5622 /// functions) for correct use of format strings.
5623 /// Returns true if a format string has been fully checked.
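///
/// For illustration (sketch):
///
///   printf("%s", 42);  // literal format: the %s/int mismatch is diagnosed
///   printf(fmt);       // non-literal with no data args: -Wformat-security warning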
5624 bool Sema::CheckFormatArguments(const FormatAttr *Format,
5625  ArrayRef<const Expr *> Args,
5626  bool IsCXXMember,
5627  VariadicCallType CallType,
5628  SourceLocation Loc, SourceRange Range,
5629  llvm::SmallBitVector &CheckedVarArgs) {
5630  FormatStringInfo FSI;
5631  if (getFormatStringInfo(Format, IsCXXMember, &FSI))
5632  return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx,
5633  FSI.FirstDataArg, GetFormatStringType(Format),
5634  CallType, Loc, Range, CheckedVarArgs);
5635  return false;
5636 }
5637 
5638 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
5639  bool HasVAListArg, unsigned format_idx,
5640  unsigned firstDataArg, FormatStringType Type,
5641  VariadicCallType CallType,
5642  SourceLocation Loc, SourceRange Range,
5643  llvm::SmallBitVector &CheckedVarArgs) {
5644  // CHECK: printf/scanf-like function is called with no format string.
5645  if (format_idx >= Args.size()) {
5646  Diag(Loc, diag::warn_missing_format_string) << Range;
5647  return false;
5648  }
5649 
5650  const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts();
5651 
5652  // CHECK: format string is not a string literal.
5653  //
5654  // Dynamically generated format strings are difficult to
5655  // automatically vet at compile time. Requiring that format strings
5656  // are string literals: (1) permits the checking of format strings by
5657  // the compiler and thereby (2) can practically remove the source of
5658  // many format string exploits.
5659 
5660  // Format string can be either ObjC string (e.g. @"%d") or
5661  // C string (e.g. "%d")
5662  // ObjC string uses the same format specifiers as C string, so we can use
5663  // the same format string checking logic for both ObjC and C strings.
5664  UncoveredArgHandler UncoveredArg;
5665  StringLiteralCheckType CT =
5666  checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg,
5667  format_idx, firstDataArg, Type, CallType,
5668  /*IsFunctionCall*/ true, CheckedVarArgs,
5669  UncoveredArg,
5670  /*no string offset*/ llvm::APSInt(64, false) = 0);
5671 
5672  // Generate a diagnostic where an uncovered argument is detected.
5673  if (UncoveredArg.hasUncoveredArg()) {
5674  unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg;
5675  assert(ArgIdx < Args.size() && "ArgIdx outside bounds");
5676  UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]);
5677  }
5678 
5679  if (CT != SLCT_NotALiteral)
5680  // Literal format string found, check done!
5681  return CT == SLCT_CheckedLiteral;
5682 
5683  // Strftime is a special case, as it always uses a single 'time' argument,
5684  // so it is safe to pass a non-literal string.
5685  if (Type == FST_Strftime)
5686  return false;
5687 
5688  // Do not emit a diagnostic when the string parameter is a macro expansion
5689  // and the format type is NSString or CFString. This is a hack to avoid
5690  // diagnosing uses of the NSLocalizedString and CFCopyLocalizedString
5691  // macros, which are usually used in place of NS and CF string literals.
5692  SourceLocation FormatLoc = Args[format_idx]->getLocStart();
5693  if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc))
5694  return false;
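// Illustrative example (hypothetical key and comment strings): this is what
// keeps a call such as
//
//   NSLog(NSLocalizedString(@"WelcomeKey", @"greeting shown at startup"));
//
// quiet: the format argument comes from a macro defined in a system header,
// so the non-literal NSString format is not diagnosed.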
5695 
5696  // If there are no arguments specified, warn with -Wformat-security, otherwise
5697  // warn only with -Wformat-nonliteral.
5698  if (Args.size() == firstDataArg) {
5699  Diag(FormatLoc, diag::warn_format_nonliteral_noargs)
5700  << OrigFormatExpr->getSourceRange();
5701  switch (Type) {
5702  default:
5703  break;
5704  case FST_Kprintf:
5705  case FST_FreeBSDKPrintf:
5706  case FST_Printf:
5707  Diag(FormatLoc, diag::note_format_security_fixit)
5708  << FixItHint::CreateInsertion(FormatLoc, "\"%s\", ");
5709  break;
5710  case FST_NSString:
5711  Diag(FormatLoc, diag::note_format_security_fixit)
5712  << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", ");
5713  break;
5714  }
5715  } else {
5716  Diag(FormatLoc, diag::warn_format_nonliteral)
5717  << OrigFormatExpr->getSourceRange();
5718  }
5719  return false;
5720 }
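// Illustrative examples of the two warnings above (msg, fmt and value are
// hypothetical non-literal variables):
//
//   printf(msg);           // no data args -> -Wformat-security, with a fix-it
//                          // suggesting: printf("%s", msg);
//   NSLog(fmt);            // NSString case -> fix-it suggests: NSLog(@"%@", fmt);
//   printf(msg, value);    // data args present -> -Wformat-nonliteral only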
5721 
5722 namespace {
5723 
5724 class CheckFormatHandler : public analyze_format_string::FormatStringHandler {
5725 protected:
5726  Sema &S;
5727  const FormatStringLiteral *FExpr;
5728  const Expr *OrigFormatExpr;
5729  const Sema::FormatStringType FSType;
5730  const unsigned FirstDataArg;
5731  const unsigned NumDataArgs;
5732  const char *Beg; // Start of format string.
5733  const bool HasVAListArg;
5734  ArrayRef<const Expr *> Args;
5735  unsigned FormatIdx;
5736  llvm::SmallBitVector CoveredArgs;
5737  bool usesPositionalArgs = false;
5738  bool atFirstArg = true;
5739  bool inFunctionCall;
5740  Sema::VariadicCallType CallType;
5741  llvm::SmallBitVector &CheckedVarArgs;
5742  UncoveredArgHandler &UncoveredArg;
5743 
5744 public:
5745  CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr,
5746  const Expr *origFormatExpr,
5747  const Sema::FormatStringType type, unsigned firstDataArg,
5748  unsigned numDataArgs, const char *beg, bool hasVAListArg,
5749  ArrayRef<const Expr *> Args, unsigned formatIdx,
5750  bool inFunctionCall, Sema::VariadicCallType callType,
5751  llvm::SmallBitVector &CheckedVarArgs,
5752  UncoveredArgHandler &UncoveredArg)
5753  : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type),
5754  FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg),
5755  HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx),
5756  inFunctionCall(inFunctionCall), CallType(callType),
5757  CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) {
5758  CoveredArgs.resize(numDataArgs);
5759  CoveredArgs.reset();
5760  }
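// CoveredArgs keeps one bit per data argument; the specifier handlers below
// set a bit whenever a conversion specifier consumes that argument, so
// DoneProcessing() can warn about data arguments no specifier ever used.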
5761 
5762  void DoneProcessing();
5763 
5764  void HandleIncompleteSpecifier(const char *startSpecifier,
5765  unsigned specifierLen) override;
5766 
5767  void HandleInvalidLengthModifier(
5770  const char *startSpecifier, unsigned specifierLen,
5771  unsigned DiagID);
5772 
5773  void HandleNonStandardLengthModifier(
5774  const analyze_format_string::FormatSpecifier &FS,
5775  const char *startSpecifier, unsigned specifierLen);
5776 
5777  void HandleNonStandardConversionSpecifier(
5778  const analyze_format_string::ConversionSpecifier &CS,
5779  const char *startSpecifier, unsigned specifierLen);
5780 
5781  void HandlePosition(const char *startPos, unsigned posLen) override;
5782 
5783  void HandleInvalidPosition(const char *startSpecifier,
5784  unsigned specifierLen,
5785  analyze_format_string::PositionContext p) override;
5786 
5787  void HandleZeroPosition(const char *startPos, unsigned posLen) override;
5788 
5789  void HandleNullChar(const char *nullCharacter) override;
5790 
5791  template <typename Range>
5792  static void
5793  EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr,
5794  const PartialDiagnostic &PDiag, SourceLocation StringLoc,
5795  bool IsStringLocation, Range StringRange,