// (Extracted from the clang 22.0.0git doxygen listing of InterpBuiltin.cpp;
// the original license header follows.)
1//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
9#include "Boolean.h"
10#include "EvalEmitter.h"
12#include "InterpHelpers.h"
13#include "PrimType.h"
14#include "Program.h"
16#include "clang/AST/OSLog.h"
21#include "llvm/ADT/StringExtras.h"
22#include "llvm/Support/AllocToken.h"
23#include "llvm/Support/ErrorHandling.h"
24#include "llvm/Support/SipHash.h"
25
26namespace clang {
27namespace interp {
28
29[[maybe_unused]] static bool isNoopBuiltin(unsigned ID) {
30 switch (ID) {
31 case Builtin::BIas_const:
32 case Builtin::BIforward:
33 case Builtin::BIforward_like:
34 case Builtin::BImove:
35 case Builtin::BImove_if_noexcept:
36 case Builtin::BIaddressof:
37 case Builtin::BI__addressof:
38 case Builtin::BI__builtin_addressof:
39 case Builtin::BI__builtin_launder:
40 return true;
41 default:
42 return false;
43 }
44 return false;
45}
46
/// Pops and discards a value of primitive type \p T from \p Stk.
static void discard(InterpStack &Stk, PrimType T) {
  TYPE_SWITCH(T, { Stk.discard<T>(); });
}
50
52 INT_TYPE_SWITCH(T, return Stk.pop<T>().toAPSInt());
53}
54
/// Convenience overload: pops the value of \p E's classified type from the
/// stack as an APSInt.
static APSInt popToAPSInt(InterpState &S, const Expr *E) {
  return popToAPSInt(S.Stk, *S.getContext().classify(E->getType()));
}
59 return popToAPSInt(S.Stk, *S.getContext().classify(T));
60}
61
62/// Pushes \p Val on the stack as the type given by \p QT.
63static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
67 assert(T);
68
69 unsigned BitWidth = S.getASTContext().getTypeSize(QT);
70
71 if (T == PT_IntAPS) {
72 auto Result = S.allocAP<IntegralAP<true>>(BitWidth);
73 Result.copy(Val);
75 return;
76 }
77
78 if (T == PT_IntAP) {
79 auto Result = S.allocAP<IntegralAP<false>>(BitWidth);
80 Result.copy(Val);
82 return;
83 }
84
86 int64_t V = Val.getSExtValue();
87 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
88 } else {
90 uint64_t V = Val.getZExtValue();
91 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
92 }
93}
94
/// Overload of pushInteger() accepting APInt, APSInt, or any native integral
/// type; converts \p Val to an APSInt before pushing it as type \p QT.
template <typename T>
static void pushInteger(InterpState &S, T Val, QualType QT) {
  if constexpr (std::is_same_v<T, APInt>)
    pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
  else if constexpr (std::is_same_v<T, APSInt>)
    pushInteger(S, Val, QT);
  else
    pushInteger(S,
                APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
                             std::is_signed_v<T>),
                       !std::is_signed_v<T>),
                QT);
}
108
109static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT,
110 const APSInt &Value) {
111
112 if (ValueT == PT_IntAPS) {
113 Dest.deref<IntegralAP<true>>() =
114 S.allocAP<IntegralAP<true>>(Value.getBitWidth());
115 Dest.deref<IntegralAP<true>>().copy(Value);
116 } else if (ValueT == PT_IntAP) {
117 Dest.deref<IntegralAP<false>>() =
118 S.allocAP<IntegralAP<false>>(Value.getBitWidth());
119 Dest.deref<IntegralAP<false>>().copy(Value);
120 } else {
122 ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
123 }
124}
125
126static QualType getElemType(const Pointer &P) {
127 const Descriptor *Desc = P.getFieldDesc();
128 QualType T = Desc->getType();
129 if (Desc->isPrimitive())
130 return T;
131 if (T->isPointerType())
132 return T->getAs<PointerType>()->getPointeeType();
133 if (Desc->isArray())
134 return Desc->getElemQualType();
135 if (const auto *AT = T->getAsArrayTypeUnsafe())
136 return AT->getElementType();
137 return T;
138}
139
141 unsigned ID) {
142 if (!S.diagnosing())
143 return;
144
145 auto Loc = S.Current->getSource(OpPC);
146 if (S.getLangOpts().CPlusPlus11)
147 S.CCEDiag(Loc, diag::note_constexpr_invalid_function)
148 << /*isConstexpr=*/0 << /*isConstructor=*/0
150 else
151 S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
152}
153
/// Packs a boolean vector into an integer, one bit per element:
/// element I of \p Val becomes bit I of the result.
// NOTE(review): the assert may originally have carried an additional
// element-type condition — verify against upstream.
static llvm::APSInt convertBoolVectorToInt(const Pointer &Val) {
  assert(Val.getFieldDesc()->isPrimitiveArray() &&
         "Not a boolean vector");
  unsigned NumElems = Val.getNumElems();

  // Each element is one bit, so create an integer with NumElts bits.
  llvm::APSInt Result(NumElems, 0);
  for (unsigned I = 0; I != NumElems; ++I) {
    if (Val.elem<bool>(I))
      Result.setBit(I);
  }

  return Result;
}
169
171 const InterpFrame *Frame,
172 const CallExpr *Call) {
173 unsigned Depth = S.Current->getDepth();
174 auto isStdCall = [](const FunctionDecl *F) -> bool {
175 return F && F->isInStdNamespace() && F->getIdentifier() &&
176 F->getIdentifier()->isStr("is_constant_evaluated");
177 };
178 const InterpFrame *Caller = Frame->Caller;
179 // The current frame is the one for __builtin_is_constant_evaluated.
180 // The one above that, potentially the one for std::is_constant_evaluated().
182 S.getEvalStatus().Diag &&
183 (Depth == 0 || (Depth == 1 && isStdCall(Frame->getCallee())))) {
184 if (Caller && isStdCall(Frame->getCallee())) {
185 const Expr *E = Caller->getExpr(Caller->getRetPC());
186 S.report(E->getExprLoc(),
187 diag::warn_is_constant_evaluated_always_true_constexpr)
188 << "std::is_constant_evaluated" << E->getSourceRange();
189 } else {
190 S.report(Call->getExprLoc(),
191 diag::warn_is_constant_evaluated_always_true_constexpr)
192 << "__builtin_is_constant_evaluated" << Call->getSourceRange();
193 }
194 }
195
197 return true;
198}
199
200// __builtin_assume(int)
202 const InterpFrame *Frame,
203 const CallExpr *Call) {
204 assert(Call->getNumArgs() == 1);
205 discard(S.Stk, *S.getContext().classify(Call->getArg(0)));
206 return true;
207}
208
210 const InterpFrame *Frame,
211 const CallExpr *Call, unsigned ID) {
212 uint64_t Limit = ~static_cast<uint64_t>(0);
213 if (ID == Builtin::BIstrncmp || ID == Builtin::BI__builtin_strncmp ||
214 ID == Builtin::BIwcsncmp || ID == Builtin::BI__builtin_wcsncmp)
215 Limit = popToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)))
216 .getZExtValue();
217
218 const Pointer &B = S.Stk.pop<Pointer>();
219 const Pointer &A = S.Stk.pop<Pointer>();
220 if (ID == Builtin::BIstrcmp || ID == Builtin::BIstrncmp ||
221 ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp)
222 diagnoseNonConstexprBuiltin(S, OpPC, ID);
223
224 if (Limit == 0) {
225 pushInteger(S, 0, Call->getType());
226 return true;
227 }
228
229 if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
230 return false;
231
232 if (A.isDummy() || B.isDummy())
233 return false;
234 if (!A.isBlockPointer() || !B.isBlockPointer())
235 return false;
236
237 bool IsWide = ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp ||
238 ID == Builtin::BI__builtin_wcscmp ||
239 ID == Builtin::BI__builtin_wcsncmp;
240 assert(A.getFieldDesc()->isPrimitiveArray());
241 assert(B.getFieldDesc()->isPrimitiveArray());
242
243 // Different element types shouldn't happen, but with casts they can.
245 return false;
246
247 PrimType ElemT = *S.getContext().classify(getElemType(A));
248
249 auto returnResult = [&](int V) -> bool {
250 pushInteger(S, V, Call->getType());
251 return true;
252 };
253
254 unsigned IndexA = A.getIndex();
255 unsigned IndexB = B.getIndex();
256 uint64_t Steps = 0;
257 for (;; ++IndexA, ++IndexB, ++Steps) {
258
259 if (Steps >= Limit)
260 break;
261 const Pointer &PA = A.atIndex(IndexA);
262 const Pointer &PB = B.atIndex(IndexB);
263 if (!CheckRange(S, OpPC, PA, AK_Read) ||
264 !CheckRange(S, OpPC, PB, AK_Read)) {
265 return false;
266 }
267
268 if (IsWide) {
269 INT_TYPE_SWITCH(ElemT, {
270 T CA = PA.deref<T>();
271 T CB = PB.deref<T>();
272 if (CA > CB)
273 return returnResult(1);
274 if (CA < CB)
275 return returnResult(-1);
276 if (CA.isZero() || CB.isZero())
277 return returnResult(0);
278 });
279 continue;
280 }
281
282 uint8_t CA = PA.deref<uint8_t>();
283 uint8_t CB = PB.deref<uint8_t>();
284
285 if (CA > CB)
286 return returnResult(1);
287 if (CA < CB)
288 return returnResult(-1);
289 if (CA == 0 || CB == 0)
290 return returnResult(0);
291 }
292
293 return returnResult(0);
294}
295
297 const InterpFrame *Frame,
298 const CallExpr *Call, unsigned ID) {
299 const Pointer &StrPtr = S.Stk.pop<Pointer>().expand();
300
301 if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
302 diagnoseNonConstexprBuiltin(S, OpPC, ID);
303
304 if (!CheckArray(S, OpPC, StrPtr))
305 return false;
306
307 if (!CheckLive(S, OpPC, StrPtr, AK_Read))
308 return false;
309
310 if (!CheckDummy(S, OpPC, StrPtr.block(), AK_Read))
311 return false;
312
313 if (!StrPtr.getFieldDesc()->isPrimitiveArray())
314 return false;
315
316 assert(StrPtr.getFieldDesc()->isPrimitiveArray());
317 unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();
318
319 if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
320 [[maybe_unused]] const ASTContext &AC = S.getASTContext();
321 assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
322 }
323
324 size_t Len = 0;
325 for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
326 const Pointer &ElemPtr = StrPtr.atIndex(I);
327
328 if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
329 return false;
330
331 uint32_t Val;
332 switch (ElemSize) {
333 case 1:
334 Val = ElemPtr.deref<uint8_t>();
335 break;
336 case 2:
337 Val = ElemPtr.deref<uint16_t>();
338 break;
339 case 4:
340 Val = ElemPtr.deref<uint32_t>();
341 break;
342 default:
343 llvm_unreachable("Unsupported char size");
344 }
345 if (Val == 0)
346 break;
347 }
348
349 pushInteger(S, Len, Call->getType());
350
351 return true;
352}
353
355 const InterpFrame *Frame, const CallExpr *Call,
356 bool Signaling) {
357 const Pointer &Arg = S.Stk.pop<Pointer>();
358
359 if (!CheckLoad(S, OpPC, Arg))
360 return false;
361
362 assert(Arg.getFieldDesc()->isPrimitiveArray());
363
364 // Convert the given string to an integer using StringRef's API.
365 llvm::APInt Fill;
366 std::string Str;
367 assert(Arg.getNumElems() >= 1);
368 for (unsigned I = 0;; ++I) {
369 const Pointer &Elem = Arg.atIndex(I);
370
371 if (!CheckLoad(S, OpPC, Elem))
372 return false;
373
374 if (Elem.deref<int8_t>() == 0)
375 break;
376
377 Str += Elem.deref<char>();
378 }
379
380 // Treat empty strings as if they were zero.
381 if (Str.empty())
382 Fill = llvm::APInt(32, 0);
383 else if (StringRef(Str).getAsInteger(0, Fill))
384 return false;
385
386 const llvm::fltSemantics &TargetSemantics =
388 Call->getDirectCallee()->getReturnType());
389
390 Floating Result = S.allocFloat(TargetSemantics);
392 if (Signaling)
393 Result.copy(
394 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
395 else
396 Result.copy(
397 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
398 } else {
399 // Prior to IEEE 754-2008, architectures were allowed to choose whether
400 // the first bit of their significand was set for qNaN or sNaN. MIPS chose
401 // a different encoding to what became a standard in 2008, and for pre-
402 // 2008 revisions, MIPS interpreted sNaN-2008 as qNan and qNaN-2008 as
403 // sNaN. This is now known as "legacy NaN" encoding.
404 if (Signaling)
405 Result.copy(
406 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
407 else
408 Result.copy(
409 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
410 }
411
413 return true;
414}
415
417 const InterpFrame *Frame,
418 const CallExpr *Call) {
419 const llvm::fltSemantics &TargetSemantics =
421 Call->getDirectCallee()->getReturnType());
422
423 Floating Result = S.allocFloat(TargetSemantics);
424 Result.copy(APFloat::getInf(TargetSemantics));
426 return true;
427}
428
430 const InterpFrame *Frame) {
431 const Floating &Arg2 = S.Stk.pop<Floating>();
432 const Floating &Arg1 = S.Stk.pop<Floating>();
433 Floating Result = S.allocFloat(Arg1.getSemantics());
434
435 APFloat Copy = Arg1.getAPFloat();
436 Copy.copySign(Arg2.getAPFloat());
437 Result.copy(Copy);
439
440 return true;
441}
442
444 const InterpFrame *Frame, bool IsNumBuiltin) {
445 const Floating &RHS = S.Stk.pop<Floating>();
446 const Floating &LHS = S.Stk.pop<Floating>();
447 Floating Result = S.allocFloat(LHS.getSemantics());
448
449 if (IsNumBuiltin)
450 Result.copy(llvm::minimumnum(LHS.getAPFloat(), RHS.getAPFloat()));
451 else
452 Result.copy(minnum(LHS.getAPFloat(), RHS.getAPFloat()));
454 return true;
455}
456
458 const InterpFrame *Frame, bool IsNumBuiltin) {
459 const Floating &RHS = S.Stk.pop<Floating>();
460 const Floating &LHS = S.Stk.pop<Floating>();
461 Floating Result = S.allocFloat(LHS.getSemantics());
462
463 if (IsNumBuiltin)
464 Result.copy(llvm::maximumnum(LHS.getAPFloat(), RHS.getAPFloat()));
465 else
466 Result.copy(maxnum(LHS.getAPFloat(), RHS.getAPFloat()));
468 return true;
469}
470
471/// Defined as __builtin_isnan(...), to accommodate the fact that it can
472/// take a float, double, long double, etc.
473/// But for us, that's all a Floating anyway.
475 const InterpFrame *Frame,
476 const CallExpr *Call) {
477 const Floating &Arg = S.Stk.pop<Floating>();
478
479 pushInteger(S, Arg.isNan(), Call->getType());
480 return true;
481}
482
484 const InterpFrame *Frame,
485 const CallExpr *Call) {
486 const Floating &Arg = S.Stk.pop<Floating>();
487
488 pushInteger(S, Arg.isSignaling(), Call->getType());
489 return true;
490}
491
493 const InterpFrame *Frame, bool CheckSign,
494 const CallExpr *Call) {
495 const Floating &Arg = S.Stk.pop<Floating>();
496 APFloat F = Arg.getAPFloat();
497 bool IsInf = F.isInfinity();
498
499 if (CheckSign)
500 pushInteger(S, IsInf ? (F.isNegative() ? -1 : 1) : 0, Call->getType());
501 else
502 pushInteger(S, IsInf, Call->getType());
503 return true;
504}
505
507 const InterpFrame *Frame,
508 const CallExpr *Call) {
509 const Floating &Arg = S.Stk.pop<Floating>();
510
511 pushInteger(S, Arg.isFinite(), Call->getType());
512 return true;
513}
514
516 const InterpFrame *Frame,
517 const CallExpr *Call) {
518 const Floating &Arg = S.Stk.pop<Floating>();
519
520 pushInteger(S, Arg.isNormal(), Call->getType());
521 return true;
522}
523
525 const InterpFrame *Frame,
526 const CallExpr *Call) {
527 const Floating &Arg = S.Stk.pop<Floating>();
528
529 pushInteger(S, Arg.isDenormal(), Call->getType());
530 return true;
531}
532
534 const InterpFrame *Frame,
535 const CallExpr *Call) {
536 const Floating &Arg = S.Stk.pop<Floating>();
537
538 pushInteger(S, Arg.isZero(), Call->getType());
539 return true;
540}
541
543 const InterpFrame *Frame,
544 const CallExpr *Call) {
545 const Floating &Arg = S.Stk.pop<Floating>();
546
547 pushInteger(S, Arg.isNegative(), Call->getType());
548 return true;
549}
550
552 const CallExpr *Call, unsigned ID) {
553 const Floating &RHS = S.Stk.pop<Floating>();
554 const Floating &LHS = S.Stk.pop<Floating>();
555
557 S,
558 [&] {
559 switch (ID) {
560 case Builtin::BI__builtin_isgreater:
561 return LHS > RHS;
562 case Builtin::BI__builtin_isgreaterequal:
563 return LHS >= RHS;
564 case Builtin::BI__builtin_isless:
565 return LHS < RHS;
566 case Builtin::BI__builtin_islessequal:
567 return LHS <= RHS;
568 case Builtin::BI__builtin_islessgreater: {
569 ComparisonCategoryResult Cmp = LHS.compare(RHS);
570 return Cmp == ComparisonCategoryResult::Less ||
572 }
573 case Builtin::BI__builtin_isunordered:
575 default:
576 llvm_unreachable("Unexpected builtin ID: Should be a floating point "
577 "comparison function");
578 }
579 }(),
580 Call->getType());
581 return true;
582}
583
584/// First parameter to __builtin_isfpclass is the floating value, the
585/// second one is an integral value.
587 const InterpFrame *Frame,
588 const CallExpr *Call) {
589 APSInt FPClassArg = popToAPSInt(S, Call->getArg(1));
590 const Floating &F = S.Stk.pop<Floating>();
591
592 int32_t Result = static_cast<int32_t>(
593 (F.classify() & std::move(FPClassArg)).getZExtValue());
594 pushInteger(S, Result, Call->getType());
595
596 return true;
597}
598
599/// Five int values followed by one floating value.
600/// __builtin_fpclassify(int, int, int, int, int, float)
602 const InterpFrame *Frame,
603 const CallExpr *Call) {
604 const Floating &Val = S.Stk.pop<Floating>();
605
606 PrimType IntT = *S.getContext().classify(Call->getArg(0));
607 APSInt Values[5];
608 for (unsigned I = 0; I != 5; ++I)
609 Values[4 - I] = popToAPSInt(S.Stk, IntT);
610
611 unsigned Index;
612 switch (Val.getCategory()) {
613 case APFloat::fcNaN:
614 Index = 0;
615 break;
616 case APFloat::fcInfinity:
617 Index = 1;
618 break;
619 case APFloat::fcNormal:
620 Index = Val.isDenormal() ? 3 : 2;
621 break;
622 case APFloat::fcZero:
623 Index = 4;
624 break;
625 }
626
627 // The last argument is first on the stack.
628 assert(Index <= 4);
629
630 pushInteger(S, Values[Index], Call->getType());
631 return true;
632}
633
634static inline Floating abs(InterpState &S, const Floating &In) {
635 if (!In.isNegative())
636 return In;
637
638 Floating Output = S.allocFloat(In.getSemantics());
639 APFloat New = In.getAPFloat();
640 New.changeSign();
641 Output.copy(New);
642 return Output;
643}
644
645// The C standard says "fabs raises no floating-point exceptions,
646// even if x is a signaling NaN. The returned value is independent of
647// the current rounding direction mode." Therefore constant folding can
648// proceed without regard to the floating point settings.
649// Reference, WG14 N2478 F.10.4.3
651 const InterpFrame *Frame) {
652 const Floating &Val = S.Stk.pop<Floating>();
653 S.Stk.push<Floating>(abs(S, Val));
654 return true;
655}
656
658 const InterpFrame *Frame,
659 const CallExpr *Call) {
660 APSInt Val = popToAPSInt(S, Call->getArg(0));
661 if (Val ==
662 APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
663 return false;
664 if (Val.isNegative())
665 Val.negate();
666 pushInteger(S, Val, Call->getType());
667 return true;
668}
669
671 const InterpFrame *Frame,
672 const CallExpr *Call) {
673 APSInt Val;
674 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
675 const Pointer &Arg = S.Stk.pop<Pointer>();
676 Val = convertBoolVectorToInt(Arg);
677 } else {
678 Val = popToAPSInt(S, Call->getArg(0));
679 }
680 pushInteger(S, Val.popcount(), Call->getType());
681 return true;
682}
683
685 const InterpFrame *Frame,
686 const CallExpr *Call) {
687 // This is an unevaluated call, so there are no arguments on the stack.
688 assert(Call->getNumArgs() == 1);
689 const Expr *Arg = Call->getArg(0);
690
691 GCCTypeClass ResultClass =
693 int32_t ReturnVal = static_cast<int32_t>(ResultClass);
694 pushInteger(S, ReturnVal, Call->getType());
695 return true;
696}
697
698// __builtin_expect(long, long)
699// __builtin_expect_with_probability(long, long, double)
701 const InterpFrame *Frame,
702 const CallExpr *Call) {
703 // The return value is simply the value of the first parameter.
704 // We ignore the probability.
705 unsigned NumArgs = Call->getNumArgs();
706 assert(NumArgs == 2 || NumArgs == 3);
707
708 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
709 if (NumArgs == 3)
710 S.Stk.discard<Floating>();
711 discard(S.Stk, ArgT);
712
713 APSInt Val = popToAPSInt(S.Stk, ArgT);
714 pushInteger(S, Val, Call->getType());
715 return true;
716}
717
719 const InterpFrame *Frame,
720 const CallExpr *Call) {
721#ifndef NDEBUG
722 assert(Call->getArg(0)->isLValue());
723 PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
724 assert(PtrT == PT_Ptr &&
725 "Unsupported pointer type passed to __builtin_addressof()");
726#endif
727 return true;
728}
729
731 const InterpFrame *Frame,
732 const CallExpr *Call) {
733 return Call->getDirectCallee()->isConstexpr();
734}
735
737 const InterpFrame *Frame,
738 const CallExpr *Call) {
739 APSInt Arg = popToAPSInt(S, Call->getArg(0));
740
742 Arg.getZExtValue());
743 pushInteger(S, Result, Call->getType());
744 return true;
745}
746
747// Two integral values followed by a pointer (lhs, rhs, resultOut)
749 const CallExpr *Call,
750 unsigned BuiltinOp) {
751 const Pointer &ResultPtr = S.Stk.pop<Pointer>();
752 if (ResultPtr.isDummy() || !ResultPtr.isBlockPointer())
753 return false;
754
755 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
756 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
757 APSInt RHS = popToAPSInt(S.Stk, RHST);
758 APSInt LHS = popToAPSInt(S.Stk, LHST);
759 QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
760 PrimType ResultT = *S.getContext().classify(ResultType);
761 bool Overflow;
762
764 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
765 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
766 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
767 bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
769 bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
771 uint64_t LHSSize = LHS.getBitWidth();
772 uint64_t RHSSize = RHS.getBitWidth();
773 uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
774 uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);
775
776 // Add an additional bit if the signedness isn't uniformly agreed to. We
777 // could do this ONLY if there is a signed and an unsigned that both have
778 // MaxBits, but the code to check that is pretty nasty. The issue will be
779 // caught in the shrink-to-result later anyway.
780 if (IsSigned && !AllSigned)
781 ++MaxBits;
782
783 LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
784 RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
785 Result = APSInt(MaxBits, !IsSigned);
786 }
787
788 // Find largest int.
789 switch (BuiltinOp) {
790 default:
791 llvm_unreachable("Invalid value for BuiltinOp");
792 case Builtin::BI__builtin_add_overflow:
793 case Builtin::BI__builtin_sadd_overflow:
794 case Builtin::BI__builtin_saddl_overflow:
795 case Builtin::BI__builtin_saddll_overflow:
796 case Builtin::BI__builtin_uadd_overflow:
797 case Builtin::BI__builtin_uaddl_overflow:
798 case Builtin::BI__builtin_uaddll_overflow:
799 Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
800 : LHS.uadd_ov(RHS, Overflow);
801 break;
802 case Builtin::BI__builtin_sub_overflow:
803 case Builtin::BI__builtin_ssub_overflow:
804 case Builtin::BI__builtin_ssubl_overflow:
805 case Builtin::BI__builtin_ssubll_overflow:
806 case Builtin::BI__builtin_usub_overflow:
807 case Builtin::BI__builtin_usubl_overflow:
808 case Builtin::BI__builtin_usubll_overflow:
809 Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
810 : LHS.usub_ov(RHS, Overflow);
811 break;
812 case Builtin::BI__builtin_mul_overflow:
813 case Builtin::BI__builtin_smul_overflow:
814 case Builtin::BI__builtin_smull_overflow:
815 case Builtin::BI__builtin_smulll_overflow:
816 case Builtin::BI__builtin_umul_overflow:
817 case Builtin::BI__builtin_umull_overflow:
818 case Builtin::BI__builtin_umulll_overflow:
819 Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
820 : LHS.umul_ov(RHS, Overflow);
821 break;
822 }
823
824 // In the case where multiple sizes are allowed, truncate and see if
825 // the values are the same.
826 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
827 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
828 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
829 // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
830 // since it will give us the behavior of a TruncOrSelf in the case where
831 // its parameter <= its size. We previously set Result to be at least the
832 // type-size of the result, so getTypeSize(ResultType) <= Resu
833 APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
834 Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());
835
836 if (!APSInt::isSameValue(Temp, Result))
837 Overflow = true;
838 Result = std::move(Temp);
839 }
840
841 // Write Result to ResultPtr and put Overflow on the stack.
842 assignInteger(S, ResultPtr, ResultT, Result);
843 if (ResultPtr.canBeInitialized())
844 ResultPtr.initialize();
845
846 assert(Call->getDirectCallee()->getReturnType()->isBooleanType());
847 S.Stk.push<Boolean>(Overflow);
848 return true;
849}
850
851/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
853 const InterpFrame *Frame,
854 const CallExpr *Call, unsigned BuiltinOp) {
855 const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
856 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
857 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
858 APSInt CarryIn = popToAPSInt(S.Stk, LHST);
859 APSInt RHS = popToAPSInt(S.Stk, RHST);
860 APSInt LHS = popToAPSInt(S.Stk, LHST);
861
862 if (CarryOutPtr.isDummy() || !CarryOutPtr.isBlockPointer())
863 return false;
864
865 APSInt CarryOut;
866
868 // Copy the number of bits and sign.
869 Result = LHS;
870 CarryOut = LHS;
871
872 bool FirstOverflowed = false;
873 bool SecondOverflowed = false;
874 switch (BuiltinOp) {
875 default:
876 llvm_unreachable("Invalid value for BuiltinOp");
877 case Builtin::BI__builtin_addcb:
878 case Builtin::BI__builtin_addcs:
879 case Builtin::BI__builtin_addc:
880 case Builtin::BI__builtin_addcl:
881 case Builtin::BI__builtin_addcll:
882 Result =
883 LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
884 break;
885 case Builtin::BI__builtin_subcb:
886 case Builtin::BI__builtin_subcs:
887 case Builtin::BI__builtin_subc:
888 case Builtin::BI__builtin_subcl:
889 case Builtin::BI__builtin_subcll:
890 Result =
891 LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
892 break;
893 }
894 // It is possible for both overflows to happen but CGBuiltin uses an OR so
895 // this is consistent.
896 CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);
897
898 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
899 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
900 assignInteger(S, CarryOutPtr, CarryOutT, CarryOut);
901 CarryOutPtr.initialize();
902
903 assert(Call->getType() == Call->getArg(0)->getType());
904 pushInteger(S, Result, Call->getType());
905 return true;
906}
907
909 const InterpFrame *Frame, const CallExpr *Call,
910 unsigned BuiltinOp) {
911
912 std::optional<APSInt> Fallback;
913 if (BuiltinOp == Builtin::BI__builtin_clzg && Call->getNumArgs() == 2)
914 Fallback = popToAPSInt(S, Call->getArg(1));
915
916 APSInt Val;
917 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
918 const Pointer &Arg = S.Stk.pop<Pointer>();
919 Val = convertBoolVectorToInt(Arg);
920 } else {
921 Val = popToAPSInt(S, Call->getArg(0));
922 }
923
924 // When the argument is 0, the result of GCC builtins is undefined, whereas
925 // for Microsoft intrinsics, the result is the bit-width of the argument.
926 bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
927 BuiltinOp != Builtin::BI__lzcnt &&
928 BuiltinOp != Builtin::BI__lzcnt64;
929
930 if (Val == 0) {
931 if (Fallback) {
932 pushInteger(S, *Fallback, Call->getType());
933 return true;
934 }
935
936 if (ZeroIsUndefined)
937 return false;
938 }
939
940 pushInteger(S, Val.countl_zero(), Call->getType());
941 return true;
942}
943
945 const InterpFrame *Frame, const CallExpr *Call,
946 unsigned BuiltinID) {
947 std::optional<APSInt> Fallback;
948 if (BuiltinID == Builtin::BI__builtin_ctzg && Call->getNumArgs() == 2)
949 Fallback = popToAPSInt(S, Call->getArg(1));
950
951 APSInt Val;
952 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
953 const Pointer &Arg = S.Stk.pop<Pointer>();
954 Val = convertBoolVectorToInt(Arg);
955 } else {
956 Val = popToAPSInt(S, Call->getArg(0));
957 }
958
959 if (Val == 0) {
960 if (Fallback) {
961 pushInteger(S, *Fallback, Call->getType());
962 return true;
963 }
964 return false;
965 }
966
967 pushInteger(S, Val.countr_zero(), Call->getType());
968 return true;
969}
970
972 const InterpFrame *Frame,
973 const CallExpr *Call) {
974 const APSInt &Val = popToAPSInt(S, Call->getArg(0));
975 if (Val.getBitWidth() == 8)
976 pushInteger(S, Val, Call->getType());
977 else
978 pushInteger(S, Val.byteSwap(), Call->getType());
979 return true;
980}
981
982/// bool __atomic_always_lock_free(size_t, void const volatile*)
983/// bool __atomic_is_lock_free(size_t, void const volatile*)
985 const InterpFrame *Frame,
986 const CallExpr *Call,
987 unsigned BuiltinOp) {
988 auto returnBool = [&S](bool Value) -> bool {
989 S.Stk.push<Boolean>(Value);
990 return true;
991 };
992
993 const Pointer &Ptr = S.Stk.pop<Pointer>();
994 const APSInt &SizeVal = popToAPSInt(S, Call->getArg(0));
995
996 // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
997 // of two less than or equal to the maximum inline atomic width, we know it
998 // is lock-free. If the size isn't a power of two, or greater than the
999 // maximum alignment where we promote atomics, we know it is not lock-free
1000 // (at least not in the sense of atomic_is_lock_free). Otherwise,
1001 // the answer can only be determined at runtime; for example, 16-byte
1002 // atomics have lock-free implementations on some, but not all,
1003 // x86-64 processors.
1004
1005 // Check power-of-two.
1006 CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
1007 if (Size.isPowerOfTwo()) {
1008 // Check against inlining width.
1009 unsigned InlineWidthBits =
1011 if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
1012
1013 // OK, we will inline appropriately-aligned operations of this size,
1014 // and _Atomic(T) is appropriately-aligned.
1015 if (Size == CharUnits::One())
1016 return returnBool(true);
1017
1018 // Same for null pointers.
1019 assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
1020 if (Ptr.isZero())
1021 return returnBool(true);
1022
1023 if (Ptr.isIntegralPointer()) {
1024 uint64_t IntVal = Ptr.getIntegerRepresentation();
1025 if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
1026 return returnBool(true);
1027 }
1028
1029 const Expr *PtrArg = Call->getArg(1);
1030 // Otherwise, check if the type's alignment against Size.
1031 if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
1032 // Drop the potential implicit-cast to 'const volatile void*', getting
1033 // the underlying type.
1034 if (ICE->getCastKind() == CK_BitCast)
1035 PtrArg = ICE->getSubExpr();
1036 }
1037
1038 if (const auto *PtrTy = PtrArg->getType()->getAs<PointerType>()) {
1039 QualType PointeeType = PtrTy->getPointeeType();
1040 if (!PointeeType->isIncompleteType() &&
1041 S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
1042 // OK, we will inline operations on this object.
1043 return returnBool(true);
1044 }
1045 }
1046 }
1047 }
1048
1049 if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
1050 return returnBool(false);
1051
1052 return false;
1053}
1054
1055/// bool __c11_atomic_is_lock_free(size_t)
1057 CodePtr OpPC,
1058 const InterpFrame *Frame,
1059 const CallExpr *Call) {
1060 const APSInt &SizeVal = popToAPSInt(S, Call->getArg(0));
1061
1062 CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
1063 if (Size.isPowerOfTwo()) {
1064 // Check against inlining width.
1065 unsigned InlineWidthBits =
1067 if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
1068 S.Stk.push<Boolean>(true);
1069 return true;
1070 }
1071 }
1072
1073 return false; // returnBool(false);
1074}
1075
1076/// __builtin_complex(Float A, float B);
1078 const InterpFrame *Frame,
1079 const CallExpr *Call) {
1080 const Floating &Arg2 = S.Stk.pop<Floating>();
1081 const Floating &Arg1 = S.Stk.pop<Floating>();
1082 Pointer &Result = S.Stk.peek<Pointer>();
1083
1084 Result.elem<Floating>(0) = Arg1;
1085 Result.elem<Floating>(1) = Arg2;
1086 Result.initializeAllElements();
1087
1088 return true;
1089}
1090
1091/// __builtin_is_aligned()
1092/// __builtin_align_up()
1093/// __builtin_align_down()
1094/// The first parameter is either an integer or a pointer.
1095/// The second parameter is the requested alignment as an integer.
1097 const InterpFrame *Frame,
1098 const CallExpr *Call,
1099 unsigned BuiltinOp) {
1100 const APSInt &Alignment = popToAPSInt(S, Call->getArg(1));
1101
1102 if (Alignment < 0 || !Alignment.isPowerOf2()) {
1103 S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
1104 return false;
1105 }
1106 unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
1107 APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
1108 if (APSInt::compareValues(Alignment, MaxValue) > 0) {
1109 S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
1110 << MaxValue << Call->getArg(0)->getType() << Alignment;
1111 return false;
1112 }
1113
1114 // The first parameter is either an integer or a pointer.
1115 PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));
1116
1117 if (isIntegralType(FirstArgT)) {
1118 const APSInt &Src = popToAPSInt(S.Stk, FirstArgT);
1119 APInt AlignMinusOne = Alignment.extOrTrunc(Src.getBitWidth()) - 1;
1120 if (BuiltinOp == Builtin::BI__builtin_align_up) {
1121 APSInt AlignedVal =
1122 APSInt((Src + AlignMinusOne) & ~AlignMinusOne, Src.isUnsigned());
1123 pushInteger(S, AlignedVal, Call->getType());
1124 } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
1125 APSInt AlignedVal = APSInt(Src & ~AlignMinusOne, Src.isUnsigned());
1126 pushInteger(S, AlignedVal, Call->getType());
1127 } else {
1128 assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
1129 S.Stk.push<Boolean>((Src & AlignMinusOne) == 0);
1130 }
1131 return true;
1132 }
1133 assert(FirstArgT == PT_Ptr);
1134 const Pointer &Ptr = S.Stk.pop<Pointer>();
1135 if (!Ptr.isBlockPointer())
1136 return false;
1137
1138 unsigned PtrOffset = Ptr.getIndex();
1139 CharUnits BaseAlignment =
1141 CharUnits PtrAlign =
1142 BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));
1143
1144 if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
1145 if (PtrAlign.getQuantity() >= Alignment) {
1146 S.Stk.push<Boolean>(true);
1147 return true;
1148 }
1149 // If the alignment is not known to be sufficient, some cases could still
1150 // be aligned at run time. However, if the requested alignment is less or
1151 // equal to the base alignment and the offset is not aligned, we know that
1152 // the run-time value can never be aligned.
1153 if (BaseAlignment.getQuantity() >= Alignment &&
1154 PtrAlign.getQuantity() < Alignment) {
1155 S.Stk.push<Boolean>(false);
1156 return true;
1157 }
1158
1159 S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
1160 << Alignment;
1161 return false;
1162 }
1163
1164 assert(BuiltinOp == Builtin::BI__builtin_align_down ||
1165 BuiltinOp == Builtin::BI__builtin_align_up);
1166
1167 // For align_up/align_down, we can return the same value if the alignment
1168 // is known to be greater or equal to the requested value.
1169 if (PtrAlign.getQuantity() >= Alignment) {
1170 S.Stk.push<Pointer>(Ptr);
1171 return true;
1172 }
1173
1174 // The alignment could be greater than the minimum at run-time, so we cannot
1175 // infer much about the resulting pointer value. One case is possible:
1176 // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
1177 // can infer the correct index if the requested alignment is smaller than
1178 // the base alignment so we can perform the computation on the offset.
1179 if (BaseAlignment.getQuantity() >= Alignment) {
1180 assert(Alignment.getBitWidth() <= 64 &&
1181 "Cannot handle > 64-bit address-space");
1182 uint64_t Alignment64 = Alignment.getZExtValue();
1183 CharUnits NewOffset =
1184 CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
1185 ? llvm::alignDown(PtrOffset, Alignment64)
1186 : llvm::alignTo(PtrOffset, Alignment64));
1187
1188 S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
1189 return true;
1190 }
1191
1192 // Otherwise, we cannot constant-evaluate the result.
1193 S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
1194 return false;
1195}
1196
1197/// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
1199 const InterpFrame *Frame,
1200 const CallExpr *Call) {
1201 assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);
1202
1203 std::optional<APSInt> ExtraOffset;
1204 if (Call->getNumArgs() == 3)
1205 ExtraOffset = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));
1206
1207 APSInt Alignment = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
1208 const Pointer &Ptr = S.Stk.pop<Pointer>();
1209
1210 CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());
1211
1212 // If there is a base object, then it must have the correct alignment.
1213 if (Ptr.isBlockPointer()) {
1214 CharUnits BaseAlignment;
1215 if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
1216 BaseAlignment = S.getASTContext().getDeclAlign(VD);
1217 else if (const auto *E = Ptr.getDeclDesc()->asExpr())
1218 BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);
1219
1220 if (BaseAlignment < Align) {
1221 S.CCEDiag(Call->getArg(0),
1222 diag::note_constexpr_baa_insufficient_alignment)
1223 << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
1224 return false;
1225 }
1226 }
1227
1228 APValue AV = Ptr.toAPValue(S.getASTContext());
1229 CharUnits AVOffset = AV.getLValueOffset();
1230 if (ExtraOffset)
1231 AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
1232 if (AVOffset.alignTo(Align) != AVOffset) {
1233 if (Ptr.isBlockPointer())
1234 S.CCEDiag(Call->getArg(0),
1235 diag::note_constexpr_baa_insufficient_alignment)
1236 << 1 << AVOffset.getQuantity() << Align.getQuantity();
1237 else
1238 S.CCEDiag(Call->getArg(0),
1239 diag::note_constexpr_baa_value_insufficient_alignment)
1240 << AVOffset.getQuantity() << Align.getQuantity();
1241 return false;
1242 }
1243
1244 S.Stk.push<Pointer>(Ptr);
1245 return true;
1246}
1247
1248/// (CarryIn, LHS, RHS, Result)
1250 CodePtr OpPC,
1251 const InterpFrame *Frame,
1252 const CallExpr *Call,
1253 unsigned BuiltinOp) {
1254 if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
1255 !Call->getArg(1)->getType()->isIntegerType() ||
1256 !Call->getArg(2)->getType()->isIntegerType())
1257 return false;
1258
1259 const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
1260
1261 APSInt RHS = popToAPSInt(S, Call->getArg(2));
1262 APSInt LHS = popToAPSInt(S, Call->getArg(1));
1263 APSInt CarryIn = popToAPSInt(S, Call->getArg(0));
1264
1265 bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
1266 BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;
1267
1268 unsigned BitWidth = LHS.getBitWidth();
1269 unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
1270 APInt ExResult =
1271 IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
1272 : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));
1273
1274 APInt Result = ExResult.extractBits(BitWidth, 0);
1275 APSInt CarryOut =
1276 APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);
1277
1278 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
1279 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
1280 assignInteger(S, CarryOutPtr, CarryOutT, APSInt(std::move(Result), true));
1281
1282 pushInteger(S, CarryOut, Call->getType());
1283
1284 return true;
1285}
1286
1288 CodePtr OpPC,
1289 const InterpFrame *Frame,
1290 const CallExpr *Call) {
1293 pushInteger(S, Layout.size().getQuantity(), Call->getType());
1294 return true;
1295}
1296
1297static bool
1299 const InterpFrame *Frame,
1300 const CallExpr *Call) {
1301 const auto &Ptr = S.Stk.pop<Pointer>();
1302 assert(Ptr.getFieldDesc()->isPrimitiveArray());
1303
1304 // This should be created for a StringLiteral, so should alway shold at least
1305 // one array element.
1306 assert(Ptr.getFieldDesc()->getNumElems() >= 1);
1307 StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
1308 uint64_t Result = getPointerAuthStableSipHash(R);
1309 pushInteger(S, Result, Call->getType());
1310 return true;
1311}
1312
1314 const InterpFrame *Frame,
1315 const CallExpr *Call) {
1316 const ASTContext &ASTCtx = S.getASTContext();
1317 uint64_t BitWidth = ASTCtx.getTypeSize(ASTCtx.getSizeType());
1318 auto Mode =
1319 ASTCtx.getLangOpts().AllocTokenMode.value_or(llvm::DefaultAllocTokenMode);
1320 auto MaxTokensOpt = ASTCtx.getLangOpts().AllocTokenMax;
1321 uint64_t MaxTokens =
1322 MaxTokensOpt.value_or(0) ? *MaxTokensOpt : (~0ULL >> (64 - BitWidth));
1323
1324 // We do not read any of the arguments; discard them.
1325 for (int I = Call->getNumArgs() - 1; I >= 0; --I)
1326 discard(S.Stk, *S.getContext().classify(Call->getArg(I)));
1327
1328 // Note: Type inference from a surrounding cast is not supported in
1329 // constexpr evaluation.
1330 QualType AllocType = infer_alloc::inferPossibleType(Call, ASTCtx, nullptr);
1331 if (AllocType.isNull()) {
1332 S.CCEDiag(Call,
1333 diag::note_constexpr_infer_alloc_token_type_inference_failed);
1334 return false;
1335 }
1336
1337 auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, ASTCtx);
1338 if (!ATMD) {
1339 S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_no_metadata);
1340 return false;
1341 }
1342
1343 auto MaybeToken = llvm::getAllocToken(Mode, *ATMD, MaxTokens);
1344 if (!MaybeToken) {
1345 S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_stateful_mode);
1346 return false;
1347 }
1348
1349 pushInteger(S, llvm::APInt(BitWidth, *MaybeToken), ASTCtx.getSizeType());
1350 return true;
1351}
1352
1354 const InterpFrame *Frame,
1355 const CallExpr *Call) {
1356 // A call to __operator_new is only valid within std::allocate<>::allocate.
1357 // Walk up the call stack to find the appropriate caller and get the
1358 // element type from it.
1359 auto [NewCall, ElemType] = S.getStdAllocatorCaller("allocate");
1360
1361 if (ElemType.isNull()) {
1362 S.FFDiag(Call, S.getLangOpts().CPlusPlus20
1363 ? diag::note_constexpr_new_untyped
1364 : diag::note_constexpr_new);
1365 return false;
1366 }
1367 assert(NewCall);
1368
1369 if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
1370 S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
1371 << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
1372 return false;
1373 }
1374
1375 // We only care about the first parameter (the size), so discard all the
1376 // others.
1377 {
1378 unsigned NumArgs = Call->getNumArgs();
1379 assert(NumArgs >= 1);
1380
1381 // The std::nothrow_t arg never gets put on the stack.
1382 if (Call->getArg(NumArgs - 1)->getType()->isNothrowT())
1383 --NumArgs;
1384 auto Args = ArrayRef(Call->getArgs(), Call->getNumArgs());
1385 // First arg is needed.
1386 Args = Args.drop_front();
1387
1388 // Discard the rest.
1389 for (const Expr *Arg : Args)
1390 discard(S.Stk, *S.getContext().classify(Arg));
1391 }
1392
1393 APSInt Bytes = popToAPSInt(S, Call->getArg(0));
1394 CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
1395 assert(!ElemSize.isZero());
1396 // Divide the number of bytes by sizeof(ElemType), so we get the number of
1397 // elements we should allocate.
1398 APInt NumElems, Remainder;
1399 APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
1400 APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
1401 if (Remainder != 0) {
1402 // This likely indicates a bug in the implementation of 'std::allocator'.
1403 S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
1404 << Bytes << APSInt(ElemSizeAP, true) << ElemType;
1405 return false;
1406 }
1407
1408 // NB: The same check we're using in CheckArraySize()
1409 if (NumElems.getActiveBits() >
1411 NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
1412 // FIXME: NoThrow check?
1413 const SourceInfo &Loc = S.Current->getSource(OpPC);
1414 S.FFDiag(Loc, diag::note_constexpr_new_too_large)
1415 << NumElems.getZExtValue();
1416 return false;
1417 }
1418
1419 if (!CheckArraySize(S, OpPC, NumElems.getZExtValue()))
1420 return false;
1421
1422 bool IsArray = NumElems.ugt(1);
1423 OptPrimType ElemT = S.getContext().classify(ElemType);
1424 DynamicAllocator &Allocator = S.getAllocator();
1425 if (ElemT) {
1426 Block *B =
1427 Allocator.allocate(NewCall, *ElemT, NumElems.getZExtValue(),
1429 assert(B);
1430 S.Stk.push<Pointer>(Pointer(B).atIndex(0));
1431 return true;
1432 }
1433
1434 assert(!ElemT);
1435
1436 // Composite arrays
1437 if (IsArray) {
1438 const Descriptor *Desc =
1439 S.P.createDescriptor(NewCall, ElemType.getTypePtr(), std::nullopt);
1440 Block *B =
1441 Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
1443 assert(B);
1444 S.Stk.push<Pointer>(Pointer(B).atIndex(0).narrow());
1445 return true;
1446 }
1447
1448 // Records. Still allocate them as single-element arrays.
1450 ElemType, NumElems, nullptr, ArraySizeModifier::Normal, 0);
1451
1452 const Descriptor *Desc = S.P.createDescriptor(NewCall, AllocType.getTypePtr(),
1454 Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
1456 assert(B);
1457 S.Stk.push<Pointer>(Pointer(B).atIndex(0).narrow());
1458 return true;
1459}
1460
1462 const InterpFrame *Frame,
1463 const CallExpr *Call) {
1464 const Expr *Source = nullptr;
1465 const Block *BlockToDelete = nullptr;
1466
1468 S.Stk.discard<Pointer>();
1469 return false;
1470 }
1471
1472 // This is permitted only within a call to std::allocator<T>::deallocate.
1473 if (!S.getStdAllocatorCaller("deallocate")) {
1474 S.FFDiag(Call);
1475 S.Stk.discard<Pointer>();
1476 return true;
1477 }
1478
1479 {
1480 const Pointer &Ptr = S.Stk.pop<Pointer>();
1481
1482 if (Ptr.isZero()) {
1483 S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
1484 return true;
1485 }
1486
1487 Source = Ptr.getDeclDesc()->asExpr();
1488 BlockToDelete = Ptr.block();
1489
1490 if (!BlockToDelete->isDynamic()) {
1491 S.FFDiag(Call, diag::note_constexpr_delete_not_heap_alloc)
1493 if (const auto *D = Ptr.getFieldDesc()->asDecl())
1494 S.Note(D->getLocation(), diag::note_declared_at);
1495 }
1496 }
1497 assert(BlockToDelete);
1498
1499 DynamicAllocator &Allocator = S.getAllocator();
1500 const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
1501 std::optional<DynamicAllocator::Form> AllocForm =
1502 Allocator.getAllocationForm(Source);
1503
1504 if (!Allocator.deallocate(Source, BlockToDelete, S)) {
1505 // Nothing has been deallocated, this must be a double-delete.
1506 const SourceInfo &Loc = S.Current->getSource(OpPC);
1507 S.FFDiag(Loc, diag::note_constexpr_double_delete);
1508 return false;
1509 }
1510 assert(AllocForm);
1511
1512 return CheckNewDeleteForms(
1513 S, OpPC, *AllocForm, DynamicAllocator::Form::Operator, BlockDesc, Source);
1514}
1515
1517 const InterpFrame *Frame,
1518 const CallExpr *Call) {
1519 const Floating &Arg0 = S.Stk.pop<Floating>();
1520 S.Stk.push<Floating>(Arg0);
1521 return true;
1522}
1523
1525 const CallExpr *Call, unsigned ID) {
1526 const Pointer &Arg = S.Stk.pop<Pointer>();
1527 assert(Arg.getFieldDesc()->isPrimitiveArray());
1528
1529 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1530 assert(Call->getType() == ElemType);
1531 PrimType ElemT = *S.getContext().classify(ElemType);
1532 unsigned NumElems = Arg.getNumElems();
1533
1535 T Result = Arg.elem<T>(0);
1536 unsigned BitWidth = Result.bitWidth();
1537 for (unsigned I = 1; I != NumElems; ++I) {
1538 T Elem = Arg.elem<T>(I);
1539 T PrevResult = Result;
1540
1541 if (ID == Builtin::BI__builtin_reduce_add) {
1542 if (T::add(Result, Elem, BitWidth, &Result)) {
1543 unsigned OverflowBits = BitWidth + 1;
1544 (void)handleOverflow(S, OpPC,
1545 (PrevResult.toAPSInt(OverflowBits) +
1546 Elem.toAPSInt(OverflowBits)));
1547 return false;
1548 }
1549 } else if (ID == Builtin::BI__builtin_reduce_mul) {
1550 if (T::mul(Result, Elem, BitWidth, &Result)) {
1551 unsigned OverflowBits = BitWidth * 2;
1552 (void)handleOverflow(S, OpPC,
1553 (PrevResult.toAPSInt(OverflowBits) *
1554 Elem.toAPSInt(OverflowBits)));
1555 return false;
1556 }
1557
1558 } else if (ID == Builtin::BI__builtin_reduce_and) {
1559 (void)T::bitAnd(Result, Elem, BitWidth, &Result);
1560 } else if (ID == Builtin::BI__builtin_reduce_or) {
1561 (void)T::bitOr(Result, Elem, BitWidth, &Result);
1562 } else if (ID == Builtin::BI__builtin_reduce_xor) {
1563 (void)T::bitXor(Result, Elem, BitWidth, &Result);
1564 } else if (ID == Builtin::BI__builtin_reduce_min) {
1565 if (Elem < Result)
1566 Result = Elem;
1567 } else if (ID == Builtin::BI__builtin_reduce_max) {
1568 if (Elem > Result)
1569 Result = Elem;
1570 } else {
1571 llvm_unreachable("Unhandled vector reduce builtin");
1572 }
1573 }
1574 pushInteger(S, Result.toAPSInt(), Call->getType());
1575 });
1576
1577 return true;
1578}
1579
1581 const InterpFrame *Frame,
1582 const CallExpr *Call,
1583 unsigned BuiltinID) {
1584 assert(Call->getNumArgs() == 1);
1585 QualType Ty = Call->getArg(0)->getType();
1586 if (Ty->isIntegerType()) {
1587 APSInt Val = popToAPSInt(S, Call->getArg(0));
1588 pushInteger(S, Val.abs(), Call->getType());
1589 return true;
1590 }
1591
1592 if (Ty->isFloatingType()) {
1593 Floating Val = S.Stk.pop<Floating>();
1594 Floating Result = abs(S, Val);
1595 S.Stk.push<Floating>(Result);
1596 return true;
1597 }
1598
1599 // Otherwise, the argument must be a vector.
1600 assert(Call->getArg(0)->getType()->isVectorType());
1601 const Pointer &Arg = S.Stk.pop<Pointer>();
1602 assert(Arg.getFieldDesc()->isPrimitiveArray());
1603 const Pointer &Dst = S.Stk.peek<Pointer>();
1604 assert(Dst.getFieldDesc()->isPrimitiveArray());
1605 assert(Arg.getFieldDesc()->getNumElems() ==
1606 Dst.getFieldDesc()->getNumElems());
1607
1608 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1609 PrimType ElemT = *S.getContext().classify(ElemType);
1610 unsigned NumElems = Arg.getNumElems();
1611 // we can either have a vector of integer or a vector of floating point
1612 for (unsigned I = 0; I != NumElems; ++I) {
1613 if (ElemType->isIntegerType()) {
1615 Dst.elem<T>(I) = T::from(static_cast<T>(
1616 APSInt(Arg.elem<T>(I).toAPSInt().abs(),
1618 });
1619 } else {
1620 Floating Val = Arg.elem<Floating>(I);
1621 Dst.elem<Floating>(I) = abs(S, Val);
1622 }
1623 }
1625
1626 return true;
1627}
1628
1629/// Can be called with an integer or vector as the first and only parameter.
1631 const InterpFrame *Frame,
1632 const CallExpr *Call,
1633 unsigned BuiltinID) {
1634 assert(Call->getNumArgs() == 1);
1635 if (Call->getArg(0)->getType()->isIntegerType()) {
1636 APSInt Val = popToAPSInt(S, Call->getArg(0));
1637
1638 if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) {
1639 pushInteger(S, Val.popcount(), Call->getType());
1640 } else {
1641 pushInteger(S, Val.reverseBits(), Call->getType());
1642 }
1643 return true;
1644 }
1645 // Otherwise, the argument must be a vector.
1646 assert(Call->getArg(0)->getType()->isVectorType());
1647 const Pointer &Arg = S.Stk.pop<Pointer>();
1648 assert(Arg.getFieldDesc()->isPrimitiveArray());
1649 const Pointer &Dst = S.Stk.peek<Pointer>();
1650 assert(Dst.getFieldDesc()->isPrimitiveArray());
1651 assert(Arg.getFieldDesc()->getNumElems() ==
1652 Dst.getFieldDesc()->getNumElems());
1653
1654 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1655 PrimType ElemT = *S.getContext().classify(ElemType);
1656 unsigned NumElems = Arg.getNumElems();
1657
1658 // FIXME: Reading from uninitialized vector elements?
1659 for (unsigned I = 0; I != NumElems; ++I) {
1661 if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) {
1662 Dst.elem<T>(I) = T::from(Arg.elem<T>(I).toAPSInt().popcount());
1663 } else {
1664 Dst.elem<T>(I) =
1665 T::from(Arg.elem<T>(I).toAPSInt().reverseBits().getZExtValue());
1666 }
1667 });
1668 }
1670
1671 return true;
1672}
1673
1674/// Can be called with an integer or vector as the first and only parameter.
1676 CodePtr OpPC,
1677 const InterpFrame *Frame,
1678 const CallExpr *Call,
1679 unsigned BuiltinID) {
1680 bool HasZeroArg = Call->getNumArgs() == 2;
1681 bool IsCTTZ = BuiltinID == Builtin::BI__builtin_elementwise_ctzg;
1682 assert(Call->getNumArgs() == 1 || HasZeroArg);
1683 if (Call->getArg(0)->getType()->isIntegerType()) {
1684 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
1685 APSInt Val = popToAPSInt(S.Stk, ArgT);
1686 std::optional<APSInt> ZeroVal;
1687 if (HasZeroArg) {
1688 ZeroVal = Val;
1689 Val = popToAPSInt(S.Stk, ArgT);
1690 }
1691
1692 if (Val.isZero()) {
1693 if (ZeroVal) {
1694 pushInteger(S, *ZeroVal, Call->getType());
1695 return true;
1696 }
1697 // If we haven't been provided the second argument, the result is
1698 // undefined
1699 S.FFDiag(S.Current->getSource(OpPC),
1700 diag::note_constexpr_countzeroes_zero)
1701 << /*IsTrailing=*/IsCTTZ;
1702 return false;
1703 }
1704
1705 if (BuiltinID == Builtin::BI__builtin_elementwise_clzg) {
1706 pushInteger(S, Val.countLeadingZeros(), Call->getType());
1707 } else {
1708 pushInteger(S, Val.countTrailingZeros(), Call->getType());
1709 }
1710 return true;
1711 }
1712 // Otherwise, the argument must be a vector.
1713 const ASTContext &ASTCtx = S.getASTContext();
1714 Pointer ZeroArg;
1715 if (HasZeroArg) {
1716 assert(Call->getArg(1)->getType()->isVectorType() &&
1717 ASTCtx.hasSameUnqualifiedType(Call->getArg(0)->getType(),
1718 Call->getArg(1)->getType()));
1719 (void)ASTCtx;
1720 ZeroArg = S.Stk.pop<Pointer>();
1721 assert(ZeroArg.getFieldDesc()->isPrimitiveArray());
1722 }
1723 assert(Call->getArg(0)->getType()->isVectorType());
1724 const Pointer &Arg = S.Stk.pop<Pointer>();
1725 assert(Arg.getFieldDesc()->isPrimitiveArray());
1726 const Pointer &Dst = S.Stk.peek<Pointer>();
1727 assert(Dst.getFieldDesc()->isPrimitiveArray());
1728 assert(Arg.getFieldDesc()->getNumElems() ==
1729 Dst.getFieldDesc()->getNumElems());
1730
1731 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1732 PrimType ElemT = *S.getContext().classify(ElemType);
1733 unsigned NumElems = Arg.getNumElems();
1734
1735 // FIXME: Reading from uninitialized vector elements?
1736 for (unsigned I = 0; I != NumElems; ++I) {
1738 APInt EltVal = Arg.atIndex(I).deref<T>().toAPSInt();
1739 if (EltVal.isZero()) {
1740 if (HasZeroArg) {
1741 Dst.atIndex(I).deref<T>() = ZeroArg.atIndex(I).deref<T>();
1742 } else {
1743 // If we haven't been provided the second argument, the result is
1744 // undefined
1745 S.FFDiag(S.Current->getSource(OpPC),
1746 diag::note_constexpr_countzeroes_zero)
1747 << /*IsTrailing=*/IsCTTZ;
1748 return false;
1749 }
1750 } else if (IsCTTZ) {
1751 Dst.atIndex(I).deref<T>() = T::from(EltVal.countTrailingZeros());
1752 } else {
1753 Dst.atIndex(I).deref<T>() = T::from(EltVal.countLeadingZeros());
1754 }
1755 Dst.atIndex(I).initialize();
1756 });
1757 }
1758
1759 return true;
1760}
1761
1763 const InterpFrame *Frame,
1764 const CallExpr *Call, unsigned ID) {
1765 assert(Call->getNumArgs() == 3);
1766 const ASTContext &ASTCtx = S.getASTContext();
1767 APSInt Size = popToAPSInt(S, Call->getArg(2));
1768 Pointer SrcPtr = S.Stk.pop<Pointer>().expand();
1769 Pointer DestPtr = S.Stk.pop<Pointer>().expand();
1770
1771 assert(!Size.isSigned() && "memcpy and friends take an unsigned size");
1772
1773 if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
1774 diagnoseNonConstexprBuiltin(S, OpPC, ID);
1775
1776 bool Move =
1777 (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove ||
1778 ID == Builtin::BI__builtin_wmemmove || ID == Builtin::BIwmemmove);
1779 bool WChar = ID == Builtin::BIwmemcpy || ID == Builtin::BIwmemmove ||
1780 ID == Builtin::BI__builtin_wmemcpy ||
1781 ID == Builtin::BI__builtin_wmemmove;
1782
1783 // If the size is zero, we treat this as always being a valid no-op.
1784 if (Size.isZero()) {
1785 S.Stk.push<Pointer>(DestPtr);
1786 return true;
1787 }
1788
1789 if (SrcPtr.isZero() || DestPtr.isZero()) {
1790 Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
1791 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
1792 << /*IsMove=*/Move << /*IsWchar=*/WChar << !SrcPtr.isZero()
1793 << DiagPtr.toDiagnosticString(ASTCtx);
1794 return false;
1795 }
1796
1797 // Diagnose integral src/dest pointers specially.
1798 if (SrcPtr.isIntegralPointer() || DestPtr.isIntegralPointer()) {
1799 std::string DiagVal = "(void *)";
1800 DiagVal += SrcPtr.isIntegralPointer()
1801 ? std::to_string(SrcPtr.getIntegerRepresentation())
1802 : std::to_string(DestPtr.getIntegerRepresentation());
1803 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
1804 << Move << WChar << DestPtr.isIntegralPointer() << DiagVal;
1805 return false;
1806 }
1807
1808 // Can't read from dummy pointers.
1809 if (DestPtr.isDummy() || SrcPtr.isDummy())
1810 return false;
1811
1812 if (DestPtr.getType()->isIncompleteType()) {
1813 S.FFDiag(S.Current->getSource(OpPC),
1814 diag::note_constexpr_memcpy_incomplete_type)
1815 << Move << DestPtr.getType();
1816 return false;
1817 }
1818 if (SrcPtr.getType()->isIncompleteType()) {
1819 S.FFDiag(S.Current->getSource(OpPC),
1820 diag::note_constexpr_memcpy_incomplete_type)
1821 << Move << SrcPtr.getType();
1822 return false;
1823 }
1824
1825 QualType DestElemType = getElemType(DestPtr);
1826 if (DestElemType->isIncompleteType()) {
1827 S.FFDiag(S.Current->getSource(OpPC),
1828 diag::note_constexpr_memcpy_incomplete_type)
1829 << Move << DestElemType;
1830 return false;
1831 }
1832
1833 size_t RemainingDestElems;
1834 if (DestPtr.getFieldDesc()->isArray()) {
1835 RemainingDestElems = DestPtr.isUnknownSizeArray()
1836 ? 0
1837 : (DestPtr.getNumElems() - DestPtr.getIndex());
1838 } else {
1839 RemainingDestElems = 1;
1840 }
1841 unsigned DestElemSize = ASTCtx.getTypeSizeInChars(DestElemType).getQuantity();
1842
1843 if (WChar) {
1844 uint64_t WCharSize =
1845 ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
1846 Size *= APSInt(APInt(Size.getBitWidth(), WCharSize, /*IsSigned=*/false),
1847 /*IsUnsigend=*/true);
1848 }
1849
1850 if (Size.urem(DestElemSize) != 0) {
1851 S.FFDiag(S.Current->getSource(OpPC),
1852 diag::note_constexpr_memcpy_unsupported)
1853 << Move << WChar << 0 << DestElemType << Size << DestElemSize;
1854 return false;
1855 }
1856
1857 QualType SrcElemType = getElemType(SrcPtr);
1858 size_t RemainingSrcElems;
1859 if (SrcPtr.getFieldDesc()->isArray()) {
1860 RemainingSrcElems = SrcPtr.isUnknownSizeArray()
1861 ? 0
1862 : (SrcPtr.getNumElems() - SrcPtr.getIndex());
1863 } else {
1864 RemainingSrcElems = 1;
1865 }
1866 unsigned SrcElemSize = ASTCtx.getTypeSizeInChars(SrcElemType).getQuantity();
1867
1868 if (!ASTCtx.hasSameUnqualifiedType(DestElemType, SrcElemType)) {
1869 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_type_pun)
1870 << Move << SrcElemType << DestElemType;
1871 return false;
1872 }
1873
1874 if (!DestElemType.isTriviallyCopyableType(ASTCtx)) {
1875 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_nontrivial)
1876 << Move << DestElemType;
1877 return false;
1878 }
1879
1880 // Check if we have enough elements to read from and write to.
1881 size_t RemainingDestBytes = RemainingDestElems * DestElemSize;
1882 size_t RemainingSrcBytes = RemainingSrcElems * SrcElemSize;
1883 if (Size.ugt(RemainingDestBytes) || Size.ugt(RemainingSrcBytes)) {
1884 APInt N = Size.udiv(DestElemSize);
1885 S.FFDiag(S.Current->getSource(OpPC),
1886 diag::note_constexpr_memcpy_unsupported)
1887 << Move << WChar << (Size.ugt(RemainingSrcBytes) ? 1 : 2)
1888 << DestElemType << toString(N, 10, /*Signed=*/false);
1889 return false;
1890 }
1891
1892 // Check for overlapping memory regions.
1893 if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
1894 // Remove base casts.
1895 Pointer SrcP = SrcPtr;
1896 while (SrcP.isBaseClass())
1897 SrcP = SrcP.getBase();
1898
1899 Pointer DestP = DestPtr;
1900 while (DestP.isBaseClass())
1901 DestP = DestP.getBase();
1902
1903 unsigned SrcIndex = SrcP.expand().getIndex() * SrcP.elemSize();
1904 unsigned DstIndex = DestP.expand().getIndex() * DestP.elemSize();
1905 unsigned N = Size.getZExtValue();
1906
1907 if ((SrcIndex <= DstIndex && (SrcIndex + N) > DstIndex) ||
1908 (DstIndex <= SrcIndex && (DstIndex + N) > SrcIndex)) {
1909 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
1910 << /*IsWChar=*/false;
1911 return false;
1912 }
1913 }
1914
1915 assert(Size.getZExtValue() % DestElemSize == 0);
1916 if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size.getZExtValue()).toBits()))
1917 return false;
1918
1919 S.Stk.push<Pointer>(DestPtr);
1920 return true;
1921}
1922
1923/// Determine if T is a character type for which we guarantee that
1924/// sizeof(T) == 1.
1926 return T->isCharType() || T->isChar8Type();
1927}
1928
1930 const InterpFrame *Frame,
1931 const CallExpr *Call, unsigned ID) {
1932 assert(Call->getNumArgs() == 3);
1933 const APSInt &Size = popToAPSInt(S, Call->getArg(2));
1934 const Pointer &PtrB = S.Stk.pop<Pointer>();
1935 const Pointer &PtrA = S.Stk.pop<Pointer>();
1936
1937 if (ID == Builtin::BImemcmp || ID == Builtin::BIbcmp ||
1938 ID == Builtin::BIwmemcmp)
1939 diagnoseNonConstexprBuiltin(S, OpPC, ID);
1940
1941 if (Size.isZero()) {
1942 pushInteger(S, 0, Call->getType());
1943 return true;
1944 }
1945
1946 if (!PtrA.isBlockPointer() || !PtrB.isBlockPointer())
1947 return false;
1948
1949 bool IsWide =
1950 (ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);
1951
1952 const ASTContext &ASTCtx = S.getASTContext();
1953 QualType ElemTypeA = getElemType(PtrA);
1954 QualType ElemTypeB = getElemType(PtrB);
1955 // FIXME: This is an arbitrary limitation the current constant interpreter
1956 // had. We could remove this.
1957 if (!IsWide && (!isOneByteCharacterType(ElemTypeA) ||
1958 !isOneByteCharacterType(ElemTypeB))) {
1959 S.FFDiag(S.Current->getSource(OpPC),
1960 diag::note_constexpr_memcmp_unsupported)
1961 << ASTCtx.BuiltinInfo.getQuotedName(ID) << PtrA.getType()
1962 << PtrB.getType();
1963 return false;
1964 }
1965
1966 if (PtrA.isDummy() || PtrB.isDummy())
1967 return false;
1968
1969 // Now, read both pointers to a buffer and compare those.
1970 BitcastBuffer BufferA(
1971 Bits(ASTCtx.getTypeSize(ElemTypeA) * PtrA.getNumElems()));
1972 readPointerToBuffer(S.getContext(), PtrA, BufferA, false);
1973 // FIXME: The swapping here is UNDOING something we do when reading the
1974 // data into the buffer.
1975 if (ASTCtx.getTargetInfo().isBigEndian())
1976 swapBytes(BufferA.Data.get(), BufferA.byteSize().getQuantity());
1977
1978 BitcastBuffer BufferB(
1979 Bits(ASTCtx.getTypeSize(ElemTypeB) * PtrB.getNumElems()));
1980 readPointerToBuffer(S.getContext(), PtrB, BufferB, false);
1981 // FIXME: The swapping here is UNDOING something we do when reading the
1982 // data into the buffer.
1983 if (ASTCtx.getTargetInfo().isBigEndian())
1984 swapBytes(BufferB.Data.get(), BufferB.byteSize().getQuantity());
1985
1986 size_t MinBufferSize = std::min(BufferA.byteSize().getQuantity(),
1987 BufferB.byteSize().getQuantity());
1988
1989 unsigned ElemSize = 1;
1990 if (IsWide)
1991 ElemSize = ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
1992 // The Size given for the wide variants is in wide-char units. Convert it
1993 // to bytes.
1994 size_t ByteSize = Size.getZExtValue() * ElemSize;
1995 size_t CmpSize = std::min(MinBufferSize, ByteSize);
1996
1997 for (size_t I = 0; I != CmpSize; I += ElemSize) {
1998 if (IsWide) {
2000 T A = *reinterpret_cast<T *>(BufferA.atByte(I));
2001 T B = *reinterpret_cast<T *>(BufferB.atByte(I));
2002 if (A < B) {
2003 pushInteger(S, -1, Call->getType());
2004 return true;
2005 }
2006 if (A > B) {
2007 pushInteger(S, 1, Call->getType());
2008 return true;
2009 }
2010 });
2011 } else {
2012 std::byte A = BufferA.deref<std::byte>(Bytes(I));
2013 std::byte B = BufferB.deref<std::byte>(Bytes(I));
2014
2015 if (A < B) {
2016 pushInteger(S, -1, Call->getType());
2017 return true;
2018 }
2019 if (A > B) {
2020 pushInteger(S, 1, Call->getType());
2021 return true;
2022 }
2023 }
2024 }
2025
2026 // We compared CmpSize bytes above. If the limiting factor was the Size
2027 // passed, we're done and the result is equality (0).
2028 if (ByteSize <= CmpSize) {
2029 pushInteger(S, 0, Call->getType());
2030 return true;
2031 }
2032
2033 // However, if we read all the available bytes but were instructed to read
2034 // even more, diagnose this as a "read of dereferenced one-past-the-end
2035 // pointer". This is what would happen if we called CheckLoad() on every array
2036 // element.
2037 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_past_end)
2038 << AK_Read << S.Current->getRange(OpPC);
2039 return false;
2040}
2041
2042 // __builtin_memchr(ptr, int, int)
2043 // __builtin_strchr(ptr, int)
// Evaluates the memchr/strchr/wmemchr/wcschr builtin family: scans the
// pointed-to array for a desired value and pushes a Pointer to the first
// match, or a null Pointer when nothing is found.
// NOTE(review): the opening signature line (original line 2044) is missing
// from this extraction; presumably
// `static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC,` —
// confirm against the original source file.
2045                                    const CallExpr *Call, unsigned ID) {
  // The plain library forms (without the __builtin_ prefix) are not usable
  // in constant expressions; diagnose, then keep evaluating anyway for
  // compatibility.
2046   if (ID == Builtin::BImemchr || ID == Builtin::BIwcschr ||
2047       ID == Builtin::BIstrchr || ID == Builtin::BIwmemchr)
2048     diagnoseNonConstexprBuiltin(S, OpPC, ID);
2049
  // Only the three-argument variants (memchr/wmemchr) carry a length limit.
2050   std::optional<APSInt> MaxLength;
2051   if (Call->getNumArgs() == 3)
2052     MaxLength = popToAPSInt(S, Call->getArg(2));
2053
2054   APSInt Desired = popToAPSInt(S, Call->getArg(1));
2055   const Pointer &Ptr = S.Stk.pop<Pointer>();
2056
  // Searching zero elements always yields "not found" (a null pointer),
  // even for otherwise-invalid pointers.
2057   if (MaxLength && MaxLength->isZero()) {
2058     S.Stk.push<Pointer>();
2059     return true;
2060   }
2061
2062   if (Ptr.isDummy()) {
2063     if (Ptr.getType()->isIncompleteType())
2064       S.FFDiag(S.Current->getSource(OpPC),
2065                diag::note_constexpr_ltor_incomplete_type)
2066           << Ptr.getType();
2067     return false;
2068   }
2069
2070   // Null is only okay if the given size is 0.
2071   if (Ptr.isZero()) {
2072     S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_null)
2073         << AK_Read;
2074     return false;
2075   }
2076
2077   QualType ElemTy = Ptr.getFieldDesc()->isArray()
2078                         ? Ptr.getFieldDesc()->getElemQualType()
2079                         : Ptr.getFieldDesc()->getType();
2080   bool IsRawByte = ID == Builtin::BImemchr || ID == Builtin::BI__builtin_memchr;
2081
2082   // Give up on byte-oriented matching against multibyte elements.
2083   if (IsRawByte && !isOneByteCharacterType(ElemTy)) {
2084     S.FFDiag(S.Current->getSource(OpPC),
2085              diag::note_constexpr_memchr_unsupported)
2086         << S.getASTContext().BuiltinInfo.getQuotedName(ID) << ElemTy;
2087     return false;
2088   }
2089
2090   if (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr) {
2091     int64_t DesiredTrunc;
2092     if (S.getASTContext().CharTy->isSignedIntegerType())
2093       DesiredTrunc =
2094           Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue();
2095     else
2096       DesiredTrunc =
2097           Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
2098     // strchr compares directly to the passed integer, and therefore
2099     // always fails if given an int that is not a char.
2100     if (Desired != DesiredTrunc) {
2101       S.Stk.push<Pointer>();
2102       return true;
2103     }
2104   }
2105
2106   uint64_t DesiredVal;
2107   if (ID == Builtin::BIwmemchr || ID == Builtin::BI__builtin_wmemchr ||
2108       ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr) {
2109     // wcschr and wmemchr are given a wchar_t to look for. Just use it.
2110     DesiredVal = Desired.getZExtValue();
2111   } else {
2112     DesiredVal = Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
2113   }
2114
  // The str/wcs variants stop at the terminating null; the mem variants
  // scan all MaxLength elements regardless.
2115   bool StopAtZero =
2116       (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr ||
2117        ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr);
2118
2119   PrimType ElemT =
2120       IsRawByte ? PT_Sint8 : *S.getContext().classify(getElemType(Ptr));
2121
2122   size_t Index = Ptr.getIndex();
2123   size_t Step = 0;
2124   for (;;) {
2125     const Pointer &ElemPtr =
2126         (Index + Step) > 0 ? Ptr.atIndex(Index + Step) : Ptr;
2127
    // CheckLoad diagnoses out-of-bounds and otherwise-unreadable elements.
2128     if (!CheckLoad(S, OpPC, ElemPtr))
2129       return false;
2130
2131     uint64_t V;
    // NOTE(review): a macro invocation line (original line 2132, presumably
    // `INT_TYPE_SWITCH(`) is missing from this extraction.
2133         ElemT, { V = static_cast<uint64_t>(ElemPtr.deref<T>().toUnsigned()); });
2134
2135     if (V == DesiredVal) {
2136       S.Stk.push<Pointer>(ElemPtr);
2137       return true;
2138     }
2139
2140     if (StopAtZero && V == 0)
2141       break;
2142
2143     ++Step;
2144     if (MaxLength && Step == MaxLength->getZExtValue())
2145       break;
2146   }
2147
  // Not found: push a null Pointer as the result.
2148   S.Stk.push<Pointer>();
2149   return true;
2150 }
2151
// Computes the full size in bytes of the object described by \p Desc, or
// std::nullopt when the descriptor kind is not one we can size here.
2152 static std::optional<unsigned> computeFullDescSize(const ASTContext &ASTCtx,
2153                                                    const Descriptor *Desc) {
2154   if (Desc->isPrimitive())
2155     return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();
2156   if (Desc->isArray())
2157     return ASTCtx.getTypeSizeInChars(Desc->getElemQualType()).getQuantity() *
2158            Desc->getNumElems();
2159   if (Desc->isRecord()) {
2160     // Can't use Descriptor::getType() as that may return a pointer type. Look
2161     // at the decl directly.
2162     return ASTCtx
    // NOTE(review): one line (original line 2163, presumably
    // `.getTypeSizeInChars(`) is missing from this extraction.
2164                ASTCtx.getCanonicalTagType(Desc->ElemRecord->getDecl()))
2165         .getQuantity();
2166   }
2167
2168   return std::nullopt;
2169 }
2170
2171 /// Compute the byte offset of \p Ptr in the full declaration.
// Walks from the designated subobject up to the root declaration, summing
// the byte offset contributed by each step (array index, base-class offset,
// or field offset).
2172 static unsigned computePointerOffset(const ASTContext &ASTCtx,
2173                                      const Pointer &Ptr) {
2174   unsigned Result = 0;
2175
2176   Pointer P = Ptr;
2177   while (P.isField() || P.isArrayElement()) {
2178     P = P.expand();
2179     const Descriptor *D = P.getFieldDesc();
2180
2181     if (P.isArrayElement()) {
2182       unsigned ElemSize =
      // NOTE(review): one line (original line 2183, presumably
      // `ASTCtx.getTypeSizeInChars(D->getElemQualType()).getQuantity();`) is
      // missing from this extraction.
      // A one-past-the-end element sits after all NumElems elements.
2184       if (P.isOnePastEnd())
2185         Result += ElemSize * P.getNumElems();
2186       else
2187         Result += ElemSize * P.getIndex();
2188       P = P.expand().getArray();
2189     } else if (P.isBaseClass()) {
2190       const auto *RD = cast<CXXRecordDecl>(D->asDecl());
2191       bool IsVirtual = Ptr.isVirtualBaseClass();
2192       P = P.getBase();
2193       const Record *BaseRecord = P.getRecord();
2194
2195       const ASTRecordLayout &Layout =
2196           ASTCtx.getASTRecordLayout(cast<CXXRecordDecl>(BaseRecord->getDecl()));
2197       if (IsVirtual)
2198         Result += Layout.getVBaseClassOffset(RD).getQuantity();
2199       else
2200         Result += Layout.getBaseClassOffset(RD).getQuantity();
2201     } else if (P.isField()) {
2202       const FieldDecl *FD = P.getField();
2203       const ASTRecordLayout &Layout =
2204           ASTCtx.getASTRecordLayout(FD->getParent());
2205       unsigned FieldIndex = FD->getFieldIndex();
      // Field offsets come from the record layout in bits; convert to bytes.
2206       uint64_t FieldOffset =
2207           ASTCtx.toCharUnitsFromBits(Layout.getFieldOffset(FieldIndex))
2208               .getQuantity();
2209       Result += FieldOffset;
2210       P = P.getBase();
2211     } else
2212       llvm_unreachable("Unhandled descriptor type");
2213   }
2214
2215   return Result;
2216 }
2217
2218/// Does Ptr point to the last subobject?
2219static bool pointsToLastObject(const Pointer &Ptr) {
2220 Pointer P = Ptr;
2221 while (!P.isRoot()) {
2222
2223 if (P.isArrayElement()) {
2224 P = P.expand().getArray();
2225 continue;
2226 }
2227 if (P.isBaseClass()) {
2228 if (P.getRecord()->getNumFields() > 0)
2229 return false;
2230 P = P.getBase();
2231 continue;
2232 }
2233
2234 Pointer Base = P.getBase();
2235 if (const Record *R = Base.getRecord()) {
2236 assert(P.getField());
2237 if (P.getField()->getFieldIndex() != R->getNumFields() - 1)
2238 return false;
2239 }
2240 P = Base;
2241 }
2242
2243 return true;
2244}
2245
2246 /// Does Ptr point to the last object AND to a flexible array member?
// Used by __builtin_object_size handling: when the user indexes past a
// trailing array that qualifies as a flexible array member (per
// -fstrict-flex-arrays), the true allocation size is unknowable here.
2247 static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const Pointer &Ptr) {
2248   auto isFlexibleArrayMember = [&](const Descriptor *FieldDesc) {
    // NOTE(review): one line (original line 2249, presumably
    // `using FAMKind = LangOptions::StrictFlexArraysLevelKind;`) is missing
    // from this extraction.
2250     FAMKind StrictFlexArraysLevel =
2251         Ctx.getLangOpts().getStrictFlexArraysLevel();
2252
    // Default: any trailing array counts as a flexible array member.
2253     if (StrictFlexArraysLevel == FAMKind::Default)
2254       return true;
2255
2256     unsigned NumElems = FieldDesc->getNumElems();
2257     if (NumElems == 0 && StrictFlexArraysLevel != FAMKind::IncompleteOnly)
2258       return true;
2259
2260     if (NumElems == 1 && StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete)
2261       return true;
2262     return false;
2263   };
2264
2265   const Descriptor *FieldDesc = Ptr.getFieldDesc();
2266   if (!FieldDesc->isArray())
2267     return false;
2268
2269   return Ptr.isDummy() && pointsToLastObject(Ptr) &&
2270          isFlexibleArrayMember(FieldDesc);
2271 }
2272
// Evaluates __builtin_object_size / __builtin_dynamic_object_size: pushes
// the number of bytes remaining from the pointed-to location to the end of
// the relevant (sub)object, per the Kind argument.
// NOTE(review): the opening signature line (original line 2273) is missing
// from this extraction; presumably
// `static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC,` —
// confirm against the original source file.
2274                                          const InterpFrame *Frame,
2275                                          const CallExpr *Call) {
2276   const ASTContext &ASTCtx = S.getASTContext();
2277   // From the GCC docs:
2278   // Kind is an integer constant from 0 to 3. If the least significant bit is
2279   // clear, objects are whole variables. If it is set, a closest surrounding
2280   // subobject is considered the object a pointer points to. The second bit
2281   // determines if maximum or minimum of remaining bytes is computed.
2282   unsigned Kind = popToAPSInt(S, Call->getArg(1)).getZExtValue();
2283   assert(Kind <= 3 && "unexpected kind");
2284   bool UseFieldDesc = (Kind & 1u);
2285   bool ReportMinimum = (Kind & 2u);
2286   const Pointer &Ptr = S.Stk.pop<Pointer>();
2287
2288   if (Call->getArg(0)->HasSideEffects(ASTCtx)) {
2289     // "If there are any side effects in them, it returns (size_t) -1
2290     // for type 0 or 1 and (size_t) 0 for type 2 or 3."
2291     pushInteger(S, Kind <= 1 ? -1 : 0, Call->getType());
2292     return true;
2293   }
2294
  // Only block pointers have a descriptor we can size.
2295   if (Ptr.isZero() || !Ptr.isBlockPointer())
2296     return false;
2297
2298   // We can't load through pointers.
2299   if (Ptr.isDummy() && Ptr.getType()->isPointerType())
2300     return false;
2301
2302   bool DetermineForCompleteObject = Ptr.getFieldDesc() == Ptr.getDeclDesc();
2303   const Descriptor *DeclDesc = Ptr.getDeclDesc();
2304   assert(DeclDesc);
2305
2306   if (!UseFieldDesc || DetermineForCompleteObject) {
2307     // Lower bound, so we can't fall back to this.
2308     if (ReportMinimum && !DetermineForCompleteObject)
2309       return false;
2310
2311     // Can't read beyond the pointer decl desc.
2312     if (!UseFieldDesc && !ReportMinimum && DeclDesc->getType()->isPointerType())
2313       return false;
2314   } else {
2315     if (isUserWritingOffTheEnd(ASTCtx, Ptr.expand())) {
2316       // If we cannot determine the size of the initial allocation, then we
2317       // can't given an accurate upper-bound. However, we are still able to give
2318       // conservative lower-bounds for Type=3.
2319       if (Kind == 1)
2320         return false;
2321     }
2322   }
2323
2324   const Descriptor *Desc = UseFieldDesc ? Ptr.getFieldDesc() : DeclDesc;
2325   assert(Desc);
2326
2327   std::optional<unsigned> FullSize = computeFullDescSize(ASTCtx, Desc);
2328   if (!FullSize)
2329     return false;
2330
  // Remaining bytes = full object size minus the byte offset of Ptr within
  // the object being measured (the subobject for Kind&1, else the full
  // declaration).
2331   unsigned ByteOffset;
2332   if (UseFieldDesc) {
2333     if (Ptr.isBaseClass())
2334       ByteOffset = computePointerOffset(ASTCtx, Ptr.getBase()) -
2335                    computePointerOffset(ASTCtx, Ptr);
2336     else {
2337       if (Ptr.inArray())
        // Offset within the innermost array only.
2338         ByteOffset =
2339             computePointerOffset(ASTCtx, Ptr) -
2340             computePointerOffset(ASTCtx, Ptr.expand().atIndex(0).narrow());
2341       else
2342         ByteOffset = 0;
2343     }
2344   } else
2345     ByteOffset = computePointerOffset(ASTCtx, Ptr);
2346
2347   assert(ByteOffset <= *FullSize);
2348   unsigned Result = *FullSize - ByteOffset;
2349
2350   pushInteger(S, Result, Call->getType());
2351   return true;
2352 }
2353
// Evaluates __builtin_is_within_lifetime (and its std::is_within_lifetime
// wrapper): pushes whether the pointee is currently within its lifetime,
// diagnosing null, past-the-end, and still-initializing pointees.
// NOTE(review): the opening signature line (original line 2354) is missing
// from this extraction; confirm against the original source file.
2355                                               const CallExpr *Call) {
2356
  // Only a constant subexpression outside of constant evaluation; bail in
  // non-constant contexts.
2357   if (!S.inConstantContext())
2358     return false;
2359
2360   const Pointer &Ptr = S.Stk.pop<Pointer>();
2361
2362   auto Error = [&](int Diag) {
2363     bool CalledFromStd = false;
2364     const auto *Callee = S.Current->getCallee();
2365     if (Callee && Callee->isInStdNamespace()) {
2366       const IdentifierInfo *Identifier = Callee->getIdentifier();
2367       CalledFromStd = Identifier && Identifier->isStr("is_within_lifetime");
2368     }
2369     S.CCEDiag(CalledFromStd
    // NOTE(review): one line (original line 2370, presumably the std-caller
    // source-location expression) is missing from this extraction.
2371                   : S.Current->getSource(OpPC),
2372               diag::err_invalid_is_within_lifetime)
2373         << (CalledFromStd ? "std::is_within_lifetime"
2374                           : "__builtin_is_within_lifetime")
2375         << Diag;
2376     return false;
2377   };
2378
2379   if (Ptr.isZero())
2380     return Error(0);
2381   if (Ptr.isOnePastEnd())
2382     return Error(1);
2383
2384   bool Result = Ptr.getLifetime() != Lifetime::Ended;
2385   if (!Ptr.isActive()) {
    // Inactive union members are simply "not within lifetime", not an error.
2386     Result = false;
2387   } else {
2388     if (!CheckLive(S, OpPC, Ptr, AK_Read))
2389       return false;
2390     if (!CheckMutable(S, OpPC, Ptr))
2391       return false;
2392     if (!CheckDummy(S, OpPC, Ptr.block(), AK_Read))
2393       return false;
2394   }
2395
2396   // Check if we're currently running an initializer.
2397   if (llvm::is_contained(S.InitializingBlocks, Ptr.block()))
2398     return Error(2);
2399   if (S.EvaluatingDecl && Ptr.getDeclDesc()->asVarDecl() == S.EvaluatingDecl)
2400     return Error(2);
2401
2402   pushInteger(S, Result, Call->getType());
2403   return true;
2404 }
2405
// Shared driver for integer elementwise unary builtins: applies \p Fn to a
// single scalar operand. Vector operands are not handled yet.
// NOTE(review): the opening signature line (original line 2406) is missing
// from this extraction; confirm against the original source file.
2407     InterpState &S, CodePtr OpPC, const CallExpr *Call,
2408     llvm::function_ref<APInt(const APSInt &)> Fn) {
2409   assert(Call->getNumArgs() == 1);
2410   assert(Call->getType()->isIntegerType());
2411
2412   // Single integer case.
2413   if (!Call->getArg(0)->getType()->isVectorType()) {
2414     APSInt Src = popToAPSInt(S, Call->getArg(0));
2415     APInt Result = Fn(Src);
    // Result keeps the operand's signedness.
2416     pushInteger(S, APSInt(std::move(Result), !Src.isSigned()), Call->getType());
2417     return true;
2418   }
2419
2420   // TODO: Add vector integer handling.
2421   return false;
2422 }
2423
// Shared driver for integer elementwise binary builtins: applies \p Fn
// either to two scalars, to each element of a vector paired with a scalar,
// or lane-wise to two vectors of the same shape.
// NOTE(review): the opening signature line (original line 2424) is missing
// from this extraction; confirm against the original source file.
2425     InterpState &S, CodePtr OpPC, const CallExpr *Call,
2426     llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
2427   assert(Call->getNumArgs() == 2);
2428
2429   // Single integer case.
2430   if (!Call->getArg(0)->getType()->isVectorType()) {
2431     assert(!Call->getArg(1)->getType()->isVectorType());
    // Operands are popped right-to-left (stack order).
2432     APSInt RHS = popToAPSInt(S, Call->getArg(1));
2433     APSInt LHS = popToAPSInt(S, Call->getArg(0));
2434     APInt Result = Fn(LHS, RHS);
2435     pushInteger(S, APSInt(std::move(Result), !LHS.isSigned()), Call->getType());
2436     return true;
2437   }
2438
2439   const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2440   assert(VT->getElementType()->isIntegralOrEnumerationType());
2441   PrimType ElemT = *S.getContext().classify(VT->getElementType());
2442   unsigned NumElems = VT->getNumElements();
2443   bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2444
2445   // Vector + Scalar case.
2446   if (!Call->getArg(1)->getType()->isVectorType()) {
2447     assert(Call->getArg(1)->getType()->isIntegralOrEnumerationType());
2448
2449     APSInt RHS = popToAPSInt(S, Call->getArg(1));
2450     const Pointer &LHS = S.Stk.pop<Pointer>();
2451     const Pointer &Dst = S.Stk.peek<Pointer>();
2452
2453     for (unsigned I = 0; I != NumElems; ++I) {
      // NOTE(review): a macro invocation line (original line 2454,
      // presumably `INT_TYPE_SWITCH_NO_BOOL(ElemT, {`) is missing from this
      // extraction.
2455         Dst.elem<T>(I) = static_cast<T>(
2456             APSInt(Fn(LHS.elem<T>(I).toAPSInt(), RHS), DestUnsigned));
2457       });
2458     }
    // NOTE(review): one line (original line 2459, presumably
    // `Dst.initializeAllElements();`) is missing from this extraction.
2460     return true;
2461   }
2462
2463   // Vector case.
2464   assert(Call->getArg(0)->getType()->isVectorType() &&
2465          Call->getArg(1)->getType()->isVectorType());
2466   assert(VT->getElementType() ==
2467          Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
2468   assert(VT->getNumElements() ==
2469          Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
2470   assert(VT->getElementType()->isIntegralOrEnumerationType());
2471
2472   const Pointer &RHS = S.Stk.pop<Pointer>();
2473   const Pointer &LHS = S.Stk.pop<Pointer>();
2474   const Pointer &Dst = S.Stk.peek<Pointer>();
2475   for (unsigned I = 0; I != NumElems; ++I) {
    // NOTE(review): a macro invocation line (original line 2476) is missing
    // from this extraction.
2477       APSInt Elem1 = LHS.elem<T>(I).toAPSInt();
2478       APSInt Elem2 = RHS.elem<T>(I).toAPSInt();
2479       Dst.elem<T>(I) = static_cast<T>(APSInt(Fn(Elem1, Elem2), DestUnsigned));
2480     });
2481   }
  // NOTE(review): one line (original line 2482, presumably
  // `Dst.initializeAllElements();`) is missing from this extraction.
2483
2484   return true;
2485 }
2486
2487 static bool
// Implements x86 pack builtins (packss/packus-style): per 128-bit lane,
// narrows SrcPerLane elements of LHS followed by SrcPerLane elements of RHS
// via \p PackFn into the destination vector.
// NOTE(review): part of the signature (original line 2488) is missing from
// this extraction; confirm against the original source file.
2489                          llvm::function_ref<APInt(const APSInt &)> PackFn) {
2490   const auto *VT0 = E->getArg(0)->getType()->castAs<VectorType>();
2491   [[maybe_unused]] const auto *VT1 =
2492       E->getArg(1)->getType()->castAs<VectorType>();
2493   assert(VT0 && VT1 && "pack builtin VT0 and VT1 must be VectorType");
2494   assert(VT0->getElementType() == VT1->getElementType() &&
2495          VT0->getNumElements() == VT1->getNumElements() &&
2496          "pack builtin VT0 and VT1 ElementType must be same");
2497
2498   const Pointer &RHS = S.Stk.pop<Pointer>();
2499   const Pointer &LHS = S.Stk.pop<Pointer>();
2500   const Pointer &Dst = S.Stk.peek<Pointer>();
2501
2502   const ASTContext &ASTCtx = S.getASTContext();
2503   unsigned SrcBits = ASTCtx.getIntWidth(VT0->getElementType());
2504   unsigned LHSVecLen = VT0->getNumElements();
  // Packing is performed independently within each 128-bit lane.
2505   unsigned SrcPerLane = 128 / SrcBits;
2506   unsigned Lanes = LHSVecLen * SrcBits / 128;
2507
2508   PrimType SrcT = *S.getContext().classify(VT0->getElementType());
2509   PrimType DstT = *S.getContext().classify(getElemType(Dst));
2510   bool IsUnsigend = getElemType(Dst)->isUnsignedIntegerType();
2511
2512   for (unsigned Lane = 0; Lane != Lanes; ++Lane) {
2513     unsigned BaseSrc = Lane * SrcPerLane;
2514     unsigned BaseDst = Lane * (2 * SrcPerLane);
2515
2516     for (unsigned I = 0; I != SrcPerLane; ++I) {
      // NOTE(review): a macro invocation line (original line 2517,
      // presumably `INT_TYPE_SWITCH_NO_BOOL(SrcT, {`) is missing from this
      // extraction.
2518         APSInt A = LHS.elem<T>(BaseSrc + I).toAPSInt();
2519         APSInt B = RHS.elem<T>(BaseSrc + I).toAPSInt();
2520
      // LHS fills the lane's low half, RHS the high half.
2521         assignInteger(S, Dst.atIndex(BaseDst + I), DstT,
2522                       APSInt(PackFn(A), IsUnsigend));
2523         assignInteger(S, Dst.atIndex(BaseDst + SrcPerLane + I), DstT,
2524                       APSInt(PackFn(B), IsUnsigend));
2525       });
2526     }
2527   }
2528
2529   Dst.initializeAllElements();
2530   return true;
2531 }
2532
// Implements __builtin_elementwise_max/min for integers: scalar or lane-wise
// vector max/min, selected by BuiltinID. Floating-point is not handled yet.
// NOTE(review): the opening signature line (original line 2533) is missing
// from this extraction; confirm against the original source file.
2534                                            const CallExpr *Call,
2535                                            unsigned BuiltinID) {
2536   assert(Call->getNumArgs() == 2);
2537
2538   QualType Arg0Type = Call->getArg(0)->getType();
2539
2540   // TODO: Support floating-point types.
2541   if (!(Arg0Type->isIntegerType() ||
2542         (Arg0Type->isVectorType() &&
2543          Arg0Type->castAs<VectorType>()->getElementType()->isIntegerType())))
2544     return false;
2545
2546   if (!Arg0Type->isVectorType()) {
2547     assert(!Call->getArg(1)->getType()->isVectorType());
2548     APSInt RHS = popToAPSInt(S, Call->getArg(1));
2549     APSInt LHS = popToAPSInt(S, Arg0Type);
2550     APInt Result;
2551     if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
2552       Result = std::max(LHS, RHS);
2553     } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
2554       Result = std::min(LHS, RHS);
2555     } else {
2556       llvm_unreachable("Wrong builtin ID");
2557     }
2558
2559     pushInteger(S, APSInt(Result, !LHS.isSigned()), Call->getType());
2560     return true;
2561   }
2562
2563   // Vector case.
2564   assert(Call->getArg(0)->getType()->isVectorType() &&
2565          Call->getArg(1)->getType()->isVectorType());
2566   const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2567   assert(VT->getElementType() ==
2568          Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
2569   assert(VT->getNumElements() ==
2570          Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
2571   assert(VT->getElementType()->isIntegralOrEnumerationType());
2572
2573   const Pointer &RHS = S.Stk.pop<Pointer>();
2574   const Pointer &LHS = S.Stk.pop<Pointer>();
2575   const Pointer &Dst = S.Stk.peek<Pointer>();
2576   PrimType ElemT = *S.getContext().classify(VT->getElementType());
2577   unsigned NumElems = VT->getNumElements();
2578   for (unsigned I = 0; I != NumElems; ++I) {
2579     APSInt Elem1;
2580     APSInt Elem2;
    // NOTE(review): a macro invocation line (original line 2581, presumably
    // `INT_TYPE_SWITCH_NO_BOOL(ElemT, {`) is missing from this extraction.
2582       Elem1 = LHS.elem<T>(I).toAPSInt();
2583       Elem2 = RHS.elem<T>(I).toAPSInt();
2584     });
2585
2586     APSInt Result;
2587     if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
2588       Result = APSInt(std::max(Elem1, Elem2),
2589                       Call->getType()->isUnsignedIntegerOrEnumerationType());
2590     } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
2591       Result = APSInt(std::min(Elem1, Elem2),
2592                       Call->getType()->isUnsignedIntegerOrEnumerationType());
2593     } else {
2594       llvm_unreachable("Wrong builtin ID");
2595     }
2596
    // NOTE(review): a macro invocation line (original line 2597) is missing
    // from this extraction.
2598         { Dst.elem<T>(I) = static_cast<T>(Result); });
2599   }
2600   Dst.initializeAllElements();
2601
2602   return true;
2603 }
2604
// Pairwise vector reduction driver (pmaddwd/pmaddubsw-style): consumes
// elements two at a time from LHS and RHS and combines each
// (lo,hi,lo,hi) quadruple via \p Fn into one destination element.
// NOTE(review): the opening signature line (original line 2605) is missing
// from this extraction; confirm against the original source file.
2606     InterpState &S, CodePtr OpPC, const CallExpr *Call,
2607     llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &,
2608                              const APSInt &)>
2609         Fn) {
2610   assert(Call->getArg(0)->getType()->isVectorType() &&
2611          Call->getArg(1)->getType()->isVectorType());
2612   const Pointer &RHS = S.Stk.pop<Pointer>();
2613   const Pointer &LHS = S.Stk.pop<Pointer>();
2614   const Pointer &Dst = S.Stk.peek<Pointer>();
2615
2616   const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2617   PrimType ElemT = *S.getContext().classify(VT->getElementType());
2618   unsigned NumElems = VT->getNumElements();
  // The destination element type is wider than the source's; classify it
  // separately.
2619   const auto *DestVT = Call->getType()->castAs<VectorType>();
2620   PrimType DestElemT = *S.getContext().classify(DestVT->getElementType());
2621   bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2622
2623   unsigned DstElem = 0;
2624   for (unsigned I = 0; I != NumElems; I += 2) {
2625     APSInt Result;
    // NOTE(review): a macro invocation line (original line 2626, presumably
    // `INT_TYPE_SWITCH_NO_BOOL(ElemT, {`) is missing from this extraction.
2627       APSInt LoLHS = LHS.elem<T>(I).toAPSInt();
2628       APSInt HiLHS = LHS.elem<T>(I + 1).toAPSInt();
2629       APSInt LoRHS = RHS.elem<T>(I).toAPSInt();
2630       APSInt HiRHS = RHS.elem<T>(I + 1).toAPSInt();
2631       Result = APSInt(Fn(LoLHS, HiLHS, LoRHS, HiRHS), DestUnsigned);
2632     });
2633
2634     INT_TYPE_SWITCH_NO_BOOL(DestElemT,
2635                             { Dst.elem<T>(DstElem) = static_cast<T>(Result); });
2636     ++DstElem;
2637   }
2638
2639   Dst.initializeAllElements();
2640   return true;
2641 }
2642
// Horizontal integer binop driver (phadd/phsub-style): per 128-bit lane,
// combines adjacent element pairs of LHS (lane's low half of the result)
// and then of RHS (lane's high half) via \p Fn.
// NOTE(review): the opening signature line (original line 2643) is missing
// from this extraction; confirm against the original source file.
2644     InterpState &S, CodePtr OpPC, const CallExpr *Call,
2645     llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
2646   const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2647   PrimType ElemT = *S.getContext().classify(VT->getElementType());
2648   bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2649
2650   const Pointer &RHS = S.Stk.pop<Pointer>();
2651   const Pointer &LHS = S.Stk.pop<Pointer>();
2652   const Pointer &Dst = S.Stk.peek<Pointer>();
2653   unsigned NumElts = VT->getNumElements();
2654   unsigned EltBits = S.getASTContext().getIntWidth(VT->getElementType());
2655   unsigned EltsPerLane = 128 / EltBits;
2656   unsigned Lanes = NumElts * EltBits / 128;
2657   unsigned DestIndex = 0;
2658
2659   for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2660     unsigned LaneStart = Lane * EltsPerLane;
    // First the LHS pairs of this lane...
2661     for (unsigned I = 0; I < EltsPerLane; I += 2) {
      // NOTE(review): a macro invocation line (original line 2662,
      // presumably `INT_TYPE_SWITCH_NO_BOOL(ElemT, {`) is missing from this
      // extraction.
2663         APSInt Elem1 = LHS.elem<T>(LaneStart + I).toAPSInt();
2664         APSInt Elem2 = LHS.elem<T>(LaneStart + I + 1).toAPSInt();
2665         APSInt ResL = APSInt(Fn(Elem1, Elem2), DestUnsigned);
2666         Dst.elem<T>(DestIndex++) = static_cast<T>(ResL);
2667       });
2668     }
2669
    // ...then the RHS pairs fill the lane's high half.
2670     for (unsigned I = 0; I < EltsPerLane; I += 2) {
      // NOTE(review): a macro invocation line (original line 2671) is
      // missing from this extraction.
2672         APSInt Elem1 = RHS.elem<T>(LaneStart + I).toAPSInt();
2673         APSInt Elem2 = RHS.elem<T>(LaneStart + I + 1).toAPSInt();
2674         APSInt ResR = APSInt(Fn(Elem1, Elem2), DestUnsigned);
2675         Dst.elem<T>(DestIndex++) = static_cast<T>(ResR);
2676       });
2677     }
2678   }
2679   Dst.initializeAllElements();
2680   return true;
2681 }
2682
// Horizontal floating-point binop driver (haddps/hsubps-style): per
// 128-bit lane, combines adjacent element pairs of LHS into the lane's low
// half and pairs of RHS into the lane's high half, using the expression's
// rounding mode.
// NOTE(review): the opening signature line (original line 2683) is missing
// from this extraction; confirm against the original source file.
2684     InterpState &S, CodePtr OpPC, const CallExpr *Call,
2685     llvm::function_ref<APFloat(const APFloat &, const APFloat &,
2686                                llvm::RoundingMode)>
2687         Fn) {
2688   const Pointer &RHS = S.Stk.pop<Pointer>();
2689   const Pointer &LHS = S.Stk.pop<Pointer>();
2690   const Pointer &Dst = S.Stk.peek<Pointer>();
2691   FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
2692   llvm::RoundingMode RM = getRoundingMode(FPO);
2693   const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2694
2695   unsigned NumElts = VT->getNumElements();
2696   unsigned EltBits = S.getASTContext().getTypeSize(VT->getElementType());
2697   unsigned NumLanes = NumElts * EltBits / 128;
2698   unsigned NumElemsPerLane = NumElts / NumLanes;
2699   unsigned HalfElemsPerLane = NumElemsPerLane / 2;
2700
2701   for (unsigned L = 0; L != NumElts; L += NumElemsPerLane) {
2702     using T = PrimConv<PT_Float>::T;
2703     for (unsigned E = 0; E != HalfElemsPerLane; ++E) {
2704       APFloat Elem1 = LHS.elem<T>(L + (2 * E) + 0).getAPFloat();
2705       APFloat Elem2 = LHS.elem<T>(L + (2 * E) + 1).getAPFloat();
2706       Dst.elem<T>(L + E) = static_cast<T>(Fn(Elem1, Elem2, RM));
2707     }
2708     for (unsigned E = 0; E != HalfElemsPerLane; ++E) {
2709       APFloat Elem1 = RHS.elem<T>(L + (2 * E) + 0).getAPFloat();
2710       APFloat Elem2 = RHS.elem<T>(L + (2 * E) + 1).getAPFloat();
2711       Dst.elem<T>(L + E + HalfElemsPerLane) =
2712           static_cast<T>(Fn(Elem1, Elem2, RM));
2713     }
2714   }
2715   Dst.initializeAllElements();
2716   return true;
2717 }
2718
// Implements addsub vector builtins (addsubps-style).
// NOTE(review): the opening signature line (original line 2719) is missing
// from this extraction; confirm against the original source file.
2720                                         const CallExpr *Call) {
2721   // Addsub: alternates between subtraction and addition
2722   // Result[i] = (i % 2 == 0) ? (a[i] - b[i]) : (a[i] + b[i])
2723   const Pointer &RHS = S.Stk.pop<Pointer>();
2724   const Pointer &LHS = S.Stk.pop<Pointer>();
2725   const Pointer &Dst = S.Stk.peek<Pointer>();
  // Honor the expression's floating-point rounding mode.
2726   FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
2727   llvm::RoundingMode RM = getRoundingMode(FPO);
2728   const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2729   unsigned NumElems = VT->getNumElements();
2730
2731   using T = PrimConv<PT_Float>::T;
2732   for (unsigned I = 0; I != NumElems; ++I) {
2733     APFloat LElem = LHS.elem<T>(I).getAPFloat();
2734     APFloat RElem = RHS.elem<T>(I).getAPFloat();
2735     if (I % 2 == 0) {
2736       // Even indices: subtract
2737       LElem.subtract(RElem, RM);
2738     } else {
2739       // Odd indices: add
2740       LElem.add(RElem, RM);
2741     }
2742     Dst.elem<T>(I) = static_cast<T>(LElem);
2743   }
2744   Dst.initializeAllElements();
2745   return true;
2746 }
2747
// Ternary floating-point builtin driver (FMA-style): applies \p Fn to three
// scalar floats or lane-wise to three same-shape float vectors, using the
// expression's rounding mode.
// NOTE(review): the opening signature line (original line 2748) is missing
// from this extraction; confirm against the original source file.
2749     InterpState &S, CodePtr OpPC, const CallExpr *Call,
2750     llvm::function_ref<APFloat(const APFloat &, const APFloat &,
2751                                const APFloat &, llvm::RoundingMode)>
2752         Fn) {
2753   assert(Call->getNumArgs() == 3);
2754
2755   FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
2756   llvm::RoundingMode RM = getRoundingMode(FPO);
2757   QualType Arg1Type = Call->getArg(0)->getType();
2758   QualType Arg2Type = Call->getArg(1)->getType();
2759   QualType Arg3Type = Call->getArg(2)->getType();
2760
2761   // Non-vector floating point types.
2762   if (!Arg1Type->isVectorType()) {
2763     assert(!Arg2Type->isVectorType());
2764     assert(!Arg3Type->isVectorType());
2765     (void)Arg2Type;
2766     (void)Arg3Type;
2767
    // Stack order: last argument on top.
2768     const Floating &Z = S.Stk.pop<Floating>();
2769     const Floating &Y = S.Stk.pop<Floating>();
2770     const Floating &X = S.Stk.pop<Floating>();
2771     APFloat F = Fn(X.getAPFloat(), Y.getAPFloat(), Z.getAPFloat(), RM);
2772     Floating Result = S.allocFloat(X.getSemantics());
2773     Result.copy(F);
2774     S.Stk.push<Floating>(Result);
2775     return true;
2776   }
2777
2778   // Vector type.
2779   assert(Arg1Type->isVectorType() && Arg2Type->isVectorType() &&
2780          Arg3Type->isVectorType());
2781
2782   const VectorType *VecTy = Arg1Type->castAs<VectorType>();
2783   QualType ElemQT = VecTy->getElementType();
2784   unsigned NumElems = VecTy->getNumElements();
2785
2786   assert(ElemQT == Arg2Type->castAs<VectorType>()->getElementType() &&
2787          ElemQT == Arg3Type->castAs<VectorType>()->getElementType());
2788   assert(NumElems == Arg2Type->castAs<VectorType>()->getNumElements() &&
2789          NumElems == Arg3Type->castAs<VectorType>()->getNumElements());
2790   assert(ElemQT->isRealFloatingType());
2791   (void)ElemQT;
2792
2793   const Pointer &VZ = S.Stk.pop<Pointer>();
2794   const Pointer &VY = S.Stk.pop<Pointer>();
2795   const Pointer &VX = S.Stk.pop<Pointer>();
2796   const Pointer &Dst = S.Stk.peek<Pointer>();
2797   for (unsigned I = 0; I != NumElems; ++I) {
2798     using T = PrimConv<PT_Float>::T;
2799     APFloat X = VX.elem<T>(I).getAPFloat();
2800     APFloat Y = VY.elem<T>(I).getAPFloat();
2801     APFloat Z = VZ.elem<T>(I).getAPFloat();
2802     APFloat F = Fn(X, Y, Z, RM);
2803     Dst.elem<Floating>(I) = Floating(F);
2804   }
  // NOTE(review): one line (original line 2805, presumably
  // `Dst.initializeAllElements();`) is missing from this extraction.
2806   return true;
2807 }
2808
2809 /// AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
// Per element I, bit I of the integer Mask chooses between LHS and RHS.
// NOTE(review): the signature line (original line 2810) is missing from
// this extraction; confirm against the original source file.
2811                                    const CallExpr *Call) {
2812   const Pointer &RHS = S.Stk.pop<Pointer>();
2813   const Pointer &LHS = S.Stk.pop<Pointer>();
2814   APSInt Mask = popToAPSInt(S, Call->getArg(0));
2815   const Pointer &Dst = S.Stk.peek<Pointer>();
2816
2817   assert(LHS.getNumElems() == RHS.getNumElems());
2818   assert(LHS.getNumElems() == Dst.getNumElems());
2819   unsigned NumElems = LHS.getNumElems();
2820   PrimType ElemT = LHS.getFieldDesc()->getPrimType();
2821   PrimType DstElemT = Dst.getFieldDesc()->getPrimType();
2822
2823   for (unsigned I = 0; I != NumElems; ++I) {
2824     if (ElemT == PT_Float) {
2825       assert(DstElemT == PT_Float);
2826       Dst.elem<Floating>(I) =
2827           Mask[I] ? LHS.elem<Floating>(I) : RHS.elem<Floating>(I);
2828     } else {
2829       APSInt Elem;
2830       INT_TYPE_SWITCH(ElemT, {
2831         Elem = Mask[I] ? LHS.elem<T>(I).toAPSInt() : RHS.elem<T>(I).toAPSInt();
2832       });
2833       INT_TYPE_SWITCH_NO_BOOL(DstElemT,
2834                               { Dst.elem<T>(I) = static_cast<T>(Elem); });
2835     }
2836   }
  // NOTE(review): one line (original line 2837, presumably
  // `Dst.initializeAllElements();`) is missing from this extraction.
2838
2839   return true;
2840 }
2841
2842 /// Scalar variant of AVX512 predicated select:
2843 /// Result[i] = (Mask bit 0) ? LHS[i] : RHS[i], but only element 0 may change.
2844 /// All other elements are taken from RHS.
// NOTE(review): the signature line (original line 2845) is missing from
// this extraction; confirm against the original source file.
2846                                          const CallExpr *Call) {
2847   unsigned N =
2848       Call->getArg(1)->getType()->getAs<VectorType>()->getNumElements();
2849
2850   const Pointer &W = S.Stk.pop<Pointer>();
2851   const Pointer &A = S.Stk.pop<Pointer>();
2852   APSInt U = popToAPSInt(S, Call->getArg(0));
2853   const Pointer &Dst = S.Stk.peek<Pointer>();
2854
  // Only the mask's least significant bit matters.
2855   bool TakeA0 = U.getZExtValue() & 1ULL;
2856
  // Copy W into every element except, when TakeA0, element 0 (the loop
  // starts at index 1 in that case).
2857   for (unsigned I = TakeA0; I != N; ++I)
2858     Dst.elem<Floating>(I) = W.elem<Floating>(I);
2859   if (TakeA0)
2860     Dst.elem<Floating>(0) = A.elem<Floating>(0);
2861
  // NOTE(review): one line (original line 2862, presumably
  // `Dst.initializeAllElements();`) is missing from this extraction.
2863   return true;
2864 }
2865
// Implements blend builtins: an 8-bit immediate mask selects, per element,
// between TrueVec and FalseVec (the mask bit pattern repeats every 8
// elements).
// NOTE(review): the opening signature line (original line 2866) is missing
// from this extraction; confirm against the original source file.
2867                                   const CallExpr *Call) {
2868   APSInt Mask = popToAPSInt(S, Call->getArg(2));
2869   const Pointer &TrueVec = S.Stk.pop<Pointer>();
2870   const Pointer &FalseVec = S.Stk.pop<Pointer>();
2871   const Pointer &Dst = S.Stk.peek<Pointer>();
2872
2873   assert(FalseVec.getNumElems() == TrueVec.getNumElems());
2874   assert(FalseVec.getNumElems() == Dst.getNumElems());
2875   unsigned NumElems = FalseVec.getNumElems();
2876   PrimType ElemT = FalseVec.getFieldDesc()->getPrimType();
2877   PrimType DstElemT = Dst.getFieldDesc()->getPrimType();
2878
2879   for (unsigned I = 0; I != NumElems; ++I) {
    // The immediate only has 8 mask bits; wrap for wider vectors.
2880     bool MaskBit = Mask[I % 8];
2881     if (ElemT == PT_Float) {
2882       assert(DstElemT == PT_Float);
2883       Dst.elem<Floating>(I) =
2884           MaskBit ? TrueVec.elem<Floating>(I) : FalseVec.elem<Floating>(I);
2885     } else {
2886       assert(DstElemT == ElemT);
2887       INT_TYPE_SWITCH_NO_BOOL(DstElemT, {
2888         Dst.elem<T>(I) =
2889             static_cast<T>(MaskBit ? TrueVec.elem<T>(I).toAPSInt()
2890                                    : FalseVec.elem<T>(I).toAPSInt());
2891       });
2892     }
2893   }
2894   Dst.initializeAllElements();
2895
2896   return true;
2897 }
2898
// Vector test driver (ptestz/vtestz-style): widens both operand vectors
// into single APInts (for floats only the sign bit of each lane is kept)
// and pushes the boolean result of \p Fn applied to the two bit patterns.
// NOTE(review): the opening signature line (original line 2899) is missing
// from this extraction; confirm against the original source file.
2900     InterpState &S, CodePtr OpPC, const CallExpr *Call,
2901     llvm::function_ref<bool(const APInt &A, const APInt &B)> Fn) {
2902   const Pointer &RHS = S.Stk.pop<Pointer>();
2903   const Pointer &LHS = S.Stk.pop<Pointer>();
2904
2905   assert(LHS.getNumElems() == RHS.getNumElems());
2906
2907   unsigned SourceLen = LHS.getNumElems();
2908   QualType ElemQT = getElemType(LHS);
2909   OptPrimType ElemPT = S.getContext().classify(ElemQT);
2910   unsigned LaneWidth = S.getASTContext().getTypeSize(ElemQT);
2911
2912   APInt AWide(LaneWidth * SourceLen, 0);
2913   APInt BWide(LaneWidth * SourceLen, 0);
2914
2915   for (unsigned I = 0; I != SourceLen; ++I) {
2916     APInt ALane;
2917     APInt BLane;
2918
2919     if (ElemQT->isIntegerType()) { // Get value.
2920       INT_TYPE_SWITCH_NO_BOOL(*ElemPT, {
2921         ALane = LHS.elem<T>(I).toAPSInt();
2922         BLane = RHS.elem<T>(I).toAPSInt();
2923       });
2924     } else if (ElemQT->isFloatingType()) { // Get only sign bit.
2925       using T = PrimConv<PT_Float>::T;
2926       ALane = LHS.elem<T>(I).getAPFloat().bitcastToAPInt().isNegative();
2927       BLane = RHS.elem<T>(I).getAPFloat().bitcastToAPInt().isNegative();
2928     } else { // Must be integer or floating type.
2929       return false;
2930     }
2931     AWide.insertBits(ALane, I * LaneWidth);
2932     BWide.insertBits(BLane, I * LaneWidth);
2933   }
2934   pushInteger(S, Fn(AWide, BWide), Call->getType());
2935   return true;
2936 }
2937
// Implements movmsk-style builtins: collects each element's sign bit into
// the corresponding bit of an integer result.
// NOTE(review): the opening signature line (original line 2938) is missing
// from this extraction; confirm against the original source file.
2939                                        const CallExpr *Call) {
2940   assert(Call->getNumArgs() == 1);
2941
2942   const Pointer &Source = S.Stk.pop<Pointer>();
2943
2944   unsigned SourceLen = Source.getNumElems();
2945   QualType ElemQT = getElemType(Source);
2946   OptPrimType ElemT = S.getContext().classify(ElemQT);
2947   unsigned ResultLen =
2948       S.getASTContext().getTypeSize(Call->getType()); // Always 32-bit integer.
2949   APInt Result(ResultLen, 0);
2950
2951   for (unsigned I = 0; I != SourceLen; ++I) {
2952     APInt Elem;
2953     if (ElemQT->isIntegerType()) {
2954       INT_TYPE_SWITCH_NO_BOOL(*ElemT, { Elem = Source.elem<T>(I).toAPSInt(); });
2955     } else if (ElemQT->isRealFloatingType()) {
      // For floats, take the raw bit pattern; isNegative() below then reads
      // the IEEE sign bit.
2956       using T = PrimConv<PT_Float>::T;
2957       Elem = Source.elem<T>(I).getAPFloat().bitcastToAPInt();
2958     } else {
2959       return false;
2960     }
2961     Result.setBitVal(I, Elem.isNegative());
2962   }
2963   pushInteger(S, Result, Call->getType());
2964   return true;
2965 }
2966
// Ternary integer builtin driver (e.g. funnel shifts): applies \p Fn to
// three scalars, to vector/vector/scalar operands, or lane-wise to three
// vectors.
// NOTE(review): the opening signature line (original line 2967) is missing
// from this extraction; confirm against the original source file.
2968     InterpState &S, CodePtr OpPC, const CallExpr *Call,
2969     llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &)>
2970         Fn) {
2971   assert(Call->getNumArgs() == 3);
2972
2973   QualType Arg0Type = Call->getArg(0)->getType();
2974   QualType Arg2Type = Call->getArg(2)->getType();
2975   // Non-vector integer types.
2976   if (!Arg0Type->isVectorType()) {
    // Stack order: last argument on top.
2977     const APSInt &Op2 = popToAPSInt(S, Arg2Type);
2978     const APSInt &Op1 = popToAPSInt(S, Call->getArg(1));
2979     const APSInt &Op0 = popToAPSInt(S, Arg0Type);
2980     APSInt Result = APSInt(Fn(Op0, Op1, Op2), Op0.isUnsigned());
2981     pushInteger(S, Result, Call->getType());
2982     return true;
2983   }
2984
2985   const auto *VecT = Arg0Type->castAs<VectorType>();
2986   PrimType ElemT = *S.getContext().classify(VecT->getElementType());
2987   unsigned NumElems = VecT->getNumElements();
2988   bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2989
2990   // Vector + Vector + Scalar case.
2991   if (!Arg2Type->isVectorType()) {
2992     APSInt Op2 = popToAPSInt(S, Arg2Type);
2993
2994     const Pointer &Op1 = S.Stk.pop<Pointer>();
2995     const Pointer &Op0 = S.Stk.pop<Pointer>();
2996     const Pointer &Dst = S.Stk.peek<Pointer>();
2997     for (unsigned I = 0; I != NumElems; ++I) {
      // NOTE(review): a macro invocation line (original line 2998,
      // presumably `INT_TYPE_SWITCH_NO_BOOL(ElemT, {`) is missing from this
      // extraction.
2999         Dst.elem<T>(I) = static_cast<T>(APSInt(
3000             Fn(Op0.elem<T>(I).toAPSInt(), Op1.elem<T>(I).toAPSInt(), Op2),
3001             DestUnsigned));
3002       });
3003     }
    // NOTE(review): one line (original line 3004, presumably
    // `Dst.initializeAllElements();`) is missing from this extraction.
3005
3006     return true;
3007   }
3008
3009   // Vector type.
3010   const Pointer &Op2 = S.Stk.pop<Pointer>();
3011   const Pointer &Op1 = S.Stk.pop<Pointer>();
3012   const Pointer &Op0 = S.Stk.pop<Pointer>();
3013   const Pointer &Dst = S.Stk.peek<Pointer>();
3014   for (unsigned I = 0; I != NumElems; ++I) {
3015     APSInt Val0, Val1, Val2;
    // NOTE(review): a macro invocation line (original line 3016) is missing
    // from this extraction.
3017       Val0 = Op0.elem<T>(I).toAPSInt();
3018       Val1 = Op1.elem<T>(I).toAPSInt();
3019       Val2 = Op2.elem<T>(I).toAPSInt();
3020     });
3021     APSInt Result = APSInt(Fn(Val0, Val1, Val2), Val0.isUnsigned());
    // NOTE(review): a macro invocation line (original line 3022) is missing
    // from this extraction.
3023         { Dst.elem<T>(I) = static_cast<T>(Result); });
3024   }
  // NOTE(review): one line (original line 3025, presumably
  // `Dst.initializeAllElements();`) is missing from this extraction.
3026
3027   return true;
3028 }
3029
3031 const CallExpr *Call,
3032 unsigned ID) {
3033 assert(Call->getNumArgs() == 2);
3034
3035 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
3036 uint64_t Index = ImmAPS.getZExtValue();
3037
3038 const Pointer &Src = S.Stk.pop<Pointer>();
3039 if (!Src.getFieldDesc()->isPrimitiveArray())
3040 return false;
3041
3042 const Pointer &Dst = S.Stk.peek<Pointer>();
3043 if (!Dst.getFieldDesc()->isPrimitiveArray())
3044 return false;
3045
3046 unsigned SrcElems = Src.getNumElems();
3047 unsigned DstElems = Dst.getNumElems();
3048
3049 unsigned NumLanes = SrcElems / DstElems;
3050 unsigned Lane = static_cast<unsigned>(Index % NumLanes);
3051 unsigned ExtractPos = Lane * DstElems;
3052
3053 PrimType ElemT = Src.getFieldDesc()->getPrimType();
3054
3055 TYPE_SWITCH(ElemT, {
3056 for (unsigned I = 0; I != DstElems; ++I) {
3057 Dst.elem<T>(I) = Src.elem<T>(ExtractPos + I);
3058 }
3059 });
3060
3062 return true;
3063}
3064
3066 CodePtr OpPC,
3067 const CallExpr *Call,
3068 unsigned ID) {
3069 assert(Call->getNumArgs() == 4);
3070
3071 APSInt MaskAPS = popToAPSInt(S, Call->getArg(3));
3072 const Pointer &Merge = S.Stk.pop<Pointer>();
3073 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
3074 const Pointer &Src = S.Stk.pop<Pointer>();
3075
3076 if (!Src.getFieldDesc()->isPrimitiveArray() ||
3077 !Merge.getFieldDesc()->isPrimitiveArray())
3078 return false;
3079
3080 const Pointer &Dst = S.Stk.peek<Pointer>();
3081 if (!Dst.getFieldDesc()->isPrimitiveArray())
3082 return false;
3083
3084 unsigned SrcElems = Src.getNumElems();
3085 unsigned DstElems = Dst.getNumElems();
3086
3087 unsigned NumLanes = SrcElems / DstElems;
3088 unsigned Lane = static_cast<unsigned>(ImmAPS.getZExtValue() % NumLanes);
3089 unsigned Base = Lane * DstElems;
3090
3091 PrimType ElemT = Src.getFieldDesc()->getPrimType();
3092
3093 TYPE_SWITCH(ElemT, {
3094 for (unsigned I = 0; I != DstElems; ++I) {
3095 if (MaskAPS[I])
3096 Dst.elem<T>(I) = Src.elem<T>(Base + I);
3097 else
3098 Dst.elem<T>(I) = Merge.elem<T>(I);
3099 }
3100 });
3101
3103 return true;
3104}
3105
3107 const CallExpr *Call,
3108 unsigned ID) {
3109 assert(Call->getNumArgs() == 3);
3110
3111 APSInt ImmAPS = popToAPSInt(S, Call->getArg(2));
3112 uint64_t Index = ImmAPS.getZExtValue();
3113
3114 const Pointer &SubVec = S.Stk.pop<Pointer>();
3115 if (!SubVec.getFieldDesc()->isPrimitiveArray())
3116 return false;
3117
3118 const Pointer &BaseVec = S.Stk.pop<Pointer>();
3119 if (!BaseVec.getFieldDesc()->isPrimitiveArray())
3120 return false;
3121
3122 const Pointer &Dst = S.Stk.peek<Pointer>();
3123
3124 unsigned BaseElements = BaseVec.getNumElems();
3125 unsigned SubElements = SubVec.getNumElems();
3126
3127 assert(SubElements != 0 && BaseElements != 0 &&
3128 (BaseElements % SubElements) == 0);
3129
3130 unsigned NumLanes = BaseElements / SubElements;
3131 unsigned Lane = static_cast<unsigned>(Index % NumLanes);
3132 unsigned InsertPos = Lane * SubElements;
3133
3134 PrimType ElemT = BaseVec.getFieldDesc()->getPrimType();
3135
3136 TYPE_SWITCH(ElemT, {
3137 for (unsigned I = 0; I != BaseElements; ++I)
3138 Dst.elem<T>(I) = BaseVec.elem<T>(I);
3139 for (unsigned I = 0; I != SubElements; ++I)
3140 Dst.elem<T>(InsertPos + I) = SubVec.elem<T>(I);
3141 });
3142
3144 return true;
3145}
3146
3148 const CallExpr *Call) {
3149 assert(Call->getNumArgs() == 1);
3150
3151 const Pointer &Source = S.Stk.pop<Pointer>();
3152 const Pointer &Dest = S.Stk.peek<Pointer>();
3153
3154 unsigned SourceLen = Source.getNumElems();
3155 QualType ElemQT = getElemType(Source);
3156 OptPrimType ElemT = S.getContext().classify(ElemQT);
3157 unsigned ElemBitWidth = S.getASTContext().getTypeSize(ElemQT);
3158
3159 bool DestUnsigned = Call->getCallReturnType(S.getASTContext())
3160 ->castAs<VectorType>()
3161 ->getElementType()
3163
3164 INT_TYPE_SWITCH_NO_BOOL(*ElemT, {
3165 APSInt MinIndex(ElemBitWidth, DestUnsigned);
3166 APSInt MinVal = Source.elem<T>(0).toAPSInt();
3167
3168 for (unsigned I = 1; I != SourceLen; ++I) {
3169 APSInt Val = Source.elem<T>(I).toAPSInt();
3170 if (MinVal.ugt(Val)) {
3171 MinVal = Val;
3172 MinIndex = I;
3173 }
3174 }
3175
3176 Dest.elem<T>(0) = static_cast<T>(MinVal);
3177 Dest.elem<T>(1) = static_cast<T>(MinIndex);
3178 for (unsigned I = 2; I != SourceLen; ++I) {
3179 Dest.elem<T>(I) = static_cast<T>(APSInt(ElemBitWidth, DestUnsigned));
3180 }
3181 });
3182 Dest.initializeAllElements();
3183 return true;
3184}
3185
3187 const CallExpr *Call, bool MaskZ) {
3188 assert(Call->getNumArgs() == 5);
3189
3190 APInt U = popToAPSInt(S, Call->getArg(4)); // Lane mask
3191 APInt Imm = popToAPSInt(S, Call->getArg(3)); // Ternary truth table
3192 const Pointer &C = S.Stk.pop<Pointer>();
3193 const Pointer &B = S.Stk.pop<Pointer>();
3194 const Pointer &A = S.Stk.pop<Pointer>();
3195 const Pointer &Dst = S.Stk.peek<Pointer>();
3196
3197 unsigned DstLen = A.getNumElems();
3198 QualType ElemQT = getElemType(A);
3199 OptPrimType ElemT = S.getContext().classify(ElemQT);
3200 unsigned LaneWidth = S.getASTContext().getTypeSize(ElemQT);
3201 bool DstUnsigned = ElemQT->isUnsignedIntegerOrEnumerationType();
3202
3203 INT_TYPE_SWITCH_NO_BOOL(*ElemT, {
3204 for (unsigned I = 0; I != DstLen; ++I) {
3205 APInt ALane = A.elem<T>(I).toAPSInt();
3206 APInt BLane = B.elem<T>(I).toAPSInt();
3207 APInt CLane = C.elem<T>(I).toAPSInt();
3208 APInt RLane(LaneWidth, 0);
3209 if (U[I]) { // If lane not masked, compute ternary logic.
3210 for (unsigned Bit = 0; Bit != LaneWidth; ++Bit) {
3211 unsigned ABit = ALane[Bit];
3212 unsigned BBit = BLane[Bit];
3213 unsigned CBit = CLane[Bit];
3214 unsigned Idx = (ABit << 2) | (BBit << 1) | (CBit);
3215 RLane.setBitVal(Bit, Imm[Idx]);
3216 }
3217 Dst.elem<T>(I) = static_cast<T>(APSInt(RLane, DstUnsigned));
3218 } else if (MaskZ) { // If zero masked, zero the lane.
3219 Dst.elem<T>(I) = static_cast<T>(APSInt(RLane, DstUnsigned));
3220 } else { // Just masked, put in A lane.
3221 Dst.elem<T>(I) = static_cast<T>(APSInt(ALane, DstUnsigned));
3222 }
3223 }
3224 });
3225 Dst.initializeAllElements();
3226 return true;
3227}
3228
3230 const CallExpr *Call, unsigned ID) {
3231 assert(Call->getNumArgs() == 2);
3232
3233 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
3234 const Pointer &Vec = S.Stk.pop<Pointer>();
3235 if (!Vec.getFieldDesc()->isPrimitiveArray())
3236 return false;
3237
3238 unsigned NumElems = Vec.getNumElems();
3239 unsigned Index =
3240 static_cast<unsigned>(ImmAPS.getZExtValue() & (NumElems - 1));
3241
3242 PrimType ElemT = Vec.getFieldDesc()->getPrimType();
3243 // FIXME(#161685): Replace float+int split with a numeric-only type switch
3244 if (ElemT == PT_Float) {
3245 S.Stk.push<Floating>(Vec.elem<Floating>(Index));
3246 return true;
3247 }
3249 APSInt V = Vec.elem<T>(Index).toAPSInt();
3250 pushInteger(S, V, Call->getType());
3251 });
3252
3253 return true;
3254}
3255
3257 const CallExpr *Call, unsigned ID) {
3258 assert(Call->getNumArgs() == 3);
3259
3260 APSInt ImmAPS = popToAPSInt(S, Call->getArg(2));
3261 APSInt ValAPS = popToAPSInt(S, Call->getArg(1));
3262
3263 const Pointer &Base = S.Stk.pop<Pointer>();
3264 if (!Base.getFieldDesc()->isPrimitiveArray())
3265 return false;
3266
3267 const Pointer &Dst = S.Stk.peek<Pointer>();
3268
3269 unsigned NumElems = Base.getNumElems();
3270 unsigned Index =
3271 static_cast<unsigned>(ImmAPS.getZExtValue() & (NumElems - 1));
3272
3273 PrimType ElemT = Base.getFieldDesc()->getPrimType();
3275 for (unsigned I = 0; I != NumElems; ++I)
3276 Dst.elem<T>(I) = Base.elem<T>(I);
3277 Dst.elem<T>(Index) = static_cast<T>(ValAPS);
3278 });
3279
3281 return true;
3282}
3283
3284static bool evalICmpImm(uint8_t Imm, const APSInt &A, const APSInt &B,
3285 bool IsUnsigned) {
3286 switch (Imm & 0x7) {
3287 case 0x00: // _MM_CMPINT_EQ
3288 return (A == B);
3289 case 0x01: // _MM_CMPINT_LT
3290 return IsUnsigned ? A.ult(B) : A.slt(B);
3291 case 0x02: // _MM_CMPINT_LE
3292 return IsUnsigned ? A.ule(B) : A.sle(B);
3293 case 0x03: // _MM_CMPINT_FALSE
3294 return false;
3295 case 0x04: // _MM_CMPINT_NE
3296 return (A != B);
3297 case 0x05: // _MM_CMPINT_NLT
3298 return IsUnsigned ? A.ugt(B) : A.sgt(B);
3299 case 0x06: // _MM_CMPINT_NLE
3300 return IsUnsigned ? A.uge(B) : A.sge(B);
3301 case 0x07: // _MM_CMPINT_TRUE
3302 return true;
3303 default:
3304 llvm_unreachable("Invalid Op");
3305 }
3306}
3307
3309 const CallExpr *Call, unsigned ID,
3310 bool IsUnsigned) {
3311 assert(Call->getNumArgs() == 4);
3312
3313 APSInt Mask = popToAPSInt(S, Call->getArg(3));
3314 APSInt Opcode = popToAPSInt(S, Call->getArg(2));
3315 unsigned CmpOp = static_cast<unsigned>(Opcode.getZExtValue());
3316 const Pointer &RHS = S.Stk.pop<Pointer>();
3317 const Pointer &LHS = S.Stk.pop<Pointer>();
3318
3319 assert(LHS.getNumElems() == RHS.getNumElems());
3320
3321 APInt RetMask = APInt::getZero(LHS.getNumElems());
3322 unsigned VectorLen = LHS.getNumElems();
3323 PrimType ElemT = LHS.getFieldDesc()->getPrimType();
3324
3325 for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
3326 APSInt A, B;
3328 A = LHS.elem<T>(ElemNum).toAPSInt();
3329 B = RHS.elem<T>(ElemNum).toAPSInt();
3330 });
3331 RetMask.setBitVal(ElemNum,
3332 Mask[ElemNum] && evalICmpImm(CmpOp, A, B, IsUnsigned));
3333 }
3334 pushInteger(S, RetMask, Call->getType());
3335 return true;
3336}
3337
3339 const CallExpr *Call) {
3340 assert(Call->getNumArgs() == 1);
3341
3342 QualType Arg0Type = Call->getArg(0)->getType();
3343 const auto *VecT = Arg0Type->castAs<VectorType>();
3344 PrimType ElemT = *S.getContext().classify(VecT->getElementType());
3345 unsigned NumElems = VecT->getNumElements();
3346 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
3347 const Pointer &Src = S.Stk.pop<Pointer>();
3348 const Pointer &Dst = S.Stk.peek<Pointer>();
3349
3350 for (unsigned I = 0; I != NumElems; ++I) {
3352 APSInt ElemI = Src.elem<T>(I).toAPSInt();
3353 APInt ConflictMask(ElemI.getBitWidth(), 0);
3354 for (unsigned J = 0; J != I; ++J) {
3355 APSInt ElemJ = Src.elem<T>(J).toAPSInt();
3356 ConflictMask.setBitVal(J, ElemI == ElemJ);
3357 }
3358 Dst.elem<T>(I) = static_cast<T>(APSInt(ConflictMask, DestUnsigned));
3359 });
3360 }
3362 return true;
3363}
3364
3366 const CallExpr *Call,
3367 unsigned ID) {
3368 assert(Call->getNumArgs() == 1);
3369
3370 const Pointer &Vec = S.Stk.pop<Pointer>();
3371 unsigned RetWidth = S.getASTContext().getIntWidth(Call->getType());
3372 APInt RetMask(RetWidth, 0);
3373
3374 unsigned VectorLen = Vec.getNumElems();
3375 PrimType ElemT = Vec.getFieldDesc()->getPrimType();
3376
3377 for (unsigned ElemNum = 0; ElemNum != VectorLen; ++ElemNum) {
3378 APSInt A;
3379 INT_TYPE_SWITCH_NO_BOOL(ElemT, { A = Vec.elem<T>(ElemNum).toAPSInt(); });
3380 unsigned MSB = A[A.getBitWidth() - 1];
3381 RetMask.setBitVal(ElemNum, MSB);
3382 }
3383 pushInteger(S, RetMask, Call->getType());
3384 return true;
3385}
3386
3388 InterpState &S, CodePtr OpPC, const CallExpr *Call,
3389 llvm::function_ref<std::pair<unsigned, int>(unsigned, unsigned)>
3390 GetSourceIndex) {
3391
3392 assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);
3393
3394 unsigned ShuffleMask = 0;
3395 Pointer A, MaskVector, B;
3396 bool IsVectorMask = false;
3397 bool IsSingleOperand = (Call->getNumArgs() == 2);
3398
3399 if (IsSingleOperand) {
3400 QualType MaskType = Call->getArg(1)->getType();
3401 if (MaskType->isVectorType()) {
3402 IsVectorMask = true;
3403 MaskVector = S.Stk.pop<Pointer>();
3404 A = S.Stk.pop<Pointer>();
3405 B = A;
3406 } else if (MaskType->isIntegerType()) {
3407 ShuffleMask = popToAPSInt(S, Call->getArg(1)).getZExtValue();
3408 A = S.Stk.pop<Pointer>();
3409 B = A;
3410 } else {
3411 return false;
3412 }
3413 } else {
3414 QualType Arg2Type = Call->getArg(2)->getType();
3415 if (Arg2Type->isVectorType()) {
3416 IsVectorMask = true;
3417 B = S.Stk.pop<Pointer>();
3418 MaskVector = S.Stk.pop<Pointer>();
3419 A = S.Stk.pop<Pointer>();
3420 } else if (Arg2Type->isIntegerType()) {
3421 ShuffleMask = popToAPSInt(S, Call->getArg(2)).getZExtValue();
3422 B = S.Stk.pop<Pointer>();
3423 A = S.Stk.pop<Pointer>();
3424 } else {
3425 return false;
3426 }
3427 }
3428
3429 QualType Arg0Type = Call->getArg(0)->getType();
3430 const auto *VecT = Arg0Type->castAs<VectorType>();
3431 PrimType ElemT = *S.getContext().classify(VecT->getElementType());
3432 unsigned NumElems = VecT->getNumElements();
3433
3434 const Pointer &Dst = S.Stk.peek<Pointer>();
3435
3436 PrimType MaskElemT = PT_Uint32;
3437 if (IsVectorMask) {
3438 QualType Arg1Type = Call->getArg(1)->getType();
3439 const auto *MaskVecT = Arg1Type->castAs<VectorType>();
3440 QualType MaskElemType = MaskVecT->getElementType();
3441 MaskElemT = *S.getContext().classify(MaskElemType);
3442 }
3443
3444 for (unsigned DstIdx = 0; DstIdx != NumElems; ++DstIdx) {
3445 if (IsVectorMask) {
3446 INT_TYPE_SWITCH(MaskElemT, {
3447 ShuffleMask = static_cast<unsigned>(MaskVector.elem<T>(DstIdx));
3448 });
3449 }
3450
3451 auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);
3452
3453 if (SrcIdx < 0) {
3454 // Zero out this element
3455 if (ElemT == PT_Float) {
3456 Dst.elem<Floating>(DstIdx) = Floating(
3457 S.getASTContext().getFloatTypeSemantics(VecT->getElementType()));
3458 } else {
3459 INT_TYPE_SWITCH_NO_BOOL(ElemT, { Dst.elem<T>(DstIdx) = T::from(0); });
3460 }
3461 } else {
3462 const Pointer &Src = (SrcVecIdx == 0) ? A : B;
3463 TYPE_SWITCH(ElemT, { Dst.elem<T>(DstIdx) = Src.elem<T>(SrcIdx); });
3464 }
3465 }
3467
3468 return true;
3469}
3470
3472 InterpState &S, CodePtr OpPC, const CallExpr *Call,
3473 llvm::function_ref<APInt(const APInt &, uint64_t)> ShiftOp,
3474 llvm::function_ref<APInt(const APInt &, unsigned)> OverflowOp) {
3475
3476 assert(Call->getNumArgs() == 2);
3477
3478 const Pointer &Count = S.Stk.pop<Pointer>();
3479 const Pointer &Source = S.Stk.pop<Pointer>();
3480
3481 QualType SourceType = Call->getArg(0)->getType();
3482 QualType CountType = Call->getArg(1)->getType();
3483 assert(SourceType->isVectorType() && CountType->isVectorType());
3484
3485 const auto *SourceVecT = SourceType->castAs<VectorType>();
3486 const auto *CountVecT = CountType->castAs<VectorType>();
3487 PrimType SourceElemT = *S.getContext().classify(SourceVecT->getElementType());
3488 PrimType CountElemT = *S.getContext().classify(CountVecT->getElementType());
3489
3490 const Pointer &Dst = S.Stk.peek<Pointer>();
3491
3492 unsigned DestEltWidth =
3493 S.getASTContext().getTypeSize(SourceVecT->getElementType());
3494 bool IsDestUnsigned = SourceVecT->getElementType()->isUnsignedIntegerType();
3495 unsigned DestLen = SourceVecT->getNumElements();
3496 unsigned CountEltWidth =
3497 S.getASTContext().getTypeSize(CountVecT->getElementType());
3498 unsigned NumBitsInQWord = 64;
3499 unsigned NumCountElts = NumBitsInQWord / CountEltWidth;
3500
3501 uint64_t CountLQWord = 0;
3502 for (unsigned EltIdx = 0; EltIdx != NumCountElts; ++EltIdx) {
3503 uint64_t Elt = 0;
3504 INT_TYPE_SWITCH(CountElemT,
3505 { Elt = static_cast<uint64_t>(Count.elem<T>(EltIdx)); });
3506 CountLQWord |= (Elt << (EltIdx * CountEltWidth));
3507 }
3508
3509 for (unsigned EltIdx = 0; EltIdx != DestLen; ++EltIdx) {
3510 APSInt Elt;
3511 INT_TYPE_SWITCH(SourceElemT, { Elt = Source.elem<T>(EltIdx).toAPSInt(); });
3512
3513 APInt Result;
3514 if (CountLQWord < DestEltWidth) {
3515 Result = ShiftOp(Elt, CountLQWord);
3516 } else {
3517 Result = OverflowOp(Elt, DestEltWidth);
3518 }
3519 if (IsDestUnsigned) {
3520 INT_TYPE_SWITCH(SourceElemT, {
3521 Dst.elem<T>(EltIdx) = T::from(Result.getZExtValue());
3522 });
3523 } else {
3524 INT_TYPE_SWITCH(SourceElemT, {
3525 Dst.elem<T>(EltIdx) = T::from(Result.getSExtValue());
3526 });
3527 }
3528 }
3529
3531 return true;
3532}
3533
3535 const CallExpr *Call) {
3536
3537 assert(Call->getNumArgs() == 3);
3538
3539 QualType SourceType = Call->getArg(0)->getType();
3540 QualType ShuffleMaskType = Call->getArg(1)->getType();
3541 QualType ZeroMaskType = Call->getArg(2)->getType();
3542 if (!SourceType->isVectorType() || !ShuffleMaskType->isVectorType() ||
3543 !ZeroMaskType->isIntegerType()) {
3544 return false;
3545 }
3546
3547 Pointer Source, ShuffleMask;
3548 APSInt ZeroMask = popToAPSInt(S, Call->getArg(2));
3549 ShuffleMask = S.Stk.pop<Pointer>();
3550 Source = S.Stk.pop<Pointer>();
3551
3552 const auto *SourceVecT = SourceType->castAs<VectorType>();
3553 const auto *ShuffleMaskVecT = ShuffleMaskType->castAs<VectorType>();
3554 assert(SourceVecT->getNumElements() == ShuffleMaskVecT->getNumElements());
3555 assert(ZeroMask.getBitWidth() == SourceVecT->getNumElements());
3556
3557 PrimType SourceElemT = *S.getContext().classify(SourceVecT->getElementType());
3558 PrimType ShuffleMaskElemT =
3559 *S.getContext().classify(ShuffleMaskVecT->getElementType());
3560
3561 unsigned NumBytesInQWord = 8;
3562 unsigned NumBitsInByte = 8;
3563 unsigned NumBytes = SourceVecT->getNumElements();
3564 unsigned NumQWords = NumBytes / NumBytesInQWord;
3565 unsigned RetWidth = ZeroMask.getBitWidth();
3566 APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true);
3567
3568 for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) {
3569 APInt SourceQWord(64, 0);
3570 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3571 uint64_t Byte = 0;
3572 INT_TYPE_SWITCH(SourceElemT, {
3573 Byte = static_cast<uint64_t>(
3574 Source.elem<T>(QWordId * NumBytesInQWord + ByteIdx));
3575 });
3576 SourceQWord.insertBits(APInt(8, Byte & 0xFF), ByteIdx * NumBitsInByte);
3577 }
3578
3579 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3580 unsigned SelIdx = QWordId * NumBytesInQWord + ByteIdx;
3581 unsigned M = 0;
3582 INT_TYPE_SWITCH(ShuffleMaskElemT, {
3583 M = static_cast<unsigned>(ShuffleMask.elem<T>(SelIdx)) & 0x3F;
3584 });
3585
3586 if (ZeroMask[SelIdx]) {
3587 RetMask.setBitVal(SelIdx, SourceQWord[M]);
3588 }
3589 }
3590 }
3591
3592 pushInteger(S, RetMask, Call->getType());
3593 return true;
3594}
3595
3597 const CallExpr *Call) {
3598 // Arguments are: vector of floats, rounding immediate
3599 assert(Call->getNumArgs() == 2);
3600
3601 APSInt Imm = popToAPSInt(S, Call->getArg(1));
3602 const Pointer &Src = S.Stk.pop<Pointer>();
3603 const Pointer &Dst = S.Stk.peek<Pointer>();
3604
3605 assert(Src.getFieldDesc()->isPrimitiveArray());
3606 assert(Dst.getFieldDesc()->isPrimitiveArray());
3607
3608 const auto *SrcVTy = Call->getArg(0)->getType()->castAs<VectorType>();
3609 unsigned SrcNumElems = SrcVTy->getNumElements();
3610 const auto *DstVTy = Call->getType()->castAs<VectorType>();
3611 unsigned DstNumElems = DstVTy->getNumElements();
3612
3613 const llvm::fltSemantics &HalfSem =
3615
3616 // imm[2] == 1 means use MXCSR rounding mode.
3617 // In that case, we can only evaluate if the conversion is exact.
3618 int ImmVal = Imm.getZExtValue();
3619 bool UseMXCSR = (ImmVal & 4) != 0;
3620 bool IsFPConstrained =
3621 Call->getFPFeaturesInEffect(S.getASTContext().getLangOpts())
3622 .isFPConstrained();
3623
3624 llvm::RoundingMode RM;
3625 if (!UseMXCSR) {
3626 switch (ImmVal & 3) {
3627 case 0:
3628 RM = llvm::RoundingMode::NearestTiesToEven;
3629 break;
3630 case 1:
3631 RM = llvm::RoundingMode::TowardNegative;
3632 break;
3633 case 2:
3634 RM = llvm::RoundingMode::TowardPositive;
3635 break;
3636 case 3:
3637 RM = llvm::RoundingMode::TowardZero;
3638 break;
3639 default:
3640 llvm_unreachable("Invalid immediate rounding mode");
3641 }
3642 } else {
3643 // For MXCSR, we must check for exactness. We can use any rounding mode
3644 // for the trial conversion since the result is the same if it's exact.
3645 RM = llvm::RoundingMode::NearestTiesToEven;
3646 }
3647
3648 QualType DstElemQT = Dst.getFieldDesc()->getElemQualType();
3649 PrimType DstElemT = *S.getContext().classify(DstElemQT);
3650
3651 for (unsigned I = 0; I != SrcNumElems; ++I) {
3652 Floating SrcVal = Src.elem<Floating>(I);
3653 APFloat DstVal = SrcVal.getAPFloat();
3654
3655 bool LostInfo;
3656 APFloat::opStatus St = DstVal.convert(HalfSem, RM, &LostInfo);
3657
3658 if (UseMXCSR && IsFPConstrained && St != APFloat::opOK) {
3659 S.FFDiag(S.Current->getSource(OpPC),
3660 diag::note_constexpr_dynamic_rounding);
3661 return false;
3662 }
3663
3664 INT_TYPE_SWITCH_NO_BOOL(DstElemT, {
3665 // Convert the destination value's bit pattern to an unsigned integer,
3666 // then reconstruct the element using the target type's 'from' method.
3667 uint64_t RawBits = DstVal.bitcastToAPInt().getZExtValue();
3668 Dst.elem<T>(I) = T::from(RawBits);
3669 });
3670 }
3671
3672 // Zero out remaining elements if the destination has more elements
3673 // (e.g., vcvtps2ph converting 4 floats to 8 shorts).
3674 if (DstNumElems > SrcNumElems) {
3675 for (unsigned I = SrcNumElems; I != DstNumElems; ++I) {
3676 INT_TYPE_SWITCH_NO_BOOL(DstElemT, { Dst.elem<T>(I) = T::from(0); });
3677 }
3678 }
3679
3680 Dst.initializeAllElements();
3681 return true;
3682}
3683
3685 const CallExpr *Call) {
3686 assert(Call->getNumArgs() == 2);
3687
3688 QualType ATy = Call->getArg(0)->getType();
3689 QualType BTy = Call->getArg(1)->getType();
3690 if (!ATy->isVectorType() || !BTy->isVectorType()) {
3691 return false;
3692 }
3693
3694 const Pointer &BPtr = S.Stk.pop<Pointer>();
3695 const Pointer &APtr = S.Stk.pop<Pointer>();
3696 const auto *AVecT = ATy->castAs<VectorType>();
3697 assert(AVecT->getNumElements() ==
3698 BTy->castAs<VectorType>()->getNumElements());
3699
3700 PrimType ElemT = *S.getContext().classify(AVecT->getElementType());
3701
3702 unsigned NumBytesInQWord = 8;
3703 unsigned NumBitsInByte = 8;
3704 unsigned NumBytes = AVecT->getNumElements();
3705 unsigned NumQWords = NumBytes / NumBytesInQWord;
3706 const Pointer &Dst = S.Stk.peek<Pointer>();
3707
3708 for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) {
3709 APInt BQWord(64, 0);
3710 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3711 unsigned Idx = QWordId * NumBytesInQWord + ByteIdx;
3712 INT_TYPE_SWITCH(ElemT, {
3713 uint64_t Byte = static_cast<uint64_t>(BPtr.elem<T>(Idx));
3714 BQWord.insertBits(APInt(8, Byte & 0xFF), ByteIdx * NumBitsInByte);
3715 });
3716 }
3717
3718 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3719 unsigned Idx = QWordId * NumBytesInQWord + ByteIdx;
3720 uint64_t Ctrl = 0;
3722 ElemT, { Ctrl = static_cast<uint64_t>(APtr.elem<T>(Idx)) & 0x3F; });
3723
3724 APInt Byte(8, 0);
3725 for (unsigned BitIdx = 0; BitIdx != NumBitsInByte; ++BitIdx) {
3726 Byte.setBitVal(BitIdx, BQWord[(Ctrl + BitIdx) & 0x3F]);
3727 }
3728 INT_TYPE_SWITCH(ElemT,
3729 { Dst.elem<T>(Idx) = T::from(Byte.getZExtValue()); });
3730 }
3731 }
3732
3734
3735 return true;
3736}
3737
3739 uint32_t BuiltinID) {
3740 if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
3741 return Invalid(S, OpPC);
3742
3743 const InterpFrame *Frame = S.Current;
3744 switch (BuiltinID) {
3745 case Builtin::BI__builtin_is_constant_evaluated:
3747
3748 case Builtin::BI__builtin_assume:
3749 case Builtin::BI__assume:
3750 return interp__builtin_assume(S, OpPC, Frame, Call);
3751
3752 case Builtin::BI__builtin_strcmp:
3753 case Builtin::BIstrcmp:
3754 case Builtin::BI__builtin_strncmp:
3755 case Builtin::BIstrncmp:
3756 case Builtin::BI__builtin_wcsncmp:
3757 case Builtin::BIwcsncmp:
3758 case Builtin::BI__builtin_wcscmp:
3759 case Builtin::BIwcscmp:
3760 return interp__builtin_strcmp(S, OpPC, Frame, Call, BuiltinID);
3761
3762 case Builtin::BI__builtin_strlen:
3763 case Builtin::BIstrlen:
3764 case Builtin::BI__builtin_wcslen:
3765 case Builtin::BIwcslen:
3766 return interp__builtin_strlen(S, OpPC, Frame, Call, BuiltinID);
3767
3768 case Builtin::BI__builtin_nan:
3769 case Builtin::BI__builtin_nanf:
3770 case Builtin::BI__builtin_nanl:
3771 case Builtin::BI__builtin_nanf16:
3772 case Builtin::BI__builtin_nanf128:
3773 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/false);
3774
3775 case Builtin::BI__builtin_nans:
3776 case Builtin::BI__builtin_nansf:
3777 case Builtin::BI__builtin_nansl:
3778 case Builtin::BI__builtin_nansf16:
3779 case Builtin::BI__builtin_nansf128:
3780 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/true);
3781
3782 case Builtin::BI__builtin_huge_val:
3783 case Builtin::BI__builtin_huge_valf:
3784 case Builtin::BI__builtin_huge_vall:
3785 case Builtin::BI__builtin_huge_valf16:
3786 case Builtin::BI__builtin_huge_valf128:
3787 case Builtin::BI__builtin_inf:
3788 case Builtin::BI__builtin_inff:
3789 case Builtin::BI__builtin_infl:
3790 case Builtin::BI__builtin_inff16:
3791 case Builtin::BI__builtin_inff128:
3792 return interp__builtin_inf(S, OpPC, Frame, Call);
3793
3794 case Builtin::BI__builtin_copysign:
3795 case Builtin::BI__builtin_copysignf:
3796 case Builtin::BI__builtin_copysignl:
3797 case Builtin::BI__builtin_copysignf128:
3798 return interp__builtin_copysign(S, OpPC, Frame);
3799
3800 case Builtin::BI__builtin_fmin:
3801 case Builtin::BI__builtin_fminf:
3802 case Builtin::BI__builtin_fminl:
3803 case Builtin::BI__builtin_fminf16:
3804 case Builtin::BI__builtin_fminf128:
3805 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/false);
3806
3807 case Builtin::BI__builtin_fminimum_num:
3808 case Builtin::BI__builtin_fminimum_numf:
3809 case Builtin::BI__builtin_fminimum_numl:
3810 case Builtin::BI__builtin_fminimum_numf16:
3811 case Builtin::BI__builtin_fminimum_numf128:
3812 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/true);
3813
3814 case Builtin::BI__builtin_fmax:
3815 case Builtin::BI__builtin_fmaxf:
3816 case Builtin::BI__builtin_fmaxl:
3817 case Builtin::BI__builtin_fmaxf16:
3818 case Builtin::BI__builtin_fmaxf128:
3819 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/false);
3820
3821 case Builtin::BI__builtin_fmaximum_num:
3822 case Builtin::BI__builtin_fmaximum_numf:
3823 case Builtin::BI__builtin_fmaximum_numl:
3824 case Builtin::BI__builtin_fmaximum_numf16:
3825 case Builtin::BI__builtin_fmaximum_numf128:
3826 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/true);
3827
3828 case Builtin::BI__builtin_isnan:
3829 return interp__builtin_isnan(S, OpPC, Frame, Call);
3830
3831 case Builtin::BI__builtin_issignaling:
3832 return interp__builtin_issignaling(S, OpPC, Frame, Call);
3833
3834 case Builtin::BI__builtin_isinf:
3835 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/false, Call);
3836
3837 case Builtin::BI__builtin_isinf_sign:
3838 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/true, Call);
3839
3840 case Builtin::BI__builtin_isfinite:
3841 return interp__builtin_isfinite(S, OpPC, Frame, Call);
3842
3843 case Builtin::BI__builtin_isnormal:
3844 return interp__builtin_isnormal(S, OpPC, Frame, Call);
3845
3846 case Builtin::BI__builtin_issubnormal:
3847 return interp__builtin_issubnormal(S, OpPC, Frame, Call);
3848
3849 case Builtin::BI__builtin_iszero:
3850 return interp__builtin_iszero(S, OpPC, Frame, Call);
3851
3852 case Builtin::BI__builtin_signbit:
3853 case Builtin::BI__builtin_signbitf:
3854 case Builtin::BI__builtin_signbitl:
3855 return interp__builtin_signbit(S, OpPC, Frame, Call);
3856
3857 case Builtin::BI__builtin_isgreater:
3858 case Builtin::BI__builtin_isgreaterequal:
3859 case Builtin::BI__builtin_isless:
3860 case Builtin::BI__builtin_islessequal:
3861 case Builtin::BI__builtin_islessgreater:
3862 case Builtin::BI__builtin_isunordered:
3863 return interp_floating_comparison(S, OpPC, Call, BuiltinID);
3864
3865 case Builtin::BI__builtin_isfpclass:
3866 return interp__builtin_isfpclass(S, OpPC, Frame, Call);
3867
3868 case Builtin::BI__builtin_fpclassify:
3869 return interp__builtin_fpclassify(S, OpPC, Frame, Call);
3870
3871 case Builtin::BI__builtin_fabs:
3872 case Builtin::BI__builtin_fabsf:
3873 case Builtin::BI__builtin_fabsl:
3874 case Builtin::BI__builtin_fabsf128:
3875 return interp__builtin_fabs(S, OpPC, Frame);
3876
3877 case Builtin::BI__builtin_abs:
3878 case Builtin::BI__builtin_labs:
3879 case Builtin::BI__builtin_llabs:
3880 return interp__builtin_abs(S, OpPC, Frame, Call);
3881
3882 case Builtin::BI__builtin_popcount:
3883 case Builtin::BI__builtin_popcountl:
3884 case Builtin::BI__builtin_popcountll:
3885 case Builtin::BI__builtin_popcountg:
3886 case Builtin::BI__popcnt16: // Microsoft variants of popcount
3887 case Builtin::BI__popcnt:
3888 case Builtin::BI__popcnt64:
3889 return interp__builtin_popcount(S, OpPC, Frame, Call);
3890
3891 case Builtin::BI__builtin_parity:
3892 case Builtin::BI__builtin_parityl:
3893 case Builtin::BI__builtin_parityll:
3895 S, OpPC, Call, [](const APSInt &Val) {
3896 return APInt(Val.getBitWidth(), Val.popcount() % 2);
3897 });
3898 case Builtin::BI__builtin_clrsb:
3899 case Builtin::BI__builtin_clrsbl:
3900 case Builtin::BI__builtin_clrsbll:
3902 S, OpPC, Call, [](const APSInt &Val) {
3903 return APInt(Val.getBitWidth(),
3904 Val.getBitWidth() - Val.getSignificantBits());
3905 });
3906 case Builtin::BI__builtin_bitreverse8:
3907 case Builtin::BI__builtin_bitreverse16:
3908 case Builtin::BI__builtin_bitreverse32:
3909 case Builtin::BI__builtin_bitreverse64:
3911 S, OpPC, Call, [](const APSInt &Val) { return Val.reverseBits(); });
3912
3913 case Builtin::BI__builtin_classify_type:
3914 return interp__builtin_classify_type(S, OpPC, Frame, Call);
3915
3916 case Builtin::BI__builtin_expect:
3917 case Builtin::BI__builtin_expect_with_probability:
3918 return interp__builtin_expect(S, OpPC, Frame, Call);
3919
3920 case Builtin::BI__builtin_rotateleft8:
3921 case Builtin::BI__builtin_rotateleft16:
3922 case Builtin::BI__builtin_rotateleft32:
3923 case Builtin::BI__builtin_rotateleft64:
3924 case Builtin::BI_rotl8: // Microsoft variants of rotate left
3925 case Builtin::BI_rotl16:
3926 case Builtin::BI_rotl:
3927 case Builtin::BI_lrotl:
3928 case Builtin::BI_rotl64:
3930 S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) {
3931 return Value.rotl(Amount);
3932 });
3933
3934 case Builtin::BI__builtin_rotateright8:
3935 case Builtin::BI__builtin_rotateright16:
3936 case Builtin::BI__builtin_rotateright32:
3937 case Builtin::BI__builtin_rotateright64:
3938 case Builtin::BI_rotr8: // Microsoft variants of rotate right
3939 case Builtin::BI_rotr16:
3940 case Builtin::BI_rotr:
3941 case Builtin::BI_lrotr:
3942 case Builtin::BI_rotr64:
3944 S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) {
3945 return Value.rotr(Amount);
3946 });
3947
3948 case Builtin::BI__builtin_ffs:
3949 case Builtin::BI__builtin_ffsl:
3950 case Builtin::BI__builtin_ffsll:
3952 S, OpPC, Call, [](const APSInt &Val) {
3953 return APInt(Val.getBitWidth(),
3954 Val.isZero() ? 0u : Val.countTrailingZeros() + 1u);
3955 });
3956
3957 case Builtin::BIaddressof:
3958 case Builtin::BI__addressof:
3959 case Builtin::BI__builtin_addressof:
3960 assert(isNoopBuiltin(BuiltinID));
3961 return interp__builtin_addressof(S, OpPC, Frame, Call);
3962
3963 case Builtin::BIas_const:
3964 case Builtin::BIforward:
3965 case Builtin::BIforward_like:
3966 case Builtin::BImove:
3967 case Builtin::BImove_if_noexcept:
3968 assert(isNoopBuiltin(BuiltinID));
3969 return interp__builtin_move(S, OpPC, Frame, Call);
3970
3971 case Builtin::BI__builtin_eh_return_data_regno:
3973
3974 case Builtin::BI__builtin_launder:
3975 assert(isNoopBuiltin(BuiltinID));
3976 return true;
3977
3978 case Builtin::BI__builtin_add_overflow:
3979 case Builtin::BI__builtin_sub_overflow:
3980 case Builtin::BI__builtin_mul_overflow:
3981 case Builtin::BI__builtin_sadd_overflow:
3982 case Builtin::BI__builtin_uadd_overflow:
3983 case Builtin::BI__builtin_uaddl_overflow:
3984 case Builtin::BI__builtin_uaddll_overflow:
3985 case Builtin::BI__builtin_usub_overflow:
3986 case Builtin::BI__builtin_usubl_overflow:
3987 case Builtin::BI__builtin_usubll_overflow:
3988 case Builtin::BI__builtin_umul_overflow:
3989 case Builtin::BI__builtin_umull_overflow:
3990 case Builtin::BI__builtin_umulll_overflow:
3991 case Builtin::BI__builtin_saddl_overflow:
3992 case Builtin::BI__builtin_saddll_overflow:
3993 case Builtin::BI__builtin_ssub_overflow:
3994 case Builtin::BI__builtin_ssubl_overflow:
3995 case Builtin::BI__builtin_ssubll_overflow:
3996 case Builtin::BI__builtin_smul_overflow:
3997 case Builtin::BI__builtin_smull_overflow:
3998 case Builtin::BI__builtin_smulll_overflow:
3999 return interp__builtin_overflowop(S, OpPC, Call, BuiltinID);
4000
4001 case Builtin::BI__builtin_addcb:
4002 case Builtin::BI__builtin_addcs:
4003 case Builtin::BI__builtin_addc:
4004 case Builtin::BI__builtin_addcl:
4005 case Builtin::BI__builtin_addcll:
4006 case Builtin::BI__builtin_subcb:
4007 case Builtin::BI__builtin_subcs:
4008 case Builtin::BI__builtin_subc:
4009 case Builtin::BI__builtin_subcl:
4010 case Builtin::BI__builtin_subcll:
4011 return interp__builtin_carryop(S, OpPC, Frame, Call, BuiltinID);
4012
4013 case Builtin::BI__builtin_clz:
4014 case Builtin::BI__builtin_clzl:
4015 case Builtin::BI__builtin_clzll:
4016 case Builtin::BI__builtin_clzs:
4017 case Builtin::BI__builtin_clzg:
4018 case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
4019 case Builtin::BI__lzcnt:
4020 case Builtin::BI__lzcnt64:
4021 return interp__builtin_clz(S, OpPC, Frame, Call, BuiltinID);
4022
4023 case Builtin::BI__builtin_ctz:
4024 case Builtin::BI__builtin_ctzl:
4025 case Builtin::BI__builtin_ctzll:
4026 case Builtin::BI__builtin_ctzs:
4027 case Builtin::BI__builtin_ctzg:
4028 return interp__builtin_ctz(S, OpPC, Frame, Call, BuiltinID);
4029
4030 case Builtin::BI__builtin_elementwise_clzg:
4031 case Builtin::BI__builtin_elementwise_ctzg:
4033 BuiltinID);
4034 case Builtin::BI__builtin_bswapg:
4035 case Builtin::BI__builtin_bswap16:
4036 case Builtin::BI__builtin_bswap32:
4037 case Builtin::BI__builtin_bswap64:
4038 return interp__builtin_bswap(S, OpPC, Frame, Call);
4039
4040 case Builtin::BI__atomic_always_lock_free:
4041 case Builtin::BI__atomic_is_lock_free:
4042 return interp__builtin_atomic_lock_free(S, OpPC, Frame, Call, BuiltinID);
4043
4044 case Builtin::BI__c11_atomic_is_lock_free:
4046
4047 case Builtin::BI__builtin_complex:
4048 return interp__builtin_complex(S, OpPC, Frame, Call);
4049
4050 case Builtin::BI__builtin_is_aligned:
4051 case Builtin::BI__builtin_align_up:
4052 case Builtin::BI__builtin_align_down:
4053 return interp__builtin_is_aligned_up_down(S, OpPC, Frame, Call, BuiltinID);
4054
4055 case Builtin::BI__builtin_assume_aligned:
4056 return interp__builtin_assume_aligned(S, OpPC, Frame, Call);
4057
4058 case clang::X86::BI__builtin_ia32_bextr_u32:
4059 case clang::X86::BI__builtin_ia32_bextr_u64:
4060 case clang::X86::BI__builtin_ia32_bextri_u32:
4061 case clang::X86::BI__builtin_ia32_bextri_u64:
4063 S, OpPC, Call, [](const APSInt &Val, const APSInt &Idx) {
4064 unsigned BitWidth = Val.getBitWidth();
4065 uint64_t Shift = Idx.extractBitsAsZExtValue(8, 0);
4066 uint64_t Length = Idx.extractBitsAsZExtValue(8, 8);
4067 if (Length > BitWidth) {
4068 Length = BitWidth;
4069 }
4070
4071 // Handle out of bounds cases.
4072 if (Length == 0 || Shift >= BitWidth)
4073 return APInt(BitWidth, 0);
4074
4075 uint64_t Result = Val.getZExtValue() >> Shift;
4076 Result &= llvm::maskTrailingOnes<uint64_t>(Length);
4077 return APInt(BitWidth, Result);
4078 });
4079
4080 case clang::X86::BI__builtin_ia32_bzhi_si:
4081 case clang::X86::BI__builtin_ia32_bzhi_di:
4083 S, OpPC, Call, [](const APSInt &Val, const APSInt &Idx) {
4084 unsigned BitWidth = Val.getBitWidth();
4085 uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);
4086 APSInt Result = Val;
4087
4088 if (Index < BitWidth)
4089 Result.clearHighBits(BitWidth - Index);
4090
4091 return Result;
4092 });
4093
4094 case clang::X86::BI__builtin_ia32_ktestcqi:
4095 case clang::X86::BI__builtin_ia32_ktestchi:
4096 case clang::X86::BI__builtin_ia32_ktestcsi:
4097 case clang::X86::BI__builtin_ia32_ktestcdi:
4099 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4100 return APInt(sizeof(unsigned char) * 8, (~A & B) == 0);
4101 });
4102
4103 case clang::X86::BI__builtin_ia32_ktestzqi:
4104 case clang::X86::BI__builtin_ia32_ktestzhi:
4105 case clang::X86::BI__builtin_ia32_ktestzsi:
4106 case clang::X86::BI__builtin_ia32_ktestzdi:
4108 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4109 return APInt(sizeof(unsigned char) * 8, (A & B) == 0);
4110 });
4111
4112 case clang::X86::BI__builtin_ia32_kortestcqi:
4113 case clang::X86::BI__builtin_ia32_kortestchi:
4114 case clang::X86::BI__builtin_ia32_kortestcsi:
4115 case clang::X86::BI__builtin_ia32_kortestcdi:
4117 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4118 return APInt(sizeof(unsigned char) * 8, ~(A | B) == 0);
4119 });
4120
4121 case clang::X86::BI__builtin_ia32_kortestzqi:
4122 case clang::X86::BI__builtin_ia32_kortestzhi:
4123 case clang::X86::BI__builtin_ia32_kortestzsi:
4124 case clang::X86::BI__builtin_ia32_kortestzdi:
4126 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4127 return APInt(sizeof(unsigned char) * 8, (A | B) == 0);
4128 });
4129
4130 case clang::X86::BI__builtin_ia32_lzcnt_u16:
4131 case clang::X86::BI__builtin_ia32_lzcnt_u32:
4132 case clang::X86::BI__builtin_ia32_lzcnt_u64:
4134 S, OpPC, Call, [](const APSInt &Src) {
4135 return APInt(Src.getBitWidth(), Src.countLeadingZeros());
4136 });
4137
4138 case clang::X86::BI__builtin_ia32_tzcnt_u16:
4139 case clang::X86::BI__builtin_ia32_tzcnt_u32:
4140 case clang::X86::BI__builtin_ia32_tzcnt_u64:
4142 S, OpPC, Call, [](const APSInt &Src) {
4143 return APInt(Src.getBitWidth(), Src.countTrailingZeros());
4144 });
4145
4146 case clang::X86::BI__builtin_ia32_pdep_si:
4147 case clang::X86::BI__builtin_ia32_pdep_di:
4149 S, OpPC, Call, [](const APSInt &Val, const APSInt &Mask) {
4150 unsigned BitWidth = Val.getBitWidth();
4151 APInt Result = APInt::getZero(BitWidth);
4152
4153 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
4154 if (Mask[I])
4155 Result.setBitVal(I, Val[P++]);
4156 }
4157
4158 return Result;
4159 });
4160
4161 case clang::X86::BI__builtin_ia32_pext_si:
4162 case clang::X86::BI__builtin_ia32_pext_di:
4164 S, OpPC, Call, [](const APSInt &Val, const APSInt &Mask) {
4165 unsigned BitWidth = Val.getBitWidth();
4166 APInt Result = APInt::getZero(BitWidth);
4167
4168 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
4169 if (Mask[I])
4170 Result.setBitVal(P++, Val[I]);
4171 }
4172
4173 return Result;
4174 });
4175
4176 case clang::X86::BI__builtin_ia32_addcarryx_u32:
4177 case clang::X86::BI__builtin_ia32_addcarryx_u64:
4178 case clang::X86::BI__builtin_ia32_subborrow_u32:
4179 case clang::X86::BI__builtin_ia32_subborrow_u64:
4181 BuiltinID);
4182
4183 case Builtin::BI__builtin_os_log_format_buffer_size:
4185
4186 case Builtin::BI__builtin_ptrauth_string_discriminator:
4188
4189 case Builtin::BI__builtin_infer_alloc_token:
4191
4192 case Builtin::BI__noop:
4193 pushInteger(S, 0, Call->getType());
4194 return true;
4195
4196 case Builtin::BI__builtin_operator_new:
4197 return interp__builtin_operator_new(S, OpPC, Frame, Call);
4198
4199 case Builtin::BI__builtin_operator_delete:
4200 return interp__builtin_operator_delete(S, OpPC, Frame, Call);
4201
4202 case Builtin::BI__arithmetic_fence:
4204
4205 case Builtin::BI__builtin_reduce_add:
4206 case Builtin::BI__builtin_reduce_mul:
4207 case Builtin::BI__builtin_reduce_and:
4208 case Builtin::BI__builtin_reduce_or:
4209 case Builtin::BI__builtin_reduce_xor:
4210 case Builtin::BI__builtin_reduce_min:
4211 case Builtin::BI__builtin_reduce_max:
4212 return interp__builtin_vector_reduce(S, OpPC, Call, BuiltinID);
4213
4214 case Builtin::BI__builtin_elementwise_popcount:
4215 case Builtin::BI__builtin_elementwise_bitreverse:
4217 BuiltinID);
4218
4219 case Builtin::BI__builtin_elementwise_abs:
4220 return interp__builtin_elementwise_abs(S, OpPC, Frame, Call, BuiltinID);
4221
4222 case Builtin::BI__builtin_memcpy:
4223 case Builtin::BImemcpy:
4224 case Builtin::BI__builtin_wmemcpy:
4225 case Builtin::BIwmemcpy:
4226 case Builtin::BI__builtin_memmove:
4227 case Builtin::BImemmove:
4228 case Builtin::BI__builtin_wmemmove:
4229 case Builtin::BIwmemmove:
4230 return interp__builtin_memcpy(S, OpPC, Frame, Call, BuiltinID);
4231
4232 case Builtin::BI__builtin_memcmp:
4233 case Builtin::BImemcmp:
4234 case Builtin::BI__builtin_bcmp:
4235 case Builtin::BIbcmp:
4236 case Builtin::BI__builtin_wmemcmp:
4237 case Builtin::BIwmemcmp:
4238 return interp__builtin_memcmp(S, OpPC, Frame, Call, BuiltinID);
4239
4240 case Builtin::BImemchr:
4241 case Builtin::BI__builtin_memchr:
4242 case Builtin::BIstrchr:
4243 case Builtin::BI__builtin_strchr:
4244 case Builtin::BIwmemchr:
4245 case Builtin::BI__builtin_wmemchr:
4246 case Builtin::BIwcschr:
4247 case Builtin::BI__builtin_wcschr:
4248 case Builtin::BI__builtin_char_memchr:
4249 return interp__builtin_memchr(S, OpPC, Call, BuiltinID);
4250
4251 case Builtin::BI__builtin_object_size:
4252 case Builtin::BI__builtin_dynamic_object_size:
4253 return interp__builtin_object_size(S, OpPC, Frame, Call);
4254
4255 case Builtin::BI__builtin_is_within_lifetime:
4257
4258 case Builtin::BI__builtin_elementwise_add_sat:
4260 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4261 return LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS);
4262 });
4263
4264 case Builtin::BI__builtin_elementwise_sub_sat:
4266 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4267 return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
4268 });
4269 case X86::BI__builtin_ia32_extract128i256:
4270 case X86::BI__builtin_ia32_vextractf128_pd256:
4271 case X86::BI__builtin_ia32_vextractf128_ps256:
4272 case X86::BI__builtin_ia32_vextractf128_si256:
4273 return interp__builtin_x86_extract_vector(S, OpPC, Call, BuiltinID);
4274
4275 case X86::BI__builtin_ia32_extractf32x4_256_mask:
4276 case X86::BI__builtin_ia32_extractf32x4_mask:
4277 case X86::BI__builtin_ia32_extractf32x8_mask:
4278 case X86::BI__builtin_ia32_extractf64x2_256_mask:
4279 case X86::BI__builtin_ia32_extractf64x2_512_mask:
4280 case X86::BI__builtin_ia32_extractf64x4_mask:
4281 case X86::BI__builtin_ia32_extracti32x4_256_mask:
4282 case X86::BI__builtin_ia32_extracti32x4_mask:
4283 case X86::BI__builtin_ia32_extracti32x8_mask:
4284 case X86::BI__builtin_ia32_extracti64x2_256_mask:
4285 case X86::BI__builtin_ia32_extracti64x2_512_mask:
4286 case X86::BI__builtin_ia32_extracti64x4_mask:
4287 return interp__builtin_x86_extract_vector_masked(S, OpPC, Call, BuiltinID);
4288
4289 case clang::X86::BI__builtin_ia32_pmulhrsw128:
4290 case clang::X86::BI__builtin_ia32_pmulhrsw256:
4291 case clang::X86::BI__builtin_ia32_pmulhrsw512:
4293 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4294 return (llvm::APIntOps::mulsExtended(LHS, RHS).ashr(14) + 1)
4295 .extractBits(16, 1);
4296 });
4297
4298 case clang::X86::BI__builtin_ia32_movmskps:
4299 case clang::X86::BI__builtin_ia32_movmskpd:
4300 case clang::X86::BI__builtin_ia32_pmovmskb128:
4301 case clang::X86::BI__builtin_ia32_pmovmskb256:
4302 case clang::X86::BI__builtin_ia32_movmskps256:
4303 case clang::X86::BI__builtin_ia32_movmskpd256: {
4304 return interp__builtin_ia32_movmsk_op(S, OpPC, Call);
4305 }
4306
4307 case X86::BI__builtin_ia32_psignb128:
4308 case X86::BI__builtin_ia32_psignb256:
4309 case X86::BI__builtin_ia32_psignw128:
4310 case X86::BI__builtin_ia32_psignw256:
4311 case X86::BI__builtin_ia32_psignd128:
4312 case X86::BI__builtin_ia32_psignd256:
4314 S, OpPC, Call, [](const APInt &AElem, const APInt &BElem) {
4315 if (BElem.isZero())
4316 return APInt::getZero(AElem.getBitWidth());
4317 if (BElem.isNegative())
4318 return -AElem;
4319 return AElem;
4320 });
4321
4322 case clang::X86::BI__builtin_ia32_pavgb128:
4323 case clang::X86::BI__builtin_ia32_pavgw128:
4324 case clang::X86::BI__builtin_ia32_pavgb256:
4325 case clang::X86::BI__builtin_ia32_pavgw256:
4326 case clang::X86::BI__builtin_ia32_pavgb512:
4327 case clang::X86::BI__builtin_ia32_pavgw512:
4329 llvm::APIntOps::avgCeilU);
4330
4331 case clang::X86::BI__builtin_ia32_pmaddubsw128:
4332 case clang::X86::BI__builtin_ia32_pmaddubsw256:
4333 case clang::X86::BI__builtin_ia32_pmaddubsw512:
4335 S, OpPC, Call,
4336 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4337 const APSInt &HiRHS) {
4338 unsigned BitWidth = 2 * LoLHS.getBitWidth();
4339 return (LoLHS.zext(BitWidth) * LoRHS.sext(BitWidth))
4340 .sadd_sat((HiLHS.zext(BitWidth) * HiRHS.sext(BitWidth)));
4341 });
4342
4343 case clang::X86::BI__builtin_ia32_pmaddwd128:
4344 case clang::X86::BI__builtin_ia32_pmaddwd256:
4345 case clang::X86::BI__builtin_ia32_pmaddwd512:
4347 S, OpPC, Call,
4348 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4349 const APSInt &HiRHS) {
4350 unsigned BitWidth = 2 * LoLHS.getBitWidth();
4351 return (LoLHS.sext(BitWidth) * LoRHS.sext(BitWidth)) +
4352 (HiLHS.sext(BitWidth) * HiRHS.sext(BitWidth));
4353 });
4354
4355 case clang::X86::BI__builtin_ia32_pmulhuw128:
4356 case clang::X86::BI__builtin_ia32_pmulhuw256:
4357 case clang::X86::BI__builtin_ia32_pmulhuw512:
4359 llvm::APIntOps::mulhu);
4360
4361 case clang::X86::BI__builtin_ia32_pmulhw128:
4362 case clang::X86::BI__builtin_ia32_pmulhw256:
4363 case clang::X86::BI__builtin_ia32_pmulhw512:
4365 llvm::APIntOps::mulhs);
4366
4367 case clang::X86::BI__builtin_ia32_psllv2di:
4368 case clang::X86::BI__builtin_ia32_psllv4di:
4369 case clang::X86::BI__builtin_ia32_psllv4si:
4370 case clang::X86::BI__builtin_ia32_psllv8di:
4371 case clang::X86::BI__builtin_ia32_psllv8hi:
4372 case clang::X86::BI__builtin_ia32_psllv8si:
4373 case clang::X86::BI__builtin_ia32_psllv16hi:
4374 case clang::X86::BI__builtin_ia32_psllv16si:
4375 case clang::X86::BI__builtin_ia32_psllv32hi:
4376 case clang::X86::BI__builtin_ia32_psllwi128:
4377 case clang::X86::BI__builtin_ia32_psllwi256:
4378 case clang::X86::BI__builtin_ia32_psllwi512:
4379 case clang::X86::BI__builtin_ia32_pslldi128:
4380 case clang::X86::BI__builtin_ia32_pslldi256:
4381 case clang::X86::BI__builtin_ia32_pslldi512:
4382 case clang::X86::BI__builtin_ia32_psllqi128:
4383 case clang::X86::BI__builtin_ia32_psllqi256:
4384 case clang::X86::BI__builtin_ia32_psllqi512:
4386 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4387 if (RHS.uge(LHS.getBitWidth())) {
4388 return APInt::getZero(LHS.getBitWidth());
4389 }
4390 return LHS.shl(RHS.getZExtValue());
4391 });
4392
4393 case clang::X86::BI__builtin_ia32_psrav4si:
4394 case clang::X86::BI__builtin_ia32_psrav8di:
4395 case clang::X86::BI__builtin_ia32_psrav8hi:
4396 case clang::X86::BI__builtin_ia32_psrav8si:
4397 case clang::X86::BI__builtin_ia32_psrav16hi:
4398 case clang::X86::BI__builtin_ia32_psrav16si:
4399 case clang::X86::BI__builtin_ia32_psrav32hi:
4400 case clang::X86::BI__builtin_ia32_psravq128:
4401 case clang::X86::BI__builtin_ia32_psravq256:
4402 case clang::X86::BI__builtin_ia32_psrawi128:
4403 case clang::X86::BI__builtin_ia32_psrawi256:
4404 case clang::X86::BI__builtin_ia32_psrawi512:
4405 case clang::X86::BI__builtin_ia32_psradi128:
4406 case clang::X86::BI__builtin_ia32_psradi256:
4407 case clang::X86::BI__builtin_ia32_psradi512:
4408 case clang::X86::BI__builtin_ia32_psraqi128:
4409 case clang::X86::BI__builtin_ia32_psraqi256:
4410 case clang::X86::BI__builtin_ia32_psraqi512:
4412 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4413 if (RHS.uge(LHS.getBitWidth())) {
4414 return LHS.ashr(LHS.getBitWidth() - 1);
4415 }
4416 return LHS.ashr(RHS.getZExtValue());
4417 });
4418
4419 case clang::X86::BI__builtin_ia32_psrlv2di:
4420 case clang::X86::BI__builtin_ia32_psrlv4di:
4421 case clang::X86::BI__builtin_ia32_psrlv4si:
4422 case clang::X86::BI__builtin_ia32_psrlv8di:
4423 case clang::X86::BI__builtin_ia32_psrlv8hi:
4424 case clang::X86::BI__builtin_ia32_psrlv8si:
4425 case clang::X86::BI__builtin_ia32_psrlv16hi:
4426 case clang::X86::BI__builtin_ia32_psrlv16si:
4427 case clang::X86::BI__builtin_ia32_psrlv32hi:
4428 case clang::X86::BI__builtin_ia32_psrlwi128:
4429 case clang::X86::BI__builtin_ia32_psrlwi256:
4430 case clang::X86::BI__builtin_ia32_psrlwi512:
4431 case clang::X86::BI__builtin_ia32_psrldi128:
4432 case clang::X86::BI__builtin_ia32_psrldi256:
4433 case clang::X86::BI__builtin_ia32_psrldi512:
4434 case clang::X86::BI__builtin_ia32_psrlqi128:
4435 case clang::X86::BI__builtin_ia32_psrlqi256:
4436 case clang::X86::BI__builtin_ia32_psrlqi512:
4438 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4439 if (RHS.uge(LHS.getBitWidth())) {
4440 return APInt::getZero(LHS.getBitWidth());
4441 }
4442 return LHS.lshr(RHS.getZExtValue());
4443 });
4444 case clang::X86::BI__builtin_ia32_packsswb128:
4445 case clang::X86::BI__builtin_ia32_packsswb256:
4446 case clang::X86::BI__builtin_ia32_packsswb512:
4447 case clang::X86::BI__builtin_ia32_packssdw128:
4448 case clang::X86::BI__builtin_ia32_packssdw256:
4449 case clang::X86::BI__builtin_ia32_packssdw512:
4450 return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) {
4451 return APInt(Src).truncSSat(Src.getBitWidth() / 2);
4452 });
4453 case clang::X86::BI__builtin_ia32_packusdw128:
4454 case clang::X86::BI__builtin_ia32_packusdw256:
4455 case clang::X86::BI__builtin_ia32_packusdw512:
4456 case clang::X86::BI__builtin_ia32_packuswb128:
4457 case clang::X86::BI__builtin_ia32_packuswb256:
4458 case clang::X86::BI__builtin_ia32_packuswb512:
4459 return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) {
4460 unsigned DstBits = Src.getBitWidth() / 2;
4461 if (Src.isNegative())
4462 return APInt::getZero(DstBits);
4463 if (Src.isIntN(DstBits))
4464 return APInt(Src).trunc(DstBits);
4465 return APInt::getAllOnes(DstBits);
4466 });
4467
4468 case clang::X86::BI__builtin_ia32_selectss_128:
4469 case clang::X86::BI__builtin_ia32_selectsd_128:
4470 case clang::X86::BI__builtin_ia32_selectsh_128:
4471 case clang::X86::BI__builtin_ia32_selectsbf_128:
4473 case clang::X86::BI__builtin_ia32_vprotbi:
4474 case clang::X86::BI__builtin_ia32_vprotdi:
4475 case clang::X86::BI__builtin_ia32_vprotqi:
4476 case clang::X86::BI__builtin_ia32_vprotwi:
4477 case clang::X86::BI__builtin_ia32_prold128:
4478 case clang::X86::BI__builtin_ia32_prold256:
4479 case clang::X86::BI__builtin_ia32_prold512:
4480 case clang::X86::BI__builtin_ia32_prolq128:
4481 case clang::X86::BI__builtin_ia32_prolq256:
4482 case clang::X86::BI__builtin_ia32_prolq512:
4484 S, OpPC, Call,
4485 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotl(RHS); });
4486
4487 case clang::X86::BI__builtin_ia32_prord128:
4488 case clang::X86::BI__builtin_ia32_prord256:
4489 case clang::X86::BI__builtin_ia32_prord512:
4490 case clang::X86::BI__builtin_ia32_prorq128:
4491 case clang::X86::BI__builtin_ia32_prorq256:
4492 case clang::X86::BI__builtin_ia32_prorq512:
4494 S, OpPC, Call,
4495 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotr(RHS); });
4496
4497 case Builtin::BI__builtin_elementwise_max:
4498 case Builtin::BI__builtin_elementwise_min:
4499 return interp__builtin_elementwise_maxmin(S, OpPC, Call, BuiltinID);
4500
4501 case clang::X86::BI__builtin_ia32_phaddw128:
4502 case clang::X86::BI__builtin_ia32_phaddw256:
4503 case clang::X86::BI__builtin_ia32_phaddd128:
4504 case clang::X86::BI__builtin_ia32_phaddd256:
4506 S, OpPC, Call,
4507 [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
4508 case clang::X86::BI__builtin_ia32_phaddsw128:
4509 case clang::X86::BI__builtin_ia32_phaddsw256:
4511 S, OpPC, Call,
4512 [](const APSInt &LHS, const APSInt &RHS) { return LHS.sadd_sat(RHS); });
4513 case clang::X86::BI__builtin_ia32_phsubw128:
4514 case clang::X86::BI__builtin_ia32_phsubw256:
4515 case clang::X86::BI__builtin_ia32_phsubd128:
4516 case clang::X86::BI__builtin_ia32_phsubd256:
4518 S, OpPC, Call,
4519 [](const APSInt &LHS, const APSInt &RHS) { return LHS - RHS; });
4520 case clang::X86::BI__builtin_ia32_phsubsw128:
4521 case clang::X86::BI__builtin_ia32_phsubsw256:
4523 S, OpPC, Call,
4524 [](const APSInt &LHS, const APSInt &RHS) { return LHS.ssub_sat(RHS); });
4525 case clang::X86::BI__builtin_ia32_haddpd:
4526 case clang::X86::BI__builtin_ia32_haddps:
4527 case clang::X86::BI__builtin_ia32_haddpd256:
4528 case clang::X86::BI__builtin_ia32_haddps256:
4530 S, OpPC, Call,
4531 [](const APFloat &LHS, const APFloat &RHS, llvm::RoundingMode RM) {
4532 APFloat F = LHS;
4533 F.add(RHS, RM);
4534 return F;
4535 });
4536 case clang::X86::BI__builtin_ia32_hsubpd:
4537 case clang::X86::BI__builtin_ia32_hsubps:
4538 case clang::X86::BI__builtin_ia32_hsubpd256:
4539 case clang::X86::BI__builtin_ia32_hsubps256:
4541 S, OpPC, Call,
4542 [](const APFloat &LHS, const APFloat &RHS, llvm::RoundingMode RM) {
4543 APFloat F = LHS;
4544 F.subtract(RHS, RM);
4545 return F;
4546 });
4547 case clang::X86::BI__builtin_ia32_addsubpd:
4548 case clang::X86::BI__builtin_ia32_addsubps:
4549 case clang::X86::BI__builtin_ia32_addsubpd256:
4550 case clang::X86::BI__builtin_ia32_addsubps256:
4551 return interp__builtin_ia32_addsub(S, OpPC, Call);
4552
4553 case clang::X86::BI__builtin_ia32_pmuldq128:
4554 case clang::X86::BI__builtin_ia32_pmuldq256:
4555 case clang::X86::BI__builtin_ia32_pmuldq512:
4557 S, OpPC, Call,
4558 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4559 const APSInt &HiRHS) {
4560 return llvm::APIntOps::mulsExtended(LoLHS, LoRHS);
4561 });
4562
4563 case clang::X86::BI__builtin_ia32_pmuludq128:
4564 case clang::X86::BI__builtin_ia32_pmuludq256:
4565 case clang::X86::BI__builtin_ia32_pmuludq512:
4567 S, OpPC, Call,
4568 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4569 const APSInt &HiRHS) {
4570 return llvm::APIntOps::muluExtended(LoLHS, LoRHS);
4571 });
4572
4573 case Builtin::BI__builtin_elementwise_fma:
4575 S, OpPC, Call,
4576 [](const APFloat &X, const APFloat &Y, const APFloat &Z,
4577 llvm::RoundingMode RM) {
4578 APFloat F = X;
4579 F.fusedMultiplyAdd(Y, Z, RM);
4580 return F;
4581 });
4582
4583 case X86::BI__builtin_ia32_vpmadd52luq128:
4584 case X86::BI__builtin_ia32_vpmadd52luq256:
4585 case X86::BI__builtin_ia32_vpmadd52luq512:
4587 S, OpPC, Call, [](const APSInt &A, const APSInt &B, const APSInt &C) {
4588 return A + (B.trunc(52) * C.trunc(52)).zext(64);
4589 });
4590 case X86::BI__builtin_ia32_vpmadd52huq128:
4591 case X86::BI__builtin_ia32_vpmadd52huq256:
4592 case X86::BI__builtin_ia32_vpmadd52huq512:
4594 S, OpPC, Call, [](const APSInt &A, const APSInt &B, const APSInt &C) {
4595 return A + llvm::APIntOps::mulhu(B.trunc(52), C.trunc(52)).zext(64);
4596 });
4597
4598 case X86::BI__builtin_ia32_vpshldd128:
4599 case X86::BI__builtin_ia32_vpshldd256:
4600 case X86::BI__builtin_ia32_vpshldd512:
4601 case X86::BI__builtin_ia32_vpshldq128:
4602 case X86::BI__builtin_ia32_vpshldq256:
4603 case X86::BI__builtin_ia32_vpshldq512:
4604 case X86::BI__builtin_ia32_vpshldw128:
4605 case X86::BI__builtin_ia32_vpshldw256:
4606 case X86::BI__builtin_ia32_vpshldw512:
4608 S, OpPC, Call,
4609 [](const APSInt &Hi, const APSInt &Lo, const APSInt &Amt) {
4610 return llvm::APIntOps::fshl(Hi, Lo, Amt);
4611 });
4612
4613 case X86::BI__builtin_ia32_vpshrdd128:
4614 case X86::BI__builtin_ia32_vpshrdd256:
4615 case X86::BI__builtin_ia32_vpshrdd512:
4616 case X86::BI__builtin_ia32_vpshrdq128:
4617 case X86::BI__builtin_ia32_vpshrdq256:
4618 case X86::BI__builtin_ia32_vpshrdq512:
4619 case X86::BI__builtin_ia32_vpshrdw128:
4620 case X86::BI__builtin_ia32_vpshrdw256:
4621 case X86::BI__builtin_ia32_vpshrdw512:
4622 // NOTE: Reversed Hi/Lo operands.
4624 S, OpPC, Call,
4625 [](const APSInt &Lo, const APSInt &Hi, const APSInt &Amt) {
4626 return llvm::APIntOps::fshr(Hi, Lo, Amt);
4627 });
4628 case X86::BI__builtin_ia32_vpconflictsi_128:
4629 case X86::BI__builtin_ia32_vpconflictsi_256:
4630 case X86::BI__builtin_ia32_vpconflictsi_512:
4631 case X86::BI__builtin_ia32_vpconflictdi_128:
4632 case X86::BI__builtin_ia32_vpconflictdi_256:
4633 case X86::BI__builtin_ia32_vpconflictdi_512:
4634 return interp__builtin_ia32_vpconflict(S, OpPC, Call);
4635 case clang::X86::BI__builtin_ia32_blendpd:
4636 case clang::X86::BI__builtin_ia32_blendpd256:
4637 case clang::X86::BI__builtin_ia32_blendps:
4638 case clang::X86::BI__builtin_ia32_blendps256:
4639 case clang::X86::BI__builtin_ia32_pblendw128:
4640 case clang::X86::BI__builtin_ia32_pblendw256:
4641 case clang::X86::BI__builtin_ia32_pblendd128:
4642 case clang::X86::BI__builtin_ia32_pblendd256:
4643 return interp__builtin_blend(S, OpPC, Call);
4644
4645 case clang::X86::BI__builtin_ia32_blendvpd:
4646 case clang::X86::BI__builtin_ia32_blendvpd256:
4647 case clang::X86::BI__builtin_ia32_blendvps:
4648 case clang::X86::BI__builtin_ia32_blendvps256:
4650 S, OpPC, Call,
4651 [](const APFloat &F, const APFloat &T, const APFloat &C,
4652 llvm::RoundingMode) { return C.isNegative() ? T : F; });
4653
4654 case clang::X86::BI__builtin_ia32_pblendvb128:
4655 case clang::X86::BI__builtin_ia32_pblendvb256:
4657 S, OpPC, Call, [](const APSInt &F, const APSInt &T, const APSInt &C) {
4658 return ((APInt)C).isNegative() ? T : F;
4659 });
4660 case X86::BI__builtin_ia32_ptestz128:
4661 case X86::BI__builtin_ia32_ptestz256:
4662 case X86::BI__builtin_ia32_vtestzps:
4663 case X86::BI__builtin_ia32_vtestzps256:
4664 case X86::BI__builtin_ia32_vtestzpd:
4665 case X86::BI__builtin_ia32_vtestzpd256:
4667 S, OpPC, Call,
4668 [](const APInt &A, const APInt &B) { return (A & B) == 0; });
4669 case X86::BI__builtin_ia32_ptestc128:
4670 case X86::BI__builtin_ia32_ptestc256:
4671 case X86::BI__builtin_ia32_vtestcps:
4672 case X86::BI__builtin_ia32_vtestcps256:
4673 case X86::BI__builtin_ia32_vtestcpd:
4674 case X86::BI__builtin_ia32_vtestcpd256:
4676 S, OpPC, Call,
4677 [](const APInt &A, const APInt &B) { return (~A & B) == 0; });
4678 case X86::BI__builtin_ia32_ptestnzc128:
4679 case X86::BI__builtin_ia32_ptestnzc256:
4680 case X86::BI__builtin_ia32_vtestnzcps:
4681 case X86::BI__builtin_ia32_vtestnzcps256:
4682 case X86::BI__builtin_ia32_vtestnzcpd:
4683 case X86::BI__builtin_ia32_vtestnzcpd256:
4685 S, OpPC, Call, [](const APInt &A, const APInt &B) {
4686 return ((A & B) != 0) && ((~A & B) != 0);
4687 });
4688 case X86::BI__builtin_ia32_selectb_128:
4689 case X86::BI__builtin_ia32_selectb_256:
4690 case X86::BI__builtin_ia32_selectb_512:
4691 case X86::BI__builtin_ia32_selectw_128:
4692 case X86::BI__builtin_ia32_selectw_256:
4693 case X86::BI__builtin_ia32_selectw_512:
4694 case X86::BI__builtin_ia32_selectd_128:
4695 case X86::BI__builtin_ia32_selectd_256:
4696 case X86::BI__builtin_ia32_selectd_512:
4697 case X86::BI__builtin_ia32_selectq_128:
4698 case X86::BI__builtin_ia32_selectq_256:
4699 case X86::BI__builtin_ia32_selectq_512:
4700 case X86::BI__builtin_ia32_selectph_128:
4701 case X86::BI__builtin_ia32_selectph_256:
4702 case X86::BI__builtin_ia32_selectph_512:
4703 case X86::BI__builtin_ia32_selectpbf_128:
4704 case X86::BI__builtin_ia32_selectpbf_256:
4705 case X86::BI__builtin_ia32_selectpbf_512:
4706 case X86::BI__builtin_ia32_selectps_128:
4707 case X86::BI__builtin_ia32_selectps_256:
4708 case X86::BI__builtin_ia32_selectps_512:
4709 case X86::BI__builtin_ia32_selectpd_128:
4710 case X86::BI__builtin_ia32_selectpd_256:
4711 case X86::BI__builtin_ia32_selectpd_512:
4712 return interp__builtin_select(S, OpPC, Call);
4713
4714 case X86::BI__builtin_ia32_shufps:
4715 case X86::BI__builtin_ia32_shufps256:
4716 case X86::BI__builtin_ia32_shufps512:
4718 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4719 unsigned NumElemPerLane = 4;
4720 unsigned NumSelectableElems = NumElemPerLane / 2;
4721 unsigned BitsPerElem = 2;
4722 unsigned IndexMask = 0x3;
4723 unsigned MaskBits = 8;
4724 unsigned Lane = DstIdx / NumElemPerLane;
4725 unsigned ElemInLane = DstIdx % NumElemPerLane;
4726 unsigned LaneOffset = Lane * NumElemPerLane;
4727 unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
4728 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
4729 unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
4730 return std::pair<unsigned, int>{SrcIdx,
4731 static_cast<int>(LaneOffset + Index)};
4732 });
4733 case X86::BI__builtin_ia32_shufpd:
4734 case X86::BI__builtin_ia32_shufpd256:
4735 case X86::BI__builtin_ia32_shufpd512:
4737 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4738 unsigned NumElemPerLane = 2;
4739 unsigned NumSelectableElems = NumElemPerLane / 2;
4740 unsigned BitsPerElem = 1;
4741 unsigned IndexMask = 0x1;
4742 unsigned MaskBits = 8;
4743 unsigned Lane = DstIdx / NumElemPerLane;
4744 unsigned ElemInLane = DstIdx % NumElemPerLane;
4745 unsigned LaneOffset = Lane * NumElemPerLane;
4746 unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
4747 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
4748 unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
4749 return std::pair<unsigned, int>{SrcIdx,
4750 static_cast<int>(LaneOffset + Index)};
4751 });
4752 case X86::BI__builtin_ia32_insertps128:
4754 S, OpPC, Call, [](unsigned DstIdx, unsigned Mask) {
4755 // Bits [3:0]: zero mask - if bit is set, zero this element
4756 if ((Mask & (1 << DstIdx)) != 0) {
4757 return std::pair<unsigned, int>{0, -1};
4758 }
4759 // Bits [7:6]: select element from source vector Y (0-3)
4760 // Bits [5:4]: select destination position (0-3)
4761 unsigned SrcElem = (Mask >> 6) & 0x3;
4762 unsigned DstElem = (Mask >> 4) & 0x3;
4763 if (DstIdx == DstElem) {
4764 // Insert element from source vector (B) at this position
4765 return std::pair<unsigned, int>{1, static_cast<int>(SrcElem)};
4766 } else {
4767 // Copy from destination vector (A)
4768 return std::pair<unsigned, int>{0, static_cast<int>(DstIdx)};
4769 }
4770 });
4771 case X86::BI__builtin_ia32_permvarsi256:
4772 case X86::BI__builtin_ia32_permvarsf256:
4773 case X86::BI__builtin_ia32_permvardf512:
4774 case X86::BI__builtin_ia32_permvardi512:
4775 case X86::BI__builtin_ia32_permvarhi128:
4777 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4778 int Offset = ShuffleMask & 0x7;
4779 return std::pair<unsigned, int>{0, Offset};
4780 });
4781 case X86::BI__builtin_ia32_permvarqi128:
4782 case X86::BI__builtin_ia32_permvarhi256:
4783 case X86::BI__builtin_ia32_permvarsi512:
4784 case X86::BI__builtin_ia32_permvarsf512:
4786 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4787 int Offset = ShuffleMask & 0xF;
4788 return std::pair<unsigned, int>{0, Offset};
4789 });
4790 case X86::BI__builtin_ia32_permvardi256:
4791 case X86::BI__builtin_ia32_permvardf256:
4793 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4794 int Offset = ShuffleMask & 0x3;
4795 return std::pair<unsigned, int>{0, Offset};
4796 });
4797 case X86::BI__builtin_ia32_permvarqi256:
4798 case X86::BI__builtin_ia32_permvarhi512:
4800 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4801 int Offset = ShuffleMask & 0x1F;
4802 return std::pair<unsigned, int>{0, Offset};
4803 });
4804 case X86::BI__builtin_ia32_permvarqi512:
4806 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4807 int Offset = ShuffleMask & 0x3F;
4808 return std::pair<unsigned, int>{0, Offset};
4809 });
4810 case X86::BI__builtin_ia32_vpermi2varq128:
4811 case X86::BI__builtin_ia32_vpermi2varpd128:
4813 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4814 int Offset = ShuffleMask & 0x1;
4815 unsigned SrcIdx = (ShuffleMask >> 1) & 0x1;
4816 return std::pair<unsigned, int>{SrcIdx, Offset};
4817 });
4818 case X86::BI__builtin_ia32_vpermi2vard128:
4819 case X86::BI__builtin_ia32_vpermi2varps128:
4820 case X86::BI__builtin_ia32_vpermi2varq256:
4821 case X86::BI__builtin_ia32_vpermi2varpd256:
4823 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4824 int Offset = ShuffleMask & 0x3;
4825 unsigned SrcIdx = (ShuffleMask >> 2) & 0x1;
4826 return std::pair<unsigned, int>{SrcIdx, Offset};
4827 });
4828 case X86::BI__builtin_ia32_vpermi2varhi128:
4829 case X86::BI__builtin_ia32_vpermi2vard256:
4830 case X86::BI__builtin_ia32_vpermi2varps256:
4831 case X86::BI__builtin_ia32_vpermi2varq512:
4832 case X86::BI__builtin_ia32_vpermi2varpd512:
4834 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4835 int Offset = ShuffleMask & 0x7;
4836 unsigned SrcIdx = (ShuffleMask >> 3) & 0x1;
4837 return std::pair<unsigned, int>{SrcIdx, Offset};
4838 });
4839 case X86::BI__builtin_ia32_vpermi2varqi128:
4840 case X86::BI__builtin_ia32_vpermi2varhi256:
4841 case X86::BI__builtin_ia32_vpermi2vard512:
4842 case X86::BI__builtin_ia32_vpermi2varps512:
4844 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4845 int Offset = ShuffleMask & 0xF;
4846 unsigned SrcIdx = (ShuffleMask >> 4) & 0x1;
4847 return std::pair<unsigned, int>{SrcIdx, Offset};
4848 });
4849 case X86::BI__builtin_ia32_vpermi2varqi256:
4850 case X86::BI__builtin_ia32_vpermi2varhi512:
4852 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4853 int Offset = ShuffleMask & 0x1F;
4854 unsigned SrcIdx = (ShuffleMask >> 5) & 0x1;
4855 return std::pair<unsigned, int>{SrcIdx, Offset};
4856 });
4857 case X86::BI__builtin_ia32_vpermi2varqi512:
4859 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4860 int Offset = ShuffleMask & 0x3F;
4861 unsigned SrcIdx = (ShuffleMask >> 6) & 0x1;
4862 return std::pair<unsigned, int>{SrcIdx, Offset};
4863 });
4864 case X86::BI__builtin_ia32_pshufb128:
4865 case X86::BI__builtin_ia32_pshufb256:
4866 case X86::BI__builtin_ia32_pshufb512:
4868 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4869 uint8_t Ctlb = static_cast<uint8_t>(ShuffleMask);
4870 if (Ctlb & 0x80)
4871 return std::make_pair(0, -1);
4872
4873 unsigned LaneBase = (DstIdx / 16) * 16;
4874 unsigned SrcOffset = Ctlb & 0x0F;
4875 unsigned SrcIdx = LaneBase + SrcOffset;
4876 return std::make_pair(0, static_cast<int>(SrcIdx));
4877 });
4878
4879 case X86::BI__builtin_ia32_pshuflw:
4880 case X86::BI__builtin_ia32_pshuflw256:
4881 case X86::BI__builtin_ia32_pshuflw512:
4883 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4884 unsigned LaneBase = (DstIdx / 8) * 8;
4885 unsigned LaneIdx = DstIdx % 8;
4886 if (LaneIdx < 4) {
4887 unsigned Sel = (ShuffleMask >> (2 * LaneIdx)) & 0x3;
4888 return std::make_pair(0, static_cast<int>(LaneBase + Sel));
4889 }
4890
4891 return std::make_pair(0, static_cast<int>(DstIdx));
4892 });
4893
4894 case X86::BI__builtin_ia32_pshufhw:
4895 case X86::BI__builtin_ia32_pshufhw256:
4896 case X86::BI__builtin_ia32_pshufhw512:
4898 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4899 unsigned LaneBase = (DstIdx / 8) * 8;
4900 unsigned LaneIdx = DstIdx % 8;
4901 if (LaneIdx >= 4) {
4902 unsigned Sel = (ShuffleMask >> (2 * (LaneIdx - 4))) & 0x3;
4903 return std::make_pair(0, static_cast<int>(LaneBase + 4 + Sel));
4904 }
4905
4906 return std::make_pair(0, static_cast<int>(DstIdx));
4907 });
4908
4909 case X86::BI__builtin_ia32_pshufd:
4910 case X86::BI__builtin_ia32_pshufd256:
4911 case X86::BI__builtin_ia32_pshufd512:
4912 case X86::BI__builtin_ia32_vpermilps:
4913 case X86::BI__builtin_ia32_vpermilps256:
4914 case X86::BI__builtin_ia32_vpermilps512:
4916 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4917 unsigned LaneBase = (DstIdx / 4) * 4;
4918 unsigned LaneIdx = DstIdx % 4;
4919 unsigned Sel = (ShuffleMask >> (2 * LaneIdx)) & 0x3;
4920 return std::make_pair(0, static_cast<int>(LaneBase + Sel));
4921 });
4922
4923 case X86::BI__builtin_ia32_vpermilvarpd:
4924 case X86::BI__builtin_ia32_vpermilvarpd256:
4925 case X86::BI__builtin_ia32_vpermilvarpd512:
4927 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4928 unsigned NumElemPerLane = 2;
4929 unsigned Lane = DstIdx / NumElemPerLane;
4930 unsigned Offset = ShuffleMask & 0b10 ? 1 : 0;
4931 return std::make_pair(
4932 0, static_cast<int>(Lane * NumElemPerLane + Offset));
4933 });
4934
4935 case X86::BI__builtin_ia32_vpermilvarps:
4936 case X86::BI__builtin_ia32_vpermilvarps256:
4937 case X86::BI__builtin_ia32_vpermilvarps512:
4939 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4940 unsigned NumElemPerLane = 4;
4941 unsigned Lane = DstIdx / NumElemPerLane;
4942 unsigned Offset = ShuffleMask & 0b11;
4943 return std::make_pair(
4944 0, static_cast<int>(Lane * NumElemPerLane + Offset));
4945 });
4946
4947 case X86::BI__builtin_ia32_vpermilpd:
4948 case X86::BI__builtin_ia32_vpermilpd256:
4949 case X86::BI__builtin_ia32_vpermilpd512:
4951 S, OpPC, Call, [](unsigned DstIdx, unsigned Control) {
4952 unsigned NumElemPerLane = 2;
4953 unsigned BitsPerElem = 1;
4954 unsigned MaskBits = 8;
4955 unsigned IndexMask = 0x1;
4956 unsigned Lane = DstIdx / NumElemPerLane;
4957 unsigned LaneOffset = Lane * NumElemPerLane;
4958 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
4959 unsigned Index = (Control >> BitIndex) & IndexMask;
4960 return std::make_pair(0, static_cast<int>(LaneOffset + Index));
4961 });
4962
4963 case X86::BI__builtin_ia32_vpmultishiftqb128:
4964 case X86::BI__builtin_ia32_vpmultishiftqb256:
4965 case X86::BI__builtin_ia32_vpmultishiftqb512:
4966 return interp__builtin_ia32_multishiftqb(S, OpPC, Call);
4967 case X86::BI__builtin_ia32_kandqi:
4968 case X86::BI__builtin_ia32_kandhi:
4969 case X86::BI__builtin_ia32_kandsi:
4970 case X86::BI__builtin_ia32_kanddi:
4972 S, OpPC, Call,
4973 [](const APSInt &LHS, const APSInt &RHS) { return LHS & RHS; });
4974
4975 case X86::BI__builtin_ia32_kandnqi:
4976 case X86::BI__builtin_ia32_kandnhi:
4977 case X86::BI__builtin_ia32_kandnsi:
4978 case X86::BI__builtin_ia32_kandndi:
4980 S, OpPC, Call,
4981 [](const APSInt &LHS, const APSInt &RHS) { return ~LHS & RHS; });
4982
4983 case X86::BI__builtin_ia32_korqi:
4984 case X86::BI__builtin_ia32_korhi:
4985 case X86::BI__builtin_ia32_korsi:
4986 case X86::BI__builtin_ia32_kordi:
4988 S, OpPC, Call,
4989 [](const APSInt &LHS, const APSInt &RHS) { return LHS | RHS; });
4990
4991 case X86::BI__builtin_ia32_kxnorqi:
4992 case X86::BI__builtin_ia32_kxnorhi:
4993 case X86::BI__builtin_ia32_kxnorsi:
4994 case X86::BI__builtin_ia32_kxnordi:
4996 S, OpPC, Call,
4997 [](const APSInt &LHS, const APSInt &RHS) { return ~(LHS ^ RHS); });
4998
4999 case X86::BI__builtin_ia32_kxorqi:
5000 case X86::BI__builtin_ia32_kxorhi:
5001 case X86::BI__builtin_ia32_kxorsi:
5002 case X86::BI__builtin_ia32_kxordi:
5004 S, OpPC, Call,
5005 [](const APSInt &LHS, const APSInt &RHS) { return LHS ^ RHS; });
5006
5007 case X86::BI__builtin_ia32_knotqi:
5008 case X86::BI__builtin_ia32_knothi:
5009 case X86::BI__builtin_ia32_knotsi:
5010 case X86::BI__builtin_ia32_knotdi:
5012 S, OpPC, Call, [](const APSInt &Src) { return ~Src; });
5013
5014 case X86::BI__builtin_ia32_kaddqi:
5015 case X86::BI__builtin_ia32_kaddhi:
5016 case X86::BI__builtin_ia32_kaddsi:
5017 case X86::BI__builtin_ia32_kadddi:
5019 S, OpPC, Call,
5020 [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
5021
5022 case X86::BI__builtin_ia32_kunpckhi:
5023 case X86::BI__builtin_ia32_kunpckdi:
5024 case X86::BI__builtin_ia32_kunpcksi:
5026 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
5027 // Generic kunpack: extract lower half of each operand and concatenate
5028 // Result = A[HalfWidth-1:0] concat B[HalfWidth-1:0]
5029 unsigned BW = A.getBitWidth();
5030 return APSInt(A.trunc(BW / 2).concat(B.trunc(BW / 2)),
5031 A.isUnsigned());
5032 });
5033
5034 case X86::BI__builtin_ia32_phminposuw128:
5035 return interp__builtin_ia32_phminposuw(S, OpPC, Call);
5036
5037 case X86::BI__builtin_ia32_psraq128:
5038 case X86::BI__builtin_ia32_psraq256:
5039 case X86::BI__builtin_ia32_psraq512:
5040 case X86::BI__builtin_ia32_psrad128:
5041 case X86::BI__builtin_ia32_psrad256:
5042 case X86::BI__builtin_ia32_psrad512:
5043 case X86::BI__builtin_ia32_psraw128:
5044 case X86::BI__builtin_ia32_psraw256:
5045 case X86::BI__builtin_ia32_psraw512:
5047 S, OpPC, Call,
5048 [](const APInt &Elt, uint64_t Count) { return Elt.ashr(Count); },
5049 [](const APInt &Elt, unsigned Width) { return Elt.ashr(Width - 1); });
5050
5051 case X86::BI__builtin_ia32_psllq128:
5052 case X86::BI__builtin_ia32_psllq256:
5053 case X86::BI__builtin_ia32_psllq512:
5054 case X86::BI__builtin_ia32_pslld128:
5055 case X86::BI__builtin_ia32_pslld256:
5056 case X86::BI__builtin_ia32_pslld512:
5057 case X86::BI__builtin_ia32_psllw128:
5058 case X86::BI__builtin_ia32_psllw256:
5059 case X86::BI__builtin_ia32_psllw512:
5061 S, OpPC, Call,
5062 [](const APInt &Elt, uint64_t Count) { return Elt.shl(Count); },
5063 [](const APInt &Elt, unsigned Width) { return APInt::getZero(Width); });
5064
5065 case X86::BI__builtin_ia32_psrlq128:
5066 case X86::BI__builtin_ia32_psrlq256:
5067 case X86::BI__builtin_ia32_psrlq512:
5068 case X86::BI__builtin_ia32_psrld128:
5069 case X86::BI__builtin_ia32_psrld256:
5070 case X86::BI__builtin_ia32_psrld512:
5071 case X86::BI__builtin_ia32_psrlw128:
5072 case X86::BI__builtin_ia32_psrlw256:
5073 case X86::BI__builtin_ia32_psrlw512:
5075 S, OpPC, Call,
5076 [](const APInt &Elt, uint64_t Count) { return Elt.lshr(Count); },
5077 [](const APInt &Elt, unsigned Width) { return APInt::getZero(Width); });
5078
5079 case X86::BI__builtin_ia32_pternlogd128_mask:
5080 case X86::BI__builtin_ia32_pternlogd256_mask:
5081 case X86::BI__builtin_ia32_pternlogd512_mask:
5082 case X86::BI__builtin_ia32_pternlogq128_mask:
5083 case X86::BI__builtin_ia32_pternlogq256_mask:
5084 case X86::BI__builtin_ia32_pternlogq512_mask:
5085 return interp__builtin_ia32_pternlog(S, OpPC, Call, /*MaskZ=*/false);
5086 case X86::BI__builtin_ia32_pternlogd128_maskz:
5087 case X86::BI__builtin_ia32_pternlogd256_maskz:
5088 case X86::BI__builtin_ia32_pternlogd512_maskz:
5089 case X86::BI__builtin_ia32_pternlogq128_maskz:
5090 case X86::BI__builtin_ia32_pternlogq256_maskz:
5091 case X86::BI__builtin_ia32_pternlogq512_maskz:
5092 return interp__builtin_ia32_pternlog(S, OpPC, Call, /*MaskZ=*/true);
5093 case Builtin::BI__builtin_elementwise_fshl:
5095 llvm::APIntOps::fshl);
5096 case Builtin::BI__builtin_elementwise_fshr:
5098 llvm::APIntOps::fshr);
5099
5100 case X86::BI__builtin_ia32_shuf_f32x4_256:
5101 case X86::BI__builtin_ia32_shuf_i32x4_256:
5102 case X86::BI__builtin_ia32_shuf_f64x2_256:
5103 case X86::BI__builtin_ia32_shuf_i64x2_256:
5104 case X86::BI__builtin_ia32_shuf_f32x4:
5105 case X86::BI__builtin_ia32_shuf_i32x4:
5106 case X86::BI__builtin_ia32_shuf_f64x2:
5107 case X86::BI__builtin_ia32_shuf_i64x2: {
5108 // Destination and sources A, B all have the same type.
5109 QualType VecQT = Call->getArg(0)->getType();
5110 const auto *VecT = VecQT->castAs<VectorType>();
5111 unsigned NumElems = VecT->getNumElements();
5112 unsigned ElemBits = S.getASTContext().getTypeSize(VecT->getElementType());
5113 unsigned LaneBits = 128u;
5114 unsigned NumLanes = (NumElems * ElemBits) / LaneBits;
5115 unsigned NumElemsPerLane = LaneBits / ElemBits;
5116
5118 S, OpPC, Call,
5119 [NumLanes, NumElemsPerLane](unsigned DstIdx, unsigned ShuffleMask) {
5120 // DstIdx determines source. ShuffleMask selects lane in source.
5121 unsigned BitsPerElem = NumLanes / 2;
5122 unsigned IndexMask = (1u << BitsPerElem) - 1;
5123 unsigned Lane = DstIdx / NumElemsPerLane;
5124 unsigned SrcIdx = (Lane < NumLanes / 2) ? 0 : 1;
5125 unsigned BitIdx = BitsPerElem * Lane;
5126 unsigned SrcLaneIdx = (ShuffleMask >> BitIdx) & IndexMask;
5127 unsigned ElemInLane = DstIdx % NumElemsPerLane;
5128 unsigned IdxToPick = SrcLaneIdx * NumElemsPerLane + ElemInLane;
5129 return std::pair<unsigned, int>{SrcIdx, IdxToPick};
5130 });
5131 }
5132
5133 case X86::BI__builtin_ia32_insertf32x4_256:
5134 case X86::BI__builtin_ia32_inserti32x4_256:
5135 case X86::BI__builtin_ia32_insertf64x2_256:
5136 case X86::BI__builtin_ia32_inserti64x2_256:
5137 case X86::BI__builtin_ia32_insertf32x4:
5138 case X86::BI__builtin_ia32_inserti32x4:
5139 case X86::BI__builtin_ia32_insertf64x2_512:
5140 case X86::BI__builtin_ia32_inserti64x2_512:
5141 case X86::BI__builtin_ia32_insertf32x8:
5142 case X86::BI__builtin_ia32_inserti32x8:
5143 case X86::BI__builtin_ia32_insertf64x4:
5144 case X86::BI__builtin_ia32_inserti64x4:
5145 case X86::BI__builtin_ia32_vinsertf128_ps256:
5146 case X86::BI__builtin_ia32_vinsertf128_pd256:
5147 case X86::BI__builtin_ia32_vinsertf128_si256:
5148 case X86::BI__builtin_ia32_insert128i256:
5149 return interp__builtin_x86_insert_subvector(S, OpPC, Call, BuiltinID);
5150
5151 case clang::X86::BI__builtin_ia32_vcvtps2ph:
5152 case clang::X86::BI__builtin_ia32_vcvtps2ph256:
5153 return interp__builtin_ia32_vcvtps2ph(S, OpPC, Call);
5154
5155 case X86::BI__builtin_ia32_vec_ext_v4hi:
5156 case X86::BI__builtin_ia32_vec_ext_v16qi:
5157 case X86::BI__builtin_ia32_vec_ext_v8hi:
5158 case X86::BI__builtin_ia32_vec_ext_v4si:
5159 case X86::BI__builtin_ia32_vec_ext_v2di:
5160 case X86::BI__builtin_ia32_vec_ext_v32qi:
5161 case X86::BI__builtin_ia32_vec_ext_v16hi:
5162 case X86::BI__builtin_ia32_vec_ext_v8si:
5163 case X86::BI__builtin_ia32_vec_ext_v4di:
5164 case X86::BI__builtin_ia32_vec_ext_v4sf:
5165 return interp__builtin_vec_ext(S, OpPC, Call, BuiltinID);
5166
5167 case X86::BI__builtin_ia32_vec_set_v4hi:
5168 case X86::BI__builtin_ia32_vec_set_v16qi:
5169 case X86::BI__builtin_ia32_vec_set_v8hi:
5170 case X86::BI__builtin_ia32_vec_set_v4si:
5171 case X86::BI__builtin_ia32_vec_set_v2di:
5172 case X86::BI__builtin_ia32_vec_set_v32qi:
5173 case X86::BI__builtin_ia32_vec_set_v16hi:
5174 case X86::BI__builtin_ia32_vec_set_v8si:
5175 case X86::BI__builtin_ia32_vec_set_v4di:
5176 return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
5177
5178 case X86::BI__builtin_ia32_cvtb2mask128:
5179 case X86::BI__builtin_ia32_cvtb2mask256:
5180 case X86::BI__builtin_ia32_cvtb2mask512:
5181 case X86::BI__builtin_ia32_cvtw2mask128:
5182 case X86::BI__builtin_ia32_cvtw2mask256:
5183 case X86::BI__builtin_ia32_cvtw2mask512:
5184 case X86::BI__builtin_ia32_cvtd2mask128:
5185 case X86::BI__builtin_ia32_cvtd2mask256:
5186 case X86::BI__builtin_ia32_cvtd2mask512:
5187 case X86::BI__builtin_ia32_cvtq2mask128:
5188 case X86::BI__builtin_ia32_cvtq2mask256:
5189 case X86::BI__builtin_ia32_cvtq2mask512:
5190 return interp__builtin_ia32_cvt_vec2mask(S, OpPC, Call, BuiltinID);
5191
5192 case X86::BI__builtin_ia32_cmpb128_mask:
5193 case X86::BI__builtin_ia32_cmpw128_mask:
5194 case X86::BI__builtin_ia32_cmpd128_mask:
5195 case X86::BI__builtin_ia32_cmpq128_mask:
5196 case X86::BI__builtin_ia32_cmpb256_mask:
5197 case X86::BI__builtin_ia32_cmpw256_mask:
5198 case X86::BI__builtin_ia32_cmpd256_mask:
5199 case X86::BI__builtin_ia32_cmpq256_mask:
5200 case X86::BI__builtin_ia32_cmpb512_mask:
5201 case X86::BI__builtin_ia32_cmpw512_mask:
5202 case X86::BI__builtin_ia32_cmpd512_mask:
5203 case X86::BI__builtin_ia32_cmpq512_mask:
5204 return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
5205 /*IsUnsigned=*/false);
5206
5207 case X86::BI__builtin_ia32_ucmpb128_mask:
5208 case X86::BI__builtin_ia32_ucmpw128_mask:
5209 case X86::BI__builtin_ia32_ucmpd128_mask:
5210 case X86::BI__builtin_ia32_ucmpq128_mask:
5211 case X86::BI__builtin_ia32_ucmpb256_mask:
5212 case X86::BI__builtin_ia32_ucmpw256_mask:
5213 case X86::BI__builtin_ia32_ucmpd256_mask:
5214 case X86::BI__builtin_ia32_ucmpq256_mask:
5215 case X86::BI__builtin_ia32_ucmpb512_mask:
5216 case X86::BI__builtin_ia32_ucmpw512_mask:
5217 case X86::BI__builtin_ia32_ucmpd512_mask:
5218 case X86::BI__builtin_ia32_ucmpq512_mask:
5219 return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
5220 /*IsUnsigned=*/true);
5221
5222 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
5223 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
5224 case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
5226
5227 case X86::BI__builtin_ia32_pslldqi128_byteshift:
5228 case X86::BI__builtin_ia32_pslldqi256_byteshift:
5229 case X86::BI__builtin_ia32_pslldqi512_byteshift:
5230 // These SLLDQ intrinsics always operate on byte elements (8 bits).
5231 // The lane width is hardcoded to 16 to match the SIMD register size,
5232 // but the algorithm processes one byte per iteration,
5233 // so APInt(8, ...) is correct and intentional.
5235 S, OpPC, Call,
5236 [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
5237 unsigned LaneBase = (DstIdx / 16) * 16;
5238 unsigned LaneIdx = DstIdx % 16;
5239 if (LaneIdx < Shift)
5240 return std::make_pair(0, -1);
5241
5242 return std::make_pair(0,
5243 static_cast<int>(LaneBase + LaneIdx - Shift));
5244 });
5245
5246 case X86::BI__builtin_ia32_psrldqi128_byteshift:
5247 case X86::BI__builtin_ia32_psrldqi256_byteshift:
5248 case X86::BI__builtin_ia32_psrldqi512_byteshift:
5249 // These SRLDQ intrinsics always operate on byte elements (8 bits).
5250 // The lane width is hardcoded to 16 to match the SIMD register size,
5251 // but the algorithm processes one byte per iteration,
5252 // so APInt(8, ...) is correct and intentional.
5254 S, OpPC, Call,
5255 [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
5256 unsigned LaneBase = (DstIdx / 16) * 16;
5257 unsigned LaneIdx = DstIdx % 16;
5258 if (LaneIdx + Shift < 16)
5259 return std::make_pair(0,
5260 static_cast<int>(LaneBase + LaneIdx + Shift));
5261
5262 return std::make_pair(0, -1);
5263 });
5264
5265 case X86::BI__builtin_ia32_palignr128:
5266 case X86::BI__builtin_ia32_palignr256:
5267 case X86::BI__builtin_ia32_palignr512:
5269 S, OpPC, Call, [](unsigned DstIdx, unsigned Shift) {
5270 // Default to -1 → zero-fill this destination element
5271 unsigned VecIdx = 1;
5272 int ElemIdx = -1;
5273
5274 int Lane = DstIdx / 16;
5275 int Offset = DstIdx % 16;
5276
5277 // Elements come from VecB first, then VecA after the shift boundary
5278 unsigned ShiftedIdx = Offset + (Shift & 0xFF);
5279 if (ShiftedIdx < 16) { // from VecB
5280 ElemIdx = ShiftedIdx + (Lane * 16);
5281 } else if (ShiftedIdx < 32) { // from VecA
5282 VecIdx = 0;
5283 ElemIdx = (ShiftedIdx - 16) + (Lane * 16);
5284 }
5285
5286 return std::pair<unsigned, int>{VecIdx, ElemIdx};
5287 });
5288
5289 case X86::BI__builtin_ia32_alignd128:
5290 case X86::BI__builtin_ia32_alignd256:
5291 case X86::BI__builtin_ia32_alignd512:
5292 case X86::BI__builtin_ia32_alignq128:
5293 case X86::BI__builtin_ia32_alignq256:
5294 case X86::BI__builtin_ia32_alignq512: {
5295 unsigned NumElems = Call->getType()->castAs<VectorType>()->getNumElements();
5297 S, OpPC, Call, [NumElems](unsigned DstIdx, unsigned Shift) {
5298 unsigned Imm = Shift & 0xFF;
5299 unsigned EffectiveShift = Imm & (NumElems - 1);
5300 unsigned SourcePos = DstIdx + EffectiveShift;
5301 unsigned VecIdx = SourcePos < NumElems ? 1u : 0u;
5302 unsigned ElemIdx = SourcePos & (NumElems - 1);
5303 return std::pair<unsigned, int>{VecIdx, static_cast<int>(ElemIdx)};
5304 });
5305 }
5306
5307 default:
5308 S.FFDiag(S.Current->getLocation(OpPC),
5309 diag::note_invalid_subexpr_in_const_expr)
5310 << S.Current->getRange(OpPC);
5311
5312 return false;
5313 }
5314
5315 llvm_unreachable("Unhandled builtin ID");
5316}
5317
5319 ArrayRef<int64_t> ArrayIndices, int64_t &IntResult) {
5321 unsigned N = E->getNumComponents();
5322 assert(N > 0);
5323
5324 unsigned ArrayIndex = 0;
5325 QualType CurrentType = E->getTypeSourceInfo()->getType();
5326 for (unsigned I = 0; I != N; ++I) {
5327 const OffsetOfNode &Node = E->getComponent(I);
5328 switch (Node.getKind()) {
5329 case OffsetOfNode::Field: {
5330 const FieldDecl *MemberDecl = Node.getField();
5331 const auto *RD = CurrentType->getAsRecordDecl();
5332 if (!RD || RD->isInvalidDecl())
5333 return false;
5335 unsigned FieldIndex = MemberDecl->getFieldIndex();
5336 assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
5337 Result +=
5339 CurrentType = MemberDecl->getType().getNonReferenceType();
5340 break;
5341 }
5342 case OffsetOfNode::Array: {
5343 // When generating bytecode, we put all the index expressions as Sint64 on
5344 // the stack.
5345 int64_t Index = ArrayIndices[ArrayIndex];
5346 const ArrayType *AT = S.getASTContext().getAsArrayType(CurrentType);
5347 if (!AT)
5348 return false;
5349 CurrentType = AT->getElementType();
5350 CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(CurrentType);
5351 Result += Index * ElementSize;
5352 ++ArrayIndex;
5353 break;
5354 }
5355 case OffsetOfNode::Base: {
5356 const CXXBaseSpecifier *BaseSpec = Node.getBase();
5357 if (BaseSpec->isVirtual())
5358 return false;
5359
5360 // Find the layout of the class whose base we are looking into.
5361 const auto *RD = CurrentType->getAsCXXRecordDecl();
5362 if (!RD || RD->isInvalidDecl())
5363 return false;
5365
5366 // Find the base class itself.
5367 CurrentType = BaseSpec->getType();
5368 const auto *BaseRD = CurrentType->getAsCXXRecordDecl();
5369 if (!BaseRD)
5370 return false;
5371
5372 // Add the offset to the base.
5373 Result += RL.getBaseClassOffset(BaseRD);
5374 break;
5375 }
5377 llvm_unreachable("Dependent OffsetOfExpr?");
5378 }
5379 }
5380
5381 IntResult = Result.getQuantity();
5382
5383 return true;
5384}
5385
5387 const Pointer &Ptr, const APSInt &IntValue) {
5388
5389 const Record *R = Ptr.getRecord();
5390 assert(R);
5391 assert(R->getNumFields() == 1);
5392
5393 unsigned FieldOffset = R->getField(0u)->Offset;
5394 const Pointer &FieldPtr = Ptr.atField(FieldOffset);
5395 PrimType FieldT = *S.getContext().classify(FieldPtr.getType());
5396
5397 INT_TYPE_SWITCH(FieldT,
5398 FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
5399 FieldPtr.initialize();
5400 return true;
5401}
5402
/// Recursively zero-initializes the object designated by \p Dest.
/// Primitives are destroyed and re-created value-initialized in place;
/// records and arrays recurse into their fields/elements.
static void zeroAll(Pointer &Dest) {
  const Descriptor *Desc = Dest.getFieldDesc();

  // Single primitive: run the destructor, then placement-new a
  // value-initialized T (i.e. the zero value for that primitive type).
  if (Desc->isPrimitive()) {
    TYPE_SWITCH(Desc->getPrimType(), {
      Dest.deref<T>().~T();
      new (&Dest.deref<T>()) T();
    });
    return;
  }

  // Record: zero every field recursively.
  if (Desc->isRecord()) {
    const Record *R = Desc->ElemRecord;
    for (const Record::Field &F : R->fields()) {
      Pointer FieldPtr = Dest.atField(F.Offset);
      zeroAll(FieldPtr);
    }
    return;
  }

  // Array of primitives.
  // NOTE(review): the loop body calls Dest.deref<T>() without using I —
  // presumably deref on an array pointer resolves to the current element or
  // the whole storage; confirm against Pointer::deref semantics that each
  // element (not just one) is actually zeroed.
  if (Desc->isPrimitiveArray()) {
    for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
      TYPE_SWITCH(Desc->getPrimType(), {
        Dest.deref<T>().~T();
        new (&Dest.deref<T>()) T();
      });
    }
    return;
  }

  // Array of composites: narrow to each element and recurse.
  if (Desc->isCompositeArray()) {
    for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
      Pointer ElemPtr = Dest.atIndex(I).narrow();
      zeroAll(ElemPtr);
    }
    return;
  }
}
5441
5442static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
5443 Pointer &Dest, bool Activate);
/// Copies a record (struct/class/union) from \p Src to \p Dest,
/// field by field and base by base.
/// For unions only the active member is copied (and activated in the
/// destination); all inactive members are zeroed. \p Activate propagates
/// union-member activation into nested fields.
/// Returns false if a check fails during the copy.
static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
                       Pointer &Dest, bool Activate = false) {
  [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
  const Descriptor *DestDesc = Dest.getFieldDesc();

  // Copies one field; primitives are copied (and initialized/activated)
  // directly, composite fields recurse through copyComposite().
  auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
    Pointer DestField = Dest.atField(F.Offset);
    if (OptPrimType FT = S.Ctx.classify(F.Decl->getType())) {
      TYPE_SWITCH(*FT, {
        DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
        // Only mark the destination initialized if the source was.
        if (Src.atField(F.Offset).isInitialized())
          DestField.initialize();
        if (Activate)
          DestField.activate();
      });
      return true;
    }
    // Composite field.
    return copyComposite(S, OpPC, Src.atField(F.Offset), DestField, Activate);
  };

  assert(SrcDesc->isRecord());
  assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
  const Record *R = DestDesc->ElemRecord;
  for (const Record::Field &F : R->fields()) {
    if (R->isUnion()) {
      // For unions, only copy the active field. Zero all others.
      const Pointer &SrcField = Src.atField(F.Offset);
      if (SrcField.isActive()) {
        if (!copyField(F, /*Activate=*/true))
          return false;
      } else {
        // Inactive member: still subject to the mutability check, then
        // zero-filled rather than copied.
        if (!CheckMutable(S, OpPC, Src.atField(F.Offset)))
          return false;
        Pointer DestField = Dest.atField(F.Offset);
        zeroAll(DestField);
      }
    } else {
      if (!copyField(F, Activate))
        return false;
    }
  }

  // Base-class subobjects are copied as nested records.
  for (const Record::Base &B : R->bases()) {
    Pointer DestBase = Dest.atField(B.Offset);
    if (!copyRecord(S, OpPC, Src.atField(B.Offset), DestBase, Activate))
      return false;
  }

  Dest.initialize();
  return true;
}
5496
5497static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
5498 Pointer &Dest, bool Activate = false) {
5499 assert(Src.isLive() && Dest.isLive());
5500
5501 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
5502 const Descriptor *DestDesc = Dest.getFieldDesc();
5503
5504 assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());
5505
5506 if (DestDesc->isPrimitiveArray()) {
5507 assert(SrcDesc->isPrimitiveArray());
5508 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
5509 PrimType ET = DestDesc->getPrimType();
5510 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
5511 Pointer DestElem = Dest.atIndex(I);
5512 TYPE_SWITCH(ET, {
5513 DestElem.deref<T>() = Src.elem<T>(I);
5514 DestElem.initialize();
5515 });
5516 }
5517 return true;
5518 }
5519
5520 if (DestDesc->isCompositeArray()) {
5521 assert(SrcDesc->isCompositeArray());
5522 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
5523 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
5524 const Pointer &SrcElem = Src.atIndex(I).narrow();
5525 Pointer DestElem = Dest.atIndex(I).narrow();
5526 if (!copyComposite(S, OpPC, SrcElem, DestElem, Activate))
5527 return false;
5528 }
5529 return true;
5530 }
5531
5532 if (DestDesc->isRecord())
5533 return copyRecord(S, OpPC, Src, Dest, Activate);
5534 return Invalid(S, OpPC);
5535}
5536
/// Deep-copies the composite object at \p Src into \p Dest.
/// Thin public entry point over copyComposite(); Activate is left at its
/// default (false), so union active-member state is not force-activated.
bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
  return copyComposite(S, OpPC, Src, Dest);
}
5540
5541} // namespace interp
5542} // namespace clang
#define V(N, I)
Defines enum values for all the target-independent builtin functions.
llvm::APSInt APSInt
Definition Compiler.cpp:24
GCCTypeClass
Values returned by __builtin_classify_type, chosen to match the values produced by GCC's builtin.
CharUnits GetAlignOfExpr(const ASTContext &Ctx, const Expr *E, UnaryExprOrTypeTrait ExprKind)
GCCTypeClass EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts)
EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way as GCC.
static bool isOneByteCharacterType(QualType T)
static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal)
Attempts to detect a user writing into a piece of memory that's impossible to figure out the size of ...
TokenType getType() const
Returns the token's type, e.g.
#define X(type, name)
Definition Value.h:97
static DiagnosticBuilder Diag(DiagnosticsEngine *Diags, const LangOptions &Features, FullSourceLoc TokLoc, const char *TokBegin, const char *TokRangeBegin, const char *TokRangeEnd, unsigned DiagID)
Produce a diagnostic highlighting some portion of a literal.
#define INT_TYPE_SWITCH_NO_BOOL(Expr, B)
Definition PrimType.h:251
#define INT_TYPE_SWITCH(Expr, B)
Definition PrimType.h:232
#define TYPE_SWITCH(Expr, B)
Definition PrimType.h:211
static std::string toString(const clang::SanitizerSet &Sanitizers)
Produce a string containing comma-separated names of sanitizers in Sanitizers set.
static QualType getPointeeType(const MemRegion *R)
Enumerates target-specific builtins in their own namespaces within namespace clang.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition APValue.h:122
CharUnits & getLValueOffset()
Definition APValue.cpp:993
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
Builtin::Context & BuiltinInfo
Definition ASTContext.h:774
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
const LangOptions & getLangOpts() const
Definition ASTContext.h:926
CanQualType CharTy
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
QualType getWCharType() const
Return the unique wchar_t type available in C++ (and available as __wchar_t as a Microsoft extension)...
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:891
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
CanQualType getCanonicalTagType(const TagDecl *TD) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
CanQualType HalfTy
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getVBaseClassOffset(const CXXRecordDecl *VBase) const
getVBaseClassOffset - Get the offset, in chars, for the given base class.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3722
QualType getElementType() const
Definition TypeBase.h:3734
std::string getQuotedName(unsigned ID) const
Return the identifier name for the specified builtin inside single quotes for a diagnostic,...
Definition Builtins.cpp:85
bool isConstantEvaluated(unsigned ID) const
Return true if this function can be constant evaluated by Clang frontend.
Definition Builtins.h:448
Represents a base class of a C++ class.
Definition DeclCXX.h:146
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition DeclCXX.h:249
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3081
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Definition CharUnits.h:201
static unsigned getMaxSizeBits(const ASTContext &Context)
Determine the maximum number of active bits that an array's size can require, which limits the maximu...
Definition Type.cpp:254
This represents one expression.
Definition Expr.h:112
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3160
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
Represents a function declaration or definition.
Definition Decl.h:2000
One of these records is kept for each identifier that is lexed.
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
std::optional< llvm::AllocTokenMode > AllocTokenMode
The allocation token mode.
std::optional< uint64_t > AllocTokenMax
Maximum number of allocation tokens (0 = target SIZE_MAX), nullopt if none set (use target SIZE_MAX).
OffsetOfExpr - [C99 7.17] - This represents an expression of the form offsetof(record-type,...
Definition Expr.h:2527
const OffsetOfNode & getComponent(unsigned Idx) const
Definition Expr.h:2574
TypeSourceInfo * getTypeSourceInfo() const
Definition Expr.h:2567
unsigned getNumComponents() const
Definition Expr.h:2582
Helper class for OffsetOfExpr.
Definition Expr.h:2421
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition Expr.h:2485
@ Array
An index into an array.
Definition Expr.h:2426
@ Identifier
A field in a dependent type, known only by its name.
Definition Expr.h:2430
@ Field
A field.
Definition Expr.h:2428
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition Expr.h:2433
Kind getKind() const
Determine what kind of offsetof node this is.
Definition Expr.h:2475
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition Expr.h:2495
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2866
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8278
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8463
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
unsigned getMaxAtomicInlineWidth() const
Return the maximum width lock-free atomic operation which can be inlined given the supported features...
Definition TargetInfo.h:856
bool isBigEndian() const
virtual int getEHDataRegisterNumber(unsigned RegNo) const
Return the register number that __builtin_eh_return_regno would return with the specified argument.
virtual bool isNan2008() const
Returns true if NaN encoding is IEEE 754-2008.
QualType getType() const
Return the type wrapped by this type source info.
Definition TypeBase.h:8260
bool isBooleanType() const
Definition TypeBase.h:9001
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2225
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition Type.cpp:2273
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isPointerType() const
Definition TypeBase.h:8515
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8915
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9158
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition Type.cpp:2435
bool isVectorType() const
Definition TypeBase.h:8654
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2320
bool isFloatingType() const
Definition Type.cpp:2304
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9091
QualType getType() const
Definition Decl.h:723
Represents a GCC generic vector type.
Definition TypeBase.h:4175
unsigned getNumElements() const
Definition TypeBase.h:4190
QualType getElementType() const
Definition TypeBase.h:4189
A memory block, either on the stack or in the heap.
Definition InterpBlock.h:44
const Descriptor * getDescriptor() const
Returns the block's descriptor.
Definition InterpBlock.h:73
bool isDynamic() const
Definition InterpBlock.h:83
Wrapper around boolean types.
Definition Boolean.h:25
static Boolean from(T Value)
Definition Boolean.h:97
Pointer into the code segment.
Definition Source.h:30
const LangOptions & getLangOpts() const
Returns the language options.
Definition Context.cpp:328
OptPrimType classify(QualType T) const
Classifies a type.
Definition Context.cpp:362
unsigned getEvalID() const
Definition Context.h:147
Manages dynamic memory allocations done during bytecode interpretation.
bool deallocate(const Expr *Source, const Block *BlockToDelete, InterpState &S)
Deallocate the given source+block combination.
std::optional< Form > getAllocationForm(const Expr *Source) const
Checks whether the allocation done at the given source is an array allocation.
Block * allocate(const Descriptor *D, unsigned EvalID, Form AllocForm)
Allocate ONE element of the given descriptor.
If a Floating is constructed from Memory, it DOES NOT OWN THAT MEMORY.
Definition Floating.h:35
void copy(const APFloat &F)
Definition Floating.h:123
llvm::FPClassTest classify() const
Definition Floating.h:154
bool isSignaling() const
Definition Floating.h:149
bool isNormal() const
Definition Floating.h:152
ComparisonCategoryResult compare(const Floating &RHS) const
Definition Floating.h:157
bool isZero() const
Definition Floating.h:144
bool isNegative() const
Definition Floating.h:143
bool isFinite() const
Definition Floating.h:151
bool isDenormal() const
Definition Floating.h:153
APFloat::fltCategory getCategory() const
Definition Floating.h:155
APFloat getAPFloat() const
Definition Floating.h:64
Base class for stack frames, shared between VM and walker.
Definition Frame.h:25
virtual const FunctionDecl * getCallee() const =0
Returns the called function's declaration.
If an IntegralAP is constructed from Memory, it DOES NOT OWN THAT MEMORY.
Definition IntegralAP.h:36
Frame storing local variables.
Definition InterpFrame.h:27
const Expr * getExpr(CodePtr PC) const
InterpFrame * Caller
The frame of the previous function.
Definition InterpFrame.h:30
SourceInfo getSource(CodePtr PC) const
Map a location to a source.
CodePtr getRetPC() const
Returns the return address of the frame.
SourceLocation getLocation(CodePtr PC) const
SourceRange getRange(CodePtr PC) const
unsigned getDepth() const
const FunctionDecl * getCallee() const override
Returns the called function's declaration.
Stack frame storing temporaries and parameters.
Definition InterpStack.h:25
T pop()
Returns the value from the top of the stack and removes it.
Definition InterpStack.h:39
void push(Tys &&...Args)
Constructs a value in place on the top of the stack.
Definition InterpStack.h:33
void discard()
Discards the top value from the stack.
Definition InterpStack.h:50
T & peek() const
Returns a reference to the value on the top of the stack.
Definition InterpStack.h:62
Interpreter context.
Definition InterpState.h:43
Expr::EvalStatus & getEvalStatus() const override
Definition InterpState.h:67
Context & getContext() const
DynamicAllocator & getAllocator()
Context & Ctx
Interpreter Context.
Floating allocFloat(const llvm::fltSemantics &Sem)
llvm::SmallVector< const Block * > InitializingBlocks
List of blocks we're currently running either constructors or destructors for.
ASTContext & getASTContext() const override
Definition InterpState.h:70
InterpStack & Stk
Temporary stack.
const VarDecl * EvaluatingDecl
Declaration we're initializing/evaluating, if any.
InterpFrame * Current
The current frame.
T allocAP(unsigned BitWidth)
const LangOptions & getLangOpts() const
Definition InterpState.h:71
StdAllocatorCaller getStdAllocatorCaller(StringRef Name) const
Program & P
Reference to the module containing all bytecode.
PrimType value_or(PrimType PT) const
Definition PrimType.h:68
A pointer to a memory block, live or dead.
Definition Pointer.h:92
Pointer narrow() const
Restricts the scope of an array element pointer.
Definition Pointer.h:189
bool isInitialized() const
Checks if an object was initialized.
Definition Pointer.cpp:441
Pointer atIndex(uint64_t Idx) const
Offsets a pointer inside an array.
Definition Pointer.h:157
bool isDummy() const
Checks if the pointer points to a dummy value.
Definition Pointer.h:552
int64_t getIndex() const
Returns the index into an array.
Definition Pointer.h:617
bool isActive() const
Checks if the object is active.
Definition Pointer.h:541
Pointer atField(unsigned Off) const
Creates a pointer to a field.
Definition Pointer.h:174
T & deref() const
Dereferences the pointer, if it's live.
Definition Pointer.h:668
unsigned getNumElems() const
Returns the number of elements.
Definition Pointer.h:601
Pointer getArray() const
Returns the parent array.
Definition Pointer.h:321
bool isUnknownSizeArray() const
Checks if the structure is an array of unknown size.
Definition Pointer.h:420
void activate() const
Activates a field.
Definition Pointer.cpp:577
bool isIntegralPointer() const
Definition Pointer.h:474
QualType getType() const
Returns the type of the innermost field.
Definition Pointer.h:341
bool isArrayElement() const
Checks if the pointer points to an array.
Definition Pointer.h:426
void initializeAllElements() const
Initialize all elements of a primitive array at once.
Definition Pointer.cpp:546
bool isLive() const
Checks if the pointer is live.
Definition Pointer.h:273
bool inArray() const
Checks if the innermost field is an array.
Definition Pointer.h:402
T & elem(unsigned I) const
Dereferences the element at index I.
Definition Pointer.h:684
Pointer getBase() const
Returns a pointer to the object of which this pointer is a field.
Definition Pointer.h:312
std::string toDiagnosticString(const ASTContext &Ctx) const
Converts the pointer to a string usable in diagnostics.
Definition Pointer.cpp:428
bool isZero() const
Checks if the pointer is null.
Definition Pointer.h:259
bool isRoot() const
Pointer points directly to a block.
Definition Pointer.h:442
const Descriptor * getDeclDesc() const
Accessor for information about the declaration site.
Definition Pointer.h:287
static bool pointToSameBlock(const Pointer &A, const Pointer &B)
Checks if both given pointers point to the same block.
Definition Pointer.cpp:653
APValue toAPValue(const ASTContext &ASTCtx) const
Converts the pointer to an APValue.
Definition Pointer.cpp:172
bool isOnePastEnd() const
Checks if the index is one past end.
Definition Pointer.h:634
uint64_t getIntegerRepresentation() const
Definition Pointer.h:144
const FieldDecl * getField() const
Returns the field information.
Definition Pointer.h:486
Pointer expand() const
Expands a pointer to the containing array, undoing narrowing.
Definition Pointer.h:224
bool isBlockPointer() const
Definition Pointer.h:473
const Block * block() const
Definition Pointer.h:607
const Descriptor * getFieldDesc() const
Accessors for information about the innermost field.
Definition Pointer.h:331
bool isVirtualBaseClass() const
Definition Pointer.h:548
bool isBaseClass() const
Checks if a structure is a base class.
Definition Pointer.h:547
size_t elemSize() const
Returns the element size of the innermost field.
Definition Pointer.h:363
bool canBeInitialized() const
If this pointer has an InlineDescriptor we can use to initialize.
Definition Pointer.h:449
Lifetime getLifetime() const
Definition Pointer.h:729
void initialize() const
Initializes a field.
Definition Pointer.cpp:494
bool isField() const
Checks if the item is a field in an object.
Definition Pointer.h:279
const Record * getRecord() const
Returns the record descriptor of a class.
Definition Pointer.h:479
Descriptor * createDescriptor(const DeclTy &D, PrimType T, const Type *SourceTy=nullptr, Descriptor::MetadataSize MDSize=std::nullopt, bool IsConst=false, bool IsTemporary=false, bool IsMutable=false, bool IsVolatile=false)
Creates a descriptor for a primitive type.
Definition Program.h:119
Structure/Class descriptor.
Definition Record.h:25
const RecordDecl * getDecl() const
Returns the underlying declaration.
Definition Record.h:53
bool isUnion() const
Checks if the record is a union.
Definition Record.h:57
const Field * getField(const FieldDecl *FD) const
Returns a field.
Definition Record.cpp:47
llvm::iterator_range< const_base_iter > bases() const
Definition Record.h:92
unsigned getNumFields() const
Definition Record.h:88
llvm::iterator_range< const_field_iter > fields() const
Definition Record.h:84
Describes the statement/declaration an opcode was generated from.
Definition Source.h:73
OptionalDiagnostic Note(SourceLocation Loc, diag::kind DiagId)
Add a note to a prior diagnostic.
Definition State.cpp:63
DiagnosticBuilder report(SourceLocation Loc, diag::kind DiagId)
Directly reports a diagnostic message.
Definition State.cpp:74
OptionalDiagnostic FFDiag(SourceLocation Loc, diag::kind DiagId=diag::note_invalid_subexpr_in_const_expr, unsigned ExtraNotes=0)
Diagnose that the evaluation could not be folded (FF => FoldFailure)
Definition State.cpp:21
OptionalDiagnostic CCEDiag(SourceLocation Loc, diag::kind DiagId=diag::note_invalid_subexpr_in_const_expr, unsigned ExtraNotes=0)
Diagnose that the evaluation does not produce a C++11 core constant expression.
Definition State.cpp:42
bool checkingPotentialConstantExpression() const
Are we checking whether the expression is a potential constant expression?
Definition State.h:99
Defines the clang::TargetInfo interface.
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition OSLog.cpp:192
std::optional< llvm::AllocTokenMetadata > getAllocTokenMetadata(QualType T, const ASTContext &Ctx)
Get the information required for construction of an allocation token ID.
QualType inferPossibleType(const CallExpr *E, const ASTContext &Ctx, const CastExpr *CastE)
Infer the possible allocated type from an allocation call expression.
static bool isNoopBuiltin(unsigned ID)
static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_ia32_shuffle_generic(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< std::pair< unsigned, int >(unsigned, unsigned)> GetSourceIndex)
static bool interp__builtin_ia32_phminposuw(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT, const APSInt &Value)
bool readPointerToBuffer(const Context &Ctx, const Pointer &FromPtr, BitcastBuffer &Buffer, bool ReturnOnUninit)
static Floating abs(InterpState &S, const Floating &In)
static bool interp__builtin_x86_extract_vector(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool IsNumBuiltin)
static bool interp__builtin_elementwise_maxmin(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_elementwise_triop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_assume(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC, DynamicAllocator::Form AllocForm, DynamicAllocator::Form DeleteForm, const Descriptor *D, const Expr *NewExpr)
Diagnose mismatched new[]/delete or new/delete[] pairs.
Definition Interp.cpp:1117
static bool interp__builtin_ia32_shift_with_count(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APInt &, uint64_t)> ShiftOp, llvm::function_ref< APInt(const APInt &, unsigned)> OverflowOp)
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
Defined as __builtin_isnan(...), to accommodate the fact that it can take a float,...
static llvm::RoundingMode getRoundingMode(FPOptions FPO)
static bool interp__builtin_elementwise_countzeroes(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
Can be called with an integer or vector as the first and only parameter.
bool Call(InterpState &S, CodePtr OpPC, const Function *Func, uint32_t VarArgSize)
Definition Interp.cpp:1588
static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool IsNumBuiltin)
static bool interp__builtin_blend(InterpState &S, CodePtr OpPC, const CallExpr *Call)
bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC, const Pointer &Ptr, const APSInt &IntValue)
Sets the given integral value to the pointer, which is of a std::{weak,partial,strong}...
static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame)
static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
bool __atomic_always_lock_free(size_t, void const volatile*) bool __atomic_is_lock_free(size_t,...
static llvm::APSInt convertBoolVectorToInt(const Pointer &Val)
static bool interp__builtin_move(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_clz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if a pointer points to a mutable field.
Definition Interp.cpp:594
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
__builtin_is_aligned() __builtin_align_up() __builtin_align_down() The first parameter is either an i...
static bool interp__builtin_ia32_addsub(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool Activate(InterpState &S, CodePtr OpPC)
Definition Interp.h:1964
static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
(CarryIn, LHS, RHS, Result)
static bool isOneByteCharacterType(QualType T)
Determine if T is a character type for which we guarantee that sizeof(T) == 1.
static unsigned computePointerOffset(const ASTContext &ASTCtx, const Pointer &Ptr)
Compute the byte offset of Ptr in the full declaration.
static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a value can be loaded from a block.
Definition Interp.cpp:793
static bool interp__builtin_ia32_cmp_mask(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID, bool IsUnsigned)
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinOp)
static bool interp__builtin_inf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_vec_ext(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_test_op(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< bool(const APInt &A, const APInt &B)> Fn)
static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool CheckSign, const CallExpr *Call)
static bool interp__builtin_os_log_format_buffer_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E, ArrayRef< int64_t > ArrayIndices, int64_t &IntResult)
Interpret an offsetof operation.
static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is in range.
Definition Interp.cpp:519
static bool pointsToLastObject(const Pointer &Ptr)
Does Ptr point to the last subobject?
static bool interp__builtin_select(InterpState &S, CodePtr OpPC, const CallExpr *Call)
AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
llvm::APFloat APFloat
Definition Floating.h:27
static void discard(InterpStack &Stk, PrimType T)
static bool interp__builtin_select_scalar(InterpState &S, const CallExpr *Call)
Scalar variant of AVX512 predicated select: Result[i] = (Mask bit 0) ?
bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is live and accessible.
Definition Interp.cpp:414
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
Five int values followed by one floating value.
static bool interp__builtin_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp_floating_comparison(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool handleOverflow(InterpState &S, CodePtr OpPC, const T &SrcValue)
llvm::APInt APInt
Definition FixedPoint.h:19
static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate)
static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate=false)
static bool interp__builtin_c11_atomic_is_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool __c11_atomic_is_lock_free(size_t)
static void zeroAll(Pointer &Dest)
static bool interp__builtin_elementwise_int_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_x86_extract_vector_masked(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
PrimType
Enumeration of the primitive types of the VM.
Definition PrimType.h:34
static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, uint32_t BuiltinID)
Interpret a builtin function.
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_vec_set(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
__builtin_complex(Float A, float B);
static bool evalICmpImm(uint8_t Imm, const APSInt &A, const APSInt &B, bool IsUnsigned)
bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK)
Checks if a pointer is a dummy pointer.
Definition Interp.cpp:1168
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
__builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
static bool interp__builtin_ia32_cvt_vec2mask(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_pmul(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &, const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_x86_pack(InterpState &S, CodePtr, const CallExpr *E, llvm::function_ref< APInt(const APSInt &)> PackFn)
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT)
Pushes Val on the stack as the type given by QT.
static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if the array is offsetable.
Definition Interp.cpp:406
static bool interp__builtin_elementwise_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC, const InterpFrame *Frame)
static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
Can be called with an integer or vector as the first and only parameter.
static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool Error(InterpState &S, CodePtr OpPC)
Do nothing and just abort execution.
Definition Interp.h:3291
static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_movmsk_op(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
static bool interp_builtin_horizontal_fp_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APFloat(const APFloat &, const APFloat &, llvm::RoundingMode)> Fn)
static bool interp__builtin_elementwise_triop_fp(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APFloat(const APFloat &, const APFloat &, const APFloat &, llvm::RoundingMode)> Fn)
static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems)
static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static APSInt popToAPSInt(InterpStack &Stk, PrimType T)
static std::optional< unsigned > computeFullDescSize(const ASTContext &ASTCtx, const Descriptor *Desc)
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
First parameter to __builtin_isfpclass is the floating value, the second one is an integral value.
static bool interp__builtin_ia32_vcvtps2ph(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_ia32_multishiftqb(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_ia32_shufbitqmb_mask(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_nan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, bool Signaling)
bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest)
Copy the contents of Src into Dest.
static bool interp__builtin_elementwise_int_unaryop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &)> Fn)
constexpr bool isIntegralType(PrimType T)
Definition PrimType.h:128
static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_infer_alloc_token(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp_builtin_horizontal_int_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &)> Fn)
static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC, unsigned ID)
llvm::APSInt APSInt
Definition FixedPoint.h:20
static QualType getElemType(const Pointer &P)
static bool interp__builtin_ia32_pternlog(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool MaskZ)
static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static void swapBytes(std::byte *M, size_t N)
The JSON file list parser is used to communicate input to InstallAPI.
if(T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T -> getSizeExpr())))
ComparisonCategoryResult
An enumeration representing the possible results of a three-way comparison.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
@ AK_Read
Definition State.h:27
const FunctionProtoType * T
U cast(CodeGen::Address addr)
Definition Address.h:327
SmallVectorImpl< PartialDiagnosticAt > * Diag
Diag - If this is non-null, it will be filled in with a stack of notes indicating why evaluation fail...
Definition Expr.h:633
Track what bits have been initialized to known values and which ones have indeterminate value.
T deref(Bytes Offset) const
Dereferences the value at the given offset.
std::unique_ptr< std::byte[]> Data
A quantity in bits.
A quantity in bytes.
size_t getQuantity() const
Describes a memory block created by an allocation site.
Definition Descriptor.h:122
unsigned getNumElems() const
Returns the number of elements stored in the block.
Definition Descriptor.h:249
bool isPrimitive() const
Checks if the descriptor is of a primitive.
Definition Descriptor.h:263
QualType getElemQualType() const
bool isCompositeArray() const
Checks if the descriptor is of an array of composites.
Definition Descriptor.h:256
const ValueDecl * asValueDecl() const
Definition Descriptor.h:214
static constexpr unsigned MaxArrayElemBytes
Maximum number of bytes to be used for array elements.
Definition Descriptor.h:148
QualType getType() const
const Decl * asDecl() const
Definition Descriptor.h:210
static constexpr MetadataSize InlineDescMD
Definition Descriptor.h:144
unsigned getElemSize() const
returns the size of an element when the structure is viewed as an array.
Definition Descriptor.h:244
bool isPrimitiveArray() const
Checks if the descriptor is of an array of primitives.
Definition Descriptor.h:254
const VarDecl * asVarDecl() const
Definition Descriptor.h:218
PrimType getPrimType() const
Definition Descriptor.h:236
bool isRecord() const
Checks if the descriptor is of a record.
Definition Descriptor.h:268
const Record *const ElemRecord
Pointer to the record, if block contains records.
Definition Descriptor.h:153
const Expr * asExpr() const
Definition Descriptor.h:211
bool isArray() const
Checks if the descriptor is of an array.
Definition Descriptor.h:266
Mapping from primitive types to their representation.
Definition PrimType.h:138