// Extracted from the clang 22.0.0git Doxygen page for InterpBuiltin.cpp
// ("Go to the documentation of this file"). Several original source lines
// were dropped by the extraction; see NOTE(review) markers below.
1//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
9#include "Boolean.h"
10#include "EvalEmitter.h"
12#include "InterpHelpers.h"
13#include "PrimType.h"
14#include "Program.h"
16#include "clang/AST/OSLog.h"
21#include "llvm/ADT/StringExtras.h"
22#include "llvm/Support/AllocToken.h"
23#include "llvm/Support/ErrorHandling.h"
24#include "llvm/Support/SipHash.h"
25
26namespace clang {
27namespace interp {
28
29[[maybe_unused]] static bool isNoopBuiltin(unsigned ID) {
30 switch (ID) {
31 case Builtin::BIas_const:
32 case Builtin::BIforward:
33 case Builtin::BIforward_like:
34 case Builtin::BImove:
35 case Builtin::BImove_if_noexcept:
36 case Builtin::BIaddressof:
37 case Builtin::BI__addressof:
38 case Builtin::BI__builtin_addressof:
39 case Builtin::BI__builtin_launder:
40 return true;
41 default:
42 return false;
43 }
44 return false;
45}
46
/// Pops and discards the top-of-stack value, which holds primitive type \p T.
static void discard(InterpStack &Stk, PrimType T) {
  // TYPE_SWITCH dispatches on the runtime PrimType and instantiates the body
  // with the matching C++ type as T.
  TYPE_SWITCH(T, { Stk.discard<T>(); });
}
50
51static uint64_t popToUInt64(const InterpState &S, const Expr *E) {
53 return static_cast<uint64_t>(S.Stk.pop<T>()));
54}
55
57 INT_TYPE_SWITCH(T, return Stk.pop<T>().toAPSInt());
58}
59
/// Convenience overload: classifies \p E's type and pops the matching
/// primitive value as an APSInt.
static APSInt popToAPSInt(InterpState &S, const Expr *E) {
  return popToAPSInt(S.Stk, *S.getContext().classify(E->getType()));
}
64 return popToAPSInt(S.Stk, *S.getContext().classify(T));
65}
66
67/// Pushes \p Val on the stack as the type given by \p QT.
68static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
72 assert(T);
73
74 unsigned BitWidth = S.getASTContext().getTypeSize(QT);
75
76 if (T == PT_IntAPS) {
77 auto Result = S.allocAP<IntegralAP<true>>(BitWidth);
78 Result.copy(Val);
80 return;
81 }
82
83 if (T == PT_IntAP) {
84 auto Result = S.allocAP<IntegralAP<false>>(BitWidth);
85 Result.copy(Val);
87 return;
88 }
89
91 int64_t V = Val.getSExtValue();
92 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
93 } else {
95 uint64_t V = Val.getZExtValue();
96 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
97 }
98}
99
/// Type-dispatching wrapper: converts any integer-like \p Val (APInt, APSInt,
/// or builtin integer) into an APSInt and pushes it as type \p QT.
template <typename T>
static void pushInteger(InterpState &S, T Val, QualType QT) {
  if constexpr (std::is_same_v<T, APInt>)
    // A raw APInt carries no sign; is_signed_v<APInt> is false, so this
    // wraps it as an unsigned APSInt.
    pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
  else if constexpr (std::is_same_v<T, APSInt>)
    pushInteger(S, Val, QT);
  else
    // Builtin integer: widen to its bit width, preserving signedness.
    pushInteger(S,
                APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
                             std::is_signed_v<T>),
                       !std::is_signed_v<T>),
                QT);
}
113
114static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT,
115 const APSInt &Value) {
116
117 if (ValueT == PT_IntAPS) {
118 Dest.deref<IntegralAP<true>>() =
119 S.allocAP<IntegralAP<true>>(Value.getBitWidth());
120 Dest.deref<IntegralAP<true>>().copy(Value);
121 } else if (ValueT == PT_IntAP) {
122 Dest.deref<IntegralAP<false>>() =
123 S.allocAP<IntegralAP<false>>(Value.getBitWidth());
124 Dest.deref<IntegralAP<false>>().copy(Value);
125 } else {
127 ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
128 }
129}
130
131static QualType getElemType(const Pointer &P) {
132 const Descriptor *Desc = P.getFieldDesc();
133 QualType T = Desc->getType();
134 if (Desc->isPrimitive())
135 return T;
136 if (T->isPointerType())
137 return T->getAs<PointerType>()->getPointeeType();
138 if (Desc->isArray())
139 return Desc->getElemQualType();
140 if (const auto *AT = T->getAsArrayTypeUnsafe())
141 return AT->getElementType();
142 return T;
143}
144
146 unsigned ID) {
147 if (!S.diagnosing())
148 return;
149
150 auto Loc = S.Current->getSource(OpPC);
151 if (S.getLangOpts().CPlusPlus11)
152 S.CCEDiag(Loc, diag::note_constexpr_invalid_function)
153 << /*isConstexpr=*/0 << /*isConstructor=*/0
155 else
156 S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
157}
158
/// Packs a boolean vector into an APSInt: bit I of the result mirrors
/// element I of the vector.
static llvm::APSInt convertBoolVectorToInt(const Pointer &Val) {
  // NOTE(review): an additional assert conjunct appears to have been elided
  // from this copy between these two lines.
  assert(Val.getFieldDesc()->isPrimitiveArray() &&
         "Not a boolean vector");
  unsigned NumElems = Val.getNumElems();

  // Each element is one bit, so create an integer with NumElems bits.
  // (Second constructor argument 0 -> the APSInt is signed.)
  llvm::APSInt Result(NumElems, 0);
  for (unsigned I = 0; I != NumElems; ++I) {
    if (Val.elem<bool>(I))
      Result.setBit(I);
  }

  return Result;
}
174
// Strict double -> float conversion used for X86 PD2PS/cvtsd2ss intrinsics.
// Reject NaN/Inf/Subnormal inputs and any lossy/inexact conversions.
// NOTE(review): the opening signature line (return type, name, and the
// Src/Dst parameters) is missing from this copy — restore from upstream.
// Src is presumably the source APFloat and Dst the destination Floating.
                                         InterpState &S, const Expr *DiagExpr) {
  // Infinity: diagnose as floating arithmetic producing infinity (<< 0).
  if (Src.isInfinity()) {
    if (S.diagnosing())
      S.CCEDiag(DiagExpr, diag::note_constexpr_float_arithmetic) << 0;
    return false;
  }
  // NaN: diagnose as floating arithmetic producing NaN (<< 1).
  if (Src.isNaN()) {
    if (S.diagnosing())
      S.CCEDiag(DiagExpr, diag::note_constexpr_float_arithmetic) << 1;
    return false;
  }
  // Convert a copy and inspect the status to detect precision loss.
  APFloat Val = Src;
  bool LosesInfo = false;
  APFloat::opStatus Status = Val.convert(
      APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven, &LosesInfo);
  // Inexact or subnormal results are rejected under strict semantics.
  if (LosesInfo || Val.isDenormal()) {
    if (S.diagnosing())
      S.CCEDiag(DiagExpr, diag::note_constexpr_float_arithmetic_strict);
    return false;
  }
  if (Status != APFloat::opOK) {
    if (S.diagnosing())
      S.CCEDiag(DiagExpr, diag::note_invalid_subexpr_in_const_expr);
    return false;
  }
  Dst.copy(Val);
  return true;
}
206
208 const InterpFrame *Frame,
209 const CallExpr *Call) {
210 unsigned Depth = S.Current->getDepth();
211 auto isStdCall = [](const FunctionDecl *F) -> bool {
212 return F && F->isInStdNamespace() && F->getIdentifier() &&
213 F->getIdentifier()->isStr("is_constant_evaluated");
214 };
215 const InterpFrame *Caller = Frame->Caller;
216 // The current frame is the one for __builtin_is_constant_evaluated.
217 // The one above that, potentially the one for std::is_constant_evaluated().
219 S.getEvalStatus().Diag &&
220 (Depth == 0 || (Depth == 1 && isStdCall(Frame->getCallee())))) {
221 if (Caller && isStdCall(Frame->getCallee())) {
222 const Expr *E = Caller->getExpr(Caller->getRetPC());
223 S.report(E->getExprLoc(),
224 diag::warn_is_constant_evaluated_always_true_constexpr)
225 << "std::is_constant_evaluated" << E->getSourceRange();
226 } else {
227 S.report(Call->getExprLoc(),
228 diag::warn_is_constant_evaluated_always_true_constexpr)
229 << "__builtin_is_constant_evaluated" << Call->getSourceRange();
230 }
231 }
232
234 return true;
235}
236
237// __builtin_assume(int)
239 const InterpFrame *Frame,
240 const CallExpr *Call) {
241 assert(Call->getNumArgs() == 1);
242 discard(S.Stk, *S.getContext().classify(Call->getArg(0)));
243 return true;
244}
245
247 const InterpFrame *Frame,
248 const CallExpr *Call, unsigned ID) {
249 uint64_t Limit = ~static_cast<uint64_t>(0);
250 if (ID == Builtin::BIstrncmp || ID == Builtin::BI__builtin_strncmp ||
251 ID == Builtin::BIwcsncmp || ID == Builtin::BI__builtin_wcsncmp)
252 Limit = popToUInt64(S, Call->getArg(2));
253
254 const Pointer &B = S.Stk.pop<Pointer>();
255 const Pointer &A = S.Stk.pop<Pointer>();
256 if (ID == Builtin::BIstrcmp || ID == Builtin::BIstrncmp ||
257 ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp)
258 diagnoseNonConstexprBuiltin(S, OpPC, ID);
259
260 if (Limit == 0) {
261 pushInteger(S, 0, Call->getType());
262 return true;
263 }
264
265 if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
266 return false;
267
268 if (A.isDummy() || B.isDummy())
269 return false;
270 if (!A.isBlockPointer() || !B.isBlockPointer())
271 return false;
272
273 bool IsWide = ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp ||
274 ID == Builtin::BI__builtin_wcscmp ||
275 ID == Builtin::BI__builtin_wcsncmp;
276 assert(A.getFieldDesc()->isPrimitiveArray());
277 assert(B.getFieldDesc()->isPrimitiveArray());
278
279 // Different element types shouldn't happen, but with casts they can.
281 return false;
282
283 PrimType ElemT = *S.getContext().classify(getElemType(A));
284
285 auto returnResult = [&](int V) -> bool {
286 pushInteger(S, V, Call->getType());
287 return true;
288 };
289
290 unsigned IndexA = A.getIndex();
291 unsigned IndexB = B.getIndex();
292 uint64_t Steps = 0;
293 for (;; ++IndexA, ++IndexB, ++Steps) {
294
295 if (Steps >= Limit)
296 break;
297 const Pointer &PA = A.atIndex(IndexA);
298 const Pointer &PB = B.atIndex(IndexB);
299 if (!CheckRange(S, OpPC, PA, AK_Read) ||
300 !CheckRange(S, OpPC, PB, AK_Read)) {
301 return false;
302 }
303
304 if (IsWide) {
305 INT_TYPE_SWITCH(ElemT, {
306 T CA = PA.deref<T>();
307 T CB = PB.deref<T>();
308 if (CA > CB)
309 return returnResult(1);
310 if (CA < CB)
311 return returnResult(-1);
312 if (CA.isZero() || CB.isZero())
313 return returnResult(0);
314 });
315 continue;
316 }
317
318 uint8_t CA = PA.deref<uint8_t>();
319 uint8_t CB = PB.deref<uint8_t>();
320
321 if (CA > CB)
322 return returnResult(1);
323 if (CA < CB)
324 return returnResult(-1);
325 if (CA == 0 || CB == 0)
326 return returnResult(0);
327 }
328
329 return returnResult(0);
330}
331
333 const InterpFrame *Frame,
334 const CallExpr *Call, unsigned ID) {
335 const Pointer &StrPtr = S.Stk.pop<Pointer>().expand();
336
337 if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
338 diagnoseNonConstexprBuiltin(S, OpPC, ID);
339
340 if (!CheckArray(S, OpPC, StrPtr))
341 return false;
342
343 if (!CheckLive(S, OpPC, StrPtr, AK_Read))
344 return false;
345
346 if (!CheckDummy(S, OpPC, StrPtr.block(), AK_Read))
347 return false;
348
349 if (!StrPtr.getFieldDesc()->isPrimitiveArray())
350 return false;
351
352 assert(StrPtr.getFieldDesc()->isPrimitiveArray());
353 unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();
354
355 if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
356 [[maybe_unused]] const ASTContext &AC = S.getASTContext();
357 assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
358 }
359
360 size_t Len = 0;
361 for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
362 const Pointer &ElemPtr = StrPtr.atIndex(I);
363
364 if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
365 return false;
366
367 uint32_t Val;
368 switch (ElemSize) {
369 case 1:
370 Val = ElemPtr.deref<uint8_t>();
371 break;
372 case 2:
373 Val = ElemPtr.deref<uint16_t>();
374 break;
375 case 4:
376 Val = ElemPtr.deref<uint32_t>();
377 break;
378 default:
379 llvm_unreachable("Unsupported char size");
380 }
381 if (Val == 0)
382 break;
383 }
384
385 pushInteger(S, Len, Call->getType());
386
387 return true;
388}
389
391 const InterpFrame *Frame, const CallExpr *Call,
392 bool Signaling) {
393 const Pointer &Arg = S.Stk.pop<Pointer>();
394
395 if (!CheckLoad(S, OpPC, Arg))
396 return false;
397
398 assert(Arg.getFieldDesc()->isPrimitiveArray());
399
400 // Convert the given string to an integer using StringRef's API.
401 llvm::APInt Fill;
402 std::string Str;
403 assert(Arg.getNumElems() >= 1);
404 for (unsigned I = 0;; ++I) {
405 const Pointer &Elem = Arg.atIndex(I);
406
407 if (!CheckLoad(S, OpPC, Elem))
408 return false;
409
410 if (Elem.deref<int8_t>() == 0)
411 break;
412
413 Str += Elem.deref<char>();
414 }
415
416 // Treat empty strings as if they were zero.
417 if (Str.empty())
418 Fill = llvm::APInt(32, 0);
419 else if (StringRef(Str).getAsInteger(0, Fill))
420 return false;
421
422 const llvm::fltSemantics &TargetSemantics =
424 Call->getDirectCallee()->getReturnType());
425
426 Floating Result = S.allocFloat(TargetSemantics);
428 if (Signaling)
429 Result.copy(
430 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
431 else
432 Result.copy(
433 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
434 } else {
435 // Prior to IEEE 754-2008, architectures were allowed to choose whether
436 // the first bit of their significand was set for qNaN or sNaN. MIPS chose
437 // a different encoding to what became a standard in 2008, and for pre-
438 // 2008 revisions, MIPS interpreted sNaN-2008 as qNan and qNaN-2008 as
439 // sNaN. This is now known as "legacy NaN" encoding.
440 if (Signaling)
441 Result.copy(
442 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
443 else
444 Result.copy(
445 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
446 }
447
449 return true;
450}
451
453 const InterpFrame *Frame,
454 const CallExpr *Call) {
455 const llvm::fltSemantics &TargetSemantics =
457 Call->getDirectCallee()->getReturnType());
458
459 Floating Result = S.allocFloat(TargetSemantics);
460 Result.copy(APFloat::getInf(TargetSemantics));
462 return true;
463}
464
466 const InterpFrame *Frame) {
467 const Floating &Arg2 = S.Stk.pop<Floating>();
468 const Floating &Arg1 = S.Stk.pop<Floating>();
469 Floating Result = S.allocFloat(Arg1.getSemantics());
470
471 APFloat Copy = Arg1.getAPFloat();
472 Copy.copySign(Arg2.getAPFloat());
473 Result.copy(Copy);
475
476 return true;
477}
478
480 const InterpFrame *Frame, bool IsNumBuiltin) {
481 const Floating &RHS = S.Stk.pop<Floating>();
482 const Floating &LHS = S.Stk.pop<Floating>();
483 Floating Result = S.allocFloat(LHS.getSemantics());
484
485 if (IsNumBuiltin)
486 Result.copy(llvm::minimumnum(LHS.getAPFloat(), RHS.getAPFloat()));
487 else
488 Result.copy(minnum(LHS.getAPFloat(), RHS.getAPFloat()));
490 return true;
491}
492
494 const InterpFrame *Frame, bool IsNumBuiltin) {
495 const Floating &RHS = S.Stk.pop<Floating>();
496 const Floating &LHS = S.Stk.pop<Floating>();
497 Floating Result = S.allocFloat(LHS.getSemantics());
498
499 if (IsNumBuiltin)
500 Result.copy(llvm::maximumnum(LHS.getAPFloat(), RHS.getAPFloat()));
501 else
502 Result.copy(maxnum(LHS.getAPFloat(), RHS.getAPFloat()));
504 return true;
505}
506
507/// Defined as __builtin_isnan(...), to accommodate the fact that it can
508/// take a float, double, long double, etc.
509/// But for us, that's all a Floating anyway.
511 const InterpFrame *Frame,
512 const CallExpr *Call) {
513 const Floating &Arg = S.Stk.pop<Floating>();
514
515 pushInteger(S, Arg.isNan(), Call->getType());
516 return true;
517}
518
520 const InterpFrame *Frame,
521 const CallExpr *Call) {
522 const Floating &Arg = S.Stk.pop<Floating>();
523
524 pushInteger(S, Arg.isSignaling(), Call->getType());
525 return true;
526}
527
529 const InterpFrame *Frame, bool CheckSign,
530 const CallExpr *Call) {
531 const Floating &Arg = S.Stk.pop<Floating>();
532 APFloat F = Arg.getAPFloat();
533 bool IsInf = F.isInfinity();
534
535 if (CheckSign)
536 pushInteger(S, IsInf ? (F.isNegative() ? -1 : 1) : 0, Call->getType());
537 else
538 pushInteger(S, IsInf, Call->getType());
539 return true;
540}
541
543 const InterpFrame *Frame,
544 const CallExpr *Call) {
545 const Floating &Arg = S.Stk.pop<Floating>();
546
547 pushInteger(S, Arg.isFinite(), Call->getType());
548 return true;
549}
550
552 const InterpFrame *Frame,
553 const CallExpr *Call) {
554 const Floating &Arg = S.Stk.pop<Floating>();
555
556 pushInteger(S, Arg.isNormal(), Call->getType());
557 return true;
558}
559
561 const InterpFrame *Frame,
562 const CallExpr *Call) {
563 const Floating &Arg = S.Stk.pop<Floating>();
564
565 pushInteger(S, Arg.isDenormal(), Call->getType());
566 return true;
567}
568
570 const InterpFrame *Frame,
571 const CallExpr *Call) {
572 const Floating &Arg = S.Stk.pop<Floating>();
573
574 pushInteger(S, Arg.isZero(), Call->getType());
575 return true;
576}
577
579 const InterpFrame *Frame,
580 const CallExpr *Call) {
581 const Floating &Arg = S.Stk.pop<Floating>();
582
583 pushInteger(S, Arg.isNegative(), Call->getType());
584 return true;
585}
586
588 const CallExpr *Call, unsigned ID) {
589 const Floating &RHS = S.Stk.pop<Floating>();
590 const Floating &LHS = S.Stk.pop<Floating>();
591
593 S,
594 [&] {
595 switch (ID) {
596 case Builtin::BI__builtin_isgreater:
597 return LHS > RHS;
598 case Builtin::BI__builtin_isgreaterequal:
599 return LHS >= RHS;
600 case Builtin::BI__builtin_isless:
601 return LHS < RHS;
602 case Builtin::BI__builtin_islessequal:
603 return LHS <= RHS;
604 case Builtin::BI__builtin_islessgreater: {
605 ComparisonCategoryResult Cmp = LHS.compare(RHS);
606 return Cmp == ComparisonCategoryResult::Less ||
608 }
609 case Builtin::BI__builtin_isunordered:
611 default:
612 llvm_unreachable("Unexpected builtin ID: Should be a floating point "
613 "comparison function");
614 }
615 }(),
616 Call->getType());
617 return true;
618}
619
620/// First parameter to __builtin_isfpclass is the floating value, the
621/// second one is an integral value.
623 const InterpFrame *Frame,
624 const CallExpr *Call) {
625 APSInt FPClassArg = popToAPSInt(S, Call->getArg(1));
626 const Floating &F = S.Stk.pop<Floating>();
627
628 int32_t Result = static_cast<int32_t>(
629 (F.classify() & std::move(FPClassArg)).getZExtValue());
630 pushInteger(S, Result, Call->getType());
631
632 return true;
633}
634
635/// Five int values followed by one floating value.
636/// __builtin_fpclassify(int, int, int, int, int, float)
638 const InterpFrame *Frame,
639 const CallExpr *Call) {
640 const Floating &Val = S.Stk.pop<Floating>();
641
642 PrimType IntT = *S.getContext().classify(Call->getArg(0));
643 APSInt Values[5];
644 for (unsigned I = 0; I != 5; ++I)
645 Values[4 - I] = popToAPSInt(S.Stk, IntT);
646
647 unsigned Index;
648 switch (Val.getCategory()) {
649 case APFloat::fcNaN:
650 Index = 0;
651 break;
652 case APFloat::fcInfinity:
653 Index = 1;
654 break;
655 case APFloat::fcNormal:
656 Index = Val.isDenormal() ? 3 : 2;
657 break;
658 case APFloat::fcZero:
659 Index = 4;
660 break;
661 }
662
663 // The last argument is first on the stack.
664 assert(Index <= 4);
665
666 pushInteger(S, Values[Index], Call->getType());
667 return true;
668}
669
670static inline Floating abs(InterpState &S, const Floating &In) {
671 if (!In.isNegative())
672 return In;
673
674 Floating Output = S.allocFloat(In.getSemantics());
675 APFloat New = In.getAPFloat();
676 New.changeSign();
677 Output.copy(New);
678 return Output;
679}
680
681// The C standard says "fabs raises no floating-point exceptions,
682// even if x is a signaling NaN. The returned value is independent of
683// the current rounding direction mode." Therefore constant folding can
684// proceed without regard to the floating point settings.
685// Reference, WG14 N2478 F.10.4.3
687 const InterpFrame *Frame) {
688 const Floating &Val = S.Stk.pop<Floating>();
689 S.Stk.push<Floating>(abs(S, Val));
690 return true;
691}
692
694 const InterpFrame *Frame,
695 const CallExpr *Call) {
696 APSInt Val = popToAPSInt(S, Call->getArg(0));
697 if (Val ==
698 APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
699 return false;
700 if (Val.isNegative())
701 Val.negate();
702 pushInteger(S, Val, Call->getType());
703 return true;
704}
705
707 const InterpFrame *Frame,
708 const CallExpr *Call) {
709 APSInt Val;
710 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
711 const Pointer &Arg = S.Stk.pop<Pointer>();
712 Val = convertBoolVectorToInt(Arg);
713 } else {
714 Val = popToAPSInt(S, Call->getArg(0));
715 }
716 pushInteger(S, Val.popcount(), Call->getType());
717 return true;
718}
719
721 const InterpFrame *Frame,
722 const CallExpr *Call) {
723 // This is an unevaluated call, so there are no arguments on the stack.
724 assert(Call->getNumArgs() == 1);
725 const Expr *Arg = Call->getArg(0);
726
727 GCCTypeClass ResultClass =
729 int32_t ReturnVal = static_cast<int32_t>(ResultClass);
730 pushInteger(S, ReturnVal, Call->getType());
731 return true;
732}
733
734// __builtin_expect(long, long)
735// __builtin_expect_with_probability(long, long, double)
737 const InterpFrame *Frame,
738 const CallExpr *Call) {
739 // The return value is simply the value of the first parameter.
740 // We ignore the probability.
741 unsigned NumArgs = Call->getNumArgs();
742 assert(NumArgs == 2 || NumArgs == 3);
743
744 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
745 if (NumArgs == 3)
746 S.Stk.discard<Floating>();
747 discard(S.Stk, ArgT);
748
749 APSInt Val = popToAPSInt(S.Stk, ArgT);
750 pushInteger(S, Val, Call->getType());
751 return true;
752}
753
755 const InterpFrame *Frame,
756 const CallExpr *Call) {
757#ifndef NDEBUG
758 assert(Call->getArg(0)->isLValue());
759 PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
760 assert(PtrT == PT_Ptr &&
761 "Unsupported pointer type passed to __builtin_addressof()");
762#endif
763 return true;
764}
765
767 const InterpFrame *Frame,
768 const CallExpr *Call) {
769 return Call->getDirectCallee()->isConstexpr();
770}
771
773 const InterpFrame *Frame,
774 const CallExpr *Call) {
775 APSInt Arg = popToAPSInt(S, Call->getArg(0));
776
778 Arg.getZExtValue());
779 pushInteger(S, Result, Call->getType());
780 return true;
781}
782
783// Two integral values followed by a pointer (lhs, rhs, resultOut)
785 const CallExpr *Call,
786 unsigned BuiltinOp) {
787 const Pointer &ResultPtr = S.Stk.pop<Pointer>();
788 if (ResultPtr.isDummy() || !ResultPtr.isBlockPointer())
789 return false;
790
791 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
792 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
793 APSInt RHS = popToAPSInt(S.Stk, RHST);
794 APSInt LHS = popToAPSInt(S.Stk, LHST);
795 QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
796 PrimType ResultT = *S.getContext().classify(ResultType);
797 bool Overflow;
798
800 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
801 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
802 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
803 bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
805 bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
807 uint64_t LHSSize = LHS.getBitWidth();
808 uint64_t RHSSize = RHS.getBitWidth();
809 uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
810 uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);
811
812 // Add an additional bit if the signedness isn't uniformly agreed to. We
813 // could do this ONLY if there is a signed and an unsigned that both have
814 // MaxBits, but the code to check that is pretty nasty. The issue will be
815 // caught in the shrink-to-result later anyway.
816 if (IsSigned && !AllSigned)
817 ++MaxBits;
818
819 LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
820 RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
821 Result = APSInt(MaxBits, !IsSigned);
822 }
823
824 // Find largest int.
825 switch (BuiltinOp) {
826 default:
827 llvm_unreachable("Invalid value for BuiltinOp");
828 case Builtin::BI__builtin_add_overflow:
829 case Builtin::BI__builtin_sadd_overflow:
830 case Builtin::BI__builtin_saddl_overflow:
831 case Builtin::BI__builtin_saddll_overflow:
832 case Builtin::BI__builtin_uadd_overflow:
833 case Builtin::BI__builtin_uaddl_overflow:
834 case Builtin::BI__builtin_uaddll_overflow:
835 Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
836 : LHS.uadd_ov(RHS, Overflow);
837 break;
838 case Builtin::BI__builtin_sub_overflow:
839 case Builtin::BI__builtin_ssub_overflow:
840 case Builtin::BI__builtin_ssubl_overflow:
841 case Builtin::BI__builtin_ssubll_overflow:
842 case Builtin::BI__builtin_usub_overflow:
843 case Builtin::BI__builtin_usubl_overflow:
844 case Builtin::BI__builtin_usubll_overflow:
845 Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
846 : LHS.usub_ov(RHS, Overflow);
847 break;
848 case Builtin::BI__builtin_mul_overflow:
849 case Builtin::BI__builtin_smul_overflow:
850 case Builtin::BI__builtin_smull_overflow:
851 case Builtin::BI__builtin_smulll_overflow:
852 case Builtin::BI__builtin_umul_overflow:
853 case Builtin::BI__builtin_umull_overflow:
854 case Builtin::BI__builtin_umulll_overflow:
855 Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
856 : LHS.umul_ov(RHS, Overflow);
857 break;
858 }
859
860 // In the case where multiple sizes are allowed, truncate and see if
861 // the values are the same.
862 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
863 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
864 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
865 // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
866 // since it will give us the behavior of a TruncOrSelf in the case where
867 // its parameter <= its size. We previously set Result to be at least the
868 // type-size of the result, so getTypeSize(ResultType) <= Resu
869 APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
870 Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());
871
872 if (!APSInt::isSameValue(Temp, Result))
873 Overflow = true;
874 Result = std::move(Temp);
875 }
876
877 // Write Result to ResultPtr and put Overflow on the stack.
878 assignInteger(S, ResultPtr, ResultT, Result);
879 if (ResultPtr.canBeInitialized())
880 ResultPtr.initialize();
881
882 assert(Call->getDirectCallee()->getReturnType()->isBooleanType());
883 S.Stk.push<Boolean>(Overflow);
884 return true;
885}
886
887/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
889 const InterpFrame *Frame,
890 const CallExpr *Call, unsigned BuiltinOp) {
891 const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
892 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
893 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
894 APSInt CarryIn = popToAPSInt(S.Stk, LHST);
895 APSInt RHS = popToAPSInt(S.Stk, RHST);
896 APSInt LHS = popToAPSInt(S.Stk, LHST);
897
898 if (CarryOutPtr.isDummy() || !CarryOutPtr.isBlockPointer())
899 return false;
900
901 APSInt CarryOut;
902
904 // Copy the number of bits and sign.
905 Result = LHS;
906 CarryOut = LHS;
907
908 bool FirstOverflowed = false;
909 bool SecondOverflowed = false;
910 switch (BuiltinOp) {
911 default:
912 llvm_unreachable("Invalid value for BuiltinOp");
913 case Builtin::BI__builtin_addcb:
914 case Builtin::BI__builtin_addcs:
915 case Builtin::BI__builtin_addc:
916 case Builtin::BI__builtin_addcl:
917 case Builtin::BI__builtin_addcll:
918 Result =
919 LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
920 break;
921 case Builtin::BI__builtin_subcb:
922 case Builtin::BI__builtin_subcs:
923 case Builtin::BI__builtin_subc:
924 case Builtin::BI__builtin_subcl:
925 case Builtin::BI__builtin_subcll:
926 Result =
927 LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
928 break;
929 }
930 // It is possible for both overflows to happen but CGBuiltin uses an OR so
931 // this is consistent.
932 CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);
933
934 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
935 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
936 assignInteger(S, CarryOutPtr, CarryOutT, CarryOut);
937 CarryOutPtr.initialize();
938
939 assert(Call->getType() == Call->getArg(0)->getType());
940 pushInteger(S, Result, Call->getType());
941 return true;
942}
943
945 const InterpFrame *Frame, const CallExpr *Call,
946 unsigned BuiltinOp) {
947
948 std::optional<APSInt> Fallback;
949 if (BuiltinOp == Builtin::BI__builtin_clzg && Call->getNumArgs() == 2)
950 Fallback = popToAPSInt(S, Call->getArg(1));
951
952 APSInt Val;
953 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
954 const Pointer &Arg = S.Stk.pop<Pointer>();
955 Val = convertBoolVectorToInt(Arg);
956 } else {
957 Val = popToAPSInt(S, Call->getArg(0));
958 }
959
960 // When the argument is 0, the result of GCC builtins is undefined, whereas
961 // for Microsoft intrinsics, the result is the bit-width of the argument.
962 bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
963 BuiltinOp != Builtin::BI__lzcnt &&
964 BuiltinOp != Builtin::BI__lzcnt64;
965
966 if (Val == 0) {
967 if (Fallback) {
968 pushInteger(S, *Fallback, Call->getType());
969 return true;
970 }
971
972 if (ZeroIsUndefined)
973 return false;
974 }
975
976 pushInteger(S, Val.countl_zero(), Call->getType());
977 return true;
978}
979
981 const InterpFrame *Frame, const CallExpr *Call,
982 unsigned BuiltinID) {
983 std::optional<APSInt> Fallback;
984 if (BuiltinID == Builtin::BI__builtin_ctzg && Call->getNumArgs() == 2)
985 Fallback = popToAPSInt(S, Call->getArg(1));
986
987 APSInt Val;
988 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
989 const Pointer &Arg = S.Stk.pop<Pointer>();
990 Val = convertBoolVectorToInt(Arg);
991 } else {
992 Val = popToAPSInt(S, Call->getArg(0));
993 }
994
995 if (Val == 0) {
996 if (Fallback) {
997 pushInteger(S, *Fallback, Call->getType());
998 return true;
999 }
1000 return false;
1001 }
1002
1003 pushInteger(S, Val.countr_zero(), Call->getType());
1004 return true;
1005}
1006
1008 const InterpFrame *Frame,
1009 const CallExpr *Call) {
1010 const APSInt &Val = popToAPSInt(S, Call->getArg(0));
1011 if (Val.getBitWidth() == 8)
1012 pushInteger(S, Val, Call->getType());
1013 else
1014 pushInteger(S, Val.byteSwap(), Call->getType());
1015 return true;
1016}
1017
1018/// bool __atomic_always_lock_free(size_t, void const volatile*)
1019/// bool __atomic_is_lock_free(size_t, void const volatile*)
1021 const InterpFrame *Frame,
1022 const CallExpr *Call,
1023 unsigned BuiltinOp) {
1024 auto returnBool = [&S](bool Value) -> bool {
1025 S.Stk.push<Boolean>(Value);
1026 return true;
1027 };
1028
1029 const Pointer &Ptr = S.Stk.pop<Pointer>();
1030 uint64_t SizeVal = popToUInt64(S, Call->getArg(0));
1031
1032 // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
1033 // of two less than or equal to the maximum inline atomic width, we know it
1034 // is lock-free. If the size isn't a power of two, or greater than the
1035 // maximum alignment where we promote atomics, we know it is not lock-free
1036 // (at least not in the sense of atomic_is_lock_free). Otherwise,
1037 // the answer can only be determined at runtime; for example, 16-byte
1038 // atomics have lock-free implementations on some, but not all,
1039 // x86-64 processors.
1040
1041 // Check power-of-two.
1042 CharUnits Size = CharUnits::fromQuantity(SizeVal);
1043 if (Size.isPowerOfTwo()) {
1044 // Check against inlining width.
1045 unsigned InlineWidthBits =
1047 if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
1048
1049 // OK, we will inline appropriately-aligned operations of this size,
1050 // and _Atomic(T) is appropriately-aligned.
1051 if (Size == CharUnits::One())
1052 return returnBool(true);
1053
1054 // Same for null pointers.
1055 assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
1056 if (Ptr.isZero())
1057 return returnBool(true);
1058
1059 if (Ptr.isIntegralPointer()) {
1060 uint64_t IntVal = Ptr.getIntegerRepresentation();
1061 if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
1062 return returnBool(true);
1063 }
1064
1065 const Expr *PtrArg = Call->getArg(1);
1066 // Otherwise, check if the type's alignment against Size.
1067 if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
1068 // Drop the potential implicit-cast to 'const volatile void*', getting
1069 // the underlying type.
1070 if (ICE->getCastKind() == CK_BitCast)
1071 PtrArg = ICE->getSubExpr();
1072 }
1073
1074 if (const auto *PtrTy = PtrArg->getType()->getAs<PointerType>()) {
1075 QualType PointeeType = PtrTy->getPointeeType();
1076 if (!PointeeType->isIncompleteType() &&
1077 S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
1078 // OK, we will inline operations on this object.
1079 return returnBool(true);
1080 }
1081 }
1082 }
1083 }
1084
1085 if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
1086 return returnBool(false);
1087
1088 return false;
1089}
1090
1091/// bool __c11_atomic_is_lock_free(size_t)
1093 CodePtr OpPC,
1094 const InterpFrame *Frame,
1095 const CallExpr *Call) {
1096 uint64_t SizeVal = popToUInt64(S, Call->getArg(0));
1097
1098 CharUnits Size = CharUnits::fromQuantity(SizeVal);
1099 if (Size.isPowerOfTwo()) {
1100 // Check against inlining width.
1101 unsigned InlineWidthBits =
1103 if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
1104 S.Stk.push<Boolean>(true);
1105 return true;
1106 }
1107 }
1108
1109 return false; // returnBool(false);
1110}
1111
1112/// __builtin_complex(Float A, float B);
1114 const InterpFrame *Frame,
1115 const CallExpr *Call) {
1116 const Floating &Arg2 = S.Stk.pop<Floating>();
1117 const Floating &Arg1 = S.Stk.pop<Floating>();
1118 Pointer &Result = S.Stk.peek<Pointer>();
1119
1120 Result.elem<Floating>(0) = Arg1;
1121 Result.elem<Floating>(1) = Arg2;
1122 Result.initializeAllElements();
1123
1124 return true;
1125}
1126
/// __builtin_is_aligned()
/// __builtin_align_up()
/// __builtin_align_down()
/// The first parameter is either an integer or a pointer.
/// The second parameter is the requested alignment as an integer.
                                        const InterpFrame *Frame,
                                        const CallExpr *Call,
                                        unsigned BuiltinOp) {
  const APSInt &Alignment = popToAPSInt(S, Call->getArg(1));

  // The alignment must be a positive power of two.
  if (Alignment < 0 || !Alignment.isPowerOf2()) {
    S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
    return false;
  }
  // Reject alignments larger than the biggest power of two representable in
  // the first argument's type (1 << (SrcWidth - 1)).
  unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
  APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
  if (APSInt::compareValues(Alignment, MaxValue) > 0) {
    S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
        << MaxValue << Call->getArg(0)->getType() << Alignment;
    return false;
  }

  // The first parameter is either an integer or a pointer.
  PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));

  if (isIntegralType(FirstArgT)) {
    // Integer case: pure bit arithmetic with (Alignment - 1) as the mask.
    const APSInt &Src = popToAPSInt(S.Stk, FirstArgT);
    APInt AlignMinusOne = Alignment.extOrTrunc(Src.getBitWidth()) - 1;
    if (BuiltinOp == Builtin::BI__builtin_align_up) {
      APSInt AlignedVal =
          APSInt((Src + AlignMinusOne) & ~AlignMinusOne, Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
      APSInt AlignedVal = APSInt(Src & ~AlignMinusOne, Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else {
      // __builtin_is_aligned: true iff no low bits below the alignment are set.
      assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
      S.Stk.push<Boolean>((Src & AlignMinusOne) == 0);
    }
    return true;
  }
  assert(FirstArgT == PT_Ptr);
  const Pointer &Ptr = S.Stk.pop<Pointer>();
  // Only pointers into interpreter blocks carry usable alignment information.
  if (!Ptr.isBlockPointer())
    return false;

  // Known alignment of the pointer = alignment of its base declaration,
  // adjusted for the byte offset into the object.
  unsigned PtrOffset = Ptr.getIndex();
  CharUnits BaseAlignment =
  CharUnits PtrAlign =
      BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));

  if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
    if (PtrAlign.getQuantity() >= Alignment) {
      S.Stk.push<Boolean>(true);
      return true;
    }
    // If the alignment is not known to be sufficient, some cases could still
    // be aligned at run time. However, if the requested alignment is less or
    // equal to the base alignment and the offset is not aligned, we know that
    // the run-time value can never be aligned.
    if (BaseAlignment.getQuantity() >= Alignment &&
        PtrAlign.getQuantity() < Alignment) {
      S.Stk.push<Boolean>(false);
      return true;
    }

    // Neither provably aligned nor provably misaligned: not constant.
    S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
        << Alignment;
    return false;
  }

  assert(BuiltinOp == Builtin::BI__builtin_align_down ||
         BuiltinOp == Builtin::BI__builtin_align_up);

  // For align_up/align_down, we can return the same value if the alignment
  // is known to be greater or equal to the requested value.
  if (PtrAlign.getQuantity() >= Alignment) {
    S.Stk.push<Pointer>(Ptr);
    return true;
  }

  // The alignment could be greater than the minimum at run-time, so we cannot
  // infer much about the resulting pointer value. One case is possible:
  // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
  // can infer the correct index if the requested alignment is smaller than
  // the base alignment so we can perform the computation on the offset.
  if (BaseAlignment.getQuantity() >= Alignment) {
    assert(Alignment.getBitWidth() <= 64 &&
           "Cannot handle > 64-bit address-space");
    uint64_t Alignment64 = Alignment.getZExtValue();
    CharUnits NewOffset =
        CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
                                    ? llvm::alignDown(PtrOffset, Alignment64)
                                    : llvm::alignTo(PtrOffset, Alignment64));

    S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
    return true;
  }

  // Otherwise, we cannot constant-evaluate the result.
  S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
  return false;
}
1232
1233/// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
1235 const InterpFrame *Frame,
1236 const CallExpr *Call) {
1237 assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);
1238
1239 std::optional<APSInt> ExtraOffset;
1240 if (Call->getNumArgs() == 3)
1241 ExtraOffset = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));
1242
1243 APSInt Alignment = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
1244 const Pointer &Ptr = S.Stk.pop<Pointer>();
1245
1246 CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());
1247
1248 // If there is a base object, then it must have the correct alignment.
1249 if (Ptr.isBlockPointer()) {
1250 CharUnits BaseAlignment;
1251 if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
1252 BaseAlignment = S.getASTContext().getDeclAlign(VD);
1253 else if (const auto *E = Ptr.getDeclDesc()->asExpr())
1254 BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);
1255
1256 if (BaseAlignment < Align) {
1257 S.CCEDiag(Call->getArg(0),
1258 diag::note_constexpr_baa_insufficient_alignment)
1259 << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
1260 return false;
1261 }
1262 }
1263
1264 APValue AV = Ptr.toAPValue(S.getASTContext());
1265 CharUnits AVOffset = AV.getLValueOffset();
1266 if (ExtraOffset)
1267 AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
1268 if (AVOffset.alignTo(Align) != AVOffset) {
1269 if (Ptr.isBlockPointer())
1270 S.CCEDiag(Call->getArg(0),
1271 diag::note_constexpr_baa_insufficient_alignment)
1272 << 1 << AVOffset.getQuantity() << Align.getQuantity();
1273 else
1274 S.CCEDiag(Call->getArg(0),
1275 diag::note_constexpr_baa_value_insufficient_alignment)
1276 << AVOffset.getQuantity() << Align.getQuantity();
1277 return false;
1278 }
1279
1280 S.Stk.push<Pointer>(Ptr);
1281 return true;
1282}
1283
1284/// (CarryIn, LHS, RHS, Result)
1286 CodePtr OpPC,
1287 const InterpFrame *Frame,
1288 const CallExpr *Call,
1289 unsigned BuiltinOp) {
1290 if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
1291 !Call->getArg(1)->getType()->isIntegerType() ||
1292 !Call->getArg(2)->getType()->isIntegerType())
1293 return false;
1294
1295 const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
1296
1297 APSInt RHS = popToAPSInt(S, Call->getArg(2));
1298 APSInt LHS = popToAPSInt(S, Call->getArg(1));
1299 APSInt CarryIn = popToAPSInt(S, Call->getArg(0));
1300
1301 bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
1302 BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;
1303
1304 unsigned BitWidth = LHS.getBitWidth();
1305 unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
1306 APInt ExResult =
1307 IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
1308 : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));
1309
1310 APInt Result = ExResult.extractBits(BitWidth, 0);
1311 APSInt CarryOut =
1312 APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);
1313
1314 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
1315 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
1316 assignInteger(S, CarryOutPtr, CarryOutT, APSInt(std::move(Result), true));
1317
1318 pushInteger(S, CarryOut, Call->getType());
1319
1320 return true;
1321}
1322
1324 CodePtr OpPC,
1325 const InterpFrame *Frame,
1326 const CallExpr *Call) {
1329 pushInteger(S, Layout.size().getQuantity(), Call->getType());
1330 return true;
1331}
1332
1333static bool
1335 const InterpFrame *Frame,
1336 const CallExpr *Call) {
1337 const auto &Ptr = S.Stk.pop<Pointer>();
1338 assert(Ptr.getFieldDesc()->isPrimitiveArray());
1339
1340 // This should be created for a StringLiteral, so should alway shold at least
1341 // one array element.
1342 assert(Ptr.getFieldDesc()->getNumElems() >= 1);
1343 StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
1344 uint64_t Result = getPointerAuthStableSipHash(R);
1345 pushInteger(S, Result, Call->getType());
1346 return true;
1347}
1348
1350 const InterpFrame *Frame,
1351 const CallExpr *Call) {
1352 const ASTContext &ASTCtx = S.getASTContext();
1353 uint64_t BitWidth = ASTCtx.getTypeSize(ASTCtx.getSizeType());
1354 auto Mode =
1355 ASTCtx.getLangOpts().AllocTokenMode.value_or(llvm::DefaultAllocTokenMode);
1356 auto MaxTokensOpt = ASTCtx.getLangOpts().AllocTokenMax;
1357 uint64_t MaxTokens =
1358 MaxTokensOpt.value_or(0) ? *MaxTokensOpt : (~0ULL >> (64 - BitWidth));
1359
1360 // We do not read any of the arguments; discard them.
1361 for (int I = Call->getNumArgs() - 1; I >= 0; --I)
1362 discard(S.Stk, *S.getContext().classify(Call->getArg(I)));
1363
1364 // Note: Type inference from a surrounding cast is not supported in
1365 // constexpr evaluation.
1366 QualType AllocType = infer_alloc::inferPossibleType(Call, ASTCtx, nullptr);
1367 if (AllocType.isNull()) {
1368 S.CCEDiag(Call,
1369 diag::note_constexpr_infer_alloc_token_type_inference_failed);
1370 return false;
1371 }
1372
1373 auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, ASTCtx);
1374 if (!ATMD) {
1375 S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_no_metadata);
1376 return false;
1377 }
1378
1379 auto MaybeToken = llvm::getAllocToken(Mode, *ATMD, MaxTokens);
1380 if (!MaybeToken) {
1381 S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_stateful_mode);
1382 return false;
1383 }
1384
1385 pushInteger(S, llvm::APInt(BitWidth, *MaybeToken), ASTCtx.getSizeType());
1386 return true;
1387}
1388
                                         const InterpFrame *Frame,
                                         const CallExpr *Call) {
  // A call to __operator_new is only valid within std::allocate<>::allocate.
  // Walk up the call stack to find the appropriate caller and get the
  // element type from it.
  auto [NewCall, ElemType] = S.getStdAllocatorCaller("allocate");

  if (ElemType.isNull()) {
    S.FFDiag(Call, S.getLangOpts().CPlusPlus20
                       ? diag::note_constexpr_new_untyped
                       : diag::note_constexpr_new);
    return false;
  }
  assert(NewCall);

  // Allocation of incomplete or function types is never valid.
  if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
    S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
        << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
    return false;
  }

  // We only care about the first parameter (the size), so discard all the
  // others.
  {
    unsigned NumArgs = Call->getNumArgs();
    assert(NumArgs >= 1);

    // The std::nothrow_t arg never gets put on the stack.
    if (Call->getArg(NumArgs - 1)->getType()->isNothrowT())
      --NumArgs;
    // NOTE(review): the decremented NumArgs is not used below — the ArrayRef
    // still spans Call->getNumArgs() arguments, so a trailing std::nothrow_t
    // would be discarded from the stack even though the comment above says it
    // was never pushed. Confirm against upstream.
    auto Args = ArrayRef(Call->getArgs(), Call->getNumArgs());
    // First arg is needed.
    Args = Args.drop_front();

    // Discard the rest.
    for (const Expr *Arg : Args)
      discard(S.Stk, *S.getContext().classify(Arg));
  }

  APSInt Bytes = popToAPSInt(S, Call->getArg(0));
  CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
  assert(!ElemSize.isZero());
  // Divide the number of bytes by sizeof(ElemType), so we get the number of
  // elements we should allocate.
  APInt NumElems, Remainder;
  APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
  APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
  if (Remainder != 0) {
    // This likely indicates a bug in the implementation of 'std::allocator'.
    S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
        << Bytes << APSInt(ElemSizeAP, true) << ElemType;
    return false;
  }

  // NB: The same check we're using in CheckArraySize()
  if (NumElems.getActiveBits() >
      NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
    // FIXME: NoThrow check?
    const SourceInfo &Loc = S.Current->getSource(OpPC);
    S.FFDiag(Loc, diag::note_constexpr_new_too_large)
        << NumElems.getZExtValue();
    return false;
  }

  if (!CheckArraySize(S, OpPC, NumElems.getZExtValue()))
    return false;

  bool IsArray = NumElems.ugt(1);
  OptPrimType ElemT = S.getContext().classify(ElemType);
  DynamicAllocator &Allocator = S.getAllocator();
  // Primitive element type: allocate a primitive array block directly.
  if (ElemT) {
    Block *B =
        Allocator.allocate(NewCall, *ElemT, NumElems.getZExtValue(),
    assert(B);
    S.Stk.push<Pointer>(Pointer(B).atIndex(0));
    return true;
  }

  assert(!ElemT);

  // Composite arrays
  if (IsArray) {
    const Descriptor *Desc =
        S.P.createDescriptor(NewCall, ElemType.getTypePtr(), std::nullopt);
    Block *B =
        Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
    assert(B);
    S.Stk.push<Pointer>(Pointer(B).atIndex(0).narrow());
    return true;
  }

  // Records. Still allocate them as single-element arrays.
      ElemType, NumElems, nullptr, ArraySizeModifier::Normal, 0);

  const Descriptor *Desc = S.P.createDescriptor(NewCall, AllocType.getTypePtr(),
  Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
  assert(B);
  S.Stk.push<Pointer>(Pointer(B).atIndex(0).narrow());
  return true;
}
1496
                                            const InterpFrame *Frame,
                                            const CallExpr *Call) {
  const Expr *Source = nullptr;
  const Block *BlockToDelete = nullptr;

    S.Stk.discard<Pointer>();
    return false;
  }

  // This is permitted only within a call to std::allocator<T>::deallocate.
  if (!S.getStdAllocatorCaller("deallocate")) {
    S.FFDiag(Call);
    S.Stk.discard<Pointer>();
    return true;
  }

  // Scope the popped pointer so it is off the stack before deallocation.
  {
    const Pointer &Ptr = S.Stk.pop<Pointer>();

    // Deallocating a null pointer is a no-op (diagnosed, but not an error).
    if (Ptr.isZero()) {
      S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
      return true;
    }

    Source = Ptr.getDeclDesc()->asExpr();
    BlockToDelete = Ptr.block();

    // Only dynamically-allocated blocks may be deallocated here.
    if (!BlockToDelete->isDynamic()) {
      S.FFDiag(Call, diag::note_constexpr_delete_not_heap_alloc)
      if (const auto *D = Ptr.getFieldDesc()->asDecl())
        S.Note(D->getLocation(), diag::note_declared_at);
    }
  }
  assert(BlockToDelete);

  DynamicAllocator &Allocator = S.getAllocator();
  const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
  // Record the allocation form before deallocating; needed for the
  // new/delete form-matching check below.
  std::optional<DynamicAllocator::Form> AllocForm =
      Allocator.getAllocationForm(Source);

  if (!Allocator.deallocate(Source, BlockToDelete, S)) {
    // Nothing has been deallocated, this must be a double-delete.
    const SourceInfo &Loc = S.Current->getSource(OpPC);
    S.FFDiag(Loc, diag::note_constexpr_double_delete);
    return false;
  }
  assert(AllocForm);

  // Verify the allocation form matches operator delete (e.g. memory from
  // new[] must not be freed with operator delete).
  return CheckNewDeleteForms(
      S, OpPC, *AllocForm, DynamicAllocator::Form::Operator, BlockDesc, Source);
}
1551
1553 const InterpFrame *Frame,
1554 const CallExpr *Call) {
1555 const Floating &Arg0 = S.Stk.pop<Floating>();
1556 S.Stk.push<Floating>(Arg0);
1557 return true;
1558}
1559
                                          const CallExpr *Call, unsigned ID) {
  // Horizontal reduction over an integer vector: add/mul (with overflow
  // detection), and/or/xor, min/max, selected by the builtin ID.
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  assert(Call->getType() == ElemType);
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();

    // Accumulate left-to-right, starting from element 0.
    T Result = Arg.elem<T>(0);
    unsigned BitWidth = Result.bitWidth();
    for (unsigned I = 1; I != NumElems; ++I) {
      T Elem = Arg.elem<T>(I);
      // Kept so the overflow diagnostic can show the failing operands.
      T PrevResult = Result;

      if (ID == Builtin::BI__builtin_reduce_add) {
        if (T::add(Result, Elem, BitWidth, &Result)) {
          // Overflow: recompute in a wider type for the diagnostic.
          unsigned OverflowBits = BitWidth + 1;
          (void)handleOverflow(S, OpPC,
                               (PrevResult.toAPSInt(OverflowBits) +
                                Elem.toAPSInt(OverflowBits)));
          return false;
        }
      } else if (ID == Builtin::BI__builtin_reduce_mul) {
        if (T::mul(Result, Elem, BitWidth, &Result)) {
          unsigned OverflowBits = BitWidth * 2;
          (void)handleOverflow(S, OpPC,
                               (PrevResult.toAPSInt(OverflowBits) *
                                Elem.toAPSInt(OverflowBits)));
          return false;
        }

      } else if (ID == Builtin::BI__builtin_reduce_and) {
        (void)T::bitAnd(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_or) {
        (void)T::bitOr(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_xor) {
        (void)T::bitXor(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_min) {
        if (Elem < Result)
          Result = Elem;
      } else if (ID == Builtin::BI__builtin_reduce_max) {
        if (Elem > Result)
          Result = Elem;
      } else {
        llvm_unreachable("Unhandled vector reduce builtin");
      }
    }
    pushInteger(S, Result.toAPSInt(), Call->getType());
  });

  return true;
}
1615
                                             const InterpFrame *Frame,
                                             const CallExpr *Call,
                                             unsigned BuiltinID) {
  // __builtin_elementwise_abs: scalar integer, scalar floating point, or an
  // element-wise application over a vector.
  assert(Call->getNumArgs() == 1);
  QualType Ty = Call->getArg(0)->getType();
  if (Ty->isIntegerType()) {
    APSInt Val = popToAPSInt(S, Call->getArg(0));
    pushInteger(S, Val.abs(), Call->getType());
    return true;
  }

  if (Ty->isFloatingType()) {
    Floating Val = S.Stk.pop<Floating>();
    Floating Result = abs(S, Val);
    S.Stk.push<Floating>(Result);
    return true;
  }

  // Otherwise, the argument must be a vector.
  assert(Call->getArg(0)->getType()->isVectorType());
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());
  // The destination vector was pushed by the caller and stays on the stack.
  const Pointer &Dst = S.Stk.peek<Pointer>();
  assert(Dst.getFieldDesc()->isPrimitiveArray());
  assert(Arg.getFieldDesc()->getNumElems() ==
         Dst.getFieldDesc()->getNumElems());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();
  // we can either have a vector of integer or a vector of floating point
  for (unsigned I = 0; I != NumElems; ++I) {
    if (ElemType->isIntegerType()) {
        Dst.elem<T>(I) = T::from(static_cast<T>(
            APSInt(Arg.elem<T>(I).toAPSInt().abs(),
      });
    } else {
      Floating Val = Arg.elem<Floating>(I);
      Dst.elem<Floating>(I) = abs(S, Val);
    }
  }

  return true;
}
1664
/// Can be called with an integer or vector as the first and only parameter.
///
/// Implements __builtin_elementwise_clzg / __builtin_elementwise_ctzg.
/// The optional second argument supplies the result for zero inputs; without
/// it, a zero input makes the evaluation non-constant.
                                               const InterpFrame *Frame,
                                               const CallExpr *Call,
                                               unsigned BuiltinID) {
  bool HasZeroArg = Call->getNumArgs() == 2;
  bool IsCTTZ = BuiltinID == Builtin::BI__builtin_elementwise_ctzg;
  assert(Call->getNumArgs() == 1 || HasZeroArg);
  if (Call->getArg(0)->getType()->isIntegerType()) {
    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
    APSInt Val = popToAPSInt(S.Stk, ArgT);
    // Arguments were pushed left-to-right: the value popped first is the
    // zero-fallback (arg 1); the actual operand is underneath it.
    std::optional<APSInt> ZeroVal;
    if (HasZeroArg) {
      ZeroVal = Val;
      Val = popToAPSInt(S.Stk, ArgT);
    }

    if (Val.isZero()) {
      if (ZeroVal) {
        pushInteger(S, *ZeroVal, Call->getType());
        return true;
      }
      // If we haven't been provided the second argument, the result is
      // undefined
      S.FFDiag(S.Current->getSource(OpPC),
               diag::note_constexpr_countzeroes_zero)
          << /*IsTrailing=*/IsCTTZ;
      return false;
    }

    if (BuiltinID == Builtin::BI__builtin_elementwise_clzg) {
      pushInteger(S, Val.countLeadingZeros(), Call->getType());
    } else {
      pushInteger(S, Val.countTrailingZeros(), Call->getType());
    }
    return true;
  }
  // Otherwise, the argument must be a vector.
  const ASTContext &ASTCtx = S.getASTContext();
  Pointer ZeroArg;
  if (HasZeroArg) {
    assert(Call->getArg(1)->getType()->isVectorType() &&
           ASTCtx.hasSameUnqualifiedType(Call->getArg(0)->getType(),
                                         Call->getArg(1)->getType()));
    (void)ASTCtx;
    ZeroArg = S.Stk.pop<Pointer>();
    assert(ZeroArg.getFieldDesc()->isPrimitiveArray());
  }
  assert(Call->getArg(0)->getType()->isVectorType());
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());
  // The destination vector was pushed by the caller and stays on the stack.
  const Pointer &Dst = S.Stk.peek<Pointer>();
  assert(Dst.getFieldDesc()->isPrimitiveArray());
  assert(Arg.getFieldDesc()->getNumElems() ==
         Dst.getFieldDesc()->getNumElems());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();

  // FIXME: Reading from uninitialized vector elements?
  for (unsigned I = 0; I != NumElems; ++I) {
      APInt EltVal = Arg.atIndex(I).deref<T>().toAPSInt();
      if (EltVal.isZero()) {
        if (HasZeroArg) {
          // Zero element: take the fallback from the second vector.
          Dst.atIndex(I).deref<T>() = ZeroArg.atIndex(I).deref<T>();
        } else {
          // If we haven't been provided the second argument, the result is
          // undefined
          S.FFDiag(S.Current->getSource(OpPC),
                   diag::note_constexpr_countzeroes_zero)
              << /*IsTrailing=*/IsCTTZ;
          return false;
        }
      } else if (IsCTTZ) {
        Dst.atIndex(I).deref<T>() = T::from(EltVal.countTrailingZeros());
      } else {
        Dst.atIndex(I).deref<T>() = T::from(EltVal.countLeadingZeros());
      }
      Dst.atIndex(I).initialize();
    });
  }

  return true;
}
1752
                                  const InterpFrame *Frame,
                                  const CallExpr *Call, unsigned ID) {
  // Implements memcpy/memmove and their wide (wmem*) and __builtin_ variants
  // under the constexpr rules: matching trivially-copyable element types,
  // whole elements only, in-bounds, and (for memcpy) non-overlapping.
  assert(Call->getNumArgs() == 3);
  const ASTContext &ASTCtx = S.getASTContext();
  uint64_t Size = popToUInt64(S, Call->getArg(2));
  Pointer SrcPtr = S.Stk.pop<Pointer>().expand();
  Pointer DestPtr = S.Stk.pop<Pointer>().expand();

  // The library functions (as opposed to the __builtin_ forms) are not
  // usable in constant expressions; diagnose but continue evaluating.
  if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  bool Move =
      (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove ||
       ID == Builtin::BI__builtin_wmemmove || ID == Builtin::BIwmemmove);
  bool WChar = ID == Builtin::BIwmemcpy || ID == Builtin::BIwmemmove ||
               ID == Builtin::BI__builtin_wmemcpy ||
               ID == Builtin::BI__builtin_wmemmove;

  // If the size is zero, we treat this as always being a valid no-op.
  if (Size == 0) {
    S.Stk.push<Pointer>(DestPtr);
    return true;
  }

  if (SrcPtr.isZero() || DestPtr.isZero()) {
    Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
        << /*IsMove=*/Move << /*IsWchar=*/WChar << !SrcPtr.isZero()
        << DiagPtr.toDiagnosticString(ASTCtx);
    return false;
  }

  // Diagnose integral src/dest pointers specially.
  if (SrcPtr.isIntegralPointer() || DestPtr.isIntegralPointer()) {
    std::string DiagVal = "(void *)";
    DiagVal += SrcPtr.isIntegralPointer()
                   ? std::to_string(SrcPtr.getIntegerRepresentation())
                   : std::to_string(DestPtr.getIntegerRepresentation());
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
        << Move << WChar << DestPtr.isIntegralPointer() << DiagVal;
    return false;
  }

  // Can't read from dummy pointers.
  if (DestPtr.isDummy() || SrcPtr.isDummy())
    return false;

  if (DestPtr.getType()->isIncompleteType()) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_incomplete_type)
        << Move << DestPtr.getType();
    return false;
  }
  if (SrcPtr.getType()->isIncompleteType()) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_incomplete_type)
        << Move << SrcPtr.getType();
    return false;
  }

  QualType DestElemType = getElemType(DestPtr);
  if (DestElemType->isIncompleteType()) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_incomplete_type)
        << Move << DestElemType;
    return false;
  }

  // Number of whole elements left in the destination from its current index.
  size_t RemainingDestElems;
  if (DestPtr.getFieldDesc()->isArray()) {
    RemainingDestElems = DestPtr.isUnknownSizeArray()
                             ? 0
                             : (DestPtr.getNumElems() - DestPtr.getIndex());
  } else {
    RemainingDestElems = 1;
  }
  unsigned DestElemSize = ASTCtx.getTypeSizeInChars(DestElemType).getQuantity();

  // The wide variants count in wchar_t units; convert Size to bytes.
  if (WChar) {
    uint64_t WCharSize =
        ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
    Size *= WCharSize;
  }

  // Only whole elements may be copied in a constant expression.
  if (Size % DestElemSize != 0) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_unsupported)
        << Move << WChar << 0 << DestElemType << Size << DestElemSize;
    return false;
  }

  QualType SrcElemType = getElemType(SrcPtr);
  size_t RemainingSrcElems;
  if (SrcPtr.getFieldDesc()->isArray()) {
    RemainingSrcElems = SrcPtr.isUnknownSizeArray()
                            ? 0
                            : (SrcPtr.getNumElems() - SrcPtr.getIndex());
  } else {
    RemainingSrcElems = 1;
  }
  unsigned SrcElemSize = ASTCtx.getTypeSizeInChars(SrcElemType).getQuantity();

  // Copying between unrelated element types would be type punning.
  if (!ASTCtx.hasSameUnqualifiedType(DestElemType, SrcElemType)) {
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_type_pun)
        << Move << SrcElemType << DestElemType;
    return false;
  }

  if (!DestElemType.isTriviallyCopyableType(ASTCtx)) {
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_nontrivial)
        << Move << DestElemType;
    return false;
  }

  // Check if we have enough elements to read from and write to.
  size_t RemainingDestBytes = RemainingDestElems * DestElemSize;
  size_t RemainingSrcBytes = RemainingSrcElems * SrcElemSize;
  if (Size > RemainingDestBytes || Size > RemainingSrcBytes) {
    APInt N = APInt(64, Size / DestElemSize);
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_unsupported)
        << Move << WChar << (Size > RemainingSrcBytes ? 1 : 2) << DestElemType
        << toString(N, 10, /*Signed=*/false);
    return false;
  }

  // Check for overlapping memory regions.
  if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
    // Remove base casts.
    Pointer SrcP = SrcPtr;
    while (SrcP.isBaseClass())
      SrcP = SrcP.getBase();

    Pointer DestP = DestPtr;
    while (DestP.isBaseClass())
      DestP = DestP.getBase();

    // Compare byte offsets within the shared block.
    unsigned SrcIndex = SrcP.expand().getIndex() * SrcP.elemSize();
    unsigned DstIndex = DestP.expand().getIndex() * DestP.elemSize();

    if ((SrcIndex <= DstIndex && (SrcIndex + Size) > DstIndex) ||
        (DstIndex <= SrcIndex && (DstIndex + Size) > SrcIndex)) {
      S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
          << /*IsWChar=*/false;
      return false;
    }
  }

  assert(Size % DestElemSize == 0);
  if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size).toBits()))
    return false;

  // Both memcpy and memmove return the destination pointer.
  S.Stk.push<Pointer>(DestPtr);
  return true;
}
1909
1910/// Determine if T is a character type for which we guarantee that
1911/// sizeof(T) == 1.
1913 return T->isCharType() || T->isChar8Type();
1914}
1915
                                  const InterpFrame *Frame,
                                  const CallExpr *Call, unsigned ID) {
  // Implements memcmp/bcmp/wmemcmp and their __builtin_ forms by serializing
  // both operands into byte buffers and comparing them lexicographically.
  assert(Call->getNumArgs() == 3);
  uint64_t Size = popToUInt64(S, Call->getArg(2));
  const Pointer &PtrB = S.Stk.pop<Pointer>();
  const Pointer &PtrA = S.Stk.pop<Pointer>();

  // The library functions (as opposed to the __builtin_ forms) are not
  // usable in constant expressions; diagnose but continue evaluating.
  if (ID == Builtin::BImemcmp || ID == Builtin::BIbcmp ||
      ID == Builtin::BIwmemcmp)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  // A zero-length comparison is always equal.
  if (Size == 0) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  if (!PtrA.isBlockPointer() || !PtrB.isBlockPointer())
    return false;

  bool IsWide =
      (ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);

  const ASTContext &ASTCtx = S.getASTContext();
  QualType ElemTypeA = getElemType(PtrA);
  QualType ElemTypeB = getElemType(PtrB);
  // FIXME: This is an arbitrary limitation the current constant interpreter
  // had. We could remove this.
  if (!IsWide && (!isOneByteCharacterType(ElemTypeA) ||
                  !isOneByteCharacterType(ElemTypeB))) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcmp_unsupported)
        << ASTCtx.BuiltinInfo.getQuotedName(ID) << PtrA.getType()
        << PtrB.getType();
    return false;
  }

  if (PtrA.isDummy() || PtrB.isDummy())
    return false;

  if (!CheckRange(S, OpPC, PtrA, AK_Read) ||
      !CheckRange(S, OpPC, PtrB, AK_Read))
    return false;

  // Now, read both pointers to a buffer and compare those.
  BitcastBuffer BufferA(
      Bits(ASTCtx.getTypeSize(ElemTypeA) * PtrA.getNumElems()));
  readPointerToBuffer(S.getContext(), PtrA, BufferA, false);
  // FIXME: The swapping here is UNDOING something we do when reading the
  // data into the buffer.
  if (ASTCtx.getTargetInfo().isBigEndian())
    swapBytes(BufferA.Data.get(), BufferA.byteSize().getQuantity());

  BitcastBuffer BufferB(
      Bits(ASTCtx.getTypeSize(ElemTypeB) * PtrB.getNumElems()));
  readPointerToBuffer(S.getContext(), PtrB, BufferB, false);
  // FIXME: The swapping here is UNDOING something we do when reading the
  // data into the buffer.
  if (ASTCtx.getTargetInfo().isBigEndian())
    swapBytes(BufferB.Data.get(), BufferB.byteSize().getQuantity());

  size_t MinBufferSize = std::min(BufferA.byteSize().getQuantity(),
                                  BufferB.byteSize().getQuantity());

  unsigned ElemSize = 1;
  if (IsWide)
    ElemSize = ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
  // The Size given for the wide variants is in wide-char units. Convert it
  // to bytes.
  size_t ByteSize = Size * ElemSize;
  size_t CmpSize = std::min(MinBufferSize, ByteSize);

  for (size_t I = 0; I != CmpSize; I += ElemSize) {
    if (IsWide) {
        // Wide compare: whole wchar_t units at a time.
        T A = *reinterpret_cast<T *>(BufferA.atByte(I));
        T B = *reinterpret_cast<T *>(BufferB.atByte(I));
        if (A < B) {
          pushInteger(S, -1, Call->getType());
          return true;
        }
        if (A > B) {
          pushInteger(S, 1, Call->getType());
          return true;
        }
      });
    } else {
      // Narrow compare: byte at a time.
      std::byte A = BufferA.deref<std::byte>(Bytes(I));
      std::byte B = BufferB.deref<std::byte>(Bytes(I));

      if (A < B) {
        pushInteger(S, -1, Call->getType());
        return true;
      }
      if (A > B) {
        pushInteger(S, 1, Call->getType());
        return true;
      }
    }
  }

  // We compared CmpSize bytes above. If the limiting factor was the Size
  // passed, we're done and the result is equality (0).
  if (ByteSize <= CmpSize) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  // However, if we read all the available bytes but were instructed to read
  // even more, diagnose this as a "read of dereferenced one-past-the-end
  // pointer". This is what would happen if we called CheckLoad() on every array
  // element.
  S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_past_end)
      << AK_Read << S.Current->getRange(OpPC);
  return false;
}
2032
// __builtin_memchr(ptr, int, int)
// __builtin_strchr(ptr, int)
                                    const CallExpr *Call, unsigned ID) {
  // The plain library forms (memchr/strchr/wcschr/wmemchr) are only
  // constant-evaluable as a GNU extension; diagnose their use.
  if (ID == Builtin::BImemchr || ID == Builtin::BIwcschr ||
      ID == Builtin::BIstrchr || ID == Builtin::BIwmemchr)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  // Only the mem* variants carry a third "max length" argument.
  std::optional<APSInt> MaxLength;
  if (Call->getNumArgs() == 3)
    MaxLength = popToAPSInt(S, Call->getArg(2));

  APSInt Desired = popToAPSInt(S, Call->getArg(1));
  const Pointer &Ptr = S.Stk.pop<Pointer>();

  // Searching zero elements always yields a null pointer; no read occurs.
  if (MaxLength && MaxLength->isZero()) {
    S.Stk.push<Pointer>();
    return true;
  }

  if (Ptr.isDummy()) {
    // We can't read through a dummy pointer; for incomplete pointee types,
    // emit the more specific diagnostic.
    if (Ptr.getType()->isIncompleteType())
      S.FFDiag(S.Current->getSource(OpPC),
               diag::note_constexpr_ltor_incomplete_type)
          << Ptr.getType();
    return false;
  }

  // Null is only okay if the given size is 0.
  if (Ptr.isZero()) {
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_null)
        << AK_Read;
    return false;
  }

  QualType ElemTy = Ptr.getFieldDesc()->isArray()
                        ? Ptr.getFieldDesc()->getElemQualType()
                        : Ptr.getFieldDesc()->getType();
  bool IsRawByte = ID == Builtin::BImemchr || ID == Builtin::BI__builtin_memchr;

  // Give up on byte-oriented matching against multibyte elements.
  if (IsRawByte && !isOneByteCharacterType(ElemTy)) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memchr_unsupported)
        << S.getASTContext().BuiltinInfo.getQuotedName(ID) << ElemTy;
    return false;
  }

  if (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr) {
    // Truncate the needle to char width, honoring char's signedness on
    // this target.
    int64_t DesiredTrunc;
    if (S.getASTContext().CharTy->isSignedIntegerType())
      DesiredTrunc =
          Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue();
    else
      DesiredTrunc =
          Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
    // strchr compares directly to the passed integer, and therefore
    // always fails if given an int that is not a char.
    if (Desired != DesiredTrunc) {
      S.Stk.push<Pointer>();
      return true;
    }
  }

  uint64_t DesiredVal;
  if (ID == Builtin::BIwmemchr || ID == Builtin::BI__builtin_wmemchr ||
      ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr) {
    // wcschr and wmemchr are given a wchar_t to look for. Just use it.
    DesiredVal = Desired.getZExtValue();
  } else {
    DesiredVal = Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
  }

  // The str/wcs variants stop at the terminating null; the mem variants
  // scan exactly MaxLength elements.
  bool StopAtZero =
      (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr ||
       ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr);

  PrimType ElemT =
      IsRawByte ? PT_Sint8 : *S.getContext().classify(getElemType(Ptr));

  size_t Index = Ptr.getIndex();
  size_t Step = 0;
  for (;;) {
    const Pointer &ElemPtr =
        (Index + Step) > 0 ? Ptr.atIndex(Index + Step) : Ptr;

    // CheckLoad diagnoses out-of-bounds / invalid reads for us.
    if (!CheckLoad(S, OpPC, ElemPtr))
      return false;

    uint64_t V;
        ElemT, { V = static_cast<uint64_t>(ElemPtr.deref<T>().toUnsigned()); });

    if (V == DesiredVal) {
      S.Stk.push<Pointer>(ElemPtr);
      return true;
    }

    if (StopAtZero && V == 0)
      break;

    ++Step;
    if (MaxLength && Step == MaxLength->getZExtValue())
      break;
  }

  // Not found: result is a null pointer.
  S.Stk.push<Pointer>();
  return true;
}
2142
/// Compute the total byte size of the object described by \p Desc, or
/// std::nullopt if the size cannot be determined from the descriptor.
static std::optional<unsigned> computeFullDescSize(const ASTContext &ASTCtx,
                                                   const Descriptor *Desc) {
  if (Desc->isPrimitive())
    return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();
  if (Desc->isArray())
    return ASTCtx.getTypeSizeInChars(Desc->getElemQualType()).getQuantity() *
           Desc->getNumElems();
  if (Desc->isRecord()) {
    // Can't use Descriptor::getType() as that may return a pointer type. Look
    // at the decl directly.
    return ASTCtx
            ASTCtx.getCanonicalTagType(Desc->ElemRecord->getDecl()))
        .getQuantity();
  }

  return std::nullopt;
}
2161
/// Compute the byte offset of \p Ptr in the full declaration.
static unsigned computePointerOffset(const ASTContext &ASTCtx,
                                     const Pointer &Ptr) {
  unsigned Result = 0;

  // Walk from the pointee up to the root declaration, accumulating the
  // byte offset contributed by each array/base/field step along the way.
  Pointer P = Ptr;
  while (P.isField() || P.isArrayElement()) {
    P = P.expand();
    const Descriptor *D = P.getFieldDesc();

    if (P.isArrayElement()) {
      unsigned ElemSize =
      if (P.isOnePastEnd())
        Result += ElemSize * P.getNumElems();
      else
        Result += ElemSize * P.getIndex();
      P = P.expand().getArray();
    } else if (P.isBaseClass()) {
      const auto *RD = cast<CXXRecordDecl>(D->asDecl());
      // NOTE(review): this queries the original Ptr, not the walk cursor P —
      // confirm this is intended when the base class is more than one level
      // away from the pointee.
      bool IsVirtual = Ptr.isVirtualBaseClass();
      P = P.getBase();
      const Record *BaseRecord = P.getRecord();

      const ASTRecordLayout &Layout =
          ASTCtx.getASTRecordLayout(cast<CXXRecordDecl>(BaseRecord->getDecl()));
      if (IsVirtual)
        Result += Layout.getVBaseClassOffset(RD).getQuantity();
      else
        Result += Layout.getBaseClassOffset(RD).getQuantity();
    } else if (P.isField()) {
      // Field offsets come from the record layout in bits; convert to chars.
      const FieldDecl *FD = P.getField();
      const ASTRecordLayout &Layout =
          ASTCtx.getASTRecordLayout(FD->getParent());
      unsigned FieldIndex = FD->getFieldIndex();
      uint64_t FieldOffset =
          ASTCtx.toCharUnitsFromBits(Layout.getFieldOffset(FieldIndex))
              .getQuantity();
      Result += FieldOffset;
      P = P.getBase();
    } else
      llvm_unreachable("Unhandled descriptor type");
  }

  return Result;
}
2208
2209/// Does Ptr point to the last subobject?
2210static bool pointsToLastObject(const Pointer &Ptr) {
2211 Pointer P = Ptr;
2212 while (!P.isRoot()) {
2213
2214 if (P.isArrayElement()) {
2215 P = P.expand().getArray();
2216 continue;
2217 }
2218 if (P.isBaseClass()) {
2219 if (P.getRecord()->getNumFields() > 0)
2220 return false;
2221 P = P.getBase();
2222 continue;
2223 }
2224
2225 Pointer Base = P.getBase();
2226 if (const Record *R = Base.getRecord()) {
2227 assert(P.getField());
2228 if (P.getField()->getFieldIndex() != R->getNumFields() - 1)
2229 return false;
2230 }
2231 P = Base;
2232 }
2233
2234 return true;
2235}
2236
/// Does Ptr point to the last object AND to a flexible array member?
static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const Pointer &Ptr) {
  // Does FieldDesc qualify as a flexible array member under the current
  // -fstrict-flex-arrays level?
  auto isFlexibleArrayMember = [&](const Descriptor *FieldDesc) {
    FAMKind StrictFlexArraysLevel =
        Ctx.getLangOpts().getStrictFlexArraysLevel();

    // Default: any trailing array may be treated as a FAM.
    if (StrictFlexArraysLevel == FAMKind::Default)
      return true;

    // Zero-sized arrays qualify unless only incomplete arrays are allowed.
    unsigned NumElems = FieldDesc->getNumElems();
    if (NumElems == 0 && StrictFlexArraysLevel != FAMKind::IncompleteOnly)
      return true;

    // One-element arrays qualify only at the most permissive strict level.
    if (NumElems == 1 && StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete)
      return true;
    return false;
  };

  const Descriptor *FieldDesc = Ptr.getFieldDesc();
  if (!FieldDesc->isArray())
    return false;

  // Only dummy pointers (whose allocation size is unknown) to a trailing
  // flexible array member count as "writing off the end".
  return Ptr.isDummy() && pointsToLastObject(Ptr) &&
         isFlexibleArrayMember(FieldDesc);
}
2263
                                        const InterpFrame *Frame,
                                        const CallExpr *Call) {
  const ASTContext &ASTCtx = S.getASTContext();
  // From the GCC docs:
  // Kind is an integer constant from 0 to 3. If the least significant bit is
  // clear, objects are whole variables. If it is set, a closest surrounding
  // subobject is considered the object a pointer points to. The second bit
  // determines if maximum or minimum of remaining bytes is computed.
  unsigned Kind = popToUInt64(S, Call->getArg(1));
  assert(Kind <= 3 && "unexpected kind");
  bool UseFieldDesc = (Kind & 1u);
  bool ReportMinimum = (Kind & 2u);
  const Pointer &Ptr = S.Stk.pop<Pointer>();

  if (Call->getArg(0)->HasSideEffects(ASTCtx)) {
    // "If there are any side effects in them, it returns (size_t) -1
    // for type 0 or 1 and (size_t) 0 for type 2 or 3."
    pushInteger(S, Kind <= 1 ? -1 : 0, Call->getType());
    return true;
  }

  // Only non-null block pointers have a determinable allocation size.
  if (Ptr.isZero() || !Ptr.isBlockPointer())
    return false;

  // We can't load through pointers.
  if (Ptr.isDummy() && Ptr.getType()->isPointerType())
    return false;

  bool DetermineForCompleteObject = Ptr.getFieldDesc() == Ptr.getDeclDesc();
  const Descriptor *DeclDesc = Ptr.getDeclDesc();
  assert(DeclDesc);

  if (!UseFieldDesc || DetermineForCompleteObject) {
    // Lower bound, so we can't fall back to this.
    if (ReportMinimum && !DetermineForCompleteObject)
      return false;

    // Can't read beyond the pointer decl desc.
    if (!UseFieldDesc && !ReportMinimum && DeclDesc->getType()->isPointerType())
      return false;
  } else {
    if (isUserWritingOffTheEnd(ASTCtx, Ptr.expand())) {
      // If we cannot determine the size of the initial allocation, then we
      // can't give an accurate upper-bound. However, we are still able to give
      // conservative lower-bounds for Type=3.
      if (Kind == 1)
        return false;
    }
  }

  const Descriptor *Desc = UseFieldDesc ? Ptr.getFieldDesc() : DeclDesc;
  assert(Desc);

  std::optional<unsigned> FullSize = computeFullDescSize(ASTCtx, Desc);
  if (!FullSize)
    return false;

  // Byte offset of the pointee within the relevant (sub)object; the builtin
  // returns the number of bytes remaining past that offset.
  unsigned ByteOffset;
  if (UseFieldDesc) {
    if (Ptr.isBaseClass())
      ByteOffset = computePointerOffset(ASTCtx, Ptr.getBase()) -
                   computePointerOffset(ASTCtx, Ptr);
    else {
      if (Ptr.inArray())
        // Offset relative to the start of the innermost array.
        ByteOffset =
            computePointerOffset(ASTCtx, Ptr) -
            computePointerOffset(ASTCtx, Ptr.expand().atIndex(0).narrow());
      else
        ByteOffset = 0;
    }
  } else
    ByteOffset = computePointerOffset(ASTCtx, Ptr);

  assert(ByteOffset <= *FullSize);
  unsigned Result = *FullSize - ByteOffset;

  pushInteger(S, Result, Call->getType());
  return true;
}
2344
                                                const CallExpr *Call) {

  // The builtin is only callable during constant evaluation.
  if (!S.inConstantContext())
    return false;

  const Pointer &Ptr = S.Stk.pop<Pointer>();

  // Emit err_invalid_is_within_lifetime, naming the std wrapper when the
  // call came through std::is_within_lifetime.
  auto Error = [&](int Diag) {
    bool CalledFromStd = false;
    const auto *Callee = S.Current->getCallee();
    if (Callee && Callee->isInStdNamespace()) {
      const IdentifierInfo *Identifier = Callee->getIdentifier();
      CalledFromStd = Identifier && Identifier->isStr("is_within_lifetime");
    }
    S.CCEDiag(CalledFromStd
                  : S.Current->getSource(OpPC),
              diag::err_invalid_is_within_lifetime)
        << (CalledFromStd ? "std::is_within_lifetime"
                          : "__builtin_is_within_lifetime")
        << Diag;
    return false;
  };

  // Null pointers and one-past-the-end pointers are always invalid operands.
  if (Ptr.isZero())
    return Error(0);
  if (Ptr.isOnePastEnd())
    return Error(1);

  bool Result = Ptr.getLifetime() != Lifetime::Ended;
  if (!Ptr.isActive()) {
    // Inactive union members are outside their lifetime.
    Result = false;
  } else {
    if (!CheckLive(S, OpPC, Ptr, AK_Read))
      return false;
    if (!CheckMutable(S, OpPC, Ptr))
      return false;
    if (!CheckDummy(S, OpPC, Ptr.block(), AK_Read))
      return false;
  }

  // Check if we're currently running an initializer.
  if (llvm::is_contained(S.InitializingBlocks, Ptr.block()))
    return Error(2);
  if (S.EvaluatingDecl && Ptr.getDeclDesc()->asVarDecl() == S.EvaluatingDecl)
    return Error(2);

  pushInteger(S, Result, Call->getType());
  return true;
}
2396
    InterpState &S, CodePtr OpPC, const CallExpr *Call,
    llvm::function_ref<APInt(const APSInt &)> Fn) {
  assert(Call->getNumArgs() == 1);

  // Single integer case.
  if (!Call->getArg(0)->getType()->isVectorType()) {
    assert(Call->getType()->isIntegerType());
    APSInt Src = popToAPSInt(S, Call->getArg(0));
    APInt Result = Fn(Src);
    // Keep the source's signedness on the result pushed for the caller.
    pushInteger(S, APSInt(std::move(Result), !Src.isSigned()), Call->getType());
    return true;
  }

  // Vector case.
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());
  const Pointer &Dst = S.Stk.peek<Pointer>();
  assert(Dst.getFieldDesc()->isPrimitiveArray());
  assert(Arg.getFieldDesc()->getNumElems() ==
         Dst.getFieldDesc()->getNumElems());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();
  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();

  // Apply Fn lane-by-lane into the destination vector.
  for (unsigned I = 0; I != NumElems; ++I) {
      APSInt Src = Arg.elem<T>(I).toAPSInt();
      APInt Result = Fn(Src);
      Dst.elem<T>(I) = static_cast<T>(APSInt(std::move(Result), DestUnsigned));
    });
  }

  return true;
}
2435
    InterpState &S, CodePtr OpPC, const CallExpr *Call,
    llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
  assert(Call->getNumArgs() == 2);

  // Single integer case.
  if (!Call->getArg(0)->getType()->isVectorType()) {
    assert(!Call->getArg(1)->getType()->isVectorType());
    // Pop order is reversed relative to argument order.
    APSInt RHS = popToAPSInt(S, Call->getArg(1));
    APSInt LHS = popToAPSInt(S, Call->getArg(0));
    APInt Result = Fn(LHS, RHS);
    pushInteger(S, APSInt(std::move(Result), !LHS.isSigned()), Call->getType());
    return true;
  }

  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
  assert(VT->getElementType()->isIntegralOrEnumerationType());
  PrimType ElemT = *S.getContext().classify(VT->getElementType());
  unsigned NumElems = VT->getNumElements();
  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();

  // Vector + Scalar case.
  if (!Call->getArg(1)->getType()->isVectorType()) {
    assert(Call->getArg(1)->getType()->isIntegralOrEnumerationType());

    // The same scalar RHS is combined with every LHS lane.
    APSInt RHS = popToAPSInt(S, Call->getArg(1));
    const Pointer &LHS = S.Stk.pop<Pointer>();
    const Pointer &Dst = S.Stk.peek<Pointer>();

    for (unsigned I = 0; I != NumElems; ++I) {
        Dst.elem<T>(I) = static_cast<T>(
            APSInt(Fn(LHS.elem<T>(I).toAPSInt(), RHS), DestUnsigned));
      });
    }
    return true;
  }

  // Vector case.
  assert(Call->getArg(0)->getType()->isVectorType() &&
         Call->getArg(1)->getType()->isVectorType());
  assert(VT->getElementType() ==
         Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
  assert(VT->getNumElements() ==
         Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
  assert(VT->getElementType()->isIntegralOrEnumerationType());

  // Element-wise combine of the two input vectors into Dst.
  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();
  for (unsigned I = 0; I != NumElems; ++I) {
      APSInt Elem1 = LHS.elem<T>(I).toAPSInt();
      APSInt Elem2 = RHS.elem<T>(I).toAPSInt();
      Dst.elem<T>(I) = static_cast<T>(APSInt(Fn(Elem1, Elem2), DestUnsigned));
    });
  }

  return true;
}
2498
static bool
    llvm::function_ref<APInt(const APSInt &)> PackFn) {
  const auto *VT0 = E->getArg(0)->getType()->castAs<VectorType>();
  [[maybe_unused]] const auto *VT1 =
      E->getArg(1)->getType()->castAs<VectorType>();
  assert(VT0 && VT1 && "pack builtin VT0 and VT1 must be VectorType");
  assert(VT0->getElementType() == VT1->getElementType() &&
         VT0->getNumElements() == VT1->getNumElements() &&
         "pack builtin VT0 and VT1 ElementType must be same");

  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();

  // x86 pack instructions operate per 128-bit lane: each lane of the result
  // holds PackFn applied to all of the LHS lane's elements, followed by all
  // of the RHS lane's elements.
  const ASTContext &ASTCtx = S.getASTContext();
  unsigned SrcBits = ASTCtx.getIntWidth(VT0->getElementType());
  unsigned LHSVecLen = VT0->getNumElements();
  unsigned SrcPerLane = 128 / SrcBits;
  unsigned Lanes = LHSVecLen * SrcBits / 128;

  PrimType SrcT = *S.getContext().classify(VT0->getElementType());
  PrimType DstT = *S.getContext().classify(getElemType(Dst));
  bool IsUnsigend = getElemType(Dst)->isUnsignedIntegerType();

  for (unsigned Lane = 0; Lane != Lanes; ++Lane) {
    unsigned BaseSrc = Lane * SrcPerLane;
    unsigned BaseDst = Lane * (2 * SrcPerLane);

    for (unsigned I = 0; I != SrcPerLane; ++I) {
        APSInt A = LHS.elem<T>(BaseSrc + I).toAPSInt();
        APSInt B = RHS.elem<T>(BaseSrc + I).toAPSInt();

        // LHS results fill the lane's first half, RHS the second half.
        assignInteger(S, Dst.atIndex(BaseDst + I), DstT,
                      APSInt(PackFn(A), IsUnsigend));
        assignInteger(S, Dst.atIndex(BaseDst + SrcPerLane + I), DstT,
                      APSInt(PackFn(B), IsUnsigend));
      });
    }
  }

  Dst.initializeAllElements();
  return true;
}
2544
                                     const CallExpr *Call,
                                     unsigned BuiltinID) {
  assert(Call->getNumArgs() == 2);

  QualType Arg0Type = Call->getArg(0)->getType();

  // TODO: Support floating-point types.
  if (!(Arg0Type->isIntegerType() ||
        (Arg0Type->isVectorType() &&
         Arg0Type->castAs<VectorType>()->getElementType()->isIntegerType())))
    return false;

  // Scalar case: pop both operands and push max/min directly.
  if (!Arg0Type->isVectorType()) {
    assert(!Call->getArg(1)->getType()->isVectorType());
    APSInt RHS = popToAPSInt(S, Call->getArg(1));
    APSInt LHS = popToAPSInt(S, Arg0Type);
    APInt Result;
    if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
      Result = std::max(LHS, RHS);
    } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
      Result = std::min(LHS, RHS);
    } else {
      llvm_unreachable("Wrong builtin ID");
    }

    pushInteger(S, APSInt(Result, !LHS.isSigned()), Call->getType());
    return true;
  }

  // Vector case.
  assert(Call->getArg(0)->getType()->isVectorType() &&
         Call->getArg(1)->getType()->isVectorType());
  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
  assert(VT->getElementType() ==
         Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
  assert(VT->getNumElements() ==
         Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
  assert(VT->getElementType()->isIntegralOrEnumerationType());

  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();
  PrimType ElemT = *S.getContext().classify(VT->getElementType());
  unsigned NumElems = VT->getNumElements();
  // Compute max/min lane-wise into Dst.
  for (unsigned I = 0; I != NumElems; ++I) {
    APSInt Elem1;
    APSInt Elem2;
      Elem1 = LHS.elem<T>(I).toAPSInt();
      Elem2 = RHS.elem<T>(I).toAPSInt();
    });

    APSInt Result;
    if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
      Result = APSInt(std::max(Elem1, Elem2),
                      Call->getType()->isUnsignedIntegerOrEnumerationType());
    } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
      Result = APSInt(std::min(Elem1, Elem2),
                      Call->getType()->isUnsignedIntegerOrEnumerationType());
    } else {
      llvm_unreachable("Wrong builtin ID");
    }

        { Dst.elem<T>(I) = static_cast<T>(Result); });
  }
  Dst.initializeAllElements();

  return true;
}
2616
    InterpState &S, CodePtr OpPC, const CallExpr *Call,
    llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &,
                             const APSInt &)>
        Fn) {
  assert(Call->getArg(0)->getType()->isVectorType() &&
         Call->getArg(1)->getType()->isVectorType());
  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();

  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
  PrimType ElemT = *S.getContext().classify(VT->getElementType());
  unsigned NumElems = VT->getNumElements();
  const auto *DestVT = Call->getType()->castAs<VectorType>();
  PrimType DestElemT = *S.getContext().classify(DestVT->getElementType());
  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();

  // Each destination element is produced from an adjacent source pair of
  // both operands: Fn(LHS[i], LHS[i+1], RHS[i], RHS[i+1]).
  unsigned DstElem = 0;
  for (unsigned I = 0; I != NumElems; I += 2) {
    APSInt Result;
      APSInt LoLHS = LHS.elem<T>(I).toAPSInt();
      APSInt HiLHS = LHS.elem<T>(I + 1).toAPSInt();
      APSInt LoRHS = RHS.elem<T>(I).toAPSInt();
      APSInt HiRHS = RHS.elem<T>(I + 1).toAPSInt();
      Result = APSInt(Fn(LoLHS, HiLHS, LoRHS, HiRHS), DestUnsigned);
    });

    INT_TYPE_SWITCH_NO_BOOL(DestElemT,
                            { Dst.elem<T>(DstElem) = static_cast<T>(Result); });
    ++DstElem;
  }

  Dst.initializeAllElements();
  return true;
}
2654
    InterpState &S, CodePtr OpPC, const CallExpr *Call,
    llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
  PrimType ElemT = *S.getContext().classify(VT->getElementType());
  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();

  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();
  unsigned NumElts = VT->getNumElements();
  unsigned EltBits = S.getASTContext().getIntWidth(VT->getElementType());
  unsigned EltsPerLane = 128 / EltBits;
  unsigned Lanes = NumElts * EltBits / 128;
  unsigned DestIndex = 0;

  // x86 horizontal ops work per 128-bit lane: first combine adjacent pairs
  // from LHS, then adjacent pairs from RHS, concatenated into the lane.
  for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
    unsigned LaneStart = Lane * EltsPerLane;
    for (unsigned I = 0; I < EltsPerLane; I += 2) {
        APSInt Elem1 = LHS.elem<T>(LaneStart + I).toAPSInt();
        APSInt Elem2 = LHS.elem<T>(LaneStart + I + 1).toAPSInt();
        APSInt ResL = APSInt(Fn(Elem1, Elem2), DestUnsigned);
        Dst.elem<T>(DestIndex++) = static_cast<T>(ResL);
      });
    }

    for (unsigned I = 0; I < EltsPerLane; I += 2) {
        APSInt Elem1 = RHS.elem<T>(LaneStart + I).toAPSInt();
        APSInt Elem2 = RHS.elem<T>(LaneStart + I + 1).toAPSInt();
        APSInt ResR = APSInt(Fn(Elem1, Elem2), DestUnsigned);
        Dst.elem<T>(DestIndex++) = static_cast<T>(ResR);
      });
    }
  }
  Dst.initializeAllElements();
  return true;
}
2694
    InterpState &S, CodePtr OpPC, const CallExpr *Call,
    llvm::function_ref<APFloat(const APFloat &, const APFloat &,
                               llvm::RoundingMode)>
        Fn) {
  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();
  // Use the rounding mode from the call site's FP options.
  FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
  llvm::RoundingMode RM = getRoundingMode(FPO);
  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();

  unsigned NumElts = VT->getNumElements();
  unsigned EltBits = S.getASTContext().getTypeSize(VT->getElementType());
  unsigned NumLanes = NumElts * EltBits / 128;
  unsigned NumElemsPerLane = NumElts / NumLanes;
  unsigned HalfElemsPerLane = NumElemsPerLane / 2;

  // Per 128-bit lane: the first half of the result combines adjacent LHS
  // pairs, the second half combines adjacent RHS pairs.
  for (unsigned L = 0; L != NumElts; L += NumElemsPerLane) {
    using T = PrimConv<PT_Float>::T;
    for (unsigned E = 0; E != HalfElemsPerLane; ++E) {
      APFloat Elem1 = LHS.elem<T>(L + (2 * E) + 0).getAPFloat();
      APFloat Elem2 = LHS.elem<T>(L + (2 * E) + 1).getAPFloat();
      Dst.elem<T>(L + E) = static_cast<T>(Fn(Elem1, Elem2, RM));
    }
    for (unsigned E = 0; E != HalfElemsPerLane; ++E) {
      APFloat Elem1 = RHS.elem<T>(L + (2 * E) + 0).getAPFloat();
      APFloat Elem2 = RHS.elem<T>(L + (2 * E) + 1).getAPFloat();
      Dst.elem<T>(L + E + HalfElemsPerLane) =
          static_cast<T>(Fn(Elem1, Elem2, RM));
    }
  }
  Dst.initializeAllElements();
  return true;
}
2730
                                      const CallExpr *Call) {
  // Addsub: alternates between subtraction and addition
  // Result[i] = (i % 2 == 0) ? (a[i] - b[i]) : (a[i] + b[i])
  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();
  // Honor the call site's FP rounding mode.
  FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
  llvm::RoundingMode RM = getRoundingMode(FPO);
  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
  unsigned NumElems = VT->getNumElements();

  using T = PrimConv<PT_Float>::T;
  for (unsigned I = 0; I != NumElems; ++I) {
    APFloat LElem = LHS.elem<T>(I).getAPFloat();
    APFloat RElem = RHS.elem<T>(I).getAPFloat();
    if (I % 2 == 0) {
      // Even indices: subtract
      LElem.subtract(RElem, RM);
    } else {
      // Odd indices: add
      LElem.add(RElem, RM);
    }
    Dst.elem<T>(I) = static_cast<T>(LElem);
  }
  Dst.initializeAllElements();
  return true;
}
2759
                                       const CallExpr *Call) {
  // PCLMULQDQ: carry-less multiplication of selected 64-bit halves
  // imm8 bit 0: selects lower (0) or upper (1) 64 bits of first operand
  // imm8 bit 4: selects lower (0) or upper (1) 64 bits of second operand
  assert(Call->getArg(0)->getType()->isVectorType() &&
         Call->getArg(1)->getType()->isVectorType());

  // Extract imm8 argument
  APSInt Imm8 = popToAPSInt(S, Call->getArg(2));
  bool SelectUpperA = (Imm8 & 0x01) != 0;
  bool SelectUpperB = (Imm8 & 0x10) != 0;

  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();

  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
  PrimType ElemT = *S.getContext().classify(VT->getElementType());
  unsigned NumElems = VT->getNumElements();
  const auto *DestVT = Call->getType()->castAs<VectorType>();
  PrimType DestElemT = *S.getContext().classify(DestVT->getElementType());
  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();

  // Process each 128-bit lane (2 elements at a time)
  for (unsigned Lane = 0; Lane < NumElems; Lane += 2) {
    APSInt A0, A1, B0, B1;
      A0 = LHS.elem<T>(Lane + 0).toAPSInt();
      A1 = LHS.elem<T>(Lane + 1).toAPSInt();
      B0 = RHS.elem<T>(Lane + 0).toAPSInt();
      B1 = RHS.elem<T>(Lane + 1).toAPSInt();
    });

    // Select the appropriate 64-bit values based on imm8
    APInt A = SelectUpperA ? A1 : A0;
    APInt B = SelectUpperB ? B1 : B0;

    // Extend both operands to 128 bits for carry-less multiplication
    APInt A128 = A.zext(128);
    APInt B128 = B.zext(128);

    // Use APIntOps::clmul for carry-less multiplication
    APInt Result = llvm::APIntOps::clmul(A128, B128);

    // Split the 128-bit result into two 64-bit halves
    APSInt ResultLow(Result.extractBits(64, 0), DestUnsigned);
    APSInt ResultHigh(Result.extractBits(64, 64), DestUnsigned);

    INT_TYPE_SWITCH_NO_BOOL(DestElemT, {
      Dst.elem<T>(Lane + 0) = static_cast<T>(ResultLow);
      Dst.elem<T>(Lane + 1) = static_cast<T>(ResultHigh);
    });
  }

  Dst.initializeAllElements();
  return true;
}
2818
    InterpState &S, CodePtr OpPC, const CallExpr *Call,
    llvm::function_ref<APFloat(const APFloat &, const APFloat &,
                               const APFloat &, llvm::RoundingMode)>
        Fn) {
  assert(Call->getNumArgs() == 3);

  // Evaluate under the call site's FP rounding mode.
  FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
  llvm::RoundingMode RM = getRoundingMode(FPO);
  QualType Arg1Type = Call->getArg(0)->getType();
  QualType Arg2Type = Call->getArg(1)->getType();
  QualType Arg3Type = Call->getArg(2)->getType();

  // Non-vector floating point types.
  if (!Arg1Type->isVectorType()) {
    assert(!Arg2Type->isVectorType());
    assert(!Arg3Type->isVectorType());
    (void)Arg2Type;
    (void)Arg3Type;

    // Pop in reverse argument order, apply Fn, push the scalar result.
    const Floating &Z = S.Stk.pop<Floating>();
    const Floating &Y = S.Stk.pop<Floating>();
    const Floating &X = S.Stk.pop<Floating>();
    APFloat F = Fn(X.getAPFloat(), Y.getAPFloat(), Z.getAPFloat(), RM);
    Floating Result = S.allocFloat(X.getSemantics());
    Result.copy(F);
    S.Stk.push<Floating>(Result);
    return true;
  }

  // Vector type.
  assert(Arg1Type->isVectorType() && Arg2Type->isVectorType() &&
         Arg3Type->isVectorType());

  const VectorType *VecTy = Arg1Type->castAs<VectorType>();
  QualType ElemQT = VecTy->getElementType();
  unsigned NumElems = VecTy->getNumElements();

  assert(ElemQT == Arg2Type->castAs<VectorType>()->getElementType() &&
         ElemQT == Arg3Type->castAs<VectorType>()->getElementType());
  assert(NumElems == Arg2Type->castAs<VectorType>()->getNumElements() &&
         NumElems == Arg3Type->castAs<VectorType>()->getNumElements());
  assert(ElemQT->isRealFloatingType());
  (void)ElemQT;

  // Apply Fn lane-wise across the three source vectors.
  const Pointer &VZ = S.Stk.pop<Pointer>();
  const Pointer &VY = S.Stk.pop<Pointer>();
  const Pointer &VX = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();
  for (unsigned I = 0; I != NumElems; ++I) {
    using T = PrimConv<PT_Float>::T;
    APFloat X = VX.elem<T>(I).getAPFloat();
    APFloat Y = VY.elem<T>(I).getAPFloat();
    APFloat Z = VZ.elem<T>(I).getAPFloat();
    APFloat F = Fn(X, Y, Z, RM);
    Dst.elem<Floating>(I) = Floating(F);
  }
  return true;
}
2879
/// AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
                                    const CallExpr *Call) {
  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();
  APSInt Mask = popToAPSInt(S, Call->getArg(0));
  const Pointer &Dst = S.Stk.peek<Pointer>();

  assert(LHS.getNumElems() == RHS.getNumElems());
  assert(LHS.getNumElems() == Dst.getNumElems());
  unsigned NumElems = LHS.getNumElems();
  PrimType ElemT = LHS.getFieldDesc()->getPrimType();
  PrimType DstElemT = Dst.getFieldDesc()->getPrimType();

  // Mask bit I selects LHS[I] (set) or RHS[I] (clear).
  for (unsigned I = 0; I != NumElems; ++I) {
    if (ElemT == PT_Float) {
      assert(DstElemT == PT_Float);
      Dst.elem<Floating>(I) =
          Mask[I] ? LHS.elem<Floating>(I) : RHS.elem<Floating>(I);
    } else {
      // Integer lanes may need a width conversion from source to dest type.
      APSInt Elem;
      INT_TYPE_SWITCH(ElemT, {
        Elem = Mask[I] ? LHS.elem<T>(I).toAPSInt() : RHS.elem<T>(I).toAPSInt();
      });
      INT_TYPE_SWITCH_NO_BOOL(DstElemT,
                              { Dst.elem<T>(I) = static_cast<T>(Elem); });
    }
  }

  return true;
}
2912
/// Scalar variant of AVX512 predicated select:
/// Result[i] = (Mask bit 0) ? LHS[i] : RHS[i], but only element 0 may change.
/// All other elements are taken from RHS.
                                      const CallExpr *Call) {
  unsigned N =
      Call->getArg(1)->getType()->getAs<VectorType>()->getNumElements();

  const Pointer &W = S.Stk.pop<Pointer>();
  const Pointer &A = S.Stk.pop<Pointer>();
  APSInt U = popToAPSInt(S, Call->getArg(0));
  const Pointer &Dst = S.Stk.peek<Pointer>();

  // Only the low mask bit matters for the scalar form.
  bool TakeA0 = U.getZExtValue() & 1ULL;

  // Copy W through; starting at 1 when element 0 will come from A instead.
  for (unsigned I = TakeA0; I != N; ++I)
    Dst.elem<Floating>(I) = W.elem<Floating>(I);
  if (TakeA0)
    Dst.elem<Floating>(0) = A.elem<Floating>(0);

  return true;
}
2936
    InterpState &S, CodePtr OpPC, const CallExpr *Call,
    llvm::function_ref<bool(const APInt &A, const APInt &B)> Fn) {
  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();

  assert(LHS.getNumElems() == RHS.getNumElems());

  unsigned SourceLen = LHS.getNumElems();
  QualType ElemQT = getElemType(LHS);
  OptPrimType ElemPT = S.getContext().classify(ElemQT);
  unsigned LaneWidth = S.getASTContext().getTypeSize(ElemQT);

  // Concatenate all lanes of each operand into one wide APInt, then let Fn
  // decide the boolean result over the two wide values.
  APInt AWide(LaneWidth * SourceLen, 0);
  APInt BWide(LaneWidth * SourceLen, 0);

  for (unsigned I = 0; I != SourceLen; ++I) {
    APInt ALane;
    APInt BLane;

    if (ElemQT->isIntegerType()) { // Get value.
      INT_TYPE_SWITCH_NO_BOOL(*ElemPT, {
        ALane = LHS.elem<T>(I).toAPSInt();
        BLane = RHS.elem<T>(I).toAPSInt();
      });
    } else if (ElemQT->isFloatingType()) { // Get only sign bit.
      using T = PrimConv<PT_Float>::T;
      ALane = LHS.elem<T>(I).getAPFloat().bitcastToAPInt().isNegative();
      BLane = RHS.elem<T>(I).getAPFloat().bitcastToAPInt().isNegative();
    } else { // Must be integer or floating type.
      return false;
    }
    AWide.insertBits(ALane, I * LaneWidth);
    BWide.insertBits(BLane, I * LaneWidth);
  }
  pushInteger(S, Fn(AWide, BWide), Call->getType());
  return true;
}
2975
                                      const CallExpr *Call) {
  assert(Call->getNumArgs() == 1);

  const Pointer &Source = S.Stk.pop<Pointer>();

  unsigned SourceLen = Source.getNumElems();
  QualType ElemQT = getElemType(Source);
  OptPrimType ElemT = S.getContext().classify(ElemQT);
  unsigned ResultLen =
      S.getASTContext().getTypeSize(Call->getType()); // Always 32-bit integer.
  APInt Result(ResultLen, 0);

  // Bit I of the result is the sign bit of source lane I (movmsk-style).
  for (unsigned I = 0; I != SourceLen; ++I) {
    APInt Elem;
    if (ElemQT->isIntegerType()) {
      INT_TYPE_SWITCH_NO_BOOL(*ElemT, { Elem = Source.elem<T>(I).toAPSInt(); });
    } else if (ElemQT->isRealFloatingType()) {
      // For floats, take the raw bit pattern so NaN/negative-zero signs count.
      using T = PrimConv<PT_Float>::T;
      Elem = Source.elem<T>(I).getAPFloat().bitcastToAPInt();
    } else {
      return false;
    }
    Result.setBitVal(I, Elem.isNegative());
  }
  pushInteger(S, Result, Call->getType());
  return true;
}
3004
// NOTE(review): the declaration line (file line 3005) and several interior
// lines (3036, 3042, 3054, 3060, 3063 — by the embedded numbering, apparently
// type-switch macro openers and initializeAllElements calls) are missing from
// this rendering; visible lines kept byte-for-byte. Applies a three-operand
// integer callback either to scalars, to vector+vector+scalar, or to three
// vectors, writing element-wise results.
3006 InterpState &S, CodePtr OpPC, const CallExpr *Call,
3007 llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &)>
3008 Fn) {
3009 assert(Call->getNumArgs() == 3);
3010
3011 QualType Arg0Type = Call->getArg(0)->getType();
3012 QualType Arg2Type = Call->getArg(2)->getType();
3013 // Non-vector integer types.
3014 if (!Arg0Type->isVectorType()) {
3015 const APSInt &Op2 = popToAPSInt(S, Arg2Type);
3016 const APSInt &Op1 = popToAPSInt(S, Call->getArg(1));
3017 const APSInt &Op0 = popToAPSInt(S, Arg0Type);
3018 APSInt Result = APSInt(Fn(Op0, Op1, Op2), Op0.isUnsigned());
3019 pushInteger(S, Result, Call->getType());
3020 return true;
3021 }
3022
3023 const auto *VecT = Arg0Type->castAs<VectorType>();
3024 PrimType ElemT = *S.getContext().classify(VecT->getElementType());
3025 unsigned NumElems = VecT->getNumElements();
3026 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
3027
3028 // Vector + Vector + Scalar case.
3029 if (!Arg2Type->isVectorType()) {
3030 APSInt Op2 = popToAPSInt(S, Arg2Type);
3031
3032 const Pointer &Op1 = S.Stk.pop<Pointer>();
3033 const Pointer &Op0 = S.Stk.pop<Pointer>();
3034 const Pointer &Dst = S.Stk.peek<Pointer>();
3035 for (unsigned I = 0; I != NumElems; ++I) {
3037 Dst.elem<T>(I) = static_cast<T>(APSInt(
3038 Fn(Op0.elem<T>(I).toAPSInt(), Op1.elem<T>(I).toAPSInt(), Op2),
3039 DestUnsigned));
3040 });
3041 }
3043
3044 return true;
3045 }
3046
3047 // Vector type.
3048 const Pointer &Op2 = S.Stk.pop<Pointer>();
3049 const Pointer &Op1 = S.Stk.pop<Pointer>();
3050 const Pointer &Op0 = S.Stk.pop<Pointer>();
3051 const Pointer &Dst = S.Stk.peek<Pointer>();
3052 for (unsigned I = 0; I != NumElems; ++I) {
3053 APSInt Val0, Val1, Val2;
3055 Val0 = Op0.elem<T>(I).toAPSInt();
3056 Val1 = Op1.elem<T>(I).toAPSInt();
3057 Val2 = Op2.elem<T>(I).toAPSInt();
3058 });
3059 APSInt Result = APSInt(Fn(Val0, Val1, Val2), Val0.isUnsigned());
3061 { Dst.elem<T>(I) = static_cast<T>(Result); });
3062 }
3064
3065 return true;
3066}
3067
// NOTE(review): the line naming this helper (file line 3068) and the
// initializeAllElements call (3099) are missing from this rendering; visible
// lines kept byte-for-byte. Copies one lane (a DstElems-wide slice selected by
// the immediate) out of the source vector into the destination.
3069 const CallExpr *Call,
3070 unsigned ID) {
3071 assert(Call->getNumArgs() == 2);
3072
3073 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
3074 uint64_t Index = ImmAPS.getZExtValue();
3075
3076 const Pointer &Src = S.Stk.pop<Pointer>();
3077 if (!Src.getFieldDesc()->isPrimitiveArray())
3078 return false;
3079
3080 const Pointer &Dst = S.Stk.peek<Pointer>();
3081 if (!Dst.getFieldDesc()->isPrimitiveArray())
3082 return false;
3083
3084 unsigned SrcElems = Src.getNumElems();
3085 unsigned DstElems = Dst.getNumElems();
3086
// The immediate is taken modulo the lane count, so any index is in range.
3087 unsigned NumLanes = SrcElems / DstElems;
3088 unsigned Lane = static_cast<unsigned>(Index % NumLanes);
3089 unsigned ExtractPos = Lane * DstElems;
3090
3091 PrimType ElemT = Src.getFieldDesc()->getPrimType();
3092
3093 TYPE_SWITCH(ElemT, {
3094 for (unsigned I = 0; I != DstElems; ++I) {
3095 Dst.elem<T>(I) = Src.elem<T>(ExtractPos + I);
3096 }
3097 });
3098
3100 return true;
3101}
3102
// NOTE(review): the line naming this helper (file line 3103) and the
// initializeAllElements call (3140) are missing from this rendering; visible
// lines kept byte-for-byte. Masked lane extract: each destination element is
// taken from the selected source lane when its mask bit is set, otherwise
// from the merge vector.
3104 CodePtr OpPC,
3105 const CallExpr *Call,
3106 unsigned ID) {
3107 assert(Call->getNumArgs() == 4);
3108
// Args are popped in reverse order of the call's argument list.
3109 APSInt MaskAPS = popToAPSInt(S, Call->getArg(3));
3110 const Pointer &Merge = S.Stk.pop<Pointer>();
3111 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
3112 const Pointer &Src = S.Stk.pop<Pointer>();
3113
3114 if (!Src.getFieldDesc()->isPrimitiveArray() ||
3115 !Merge.getFieldDesc()->isPrimitiveArray())
3116 return false;
3117
3118 const Pointer &Dst = S.Stk.peek<Pointer>();
3119 if (!Dst.getFieldDesc()->isPrimitiveArray())
3120 return false;
3121
3122 unsigned SrcElems = Src.getNumElems();
3123 unsigned DstElems = Dst.getNumElems();
3124
3125 unsigned NumLanes = SrcElems / DstElems;
3126 unsigned Lane = static_cast<unsigned>(ImmAPS.getZExtValue() % NumLanes);
3127 unsigned Base = Lane * DstElems;
3128
3129 PrimType ElemT = Src.getFieldDesc()->getPrimType();
3130
3131 TYPE_SWITCH(ElemT, {
3132 for (unsigned I = 0; I != DstElems; ++I) {
3133 if (MaskAPS[I])
3134 Dst.elem<T>(I) = Src.elem<T>(Base + I);
3135 else
3136 Dst.elem<T>(I) = Merge.elem<T>(I);
3137 }
3138 });
3139
3141 return true;
3142}
3143
// NOTE(review): the line naming this helper (file line 3144) and the
// initializeAllElements call (3181) are missing from this rendering; visible
// lines kept byte-for-byte. Copies the base vector to the destination, then
// overwrites the immediate-selected lane with the sub-vector.
3145 const CallExpr *Call,
3146 unsigned ID) {
3147 assert(Call->getNumArgs() == 3);
3148
3149 APSInt ImmAPS = popToAPSInt(S, Call->getArg(2));
3150 uint64_t Index = ImmAPS.getZExtValue();
3151
3152 const Pointer &SubVec = S.Stk.pop<Pointer>();
3153 if (!SubVec.getFieldDesc()->isPrimitiveArray())
3154 return false;
3155
3156 const Pointer &BaseVec = S.Stk.pop<Pointer>();
3157 if (!BaseVec.getFieldDesc()->isPrimitiveArray())
3158 return false;
3159
3160 const Pointer &Dst = S.Stk.peek<Pointer>();
3161
3162 unsigned BaseElements = BaseVec.getNumElems();
3163 unsigned SubElements = SubVec.getNumElems();
3164
3165 assert(SubElements != 0 && BaseElements != 0 &&
3166 (BaseElements % SubElements) == 0);
3167
3168 unsigned NumLanes = BaseElements / SubElements;
3169 unsigned Lane = static_cast<unsigned>(Index % NumLanes);
3170 unsigned InsertPos = Lane * SubElements;
3171
3172 PrimType ElemT = BaseVec.getFieldDesc()->getPrimType();
3173
3174 TYPE_SWITCH(ElemT, {
3175 for (unsigned I = 0; I != BaseElements; ++I)
3176 Dst.elem<T>(I) = BaseVec.elem<T>(I);
3177 for (unsigned I = 0; I != SubElements; ++I)
3178 Dst.elem<T>(InsertPos + I) = SubVec.elem<T>(I);
3179 });
3180
3182 return true;
3183}
3184
// NOTE(review): the signature line (file line 3185) and the continuation of
// the DestUnsigned expression (3200) are missing from this rendering; visible
// lines kept byte-for-byte. Finds the (unsigned) minimum element and its
// index, stores them in result lanes 0 and 1, and zeroes the rest —
// phminposuw-style semantics.
3186 const CallExpr *Call) {
3187 assert(Call->getNumArgs() == 1);
3188
3189 const Pointer &Source = S.Stk.pop<Pointer>();
3190 const Pointer &Dest = S.Stk.peek<Pointer>();
3191
3192 unsigned SourceLen = Source.getNumElems();
3193 QualType ElemQT = getElemType(Source);
3194 OptPrimType ElemT = S.getContext().classify(ElemQT);
3195 unsigned ElemBitWidth = S.getASTContext().getTypeSize(ElemQT);
3196
3197 bool DestUnsigned = Call->getCallReturnType(S.getASTContext())
3198 ->castAs<VectorType>()
3199 ->getElementType()
3201
3202 INT_TYPE_SWITCH_NO_BOOL(*ElemT, {
3203 APSInt MinIndex(ElemBitWidth, DestUnsigned);
3204 APSInt MinVal = Source.elem<T>(0).toAPSInt();
3205
// Unsigned comparison (ugt); first occurrence of the minimum wins.
3206 for (unsigned I = 1; I != SourceLen; ++I) {
3207 APSInt Val = Source.elem<T>(I).toAPSInt();
3208 if (MinVal.ugt(Val)) {
3209 MinVal = Val;
3210 MinIndex = I;
3211 }
3212 }
3213
3214 Dest.elem<T>(0) = static_cast<T>(MinVal);
3215 Dest.elem<T>(1) = static_cast<T>(MinIndex);
3216 for (unsigned I = 2; I != SourceLen; ++I) {
3217 Dest.elem<T>(I) = static_cast<T>(APSInt(ElemBitWidth, DestUnsigned));
3218 }
3219 });
3220 Dest.initializeAllElements();
3221 return true;
3222}
3223
// NOTE(review): the signature line (file line 3224) is missing from this
// rendering; body kept byte-for-byte. Bitwise ternary logic (vpternlog):
// for every unmasked lane, each result bit is looked up in the 8-bit truth
// table Imm indexed by the corresponding bits of A, B and C. Masked lanes
// are zeroed (MaskZ) or take the A lane (merge masking).
3225 const CallExpr *Call, bool MaskZ) {
3226 assert(Call->getNumArgs() == 5);
3227
3228 APInt U = popToAPSInt(S, Call->getArg(4)); // Lane mask
3229 APInt Imm = popToAPSInt(S, Call->getArg(3)); // Ternary truth table
3230 const Pointer &C = S.Stk.pop<Pointer>();
3231 const Pointer &B = S.Stk.pop<Pointer>();
3232 const Pointer &A = S.Stk.pop<Pointer>();
3233 const Pointer &Dst = S.Stk.peek<Pointer>();
3234
3235 unsigned DstLen = A.getNumElems();
3236 QualType ElemQT = getElemType(A);
3237 OptPrimType ElemT = S.getContext().classify(ElemQT);
3238 unsigned LaneWidth = S.getASTContext().getTypeSize(ElemQT);
3239 bool DstUnsigned = ElemQT->isUnsignedIntegerOrEnumerationType();
3240
3241 INT_TYPE_SWITCH_NO_BOOL(*ElemT, {
3242 for (unsigned I = 0; I != DstLen; ++I) {
3243 APInt ALane = A.elem<T>(I).toAPSInt();
3244 APInt BLane = B.elem<T>(I).toAPSInt();
3245 APInt CLane = C.elem<T>(I).toAPSInt();
3246 APInt RLane(LaneWidth, 0);
3247 if (U[I]) { // If lane not masked, compute ternary logic.
3248 for (unsigned Bit = 0; Bit != LaneWidth; ++Bit) {
3249 unsigned ABit = ALane[Bit];
3250 unsigned BBit = BLane[Bit];
3251 unsigned CBit = CLane[Bit];
// A is the most significant selector bit: index = A:B:C.
3252 unsigned Idx = (ABit << 2) | (BBit << 1) | (CBit);
3253 RLane.setBitVal(Bit, Imm[Idx]);
3254 }
3255 Dst.elem<T>(I) = static_cast<T>(APSInt(RLane, DstUnsigned));
3256 } else if (MaskZ) { // If zero masked, zero the lane.
3257 Dst.elem<T>(I) = static_cast<T>(APSInt(RLane, DstUnsigned));
3258 } else { // Just masked, put in A lane.
3259 Dst.elem<T>(I) = static_cast<T>(APSInt(ALane, DstUnsigned));
3260 }
3261 }
3262 });
3263 Dst.initializeAllElements();
3264 return true;
3265}
3266
// NOTE(review): the line naming this helper (file line 3267) and a
// type-switch opener (3286) are missing from this rendering; visible lines
// kept byte-for-byte. Extracts one element of a vector, with the index
// wrapped by `& (NumElems - 1)`, and pushes it as a float or integer.
3268 const CallExpr *Call, unsigned ID) {
3269 assert(Call->getNumArgs() == 2);
3270
3271 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
3272 const Pointer &Vec = S.Stk.pop<Pointer>();
3273 if (!Vec.getFieldDesc()->isPrimitiveArray())
3274 return false;
3275
// Index masking assumes NumElems is a power of two — TODO confirm upstream.
3276 unsigned NumElems = Vec.getNumElems();
3277 unsigned Index =
3278 static_cast<unsigned>(ImmAPS.getZExtValue() & (NumElems - 1));
3279
3280 PrimType ElemT = Vec.getFieldDesc()->getPrimType();
3281 // FIXME(#161685): Replace float+int split with a numeric-only type switch
3282 if (ElemT == PT_Float) {
3283 S.Stk.push<Floating>(Vec.elem<Floating>(Index));
3284 return true;
3285 }
3287 APSInt V = Vec.elem<T>(Index).toAPSInt();
3288 pushInteger(S, V, Call->getType());
3289 });
3290
3291 return true;
3292}
3293
// NOTE(review): the line naming this helper (file line 3294), a type-switch
// opener (3312), and the initializeAllElements call (3318) are missing from
// this rendering; visible lines kept byte-for-byte. Copies the base vector
// into the destination and replaces the wrapped-index element with the
// scalar value.
3295 const CallExpr *Call, unsigned ID) {
3296 assert(Call->getNumArgs() == 3);
3297
3298 APSInt ImmAPS = popToAPSInt(S, Call->getArg(2));
3299 APSInt ValAPS = popToAPSInt(S, Call->getArg(1));
3300
3301 const Pointer &Base = S.Stk.pop<Pointer>();
3302 if (!Base.getFieldDesc()->isPrimitiveArray())
3303 return false;
3304
3305 const Pointer &Dst = S.Stk.peek<Pointer>();
3306
3307 unsigned NumElems = Base.getNumElems();
3308 unsigned Index =
3309 static_cast<unsigned>(ImmAPS.getZExtValue() & (NumElems - 1));
3310
3311 PrimType ElemT = Base.getFieldDesc()->getPrimType();
3313 for (unsigned I = 0; I != NumElems; ++I)
3314 Dst.elem<T>(I) = Base.elem<T>(I);
3315 Dst.elem<T>(Index) = static_cast<T>(ValAPS);
3316 });
3317
3319 return true;
3320}
3321
3322static bool evalICmpImm(uint8_t Imm, const APSInt &A, const APSInt &B,
3323 bool IsUnsigned) {
3324 switch (Imm & 0x7) {
3325 case 0x00: // _MM_CMPINT_EQ
3326 return (A == B);
3327 case 0x01: // _MM_CMPINT_LT
3328 return IsUnsigned ? A.ult(B) : A.slt(B);
3329 case 0x02: // _MM_CMPINT_LE
3330 return IsUnsigned ? A.ule(B) : A.sle(B);
3331 case 0x03: // _MM_CMPINT_FALSE
3332 return false;
3333 case 0x04: // _MM_CMPINT_NE
3334 return (A != B);
3335 case 0x05: // _MM_CMPINT_NLT
3336 return IsUnsigned ? A.ugt(B) : A.sgt(B);
3337 case 0x06: // _MM_CMPINT_NLE
3338 return IsUnsigned ? A.uge(B) : A.sge(B);
3339 case 0x07: // _MM_CMPINT_TRUE
3340 return true;
3341 default:
3342 llvm_unreachable("Invalid Op");
3343 }
3344}
3345
// NOTE(review): the line naming this helper (file line 3346) and a type-switch
// opener (3365) are missing from this rendering; visible lines kept
// byte-for-byte. Compares two vectors element-wise per the predicate
// immediate (see evalICmpImm) under a write mask, producing a result bitmask.
3347 const CallExpr *Call, unsigned ID,
3348 bool IsUnsigned) {
3349 assert(Call->getNumArgs() == 4);
3350
3351 APSInt Mask = popToAPSInt(S, Call->getArg(3));
3352 APSInt Opcode = popToAPSInt(S, Call->getArg(2));
3353 unsigned CmpOp = static_cast<unsigned>(Opcode.getZExtValue());
3354 const Pointer &RHS = S.Stk.pop<Pointer>();
3355 const Pointer &LHS = S.Stk.pop<Pointer>();
3356
3357 assert(LHS.getNumElems() == RHS.getNumElems());
3358
3359 APInt RetMask = APInt::getZero(LHS.getNumElems());
3360 unsigned VectorLen = LHS.getNumElems();
3361 PrimType ElemT = LHS.getFieldDesc()->getPrimType();
3362
3363 for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
3364 APSInt A, B;
3366 A = LHS.elem<T>(ElemNum).toAPSInt();
3367 B = RHS.elem<T>(ElemNum).toAPSInt();
3368 });
// A result bit is set only if both the write-mask bit and the compare hold.
3369 RetMask.setBitVal(ElemNum,
3370 Mask[ElemNum] && evalICmpImm(CmpOp, A, B, IsUnsigned));
3371 }
3372 pushInteger(S, RetMask, Call->getType());
3373 return true;
3374}
3375
// NOTE(review): the signature line (file line 3376), a type-switch opener
// (3389), and the initializeAllElements call (3399) are missing from this
// rendering; visible lines kept byte-for-byte. For each element I, builds a
// bitmask of earlier elements J < I that compare equal to it (vpconflict
// semantics) — note the quadratic pairwise scan.
3377 const CallExpr *Call) {
3378 assert(Call->getNumArgs() == 1);
3379
3380 QualType Arg0Type = Call->getArg(0)->getType();
3381 const auto *VecT = Arg0Type->castAs<VectorType>();
3382 PrimType ElemT = *S.getContext().classify(VecT->getElementType());
3383 unsigned NumElems = VecT->getNumElements();
3384 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
3385 const Pointer &Src = S.Stk.pop<Pointer>();
3386 const Pointer &Dst = S.Stk.peek<Pointer>();
3387
3388 for (unsigned I = 0; I != NumElems; ++I) {
3390 APSInt ElemI = Src.elem<T>(I).toAPSInt();
3391 APInt ConflictMask(ElemI.getBitWidth(), 0);
3392 for (unsigned J = 0; J != I; ++J) {
3393 APSInt ElemJ = Src.elem<T>(J).toAPSInt();
3394 ConflictMask.setBitVal(J, ElemI == ElemJ);
3395 }
3396 Dst.elem<T>(I) = static_cast<T>(APSInt(ConflictMask, DestUnsigned));
3397 });
3398 }
3400 return true;
3401}
3402
// NOTE(review): the line naming this helper (file line 3403) is missing from
// this rendering; body kept byte-for-byte. Packs the most-significant bit of
// every vector element into an integer mask of the call's return width.
3404 const CallExpr *Call,
3405 unsigned ID) {
3406 assert(Call->getNumArgs() == 1);
3407
3408 const Pointer &Vec = S.Stk.pop<Pointer>();
3409 unsigned RetWidth = S.getASTContext().getIntWidth(Call->getType());
3410 APInt RetMask(RetWidth, 0);
3411
3412 unsigned VectorLen = Vec.getNumElems();
3413 PrimType ElemT = Vec.getFieldDesc()->getPrimType();
3414
3415 for (unsigned ElemNum = 0; ElemNum != VectorLen; ++ElemNum) {
3416 APSInt A;
3417 INT_TYPE_SWITCH_NO_BOOL(ElemT, { A = Vec.elem<T>(ElemNum).toAPSInt(); });
// operator[] on APInt reads a single bit; the top bit is the sign/MSB.
3418 unsigned MSB = A[A.getBitWidth() - 1];
3419 RetMask.setBitVal(ElemNum, MSB);
3420 }
3421 pushInteger(S, RetMask, Call->getType());
3422 return true;
3423}
// NOTE(review): the signature line (file line 3424) and the
// initializeAllElements call (3471) are missing from this rendering; visible
// lines kept byte-for-byte. cvtsd2ss-style scalar conversion: lane 0 becomes
// B's double converted to float (or Src's lane 0 when mask bit 0 is clear in
// the rounding/mask form), upper lanes are copied from A.
3425 const CallExpr *Call,
3426 bool HasRoundingMask) {
3427 APSInt Rounding, MaskInt;
3428 Pointer Src, B, A;
3429
3430 if (HasRoundingMask) {
3431 assert(Call->getNumArgs() == 5);
3432 Rounding = popToAPSInt(S, Call->getArg(4));
3433 MaskInt = popToAPSInt(S, Call->getArg(3));
3434 Src = S.Stk.pop<Pointer>();
3435 B = S.Stk.pop<Pointer>();
3436 A = S.Stk.pop<Pointer>();
3437 if (!CheckLoad(S, OpPC, A) || !CheckLoad(S, OpPC, B) ||
3438 !CheckLoad(S, OpPC, Src))
3439 return false;
3440 } else {
3441 assert(Call->getNumArgs() == 2);
3442 B = S.Stk.pop<Pointer>();
3443 A = S.Stk.pop<Pointer>();
3444 if (!CheckLoad(S, OpPC, A) || !CheckLoad(S, OpPC, B))
3445 return false;
3446 }
3447
3448 const auto *DstVTy = Call->getType()->castAs<VectorType>();
3449 unsigned NumElems = DstVTy->getNumElements();
3450 const Pointer &Dst = S.Stk.peek<Pointer>();
3451
3452 // Copy all elements except lane 0 (overwritten below) from A to Dst.
3453 for (unsigned I = 1; I != NumElems; ++I)
3454 Dst.elem<Floating>(I) = A.elem<Floating>(I);
3455
3456 // Convert element 0 from double to float, or use Src if masked off.
3457 if (!HasRoundingMask || (MaskInt.getZExtValue() & 0x1)) {
3458 assert(S.getASTContext().FloatTy == DstVTy->getElementType() &&
3459 "cvtsd2ss requires float element type in destination vector");
3460
3461 Floating Conv = S.allocFloat(
3462 S.getASTContext().getFloatTypeSemantics(DstVTy->getElementType()));
3463 APFloat SrcVal = B.elem<Floating>(0).getAPFloat();
// convertDoubleToFloatStrict fails (non-constant) on inexact results here.
3464 if (!convertDoubleToFloatStrict(SrcVal, Conv, S, Call))
3465 return false;
3466 Dst.elem<Floating>(0) = Conv;
3467 } else {
3468 Dst.elem<Floating>(0) = Src.elem<Floating>(0);
3469 }
3470
3472 return true;
3473}
3474
// NOTE(review): the signature line (file line 3475) and the
// initializeAllElements call (3537) are missing from this rendering; visible
// lines kept byte-for-byte. cvtpd2ps-style packed conversion: double source
// lanes are converted to float; masked-off lanes keep the passthrough value;
// result lanes beyond the source length are zero-filled.
3476 const CallExpr *Call, bool IsMasked,
3477 bool HasRounding) {
3478
3479 APSInt MaskVal;
3480 Pointer PassThrough;
3481 Pointer Src;
3482 APSInt Rounding;
3483
3484 if (IsMasked) {
3485 // Pop in reverse order.
3486 if (HasRounding) {
3487 Rounding = popToAPSInt(S, Call->getArg(3));
3488 MaskVal = popToAPSInt(S, Call->getArg(2));
3489 PassThrough = S.Stk.pop<Pointer>();
3490 Src = S.Stk.pop<Pointer>();
3491 } else {
3492 MaskVal = popToAPSInt(S, Call->getArg(2));
3493 PassThrough = S.Stk.pop<Pointer>();
3494 Src = S.Stk.pop<Pointer>();
3495 }
3496
3497 if (!CheckLoad(S, OpPC, PassThrough))
3498 return false;
3499 } else {
3500 // Pop source only.
3501 Src = S.Stk.pop<Pointer>();
3502 }
3503
3504 if (!CheckLoad(S, OpPC, Src))
3505 return false;
3506
3507 const auto *RetVTy = Call->getType()->castAs<VectorType>();
3508 unsigned RetElems = RetVTy->getNumElements();
3509 unsigned SrcElems = Src.getNumElems();
3510 const Pointer &Dst = S.Stk.peek<Pointer>();
3511
3512 // Initialize destination with passthrough or zeros.
3513 for (unsigned I = 0; I != RetElems; ++I)
3514 if (IsMasked)
3515 Dst.elem<Floating>(I) = PassThrough.elem<Floating>(I);
3516 else
3517 Dst.elem<Floating>(I) = Floating(APFloat(0.0f));
3518
3519 assert(S.getASTContext().FloatTy == RetVTy->getElementType() &&
3520 "cvtpd2ps requires float element type in return vector");
3521
3522 // Convert double to float for enabled elements (only process source elements
3523 // that exist).
3524 for (unsigned I = 0; I != SrcElems; ++I) {
3525 if (IsMasked && !MaskVal[I])
3526 continue;
3527
3528 APFloat SrcVal = Src.elem<Floating>(I).getAPFloat();
3529
3530 Floating Conv = S.allocFloat(
3531 S.getASTContext().getFloatTypeSemantics(RetVTy->getElementType()));
3532 if (!convertDoubleToFloatStrict(SrcVal, Conv, S, Call))
3533 return false;
3534 Dst.elem<Floating>(I) = Conv;
3535 }
3536
3538 return true;
3539}
3540
// NOTE(review): the line naming this helper (file line 3541) and the
// initializeAllElements call (3620) are missing from this rendering; visible
// lines kept byte-for-byte. Generic shuffle driver: the mask is either an
// immediate or a mask vector, with one or two source operands; GetSourceIndex
// maps (dest index, mask) to (source vector #, source element), and a
// negative source index zeroes the destination element.
3542 InterpState &S, CodePtr OpPC, const CallExpr *Call,
3543 llvm::function_ref<std::pair<unsigned, int>(unsigned, unsigned)>
3544 GetSourceIndex) {
3545
3546 assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);
3547
3548 unsigned ShuffleMask = 0;
3549 Pointer A, MaskVector, B;
3550 bool IsVectorMask = false;
3551 bool IsSingleOperand = (Call->getNumArgs() == 2);
3552
3553 if (IsSingleOperand) {
// Single-operand form: B aliases A so GetSourceIndex may select either.
3554 QualType MaskType = Call->getArg(1)->getType();
3555 if (MaskType->isVectorType()) {
3556 IsVectorMask = true;
3557 MaskVector = S.Stk.pop<Pointer>();
3558 A = S.Stk.pop<Pointer>();
3559 B = A;
3560 } else if (MaskType->isIntegerType()) {
3561 ShuffleMask = popToAPSInt(S, Call->getArg(1)).getZExtValue();
3562 A = S.Stk.pop<Pointer>();
3563 B = A;
3564 } else {
3565 return false;
3566 }
3567 } else {
3568 QualType Arg2Type = Call->getArg(2)->getType();
3569 if (Arg2Type->isVectorType()) {
3570 IsVectorMask = true;
3571 B = S.Stk.pop<Pointer>();
3572 MaskVector = S.Stk.pop<Pointer>();
3573 A = S.Stk.pop<Pointer>();
3574 } else if (Arg2Type->isIntegerType()) {
3575 ShuffleMask = popToAPSInt(S, Call->getArg(2)).getZExtValue();
3576 B = S.Stk.pop<Pointer>();
3577 A = S.Stk.pop<Pointer>();
3578 } else {
3579 return false;
3580 }
3581 }
3582
3583 QualType Arg0Type = Call->getArg(0)->getType();
3584 const auto *VecT = Arg0Type->castAs<VectorType>();
3585 PrimType ElemT = *S.getContext().classify(VecT->getElementType());
3586 unsigned NumElems = VecT->getNumElements();
3587
3588 const Pointer &Dst = S.Stk.peek<Pointer>();
3589
3590 PrimType MaskElemT = PT_Uint32;
3591 if (IsVectorMask) {
3592 QualType Arg1Type = Call->getArg(1)->getType();
3593 const auto *MaskVecT = Arg1Type->castAs<VectorType>();
3594 QualType MaskElemType = MaskVecT->getElementType();
3595 MaskElemT = *S.getContext().classify(MaskElemType);
3596 }
3597
3598 for (unsigned DstIdx = 0; DstIdx != NumElems; ++DstIdx) {
// With a mask vector, each destination element has its own selector value.
3599 if (IsVectorMask) {
3600 INT_TYPE_SWITCH(MaskElemT, {
3601 ShuffleMask = static_cast<unsigned>(MaskVector.elem<T>(DstIdx));
3602 });
3603 }
3604
3605 auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);
3606
3607 if (SrcIdx < 0) {
3608 // Zero out this element
3609 if (ElemT == PT_Float) {
3610 Dst.elem<Floating>(DstIdx) = Floating(
3611 S.getASTContext().getFloatTypeSemantics(VecT->getElementType()));
3612 } else {
3613 INT_TYPE_SWITCH_NO_BOOL(ElemT, { Dst.elem<T>(DstIdx) = T::from(0); });
3614 }
3615 } else {
3616 const Pointer &Src = (SrcVecIdx == 0) ? A : B;
3617 TYPE_SWITCH(ElemT, { Dst.elem<T>(DstIdx) = Src.elem<T>(SrcIdx); });
3618 }
3619 }
3621
3622 return true;
3623}
3624
// NOTE(review): the line naming this helper (file line 3625) and the
// initializeAllElements call (3684) are missing from this rendering; visible
// lines kept byte-for-byte. Implements psll/psrl-style shifts where the shift
// amount is the low 64 bits of the count vector: each source element is
// shifted via ShiftOp, or replaced by OverflowOp when the count is >= the
// element width.
3626 InterpState &S, CodePtr OpPC, const CallExpr *Call,
3627 llvm::function_ref<APInt(const APInt &, uint64_t)> ShiftOp,
3628 llvm::function_ref<APInt(const APInt &, unsigned)> OverflowOp) {
3629
3630 assert(Call->getNumArgs() == 2);
3631
3632 const Pointer &Count = S.Stk.pop<Pointer>();
3633 const Pointer &Source = S.Stk.pop<Pointer>();
3634
3635 QualType SourceType = Call->getArg(0)->getType();
3636 QualType CountType = Call->getArg(1)->getType();
3637 assert(SourceType->isVectorType() && CountType->isVectorType());
3638
3639 const auto *SourceVecT = SourceType->castAs<VectorType>();
3640 const auto *CountVecT = CountType->castAs<VectorType>();
3641 PrimType SourceElemT = *S.getContext().classify(SourceVecT->getElementType());
3642 PrimType CountElemT = *S.getContext().classify(CountVecT->getElementType());
3643
3644 const Pointer &Dst = S.Stk.peek<Pointer>();
3645
3646 unsigned DestEltWidth =
3647 S.getASTContext().getTypeSize(SourceVecT->getElementType());
3648 bool IsDestUnsigned = SourceVecT->getElementType()->isUnsignedIntegerType();
3649 unsigned DestLen = SourceVecT->getNumElements();
3650 unsigned CountEltWidth =
3651 S.getASTContext().getTypeSize(CountVecT->getElementType());
3652 unsigned NumBitsInQWord = 64;
3653 unsigned NumCountElts = NumBitsInQWord / CountEltWidth;
3654
// Assemble the low 64 bits of the count vector into one scalar shift amount.
3655 uint64_t CountLQWord = 0;
3656 for (unsigned EltIdx = 0; EltIdx != NumCountElts; ++EltIdx) {
3657 uint64_t Elt = 0;
3658 INT_TYPE_SWITCH(CountElemT,
3659 { Elt = static_cast<uint64_t>(Count.elem<T>(EltIdx)); });
3660 CountLQWord |= (Elt << (EltIdx * CountEltWidth));
3661 }
3662
3663 for (unsigned EltIdx = 0; EltIdx != DestLen; ++EltIdx) {
3664 APSInt Elt;
3665 INT_TYPE_SWITCH(SourceElemT, { Elt = Source.elem<T>(EltIdx).toAPSInt(); });
3666
3667 APInt Result;
3668 if (CountLQWord < DestEltWidth) {
3669 Result = ShiftOp(Elt, CountLQWord);
3670 } else {
3671 Result = OverflowOp(Elt, DestEltWidth);
3672 }
3673 if (IsDestUnsigned) {
3674 INT_TYPE_SWITCH(SourceElemT, {
3675 Dst.elem<T>(EltIdx) = T::from(Result.getZExtValue());
3676 });
3677 } else {
3678 INT_TYPE_SWITCH(SourceElemT, {
3679 Dst.elem<T>(EltIdx) = T::from(Result.getSExtValue());
3680 });
3681 }
3682 }
3683
3685 return true;
3686}
3687
// NOTE(review): the signature line (file line 3688) is missing from this
// rendering; body kept byte-for-byte. Bit-shuffle-to-mask: for each byte
// position, the shuffle-mask byte selects one bit (0..63) out of the
// corresponding 64-bit group of the source, gated by the zero mask; the
// selected bits form the integer result mask.
3689 const CallExpr *Call) {
3690
3691 assert(Call->getNumArgs() == 3);
3692
3693 QualType SourceType = Call->getArg(0)->getType();
3694 QualType ShuffleMaskType = Call->getArg(1)->getType();
3695 QualType ZeroMaskType = Call->getArg(2)->getType();
3696 if (!SourceType->isVectorType() || !ShuffleMaskType->isVectorType() ||
3697 !ZeroMaskType->isIntegerType()) {
3698 return false;
3699 }
3700
3701 Pointer Source, ShuffleMask;
3702 APSInt ZeroMask = popToAPSInt(S, Call->getArg(2));
3703 ShuffleMask = S.Stk.pop<Pointer>();
3704 Source = S.Stk.pop<Pointer>();
3705
3706 const auto *SourceVecT = SourceType->castAs<VectorType>();
3707 const auto *ShuffleMaskVecT = ShuffleMaskType->castAs<VectorType>();
3708 assert(SourceVecT->getNumElements() == ShuffleMaskVecT->getNumElements());
3709 assert(ZeroMask.getBitWidth() == SourceVecT->getNumElements());
3710
3711 PrimType SourceElemT = *S.getContext().classify(SourceVecT->getElementType());
3712 PrimType ShuffleMaskElemT =
3713 *S.getContext().classify(ShuffleMaskVecT->getElementType());
3714
3715 unsigned NumBytesInQWord = 8;
3716 unsigned NumBitsInByte = 8;
3717 unsigned NumBytes = SourceVecT->getNumElements();
3718 unsigned NumQWords = NumBytes / NumBytesInQWord;
3719 unsigned RetWidth = ZeroMask.getBitWidth();
3720 APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true);
3721
3722 for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) {
// Gather this group's 8 source bytes into one little-endian 64-bit value.
3723 APInt SourceQWord(64, 0);
3724 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3725 uint64_t Byte = 0;
3726 INT_TYPE_SWITCH(SourceElemT, {
3727 Byte = static_cast<uint64_t>(
3728 Source.elem<T>(QWordId * NumBytesInQWord + ByteIdx));
3729 });
3730 SourceQWord.insertBits(APInt(8, Byte & 0xFF), ByteIdx * NumBitsInByte);
3731 }
3732
3733 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3734 unsigned SelIdx = QWordId * NumBytesInQWord + ByteIdx;
3735 unsigned M = 0;
3736 INT_TYPE_SWITCH(ShuffleMaskElemT, {
3737 M = static_cast<unsigned>(ShuffleMask.elem<T>(SelIdx)) & 0x3F;
3738 });
3739
3740 if (ZeroMask[SelIdx]) {
3741 RetMask.setBitVal(SelIdx, SourceQWord[M]);
3742 }
3743 }
3744 }
3745
3746 pushInteger(S, RetMask, Call->getType());
3747 return true;
3748}
3749
// NOTE(review): the signature line (file line 3750) and the right-hand side
// of the HalfSem initializer (3768) are missing from this rendering; visible
// lines kept byte-for-byte. cvtps2ph-style float-to-half conversion with an
// immediate-selected rounding mode; with MXCSR rounding (imm bit 2) under
// strict FP, evaluation fails unless the conversion is exact.
3751 const CallExpr *Call) {
3752 // Arguments are: vector of floats, rounding immediate
3753 assert(Call->getNumArgs() == 2);
3754
3755 APSInt Imm = popToAPSInt(S, Call->getArg(1));
3756 const Pointer &Src = S.Stk.pop<Pointer>();
3757 const Pointer &Dst = S.Stk.peek<Pointer>();
3758
3759 assert(Src.getFieldDesc()->isPrimitiveArray());
3760 assert(Dst.getFieldDesc()->isPrimitiveArray());
3761
3762 const auto *SrcVTy = Call->getArg(0)->getType()->castAs<VectorType>();
3763 unsigned SrcNumElems = SrcVTy->getNumElements();
3764 const auto *DstVTy = Call->getType()->castAs<VectorType>();
3765 unsigned DstNumElems = DstVTy->getNumElements();
3766
3767 const llvm::fltSemantics &HalfSem =
3769
3770 // imm[2] == 1 means use MXCSR rounding mode.
3771 // In that case, we can only evaluate if the conversion is exact.
3772 int ImmVal = Imm.getZExtValue();
3773 bool UseMXCSR = (ImmVal & 4) != 0;
3774 bool IsFPConstrained =
3775 Call->getFPFeaturesInEffect(S.getASTContext().getLangOpts())
3776 .isFPConstrained();
3777
3778 llvm::RoundingMode RM;
3779 if (!UseMXCSR) {
3780 switch (ImmVal & 3) {
3781 case 0:
3782 RM = llvm::RoundingMode::NearestTiesToEven;
3783 break;
3784 case 1:
3785 RM = llvm::RoundingMode::TowardNegative;
3786 break;
3787 case 2:
3788 RM = llvm::RoundingMode::TowardPositive;
3789 break;
3790 case 3:
3791 RM = llvm::RoundingMode::TowardZero;
3792 break;
3793 default:
3794 llvm_unreachable("Invalid immediate rounding mode");
3795 }
3796 } else {
3797 // For MXCSR, we must check for exactness. We can use any rounding mode
3798 // for the trial conversion since the result is the same if it's exact.
3799 RM = llvm::RoundingMode::NearestTiesToEven;
3800 }
3801
3802 QualType DstElemQT = Dst.getFieldDesc()->getElemQualType();
3803 PrimType DstElemT = *S.getContext().classify(DstElemQT);
3804
3805 for (unsigned I = 0; I != SrcNumElems; ++I) {
3806 Floating SrcVal = Src.elem<Floating>(I);
3807 APFloat DstVal = SrcVal.getAPFloat();
3808
3809 bool LostInfo;
3810 APFloat::opStatus St = DstVal.convert(HalfSem, RM, &LostInfo);
3811
3812 if (UseMXCSR && IsFPConstrained && St != APFloat::opOK) {
3813 S.FFDiag(S.Current->getSource(OpPC),
3814 diag::note_constexpr_dynamic_rounding);
3815 return false;
3816 }
3817
3818 INT_TYPE_SWITCH_NO_BOOL(DstElemT, {
3819 // Convert the destination value's bit pattern to an unsigned integer,
3820 // then reconstruct the element using the target type's 'from' method.
3821 uint64_t RawBits = DstVal.bitcastToAPInt().getZExtValue();
3822 Dst.elem<T>(I) = T::from(RawBits);
3823 });
3824 }
3825
3826 // Zero out remaining elements if the destination has more elements
3827 // (e.g., vcvtps2ph converting 4 floats to 8 shorts).
3828 if (DstNumElems > SrcNumElems) {
3829 for (unsigned I = SrcNumElems; I != DstNumElems; ++I) {
3830 INT_TYPE_SWITCH_NO_BOOL(DstElemT, { Dst.elem<T>(I) = T::from(0); });
3831 }
3832 }
3833
3834 Dst.initializeAllElements();
3835 return true;
3836}
3837
// NOTE(review): the signature line (file line 3838), a type-switch opener
// (3875), and the initializeAllElements call (3887) are missing from this
// rendering; visible lines kept byte-for-byte. vpmultishiftqb-style: each
// result byte is an 8-bit field extracted from B's 64-bit group at the bit
// offset given by the corresponding control byte in A (wrapping mod 64).
3839 const CallExpr *Call) {
3840 assert(Call->getNumArgs() == 2);
3841
3842 QualType ATy = Call->getArg(0)->getType();
3843 QualType BTy = Call->getArg(1)->getType();
3844 if (!ATy->isVectorType() || !BTy->isVectorType()) {
3845 return false;
3846 }
3847
3848 const Pointer &BPtr = S.Stk.pop<Pointer>();
3849 const Pointer &APtr = S.Stk.pop<Pointer>();
3850 const auto *AVecT = ATy->castAs<VectorType>();
3851 assert(AVecT->getNumElements() ==
3852 BTy->castAs<VectorType>()->getNumElements());
3853
3854 PrimType ElemT = *S.getContext().classify(AVecT->getElementType());
3855
3856 unsigned NumBytesInQWord = 8;
3857 unsigned NumBitsInByte = 8;
3858 unsigned NumBytes = AVecT->getNumElements();
3859 unsigned NumQWords = NumBytes / NumBytesInQWord;
3860 const Pointer &Dst = S.Stk.peek<Pointer>();
3861
3862 for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) {
3863 APInt BQWord(64, 0);
3864 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3865 unsigned Idx = QWordId * NumBytesInQWord + ByteIdx;
3866 INT_TYPE_SWITCH(ElemT, {
3867 uint64_t Byte = static_cast<uint64_t>(BPtr.elem<T>(Idx));
3868 BQWord.insertBits(APInt(8, Byte & 0xFF), ByteIdx * NumBitsInByte);
3869 });
3870 }
3871
3872 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3873 unsigned Idx = QWordId * NumBytesInQWord + ByteIdx;
3874 uint64_t Ctrl = 0;
3876 ElemT, { Ctrl = static_cast<uint64_t>(APtr.elem<T>(Idx)) & 0x3F; });
3877
3878 APInt Byte(8, 0);
// Bit indices wrap around the 64-bit group (& 0x3F).
3879 for (unsigned BitIdx = 0; BitIdx != NumBitsInByte; ++BitIdx) {
3880 Byte.setBitVal(BitIdx, BQWord[(Ctrl + BitIdx) & 0x3F]);
3881 }
3882 INT_TYPE_SWITCH(ElemT,
3883 { Dst.elem<T>(Idx) = T::from(Byte.getZExtValue()); });
3884 }
3885 }
3886
3888
3889 return true;
3890}
3891
// NOTE(review): the signature line (file line 3892) is missing from this
// rendering; body kept byte-for-byte. GF(2^8) affine transform: for each
// byte of X, applies the 8x8 bit-matrix held in A's 64-bit group plus the
// Imm constant via the GFNIAffine helper (Inverse selects the inverse form).
3893 const CallExpr *Call,
3894 bool Inverse) {
3895 assert(Call->getNumArgs() == 3);
3896 QualType XType = Call->getArg(0)->getType();
3897 QualType AType = Call->getArg(1)->getType();
3898 QualType ImmType = Call->getArg(2)->getType();
3899 if (!XType->isVectorType() || !AType->isVectorType() ||
3900 !ImmType->isIntegerType()) {
3901 return false;
3902 }
3903
3904 Pointer X, A;
3905 APSInt Imm = popToAPSInt(S, Call->getArg(2));
3906 A = S.Stk.pop<Pointer>();
3907 X = S.Stk.pop<Pointer>();
3908
3909 const Pointer &Dst = S.Stk.peek<Pointer>();
3910 const auto *AVecT = AType->castAs<VectorType>();
3911 assert(XType->castAs<VectorType>()->getNumElements() ==
3912 AVecT->getNumElements());
3913 unsigned NumBytesInQWord = 8;
3914 unsigned NumBytes = AVecT->getNumElements();
3915 unsigned NumBitsInQWord = 64;
3916 unsigned NumQWords = NumBytes / NumBytesInQWord;
3917 unsigned NumBitsInByte = 8;
3918 PrimType AElemT = *S.getContext().classify(AVecT->getElementType());
3919
3920 // computing A*X + Imm
3921 for (unsigned QWordIdx = 0; QWordIdx != NumQWords; ++QWordIdx) {
3922 // Extract the QWords from X, A
3923 APInt XQWord(NumBitsInQWord, 0);
3924 APInt AQWord(NumBitsInQWord, 0);
3925 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3926 unsigned Idx = QWordIdx * NumBytesInQWord + ByteIdx;
3927 uint8_t XByte;
3928 uint8_t AByte;
3929 INT_TYPE_SWITCH(AElemT, {
3930 XByte = static_cast<uint8_t>(X.elem<T>(Idx));
3931 AByte = static_cast<uint8_t>(A.elem<T>(Idx));
3932 });
3933
3934 XQWord.insertBits(APInt(NumBitsInByte, XByte), ByteIdx * NumBitsInByte);
3935 AQWord.insertBits(APInt(NumBitsInByte, AByte), ByteIdx * NumBitsInByte);
3936 }
3937
3938 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3939 unsigned Idx = QWordIdx * NumBytesInQWord + ByteIdx;
3940 uint8_t XByte =
3941 XQWord.lshr(ByteIdx * NumBitsInByte).getLoBits(8).getZExtValue();
3942 INT_TYPE_SWITCH(AElemT, {
3943 Dst.elem<T>(Idx) = T::from(GFNIAffine(XByte, AQWord, Imm, Inverse));
3944 });
3945 }
3946 }
3947 Dst.initializeAllElements();
3948 return true;
3949}
3950
// NOTE(review): the signature line (file line 3951) is missing from this
// rendering; body kept byte-for-byte. GF(2^8) multiply: each destination
// byte is GFNIMul of the corresponding bytes of A and B.
3952 const CallExpr *Call) {
3953 assert(Call->getNumArgs() == 2);
3954
3955 QualType AType = Call->getArg(0)->getType();
3956 QualType BType = Call->getArg(1)->getType();
3957 if (!AType->isVectorType() || !BType->isVectorType()) {
3958 return false;
3959 }
3960
3961 Pointer A, B;
3962 B = S.Stk.pop<Pointer>();
3963 A = S.Stk.pop<Pointer>();
3964
3965 const Pointer &Dst = S.Stk.peek<Pointer>();
3966 const auto *AVecT = AType->castAs<VectorType>();
3967 assert(AVecT->getNumElements() ==
3968 BType->castAs<VectorType>()->getNumElements());
3969
3970 PrimType AElemT = *S.getContext().classify(AVecT->getElementType());
3971 unsigned NumBytes = A.getNumElems();
3972
3973 for (unsigned ByteIdx = 0; ByteIdx != NumBytes; ++ByteIdx) {
3974 uint8_t AByte, BByte;
3975 INT_TYPE_SWITCH(AElemT, {
3976 AByte = static_cast<uint8_t>(A.elem<T>(ByteIdx));
3977 BByte = static_cast<uint8_t>(B.elem<T>(ByteIdx));
3978 Dst.elem<T>(ByteIdx) = T::from(GFNIMul(AByte, BByte));
3979 });
3980 }
3981
3982 Dst.initializeAllElements();
3983 return true;
3984}
3985
3987 uint32_t BuiltinID) {
3988 if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
3989 return Invalid(S, OpPC);
3990
3991 const InterpFrame *Frame = S.Current;
3992 switch (BuiltinID) {
3993 case Builtin::BI__builtin_is_constant_evaluated:
3995
3996 case Builtin::BI__builtin_assume:
3997 case Builtin::BI__assume:
3998 return interp__builtin_assume(S, OpPC, Frame, Call);
3999
4000 case Builtin::BI__builtin_strcmp:
4001 case Builtin::BIstrcmp:
4002 case Builtin::BI__builtin_strncmp:
4003 case Builtin::BIstrncmp:
4004 case Builtin::BI__builtin_wcsncmp:
4005 case Builtin::BIwcsncmp:
4006 case Builtin::BI__builtin_wcscmp:
4007 case Builtin::BIwcscmp:
4008 return interp__builtin_strcmp(S, OpPC, Frame, Call, BuiltinID);
4009
4010 case Builtin::BI__builtin_strlen:
4011 case Builtin::BIstrlen:
4012 case Builtin::BI__builtin_wcslen:
4013 case Builtin::BIwcslen:
4014 return interp__builtin_strlen(S, OpPC, Frame, Call, BuiltinID);
4015
4016 case Builtin::BI__builtin_nan:
4017 case Builtin::BI__builtin_nanf:
4018 case Builtin::BI__builtin_nanl:
4019 case Builtin::BI__builtin_nanf16:
4020 case Builtin::BI__builtin_nanf128:
4021 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/false);
4022
4023 case Builtin::BI__builtin_nans:
4024 case Builtin::BI__builtin_nansf:
4025 case Builtin::BI__builtin_nansl:
4026 case Builtin::BI__builtin_nansf16:
4027 case Builtin::BI__builtin_nansf128:
4028 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/true);
4029
4030 case Builtin::BI__builtin_huge_val:
4031 case Builtin::BI__builtin_huge_valf:
4032 case Builtin::BI__builtin_huge_vall:
4033 case Builtin::BI__builtin_huge_valf16:
4034 case Builtin::BI__builtin_huge_valf128:
4035 case Builtin::BI__builtin_inf:
4036 case Builtin::BI__builtin_inff:
4037 case Builtin::BI__builtin_infl:
4038 case Builtin::BI__builtin_inff16:
4039 case Builtin::BI__builtin_inff128:
4040 return interp__builtin_inf(S, OpPC, Frame, Call);
4041
4042 case Builtin::BI__builtin_copysign:
4043 case Builtin::BI__builtin_copysignf:
4044 case Builtin::BI__builtin_copysignl:
4045 case Builtin::BI__builtin_copysignf128:
4046 return interp__builtin_copysign(S, OpPC, Frame);
4047
4048 case Builtin::BI__builtin_fmin:
4049 case Builtin::BI__builtin_fminf:
4050 case Builtin::BI__builtin_fminl:
4051 case Builtin::BI__builtin_fminf16:
4052 case Builtin::BI__builtin_fminf128:
4053 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/false);
4054
4055 case Builtin::BI__builtin_fminimum_num:
4056 case Builtin::BI__builtin_fminimum_numf:
4057 case Builtin::BI__builtin_fminimum_numl:
4058 case Builtin::BI__builtin_fminimum_numf16:
4059 case Builtin::BI__builtin_fminimum_numf128:
4060 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/true);
4061
4062 case Builtin::BI__builtin_fmax:
4063 case Builtin::BI__builtin_fmaxf:
4064 case Builtin::BI__builtin_fmaxl:
4065 case Builtin::BI__builtin_fmaxf16:
4066 case Builtin::BI__builtin_fmaxf128:
4067 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/false);
4068
4069 case Builtin::BI__builtin_fmaximum_num:
4070 case Builtin::BI__builtin_fmaximum_numf:
4071 case Builtin::BI__builtin_fmaximum_numl:
4072 case Builtin::BI__builtin_fmaximum_numf16:
4073 case Builtin::BI__builtin_fmaximum_numf128:
4074 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/true);
4075
4076 case Builtin::BI__builtin_isnan:
4077 return interp__builtin_isnan(S, OpPC, Frame, Call);
4078
4079 case Builtin::BI__builtin_issignaling:
4080 return interp__builtin_issignaling(S, OpPC, Frame, Call);
4081
4082 case Builtin::BI__builtin_isinf:
4083 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/false, Call);
4084
4085 case Builtin::BI__builtin_isinf_sign:
4086 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/true, Call);
4087
4088 case Builtin::BI__builtin_isfinite:
4089 return interp__builtin_isfinite(S, OpPC, Frame, Call);
4090
4091 case Builtin::BI__builtin_isnormal:
4092 return interp__builtin_isnormal(S, OpPC, Frame, Call);
4093
4094 case Builtin::BI__builtin_issubnormal:
4095 return interp__builtin_issubnormal(S, OpPC, Frame, Call);
4096
4097 case Builtin::BI__builtin_iszero:
4098 return interp__builtin_iszero(S, OpPC, Frame, Call);
4099
4100 case Builtin::BI__builtin_signbit:
4101 case Builtin::BI__builtin_signbitf:
4102 case Builtin::BI__builtin_signbitl:
4103 return interp__builtin_signbit(S, OpPC, Frame, Call);
4104
4105 case Builtin::BI__builtin_isgreater:
4106 case Builtin::BI__builtin_isgreaterequal:
4107 case Builtin::BI__builtin_isless:
4108 case Builtin::BI__builtin_islessequal:
4109 case Builtin::BI__builtin_islessgreater:
4110 case Builtin::BI__builtin_isunordered:
4111 return interp_floating_comparison(S, OpPC, Call, BuiltinID);
4112
4113 case Builtin::BI__builtin_isfpclass:
4114 return interp__builtin_isfpclass(S, OpPC, Frame, Call);
4115
4116 case Builtin::BI__builtin_fpclassify:
4117 return interp__builtin_fpclassify(S, OpPC, Frame, Call);
4118
4119 case Builtin::BI__builtin_fabs:
4120 case Builtin::BI__builtin_fabsf:
4121 case Builtin::BI__builtin_fabsl:
4122 case Builtin::BI__builtin_fabsf128:
4123 return interp__builtin_fabs(S, OpPC, Frame);
4124
4125 case Builtin::BI__builtin_abs:
4126 case Builtin::BI__builtin_labs:
4127 case Builtin::BI__builtin_llabs:
4128 return interp__builtin_abs(S, OpPC, Frame, Call);
4129
4130 case Builtin::BI__builtin_popcount:
4131 case Builtin::BI__builtin_popcountl:
4132 case Builtin::BI__builtin_popcountll:
4133 case Builtin::BI__builtin_popcountg:
4134 case Builtin::BI__popcnt16: // Microsoft variants of popcount
4135 case Builtin::BI__popcnt:
4136 case Builtin::BI__popcnt64:
4137 return interp__builtin_popcount(S, OpPC, Frame, Call);
4138
4139 case Builtin::BI__builtin_parity:
4140 case Builtin::BI__builtin_parityl:
4141 case Builtin::BI__builtin_parityll:
4143 S, OpPC, Call, [](const APSInt &Val) {
4144 return APInt(Val.getBitWidth(), Val.popcount() % 2);
4145 });
4146 case Builtin::BI__builtin_clrsb:
4147 case Builtin::BI__builtin_clrsbl:
4148 case Builtin::BI__builtin_clrsbll:
4150 S, OpPC, Call, [](const APSInt &Val) {
4151 return APInt(Val.getBitWidth(),
4152 Val.getBitWidth() - Val.getSignificantBits());
4153 });
4154 case Builtin::BI__builtin_bitreverse8:
4155 case Builtin::BI__builtin_bitreverse16:
4156 case Builtin::BI__builtin_bitreverse32:
4157 case Builtin::BI__builtin_bitreverse64:
4159 S, OpPC, Call, [](const APSInt &Val) { return Val.reverseBits(); });
4160
4161 case Builtin::BI__builtin_classify_type:
4162 return interp__builtin_classify_type(S, OpPC, Frame, Call);
4163
4164 case Builtin::BI__builtin_expect:
4165 case Builtin::BI__builtin_expect_with_probability:
4166 return interp__builtin_expect(S, OpPC, Frame, Call);
4167
4168 case Builtin::BI__builtin_rotateleft8:
4169 case Builtin::BI__builtin_rotateleft16:
4170 case Builtin::BI__builtin_rotateleft32:
4171 case Builtin::BI__builtin_rotateleft64:
4172 case Builtin::BI_rotl8: // Microsoft variants of rotate left
4173 case Builtin::BI_rotl16:
4174 case Builtin::BI_rotl:
4175 case Builtin::BI_lrotl:
4176 case Builtin::BI_rotl64:
4178 S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) {
4179 return Value.rotl(Amount);
4180 });
4181
4182 case Builtin::BI__builtin_rotateright8:
4183 case Builtin::BI__builtin_rotateright16:
4184 case Builtin::BI__builtin_rotateright32:
4185 case Builtin::BI__builtin_rotateright64:
4186 case Builtin::BI_rotr8: // Microsoft variants of rotate right
4187 case Builtin::BI_rotr16:
4188 case Builtin::BI_rotr:
4189 case Builtin::BI_lrotr:
4190 case Builtin::BI_rotr64:
4192 S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) {
4193 return Value.rotr(Amount);
4194 });
4195
4196 case Builtin::BI__builtin_ffs:
4197 case Builtin::BI__builtin_ffsl:
4198 case Builtin::BI__builtin_ffsll:
4200 S, OpPC, Call, [](const APSInt &Val) {
4201 return APInt(Val.getBitWidth(),
4202 Val.isZero() ? 0u : Val.countTrailingZeros() + 1u);
4203 });
4204
4205 case Builtin::BIaddressof:
4206 case Builtin::BI__addressof:
4207 case Builtin::BI__builtin_addressof:
4208 assert(isNoopBuiltin(BuiltinID));
4209 return interp__builtin_addressof(S, OpPC, Frame, Call);
4210
4211 case Builtin::BIas_const:
4212 case Builtin::BIforward:
4213 case Builtin::BIforward_like:
4214 case Builtin::BImove:
4215 case Builtin::BImove_if_noexcept:
4216 assert(isNoopBuiltin(BuiltinID));
4217 return interp__builtin_move(S, OpPC, Frame, Call);
4218
4219 case Builtin::BI__builtin_eh_return_data_regno:
4221
4222 case Builtin::BI__builtin_launder:
4223 assert(isNoopBuiltin(BuiltinID));
4224 return true;
4225
4226 case Builtin::BI__builtin_add_overflow:
4227 case Builtin::BI__builtin_sub_overflow:
4228 case Builtin::BI__builtin_mul_overflow:
4229 case Builtin::BI__builtin_sadd_overflow:
4230 case Builtin::BI__builtin_uadd_overflow:
4231 case Builtin::BI__builtin_uaddl_overflow:
4232 case Builtin::BI__builtin_uaddll_overflow:
4233 case Builtin::BI__builtin_usub_overflow:
4234 case Builtin::BI__builtin_usubl_overflow:
4235 case Builtin::BI__builtin_usubll_overflow:
4236 case Builtin::BI__builtin_umul_overflow:
4237 case Builtin::BI__builtin_umull_overflow:
4238 case Builtin::BI__builtin_umulll_overflow:
4239 case Builtin::BI__builtin_saddl_overflow:
4240 case Builtin::BI__builtin_saddll_overflow:
4241 case Builtin::BI__builtin_ssub_overflow:
4242 case Builtin::BI__builtin_ssubl_overflow:
4243 case Builtin::BI__builtin_ssubll_overflow:
4244 case Builtin::BI__builtin_smul_overflow:
4245 case Builtin::BI__builtin_smull_overflow:
4246 case Builtin::BI__builtin_smulll_overflow:
4247 return interp__builtin_overflowop(S, OpPC, Call, BuiltinID);
4248
4249 case Builtin::BI__builtin_addcb:
4250 case Builtin::BI__builtin_addcs:
4251 case Builtin::BI__builtin_addc:
4252 case Builtin::BI__builtin_addcl:
4253 case Builtin::BI__builtin_addcll:
4254 case Builtin::BI__builtin_subcb:
4255 case Builtin::BI__builtin_subcs:
4256 case Builtin::BI__builtin_subc:
4257 case Builtin::BI__builtin_subcl:
4258 case Builtin::BI__builtin_subcll:
4259 return interp__builtin_carryop(S, OpPC, Frame, Call, BuiltinID);
4260
4261 case Builtin::BI__builtin_clz:
4262 case Builtin::BI__builtin_clzl:
4263 case Builtin::BI__builtin_clzll:
4264 case Builtin::BI__builtin_clzs:
4265 case Builtin::BI__builtin_clzg:
4266 case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
4267 case Builtin::BI__lzcnt:
4268 case Builtin::BI__lzcnt64:
4269 return interp__builtin_clz(S, OpPC, Frame, Call, BuiltinID);
4270
4271 case Builtin::BI__builtin_ctz:
4272 case Builtin::BI__builtin_ctzl:
4273 case Builtin::BI__builtin_ctzll:
4274 case Builtin::BI__builtin_ctzs:
4275 case Builtin::BI__builtin_ctzg:
4276 return interp__builtin_ctz(S, OpPC, Frame, Call, BuiltinID);
4277
4278 case Builtin::BI__builtin_elementwise_clzg:
4279 case Builtin::BI__builtin_elementwise_ctzg:
4281 BuiltinID);
4282 case Builtin::BI__builtin_bswapg:
4283 case Builtin::BI__builtin_bswap16:
4284 case Builtin::BI__builtin_bswap32:
4285 case Builtin::BI__builtin_bswap64:
4286 return interp__builtin_bswap(S, OpPC, Frame, Call);
4287
4288 case Builtin::BI__atomic_always_lock_free:
4289 case Builtin::BI__atomic_is_lock_free:
4290 return interp__builtin_atomic_lock_free(S, OpPC, Frame, Call, BuiltinID);
4291
4292 case Builtin::BI__c11_atomic_is_lock_free:
4294
4295 case Builtin::BI__builtin_complex:
4296 return interp__builtin_complex(S, OpPC, Frame, Call);
4297
4298 case Builtin::BI__builtin_is_aligned:
4299 case Builtin::BI__builtin_align_up:
4300 case Builtin::BI__builtin_align_down:
4301 return interp__builtin_is_aligned_up_down(S, OpPC, Frame, Call, BuiltinID);
4302
4303 case Builtin::BI__builtin_assume_aligned:
4304 return interp__builtin_assume_aligned(S, OpPC, Frame, Call);
4305
4306 case clang::X86::BI__builtin_ia32_bextr_u32:
4307 case clang::X86::BI__builtin_ia32_bextr_u64:
4308 case clang::X86::BI__builtin_ia32_bextri_u32:
4309 case clang::X86::BI__builtin_ia32_bextri_u64:
4311 S, OpPC, Call, [](const APSInt &Val, const APSInt &Idx) {
4312 unsigned BitWidth = Val.getBitWidth();
4313 uint64_t Shift = Idx.extractBitsAsZExtValue(8, 0);
4314 uint64_t Length = Idx.extractBitsAsZExtValue(8, 8);
4315 if (Length > BitWidth) {
4316 Length = BitWidth;
4317 }
4318
4319 // Handle out of bounds cases.
4320 if (Length == 0 || Shift >= BitWidth)
4321 return APInt(BitWidth, 0);
4322
4323 uint64_t Result = Val.getZExtValue() >> Shift;
4324 Result &= llvm::maskTrailingOnes<uint64_t>(Length);
4325 return APInt(BitWidth, Result);
4326 });
4327
4328 case clang::X86::BI__builtin_ia32_bzhi_si:
4329 case clang::X86::BI__builtin_ia32_bzhi_di:
4331 S, OpPC, Call, [](const APSInt &Val, const APSInt &Idx) {
4332 unsigned BitWidth = Val.getBitWidth();
4333 uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);
4334 APSInt Result = Val;
4335
4336 if (Index < BitWidth)
4337 Result.clearHighBits(BitWidth - Index);
4338
4339 return Result;
4340 });
4341
4342 case clang::X86::BI__builtin_ia32_ktestcqi:
4343 case clang::X86::BI__builtin_ia32_ktestchi:
4344 case clang::X86::BI__builtin_ia32_ktestcsi:
4345 case clang::X86::BI__builtin_ia32_ktestcdi:
4347 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4348 return APInt(sizeof(unsigned char) * 8, (~A & B) == 0);
4349 });
4350
4351 case clang::X86::BI__builtin_ia32_ktestzqi:
4352 case clang::X86::BI__builtin_ia32_ktestzhi:
4353 case clang::X86::BI__builtin_ia32_ktestzsi:
4354 case clang::X86::BI__builtin_ia32_ktestzdi:
4356 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4357 return APInt(sizeof(unsigned char) * 8, (A & B) == 0);
4358 });
4359
4360 case clang::X86::BI__builtin_ia32_kortestcqi:
4361 case clang::X86::BI__builtin_ia32_kortestchi:
4362 case clang::X86::BI__builtin_ia32_kortestcsi:
4363 case clang::X86::BI__builtin_ia32_kortestcdi:
4365 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4366 return APInt(sizeof(unsigned char) * 8, ~(A | B) == 0);
4367 });
4368
4369 case clang::X86::BI__builtin_ia32_kortestzqi:
4370 case clang::X86::BI__builtin_ia32_kortestzhi:
4371 case clang::X86::BI__builtin_ia32_kortestzsi:
4372 case clang::X86::BI__builtin_ia32_kortestzdi:
4374 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4375 return APInt(sizeof(unsigned char) * 8, (A | B) == 0);
4376 });
4377
4378 case clang::X86::BI__builtin_ia32_kshiftliqi:
4379 case clang::X86::BI__builtin_ia32_kshiftlihi:
4380 case clang::X86::BI__builtin_ia32_kshiftlisi:
4381 case clang::X86::BI__builtin_ia32_kshiftlidi:
4383 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4384 unsigned Amt = RHS.getZExtValue() & 0xFF;
4385 if (Amt >= LHS.getBitWidth())
4386 return APInt::getZero(LHS.getBitWidth());
4387 return LHS.shl(Amt);
4388 });
4389
4390 case clang::X86::BI__builtin_ia32_kshiftriqi:
4391 case clang::X86::BI__builtin_ia32_kshiftrihi:
4392 case clang::X86::BI__builtin_ia32_kshiftrisi:
4393 case clang::X86::BI__builtin_ia32_kshiftridi:
4395 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4396 unsigned Amt = RHS.getZExtValue() & 0xFF;
4397 if (Amt >= LHS.getBitWidth())
4398 return APInt::getZero(LHS.getBitWidth());
4399 return LHS.lshr(Amt);
4400 });
4401
4402 case clang::X86::BI__builtin_ia32_lzcnt_u16:
4403 case clang::X86::BI__builtin_ia32_lzcnt_u32:
4404 case clang::X86::BI__builtin_ia32_lzcnt_u64:
4406 S, OpPC, Call, [](const APSInt &Src) {
4407 return APInt(Src.getBitWidth(), Src.countLeadingZeros());
4408 });
4409
4410 case clang::X86::BI__builtin_ia32_tzcnt_u16:
4411 case clang::X86::BI__builtin_ia32_tzcnt_u32:
4412 case clang::X86::BI__builtin_ia32_tzcnt_u64:
4414 S, OpPC, Call, [](const APSInt &Src) {
4415 return APInt(Src.getBitWidth(), Src.countTrailingZeros());
4416 });
4417
4418 case clang::X86::BI__builtin_ia32_pdep_si:
4419 case clang::X86::BI__builtin_ia32_pdep_di:
4421 S, OpPC, Call, [](const APSInt &Val, const APSInt &Mask) {
4422 unsigned BitWidth = Val.getBitWidth();
4423 APInt Result = APInt::getZero(BitWidth);
4424
4425 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
4426 if (Mask[I])
4427 Result.setBitVal(I, Val[P++]);
4428 }
4429
4430 return Result;
4431 });
4432
4433 case clang::X86::BI__builtin_ia32_pext_si:
4434 case clang::X86::BI__builtin_ia32_pext_di:
4436 S, OpPC, Call, [](const APSInt &Val, const APSInt &Mask) {
4437 unsigned BitWidth = Val.getBitWidth();
4438 APInt Result = APInt::getZero(BitWidth);
4439
4440 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
4441 if (Mask[I])
4442 Result.setBitVal(P++, Val[I]);
4443 }
4444
4445 return Result;
4446 });
4447
4448 case clang::X86::BI__builtin_ia32_addcarryx_u32:
4449 case clang::X86::BI__builtin_ia32_addcarryx_u64:
4450 case clang::X86::BI__builtin_ia32_subborrow_u32:
4451 case clang::X86::BI__builtin_ia32_subborrow_u64:
4453 BuiltinID);
4454
4455 case Builtin::BI__builtin_os_log_format_buffer_size:
4457
4458 case Builtin::BI__builtin_ptrauth_string_discriminator:
4460
4461 case Builtin::BI__builtin_infer_alloc_token:
4463
4464 case Builtin::BI__noop:
4465 pushInteger(S, 0, Call->getType());
4466 return true;
4467
4468 case Builtin::BI__builtin_operator_new:
4469 return interp__builtin_operator_new(S, OpPC, Frame, Call);
4470
4471 case Builtin::BI__builtin_operator_delete:
4472 return interp__builtin_operator_delete(S, OpPC, Frame, Call);
4473
4474 case Builtin::BI__arithmetic_fence:
4476
4477 case Builtin::BI__builtin_reduce_add:
4478 case Builtin::BI__builtin_reduce_mul:
4479 case Builtin::BI__builtin_reduce_and:
4480 case Builtin::BI__builtin_reduce_or:
4481 case Builtin::BI__builtin_reduce_xor:
4482 case Builtin::BI__builtin_reduce_min:
4483 case Builtin::BI__builtin_reduce_max:
4484 return interp__builtin_vector_reduce(S, OpPC, Call, BuiltinID);
4485
4486 case Builtin::BI__builtin_elementwise_popcount:
4488 S, OpPC, Call, [](const APSInt &Src) {
4489 return APInt(Src.getBitWidth(), Src.popcount());
4490 });
4491 case Builtin::BI__builtin_elementwise_bitreverse:
4493 S, OpPC, Call, [](const APSInt &Src) { return Src.reverseBits(); });
4494
4495 case Builtin::BI__builtin_elementwise_abs:
4496 return interp__builtin_elementwise_abs(S, OpPC, Frame, Call, BuiltinID);
4497
4498 case Builtin::BI__builtin_memcpy:
4499 case Builtin::BImemcpy:
4500 case Builtin::BI__builtin_wmemcpy:
4501 case Builtin::BIwmemcpy:
4502 case Builtin::BI__builtin_memmove:
4503 case Builtin::BImemmove:
4504 case Builtin::BI__builtin_wmemmove:
4505 case Builtin::BIwmemmove:
4506 return interp__builtin_memcpy(S, OpPC, Frame, Call, BuiltinID);
4507
4508 case Builtin::BI__builtin_memcmp:
4509 case Builtin::BImemcmp:
4510 case Builtin::BI__builtin_bcmp:
4511 case Builtin::BIbcmp:
4512 case Builtin::BI__builtin_wmemcmp:
4513 case Builtin::BIwmemcmp:
4514 return interp__builtin_memcmp(S, OpPC, Frame, Call, BuiltinID);
4515
4516 case Builtin::BImemchr:
4517 case Builtin::BI__builtin_memchr:
4518 case Builtin::BIstrchr:
4519 case Builtin::BI__builtin_strchr:
4520 case Builtin::BIwmemchr:
4521 case Builtin::BI__builtin_wmemchr:
4522 case Builtin::BIwcschr:
4523 case Builtin::BI__builtin_wcschr:
4524 case Builtin::BI__builtin_char_memchr:
4525 return interp__builtin_memchr(S, OpPC, Call, BuiltinID);
4526
4527 case Builtin::BI__builtin_object_size:
4528 case Builtin::BI__builtin_dynamic_object_size:
4529 return interp__builtin_object_size(S, OpPC, Frame, Call);
4530
4531 case Builtin::BI__builtin_is_within_lifetime:
4533
4534 case Builtin::BI__builtin_elementwise_add_sat:
4536 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4537 return LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS);
4538 });
4539
4540 case Builtin::BI__builtin_elementwise_sub_sat:
4542 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4543 return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
4544 });
4545 case X86::BI__builtin_ia32_extract128i256:
4546 case X86::BI__builtin_ia32_vextractf128_pd256:
4547 case X86::BI__builtin_ia32_vextractf128_ps256:
4548 case X86::BI__builtin_ia32_vextractf128_si256:
4549 return interp__builtin_x86_extract_vector(S, OpPC, Call, BuiltinID);
4550
4551 case X86::BI__builtin_ia32_extractf32x4_256_mask:
4552 case X86::BI__builtin_ia32_extractf32x4_mask:
4553 case X86::BI__builtin_ia32_extractf32x8_mask:
4554 case X86::BI__builtin_ia32_extractf64x2_256_mask:
4555 case X86::BI__builtin_ia32_extractf64x2_512_mask:
4556 case X86::BI__builtin_ia32_extractf64x4_mask:
4557 case X86::BI__builtin_ia32_extracti32x4_256_mask:
4558 case X86::BI__builtin_ia32_extracti32x4_mask:
4559 case X86::BI__builtin_ia32_extracti32x8_mask:
4560 case X86::BI__builtin_ia32_extracti64x2_256_mask:
4561 case X86::BI__builtin_ia32_extracti64x2_512_mask:
4562 case X86::BI__builtin_ia32_extracti64x4_mask:
4563 return interp__builtin_x86_extract_vector_masked(S, OpPC, Call, BuiltinID);
4564
4565 case clang::X86::BI__builtin_ia32_pmulhrsw128:
4566 case clang::X86::BI__builtin_ia32_pmulhrsw256:
4567 case clang::X86::BI__builtin_ia32_pmulhrsw512:
4569 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4570 return (llvm::APIntOps::mulsExtended(LHS, RHS).ashr(14) + 1)
4571 .extractBits(16, 1);
4572 });
4573
4574 case clang::X86::BI__builtin_ia32_movmskps:
4575 case clang::X86::BI__builtin_ia32_movmskpd:
4576 case clang::X86::BI__builtin_ia32_pmovmskb128:
4577 case clang::X86::BI__builtin_ia32_pmovmskb256:
4578 case clang::X86::BI__builtin_ia32_movmskps256:
4579 case clang::X86::BI__builtin_ia32_movmskpd256: {
4580 return interp__builtin_ia32_movmsk_op(S, OpPC, Call);
4581 }
4582
4583 case X86::BI__builtin_ia32_psignb128:
4584 case X86::BI__builtin_ia32_psignb256:
4585 case X86::BI__builtin_ia32_psignw128:
4586 case X86::BI__builtin_ia32_psignw256:
4587 case X86::BI__builtin_ia32_psignd128:
4588 case X86::BI__builtin_ia32_psignd256:
4590 S, OpPC, Call, [](const APInt &AElem, const APInt &BElem) {
4591 if (BElem.isZero())
4592 return APInt::getZero(AElem.getBitWidth());
4593 if (BElem.isNegative())
4594 return -AElem;
4595 return AElem;
4596 });
4597
4598 case clang::X86::BI__builtin_ia32_pavgb128:
4599 case clang::X86::BI__builtin_ia32_pavgw128:
4600 case clang::X86::BI__builtin_ia32_pavgb256:
4601 case clang::X86::BI__builtin_ia32_pavgw256:
4602 case clang::X86::BI__builtin_ia32_pavgb512:
4603 case clang::X86::BI__builtin_ia32_pavgw512:
4605 llvm::APIntOps::avgCeilU);
4606
4607 case clang::X86::BI__builtin_ia32_pmaddubsw128:
4608 case clang::X86::BI__builtin_ia32_pmaddubsw256:
4609 case clang::X86::BI__builtin_ia32_pmaddubsw512:
4611 S, OpPC, Call,
4612 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4613 const APSInt &HiRHS) {
4614 unsigned BitWidth = 2 * LoLHS.getBitWidth();
4615 return (LoLHS.zext(BitWidth) * LoRHS.sext(BitWidth))
4616 .sadd_sat((HiLHS.zext(BitWidth) * HiRHS.sext(BitWidth)));
4617 });
4618
4619 case clang::X86::BI__builtin_ia32_pmaddwd128:
4620 case clang::X86::BI__builtin_ia32_pmaddwd256:
4621 case clang::X86::BI__builtin_ia32_pmaddwd512:
4623 S, OpPC, Call,
4624 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4625 const APSInt &HiRHS) {
4626 unsigned BitWidth = 2 * LoLHS.getBitWidth();
4627 return (LoLHS.sext(BitWidth) * LoRHS.sext(BitWidth)) +
4628 (HiLHS.sext(BitWidth) * HiRHS.sext(BitWidth));
4629 });
4630
4631 case clang::X86::BI__builtin_ia32_pmulhuw128:
4632 case clang::X86::BI__builtin_ia32_pmulhuw256:
4633 case clang::X86::BI__builtin_ia32_pmulhuw512:
4635 llvm::APIntOps::mulhu);
4636
4637 case clang::X86::BI__builtin_ia32_pmulhw128:
4638 case clang::X86::BI__builtin_ia32_pmulhw256:
4639 case clang::X86::BI__builtin_ia32_pmulhw512:
4641 llvm::APIntOps::mulhs);
4642
4643 case clang::X86::BI__builtin_ia32_psllv2di:
4644 case clang::X86::BI__builtin_ia32_psllv4di:
4645 case clang::X86::BI__builtin_ia32_psllv4si:
4646 case clang::X86::BI__builtin_ia32_psllv8di:
4647 case clang::X86::BI__builtin_ia32_psllv8hi:
4648 case clang::X86::BI__builtin_ia32_psllv8si:
4649 case clang::X86::BI__builtin_ia32_psllv16hi:
4650 case clang::X86::BI__builtin_ia32_psllv16si:
4651 case clang::X86::BI__builtin_ia32_psllv32hi:
4652 case clang::X86::BI__builtin_ia32_psllwi128:
4653 case clang::X86::BI__builtin_ia32_psllwi256:
4654 case clang::X86::BI__builtin_ia32_psllwi512:
4655 case clang::X86::BI__builtin_ia32_pslldi128:
4656 case clang::X86::BI__builtin_ia32_pslldi256:
4657 case clang::X86::BI__builtin_ia32_pslldi512:
4658 case clang::X86::BI__builtin_ia32_psllqi128:
4659 case clang::X86::BI__builtin_ia32_psllqi256:
4660 case clang::X86::BI__builtin_ia32_psllqi512:
4662 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4663 if (RHS.uge(LHS.getBitWidth())) {
4664 return APInt::getZero(LHS.getBitWidth());
4665 }
4666 return LHS.shl(RHS.getZExtValue());
4667 });
4668
4669 case clang::X86::BI__builtin_ia32_psrav4si:
4670 case clang::X86::BI__builtin_ia32_psrav8di:
4671 case clang::X86::BI__builtin_ia32_psrav8hi:
4672 case clang::X86::BI__builtin_ia32_psrav8si:
4673 case clang::X86::BI__builtin_ia32_psrav16hi:
4674 case clang::X86::BI__builtin_ia32_psrav16si:
4675 case clang::X86::BI__builtin_ia32_psrav32hi:
4676 case clang::X86::BI__builtin_ia32_psravq128:
4677 case clang::X86::BI__builtin_ia32_psravq256:
4678 case clang::X86::BI__builtin_ia32_psrawi128:
4679 case clang::X86::BI__builtin_ia32_psrawi256:
4680 case clang::X86::BI__builtin_ia32_psrawi512:
4681 case clang::X86::BI__builtin_ia32_psradi128:
4682 case clang::X86::BI__builtin_ia32_psradi256:
4683 case clang::X86::BI__builtin_ia32_psradi512:
4684 case clang::X86::BI__builtin_ia32_psraqi128:
4685 case clang::X86::BI__builtin_ia32_psraqi256:
4686 case clang::X86::BI__builtin_ia32_psraqi512:
4688 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4689 if (RHS.uge(LHS.getBitWidth())) {
4690 return LHS.ashr(LHS.getBitWidth() - 1);
4691 }
4692 return LHS.ashr(RHS.getZExtValue());
4693 });
4694
4695 case clang::X86::BI__builtin_ia32_psrlv2di:
4696 case clang::X86::BI__builtin_ia32_psrlv4di:
4697 case clang::X86::BI__builtin_ia32_psrlv4si:
4698 case clang::X86::BI__builtin_ia32_psrlv8di:
4699 case clang::X86::BI__builtin_ia32_psrlv8hi:
4700 case clang::X86::BI__builtin_ia32_psrlv8si:
4701 case clang::X86::BI__builtin_ia32_psrlv16hi:
4702 case clang::X86::BI__builtin_ia32_psrlv16si:
4703 case clang::X86::BI__builtin_ia32_psrlv32hi:
4704 case clang::X86::BI__builtin_ia32_psrlwi128:
4705 case clang::X86::BI__builtin_ia32_psrlwi256:
4706 case clang::X86::BI__builtin_ia32_psrlwi512:
4707 case clang::X86::BI__builtin_ia32_psrldi128:
4708 case clang::X86::BI__builtin_ia32_psrldi256:
4709 case clang::X86::BI__builtin_ia32_psrldi512:
4710 case clang::X86::BI__builtin_ia32_psrlqi128:
4711 case clang::X86::BI__builtin_ia32_psrlqi256:
4712 case clang::X86::BI__builtin_ia32_psrlqi512:
4714 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4715 if (RHS.uge(LHS.getBitWidth())) {
4716 return APInt::getZero(LHS.getBitWidth());
4717 }
4718 return LHS.lshr(RHS.getZExtValue());
4719 });
4720 case clang::X86::BI__builtin_ia32_packsswb128:
4721 case clang::X86::BI__builtin_ia32_packsswb256:
4722 case clang::X86::BI__builtin_ia32_packsswb512:
4723 case clang::X86::BI__builtin_ia32_packssdw128:
4724 case clang::X86::BI__builtin_ia32_packssdw256:
4725 case clang::X86::BI__builtin_ia32_packssdw512:
4726 return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) {
4727 return APInt(Src).truncSSat(Src.getBitWidth() / 2);
4728 });
4729 case clang::X86::BI__builtin_ia32_packusdw128:
4730 case clang::X86::BI__builtin_ia32_packusdw256:
4731 case clang::X86::BI__builtin_ia32_packusdw512:
4732 case clang::X86::BI__builtin_ia32_packuswb128:
4733 case clang::X86::BI__builtin_ia32_packuswb256:
4734 case clang::X86::BI__builtin_ia32_packuswb512:
4735 return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) {
4736 unsigned DstBits = Src.getBitWidth() / 2;
4737 if (Src.isNegative())
4738 return APInt::getZero(DstBits);
4739 if (Src.isIntN(DstBits))
4740 return APInt(Src).trunc(DstBits);
4741 return APInt::getAllOnes(DstBits);
4742 });
4743
4744 case clang::X86::BI__builtin_ia32_selectss_128:
4745 case clang::X86::BI__builtin_ia32_selectsd_128:
4746 case clang::X86::BI__builtin_ia32_selectsh_128:
4747 case clang::X86::BI__builtin_ia32_selectsbf_128:
4749 case clang::X86::BI__builtin_ia32_vprotbi:
4750 case clang::X86::BI__builtin_ia32_vprotdi:
4751 case clang::X86::BI__builtin_ia32_vprotqi:
4752 case clang::X86::BI__builtin_ia32_vprotwi:
4753 case clang::X86::BI__builtin_ia32_prold128:
4754 case clang::X86::BI__builtin_ia32_prold256:
4755 case clang::X86::BI__builtin_ia32_prold512:
4756 case clang::X86::BI__builtin_ia32_prolq128:
4757 case clang::X86::BI__builtin_ia32_prolq256:
4758 case clang::X86::BI__builtin_ia32_prolq512:
4760 S, OpPC, Call,
4761 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotl(RHS); });
4762
4763 case clang::X86::BI__builtin_ia32_prord128:
4764 case clang::X86::BI__builtin_ia32_prord256:
4765 case clang::X86::BI__builtin_ia32_prord512:
4766 case clang::X86::BI__builtin_ia32_prorq128:
4767 case clang::X86::BI__builtin_ia32_prorq256:
4768 case clang::X86::BI__builtin_ia32_prorq512:
4770 S, OpPC, Call,
4771 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotr(RHS); });
4772
4773 case Builtin::BI__builtin_elementwise_max:
4774 case Builtin::BI__builtin_elementwise_min:
4775 return interp__builtin_elementwise_maxmin(S, OpPC, Call, BuiltinID);
4776
4777 case clang::X86::BI__builtin_ia32_phaddw128:
4778 case clang::X86::BI__builtin_ia32_phaddw256:
4779 case clang::X86::BI__builtin_ia32_phaddd128:
4780 case clang::X86::BI__builtin_ia32_phaddd256:
4782 S, OpPC, Call,
4783 [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
4784 case clang::X86::BI__builtin_ia32_phaddsw128:
4785 case clang::X86::BI__builtin_ia32_phaddsw256:
4787 S, OpPC, Call,
4788 [](const APSInt &LHS, const APSInt &RHS) { return LHS.sadd_sat(RHS); });
4789 case clang::X86::BI__builtin_ia32_phsubw128:
4790 case clang::X86::BI__builtin_ia32_phsubw256:
4791 case clang::X86::BI__builtin_ia32_phsubd128:
4792 case clang::X86::BI__builtin_ia32_phsubd256:
4794 S, OpPC, Call,
4795 [](const APSInt &LHS, const APSInt &RHS) { return LHS - RHS; });
4796 case clang::X86::BI__builtin_ia32_phsubsw128:
4797 case clang::X86::BI__builtin_ia32_phsubsw256:
4799 S, OpPC, Call,
4800 [](const APSInt &LHS, const APSInt &RHS) { return LHS.ssub_sat(RHS); });
4801 case clang::X86::BI__builtin_ia32_haddpd:
4802 case clang::X86::BI__builtin_ia32_haddps:
4803 case clang::X86::BI__builtin_ia32_haddpd256:
4804 case clang::X86::BI__builtin_ia32_haddps256:
4806 S, OpPC, Call,
4807 [](const APFloat &LHS, const APFloat &RHS, llvm::RoundingMode RM) {
4808 APFloat F = LHS;
4809 F.add(RHS, RM);
4810 return F;
4811 });
4812 case clang::X86::BI__builtin_ia32_hsubpd:
4813 case clang::X86::BI__builtin_ia32_hsubps:
4814 case clang::X86::BI__builtin_ia32_hsubpd256:
4815 case clang::X86::BI__builtin_ia32_hsubps256:
4817 S, OpPC, Call,
4818 [](const APFloat &LHS, const APFloat &RHS, llvm::RoundingMode RM) {
4819 APFloat F = LHS;
4820 F.subtract(RHS, RM);
4821 return F;
4822 });
4823 case clang::X86::BI__builtin_ia32_addsubpd:
4824 case clang::X86::BI__builtin_ia32_addsubps:
4825 case clang::X86::BI__builtin_ia32_addsubpd256:
4826 case clang::X86::BI__builtin_ia32_addsubps256:
4827 return interp__builtin_ia32_addsub(S, OpPC, Call);
4828
4829 case clang::X86::BI__builtin_ia32_pmuldq128:
4830 case clang::X86::BI__builtin_ia32_pmuldq256:
4831 case clang::X86::BI__builtin_ia32_pmuldq512:
4833 S, OpPC, Call,
4834 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4835 const APSInt &HiRHS) {
4836 return llvm::APIntOps::mulsExtended(LoLHS, LoRHS);
4837 });
4838
4839 case clang::X86::BI__builtin_ia32_pmuludq128:
4840 case clang::X86::BI__builtin_ia32_pmuludq256:
4841 case clang::X86::BI__builtin_ia32_pmuludq512:
4843 S, OpPC, Call,
4844 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4845 const APSInt &HiRHS) {
4846 return llvm::APIntOps::muluExtended(LoLHS, LoRHS);
4847 });
4848
4849 case clang::X86::BI__builtin_ia32_pclmulqdq128:
4850 case clang::X86::BI__builtin_ia32_pclmulqdq256:
4851 case clang::X86::BI__builtin_ia32_pclmulqdq512:
4852 return interp__builtin_ia32_pclmulqdq(S, OpPC, Call);
4853
4854 case Builtin::BI__builtin_elementwise_fma:
4856 S, OpPC, Call,
4857 [](const APFloat &X, const APFloat &Y, const APFloat &Z,
4858 llvm::RoundingMode RM) {
4859 APFloat F = X;
4860 F.fusedMultiplyAdd(Y, Z, RM);
4861 return F;
4862 });
4863
4864 case X86::BI__builtin_ia32_vpmadd52luq128:
4865 case X86::BI__builtin_ia32_vpmadd52luq256:
4866 case X86::BI__builtin_ia32_vpmadd52luq512:
4868 S, OpPC, Call, [](const APSInt &A, const APSInt &B, const APSInt &C) {
4869 return A + (B.trunc(52) * C.trunc(52)).zext(64);
4870 });
4871 case X86::BI__builtin_ia32_vpmadd52huq128:
4872 case X86::BI__builtin_ia32_vpmadd52huq256:
4873 case X86::BI__builtin_ia32_vpmadd52huq512:
4875 S, OpPC, Call, [](const APSInt &A, const APSInt &B, const APSInt &C) {
4876 return A + llvm::APIntOps::mulhu(B.trunc(52), C.trunc(52)).zext(64);
4877 });
4878
4879 case X86::BI__builtin_ia32_vpshldd128:
4880 case X86::BI__builtin_ia32_vpshldd256:
4881 case X86::BI__builtin_ia32_vpshldd512:
4882 case X86::BI__builtin_ia32_vpshldq128:
4883 case X86::BI__builtin_ia32_vpshldq256:
4884 case X86::BI__builtin_ia32_vpshldq512:
4885 case X86::BI__builtin_ia32_vpshldw128:
4886 case X86::BI__builtin_ia32_vpshldw256:
4887 case X86::BI__builtin_ia32_vpshldw512:
4889 S, OpPC, Call,
4890 [](const APSInt &Hi, const APSInt &Lo, const APSInt &Amt) {
4891 return llvm::APIntOps::fshl(Hi, Lo, Amt);
4892 });
4893
4894 case X86::BI__builtin_ia32_vpshrdd128:
4895 case X86::BI__builtin_ia32_vpshrdd256:
4896 case X86::BI__builtin_ia32_vpshrdd512:
4897 case X86::BI__builtin_ia32_vpshrdq128:
4898 case X86::BI__builtin_ia32_vpshrdq256:
4899 case X86::BI__builtin_ia32_vpshrdq512:
4900 case X86::BI__builtin_ia32_vpshrdw128:
4901 case X86::BI__builtin_ia32_vpshrdw256:
4902 case X86::BI__builtin_ia32_vpshrdw512:
4903 // NOTE: Reversed Hi/Lo operands.
4905 S, OpPC, Call,
4906 [](const APSInt &Lo, const APSInt &Hi, const APSInt &Amt) {
4907 return llvm::APIntOps::fshr(Hi, Lo, Amt);
4908 });
4909 case X86::BI__builtin_ia32_vpconflictsi_128:
4910 case X86::BI__builtin_ia32_vpconflictsi_256:
4911 case X86::BI__builtin_ia32_vpconflictsi_512:
4912 case X86::BI__builtin_ia32_vpconflictdi_128:
4913 case X86::BI__builtin_ia32_vpconflictdi_256:
4914 case X86::BI__builtin_ia32_vpconflictdi_512:
4915 return interp__builtin_ia32_vpconflict(S, OpPC, Call);
4916 case clang::X86::BI__builtin_ia32_blendpd:
4917 case clang::X86::BI__builtin_ia32_blendpd256:
4918 case clang::X86::BI__builtin_ia32_blendps:
4919 case clang::X86::BI__builtin_ia32_blendps256:
4920 case clang::X86::BI__builtin_ia32_pblendw128:
4921 case clang::X86::BI__builtin_ia32_pblendw256:
4922 case clang::X86::BI__builtin_ia32_pblendd128:
4923 case clang::X86::BI__builtin_ia32_pblendd256:
4925 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4926 // Bit index for mask.
4927 unsigned MaskBit = (ShuffleMask >> (DstIdx % 8)) & 0x1;
4928 unsigned SrcVecIdx = MaskBit ? 1 : 0; // 1 = TrueVec, 0 = FalseVec
4929 return std::pair<unsigned, int>{SrcVecIdx, static_cast<int>(DstIdx)};
4930 });
4931
4932
4933
4934 case clang::X86::BI__builtin_ia32_blendvpd:
4935 case clang::X86::BI__builtin_ia32_blendvpd256:
4936 case clang::X86::BI__builtin_ia32_blendvps:
4937 case clang::X86::BI__builtin_ia32_blendvps256:
4939 S, OpPC, Call,
4940 [](const APFloat &F, const APFloat &T, const APFloat &C,
4941 llvm::RoundingMode) { return C.isNegative() ? T : F; });
4942
4943 case clang::X86::BI__builtin_ia32_pblendvb128:
4944 case clang::X86::BI__builtin_ia32_pblendvb256:
4946 S, OpPC, Call, [](const APSInt &F, const APSInt &T, const APSInt &C) {
4947 return ((APInt)C).isNegative() ? T : F;
4948 });
4949 case X86::BI__builtin_ia32_ptestz128:
4950 case X86::BI__builtin_ia32_ptestz256:
4951 case X86::BI__builtin_ia32_vtestzps:
4952 case X86::BI__builtin_ia32_vtestzps256:
4953 case X86::BI__builtin_ia32_vtestzpd:
4954 case X86::BI__builtin_ia32_vtestzpd256:
4956 S, OpPC, Call,
4957 [](const APInt &A, const APInt &B) { return (A & B) == 0; });
4958 case X86::BI__builtin_ia32_ptestc128:
4959 case X86::BI__builtin_ia32_ptestc256:
4960 case X86::BI__builtin_ia32_vtestcps:
4961 case X86::BI__builtin_ia32_vtestcps256:
4962 case X86::BI__builtin_ia32_vtestcpd:
4963 case X86::BI__builtin_ia32_vtestcpd256:
4965 S, OpPC, Call,
4966 [](const APInt &A, const APInt &B) { return (~A & B) == 0; });
4967 case X86::BI__builtin_ia32_ptestnzc128:
4968 case X86::BI__builtin_ia32_ptestnzc256:
4969 case X86::BI__builtin_ia32_vtestnzcps:
4970 case X86::BI__builtin_ia32_vtestnzcps256:
4971 case X86::BI__builtin_ia32_vtestnzcpd:
4972 case X86::BI__builtin_ia32_vtestnzcpd256:
4974 S, OpPC, Call, [](const APInt &A, const APInt &B) {
4975 return ((A & B) != 0) && ((~A & B) != 0);
4976 });
4977 case X86::BI__builtin_ia32_selectb_128:
4978 case X86::BI__builtin_ia32_selectb_256:
4979 case X86::BI__builtin_ia32_selectb_512:
4980 case X86::BI__builtin_ia32_selectw_128:
4981 case X86::BI__builtin_ia32_selectw_256:
4982 case X86::BI__builtin_ia32_selectw_512:
4983 case X86::BI__builtin_ia32_selectd_128:
4984 case X86::BI__builtin_ia32_selectd_256:
4985 case X86::BI__builtin_ia32_selectd_512:
4986 case X86::BI__builtin_ia32_selectq_128:
4987 case X86::BI__builtin_ia32_selectq_256:
4988 case X86::BI__builtin_ia32_selectq_512:
4989 case X86::BI__builtin_ia32_selectph_128:
4990 case X86::BI__builtin_ia32_selectph_256:
4991 case X86::BI__builtin_ia32_selectph_512:
4992 case X86::BI__builtin_ia32_selectpbf_128:
4993 case X86::BI__builtin_ia32_selectpbf_256:
4994 case X86::BI__builtin_ia32_selectpbf_512:
4995 case X86::BI__builtin_ia32_selectps_128:
4996 case X86::BI__builtin_ia32_selectps_256:
4997 case X86::BI__builtin_ia32_selectps_512:
4998 case X86::BI__builtin_ia32_selectpd_128:
4999 case X86::BI__builtin_ia32_selectpd_256:
5000 case X86::BI__builtin_ia32_selectpd_512:
5001 return interp__builtin_select(S, OpPC, Call);
5002
5003 case X86::BI__builtin_ia32_shufps:
5004 case X86::BI__builtin_ia32_shufps256:
5005 case X86::BI__builtin_ia32_shufps512:
5007 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5008 unsigned NumElemPerLane = 4;
5009 unsigned NumSelectableElems = NumElemPerLane / 2;
5010 unsigned BitsPerElem = 2;
5011 unsigned IndexMask = 0x3;
5012 unsigned MaskBits = 8;
5013 unsigned Lane = DstIdx / NumElemPerLane;
5014 unsigned ElemInLane = DstIdx % NumElemPerLane;
5015 unsigned LaneOffset = Lane * NumElemPerLane;
5016 unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
5017 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
5018 unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
5019 return std::pair<unsigned, int>{SrcIdx,
5020 static_cast<int>(LaneOffset + Index)};
5021 });
5022 case X86::BI__builtin_ia32_shufpd:
5023 case X86::BI__builtin_ia32_shufpd256:
5024 case X86::BI__builtin_ia32_shufpd512:
5026 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5027 unsigned NumElemPerLane = 2;
5028 unsigned NumSelectableElems = NumElemPerLane / 2;
5029 unsigned BitsPerElem = 1;
5030 unsigned IndexMask = 0x1;
5031 unsigned MaskBits = 8;
5032 unsigned Lane = DstIdx / NumElemPerLane;
5033 unsigned ElemInLane = DstIdx % NumElemPerLane;
5034 unsigned LaneOffset = Lane * NumElemPerLane;
5035 unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
5036 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
5037 unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
5038 return std::pair<unsigned, int>{SrcIdx,
5039 static_cast<int>(LaneOffset + Index)};
5040 });
5041
5042 case X86::BI__builtin_ia32_vgf2p8affineinvqb_v16qi:
5043 case X86::BI__builtin_ia32_vgf2p8affineinvqb_v32qi:
5044 case X86::BI__builtin_ia32_vgf2p8affineinvqb_v64qi:
5045 return interp_builtin_ia32_gfni_affine(S, OpPC, Call, true);
5046 case X86::BI__builtin_ia32_vgf2p8affineqb_v16qi:
5047 case X86::BI__builtin_ia32_vgf2p8affineqb_v32qi:
5048 case X86::BI__builtin_ia32_vgf2p8affineqb_v64qi:
5049 return interp_builtin_ia32_gfni_affine(S, OpPC, Call, false);
5050
5051 case X86::BI__builtin_ia32_vgf2p8mulb_v16qi:
5052 case X86::BI__builtin_ia32_vgf2p8mulb_v32qi:
5053 case X86::BI__builtin_ia32_vgf2p8mulb_v64qi:
5054 return interp__builtin_ia32_gfni_mul(S, OpPC, Call);
5055
5056 case X86::BI__builtin_ia32_insertps128:
5058 S, OpPC, Call, [](unsigned DstIdx, unsigned Mask) {
5059 // Bits [3:0]: zero mask - if bit is set, zero this element
5060 if ((Mask & (1 << DstIdx)) != 0) {
5061 return std::pair<unsigned, int>{0, -1};
5062 }
5063 // Bits [7:6]: select element from source vector Y (0-3)
5064 // Bits [5:4]: select destination position (0-3)
5065 unsigned SrcElem = (Mask >> 6) & 0x3;
5066 unsigned DstElem = (Mask >> 4) & 0x3;
5067 if (DstIdx == DstElem) {
5068 // Insert element from source vector (B) at this position
5069 return std::pair<unsigned, int>{1, static_cast<int>(SrcElem)};
5070 } else {
5071 // Copy from destination vector (A)
5072 return std::pair<unsigned, int>{0, static_cast<int>(DstIdx)};
5073 }
5074 });
5075 case X86::BI__builtin_ia32_permvarsi256:
5076 case X86::BI__builtin_ia32_permvarsf256:
5077 case X86::BI__builtin_ia32_permvardf512:
5078 case X86::BI__builtin_ia32_permvardi512:
5079 case X86::BI__builtin_ia32_permvarhi128:
5081 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5082 int Offset = ShuffleMask & 0x7;
5083 return std::pair<unsigned, int>{0, Offset};
5084 });
5085 case X86::BI__builtin_ia32_permvarqi128:
5086 case X86::BI__builtin_ia32_permvarhi256:
5087 case X86::BI__builtin_ia32_permvarsi512:
5088 case X86::BI__builtin_ia32_permvarsf512:
5090 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5091 int Offset = ShuffleMask & 0xF;
5092 return std::pair<unsigned, int>{0, Offset};
5093 });
5094 case X86::BI__builtin_ia32_permvardi256:
5095 case X86::BI__builtin_ia32_permvardf256:
5097 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5098 int Offset = ShuffleMask & 0x3;
5099 return std::pair<unsigned, int>{0, Offset};
5100 });
5101 case X86::BI__builtin_ia32_permvarqi256:
5102 case X86::BI__builtin_ia32_permvarhi512:
5104 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5105 int Offset = ShuffleMask & 0x1F;
5106 return std::pair<unsigned, int>{0, Offset};
5107 });
5108 case X86::BI__builtin_ia32_permvarqi512:
5110 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5111 int Offset = ShuffleMask & 0x3F;
5112 return std::pair<unsigned, int>{0, Offset};
5113 });
5114 case X86::BI__builtin_ia32_vpermi2varq128:
5115 case X86::BI__builtin_ia32_vpermi2varpd128:
5117 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5118 int Offset = ShuffleMask & 0x1;
5119 unsigned SrcIdx = (ShuffleMask >> 1) & 0x1;
5120 return std::pair<unsigned, int>{SrcIdx, Offset};
5121 });
5122 case X86::BI__builtin_ia32_vpermi2vard128:
5123 case X86::BI__builtin_ia32_vpermi2varps128:
5124 case X86::BI__builtin_ia32_vpermi2varq256:
5125 case X86::BI__builtin_ia32_vpermi2varpd256:
5127 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5128 int Offset = ShuffleMask & 0x3;
5129 unsigned SrcIdx = (ShuffleMask >> 2) & 0x1;
5130 return std::pair<unsigned, int>{SrcIdx, Offset};
5131 });
5132 case X86::BI__builtin_ia32_vpermi2varhi128:
5133 case X86::BI__builtin_ia32_vpermi2vard256:
5134 case X86::BI__builtin_ia32_vpermi2varps256:
5135 case X86::BI__builtin_ia32_vpermi2varq512:
5136 case X86::BI__builtin_ia32_vpermi2varpd512:
5138 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5139 int Offset = ShuffleMask & 0x7;
5140 unsigned SrcIdx = (ShuffleMask >> 3) & 0x1;
5141 return std::pair<unsigned, int>{SrcIdx, Offset};
5142 });
5143 case X86::BI__builtin_ia32_vpermi2varqi128:
5144 case X86::BI__builtin_ia32_vpermi2varhi256:
5145 case X86::BI__builtin_ia32_vpermi2vard512:
5146 case X86::BI__builtin_ia32_vpermi2varps512:
5148 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5149 int Offset = ShuffleMask & 0xF;
5150 unsigned SrcIdx = (ShuffleMask >> 4) & 0x1;
5151 return std::pair<unsigned, int>{SrcIdx, Offset};
5152 });
5153 case X86::BI__builtin_ia32_vpermi2varqi256:
5154 case X86::BI__builtin_ia32_vpermi2varhi512:
5156 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5157 int Offset = ShuffleMask & 0x1F;
5158 unsigned SrcIdx = (ShuffleMask >> 5) & 0x1;
5159 return std::pair<unsigned, int>{SrcIdx, Offset};
5160 });
5161 case X86::BI__builtin_ia32_vpermi2varqi512:
5163 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5164 int Offset = ShuffleMask & 0x3F;
5165 unsigned SrcIdx = (ShuffleMask >> 6) & 0x1;
5166 return std::pair<unsigned, int>{SrcIdx, Offset};
5167 });
5168 case X86::BI__builtin_ia32_pshufb128:
5169 case X86::BI__builtin_ia32_pshufb256:
5170 case X86::BI__builtin_ia32_pshufb512:
5172 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5173 uint8_t Ctlb = static_cast<uint8_t>(ShuffleMask);
5174 if (Ctlb & 0x80)
5175 return std::make_pair(0, -1);
5176
5177 unsigned LaneBase = (DstIdx / 16) * 16;
5178 unsigned SrcOffset = Ctlb & 0x0F;
5179 unsigned SrcIdx = LaneBase + SrcOffset;
5180 return std::make_pair(0, static_cast<int>(SrcIdx));
5181 });
5182
5183 case X86::BI__builtin_ia32_pshuflw:
5184 case X86::BI__builtin_ia32_pshuflw256:
5185 case X86::BI__builtin_ia32_pshuflw512:
5187 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5188 unsigned LaneBase = (DstIdx / 8) * 8;
5189 unsigned LaneIdx = DstIdx % 8;
5190 if (LaneIdx < 4) {
5191 unsigned Sel = (ShuffleMask >> (2 * LaneIdx)) & 0x3;
5192 return std::make_pair(0, static_cast<int>(LaneBase + Sel));
5193 }
5194
5195 return std::make_pair(0, static_cast<int>(DstIdx));
5196 });
5197
5198 case X86::BI__builtin_ia32_pshufhw:
5199 case X86::BI__builtin_ia32_pshufhw256:
5200 case X86::BI__builtin_ia32_pshufhw512:
5202 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5203 unsigned LaneBase = (DstIdx / 8) * 8;
5204 unsigned LaneIdx = DstIdx % 8;
5205 if (LaneIdx >= 4) {
5206 unsigned Sel = (ShuffleMask >> (2 * (LaneIdx - 4))) & 0x3;
5207 return std::make_pair(0, static_cast<int>(LaneBase + 4 + Sel));
5208 }
5209
5210 return std::make_pair(0, static_cast<int>(DstIdx));
5211 });
5212
5213 case X86::BI__builtin_ia32_pshufd:
5214 case X86::BI__builtin_ia32_pshufd256:
5215 case X86::BI__builtin_ia32_pshufd512:
5216 case X86::BI__builtin_ia32_vpermilps:
5217 case X86::BI__builtin_ia32_vpermilps256:
5218 case X86::BI__builtin_ia32_vpermilps512:
5220 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5221 unsigned LaneBase = (DstIdx / 4) * 4;
5222 unsigned LaneIdx = DstIdx % 4;
5223 unsigned Sel = (ShuffleMask >> (2 * LaneIdx)) & 0x3;
5224 return std::make_pair(0, static_cast<int>(LaneBase + Sel));
5225 });
5226
5227 case X86::BI__builtin_ia32_vpermilvarpd:
5228 case X86::BI__builtin_ia32_vpermilvarpd256:
5229 case X86::BI__builtin_ia32_vpermilvarpd512:
5231 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5232 unsigned NumElemPerLane = 2;
5233 unsigned Lane = DstIdx / NumElemPerLane;
5234 unsigned Offset = ShuffleMask & 0b10 ? 1 : 0;
5235 return std::make_pair(
5236 0, static_cast<int>(Lane * NumElemPerLane + Offset));
5237 });
5238
5239 case X86::BI__builtin_ia32_vpermilvarps:
5240 case X86::BI__builtin_ia32_vpermilvarps256:
5241 case X86::BI__builtin_ia32_vpermilvarps512:
5243 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5244 unsigned NumElemPerLane = 4;
5245 unsigned Lane = DstIdx / NumElemPerLane;
5246 unsigned Offset = ShuffleMask & 0b11;
5247 return std::make_pair(
5248 0, static_cast<int>(Lane * NumElemPerLane + Offset));
5249 });
5250
5251 case X86::BI__builtin_ia32_vpermilpd:
5252 case X86::BI__builtin_ia32_vpermilpd256:
5253 case X86::BI__builtin_ia32_vpermilpd512:
5255 S, OpPC, Call, [](unsigned DstIdx, unsigned Control) {
5256 unsigned NumElemPerLane = 2;
5257 unsigned BitsPerElem = 1;
5258 unsigned MaskBits = 8;
5259 unsigned IndexMask = 0x1;
5260 unsigned Lane = DstIdx / NumElemPerLane;
5261 unsigned LaneOffset = Lane * NumElemPerLane;
5262 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
5263 unsigned Index = (Control >> BitIndex) & IndexMask;
5264 return std::make_pair(0, static_cast<int>(LaneOffset + Index));
5265 });
5266
5267 case X86::BI__builtin_ia32_permdf256:
5268 case X86::BI__builtin_ia32_permdi256:
5270 S, OpPC, Call, [](unsigned DstIdx, unsigned Control) {
5271 // permute4x64 operates on 4 64-bit elements
5272 // For element i (0-3), extract bits [2*i+1:2*i] from Control
5273 unsigned Index = (Control >> (2 * DstIdx)) & 0x3;
5274 return std::make_pair(0, static_cast<int>(Index));
5275 });
5276
5277 case X86::BI__builtin_ia32_vpmultishiftqb128:
5278 case X86::BI__builtin_ia32_vpmultishiftqb256:
5279 case X86::BI__builtin_ia32_vpmultishiftqb512:
5280 return interp__builtin_ia32_multishiftqb(S, OpPC, Call);
5281 case X86::BI__builtin_ia32_kandqi:
5282 case X86::BI__builtin_ia32_kandhi:
5283 case X86::BI__builtin_ia32_kandsi:
5284 case X86::BI__builtin_ia32_kanddi:
5286 S, OpPC, Call,
5287 [](const APSInt &LHS, const APSInt &RHS) { return LHS & RHS; });
5288
5289 case X86::BI__builtin_ia32_kandnqi:
5290 case X86::BI__builtin_ia32_kandnhi:
5291 case X86::BI__builtin_ia32_kandnsi:
5292 case X86::BI__builtin_ia32_kandndi:
5294 S, OpPC, Call,
5295 [](const APSInt &LHS, const APSInt &RHS) { return ~LHS & RHS; });
5296
5297 case X86::BI__builtin_ia32_korqi:
5298 case X86::BI__builtin_ia32_korhi:
5299 case X86::BI__builtin_ia32_korsi:
5300 case X86::BI__builtin_ia32_kordi:
5302 S, OpPC, Call,
5303 [](const APSInt &LHS, const APSInt &RHS) { return LHS | RHS; });
5304
5305 case X86::BI__builtin_ia32_kxnorqi:
5306 case X86::BI__builtin_ia32_kxnorhi:
5307 case X86::BI__builtin_ia32_kxnorsi:
5308 case X86::BI__builtin_ia32_kxnordi:
5310 S, OpPC, Call,
5311 [](const APSInt &LHS, const APSInt &RHS) { return ~(LHS ^ RHS); });
5312
5313 case X86::BI__builtin_ia32_kxorqi:
5314 case X86::BI__builtin_ia32_kxorhi:
5315 case X86::BI__builtin_ia32_kxorsi:
5316 case X86::BI__builtin_ia32_kxordi:
5318 S, OpPC, Call,
5319 [](const APSInt &LHS, const APSInt &RHS) { return LHS ^ RHS; });
5320
5321 case X86::BI__builtin_ia32_knotqi:
5322 case X86::BI__builtin_ia32_knothi:
5323 case X86::BI__builtin_ia32_knotsi:
5324 case X86::BI__builtin_ia32_knotdi:
5326 S, OpPC, Call, [](const APSInt &Src) { return ~Src; });
5327
5328 case X86::BI__builtin_ia32_kaddqi:
5329 case X86::BI__builtin_ia32_kaddhi:
5330 case X86::BI__builtin_ia32_kaddsi:
5331 case X86::BI__builtin_ia32_kadddi:
5333 S, OpPC, Call,
5334 [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
5335
5336 case X86::BI__builtin_ia32_kmovb:
5337 case X86::BI__builtin_ia32_kmovw:
5338 case X86::BI__builtin_ia32_kmovd:
5339 case X86::BI__builtin_ia32_kmovq:
5341 S, OpPC, Call, [](const APSInt &Src) { return Src; });
5342
5343 case X86::BI__builtin_ia32_kunpckhi:
5344 case X86::BI__builtin_ia32_kunpckdi:
5345 case X86::BI__builtin_ia32_kunpcksi:
5347 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
5348 // Generic kunpack: extract lower half of each operand and concatenate
5349 // Result = A[HalfWidth-1:0] concat B[HalfWidth-1:0]
5350 unsigned BW = A.getBitWidth();
5351 return APSInt(A.trunc(BW / 2).concat(B.trunc(BW / 2)),
5352 A.isUnsigned());
5353 });
5354
5355 case X86::BI__builtin_ia32_phminposuw128:
5356 return interp__builtin_ia32_phminposuw(S, OpPC, Call);
5357
5358 case X86::BI__builtin_ia32_psraq128:
5359 case X86::BI__builtin_ia32_psraq256:
5360 case X86::BI__builtin_ia32_psraq512:
5361 case X86::BI__builtin_ia32_psrad128:
5362 case X86::BI__builtin_ia32_psrad256:
5363 case X86::BI__builtin_ia32_psrad512:
5364 case X86::BI__builtin_ia32_psraw128:
5365 case X86::BI__builtin_ia32_psraw256:
5366 case X86::BI__builtin_ia32_psraw512:
5368 S, OpPC, Call,
5369 [](const APInt &Elt, uint64_t Count) { return Elt.ashr(Count); },
5370 [](const APInt &Elt, unsigned Width) { return Elt.ashr(Width - 1); });
5371
5372 case X86::BI__builtin_ia32_psllq128:
5373 case X86::BI__builtin_ia32_psllq256:
5374 case X86::BI__builtin_ia32_psllq512:
5375 case X86::BI__builtin_ia32_pslld128:
5376 case X86::BI__builtin_ia32_pslld256:
5377 case X86::BI__builtin_ia32_pslld512:
5378 case X86::BI__builtin_ia32_psllw128:
5379 case X86::BI__builtin_ia32_psllw256:
5380 case X86::BI__builtin_ia32_psllw512:
5382 S, OpPC, Call,
5383 [](const APInt &Elt, uint64_t Count) { return Elt.shl(Count); },
5384 [](const APInt &Elt, unsigned Width) { return APInt::getZero(Width); });
5385
5386 case X86::BI__builtin_ia32_psrlq128:
5387 case X86::BI__builtin_ia32_psrlq256:
5388 case X86::BI__builtin_ia32_psrlq512:
5389 case X86::BI__builtin_ia32_psrld128:
5390 case X86::BI__builtin_ia32_psrld256:
5391 case X86::BI__builtin_ia32_psrld512:
5392 case X86::BI__builtin_ia32_psrlw128:
5393 case X86::BI__builtin_ia32_psrlw256:
5394 case X86::BI__builtin_ia32_psrlw512:
5396 S, OpPC, Call,
5397 [](const APInt &Elt, uint64_t Count) { return Elt.lshr(Count); },
5398 [](const APInt &Elt, unsigned Width) { return APInt::getZero(Width); });
5399
5400 case X86::BI__builtin_ia32_pternlogd128_mask:
5401 case X86::BI__builtin_ia32_pternlogd256_mask:
5402 case X86::BI__builtin_ia32_pternlogd512_mask:
5403 case X86::BI__builtin_ia32_pternlogq128_mask:
5404 case X86::BI__builtin_ia32_pternlogq256_mask:
5405 case X86::BI__builtin_ia32_pternlogq512_mask:
5406 return interp__builtin_ia32_pternlog(S, OpPC, Call, /*MaskZ=*/false);
5407 case X86::BI__builtin_ia32_pternlogd128_maskz:
5408 case X86::BI__builtin_ia32_pternlogd256_maskz:
5409 case X86::BI__builtin_ia32_pternlogd512_maskz:
5410 case X86::BI__builtin_ia32_pternlogq128_maskz:
5411 case X86::BI__builtin_ia32_pternlogq256_maskz:
5412 case X86::BI__builtin_ia32_pternlogq512_maskz:
5413 return interp__builtin_ia32_pternlog(S, OpPC, Call, /*MaskZ=*/true);
5414 case Builtin::BI__builtin_elementwise_fshl:
5416 llvm::APIntOps::fshl);
5417 case Builtin::BI__builtin_elementwise_fshr:
5419 llvm::APIntOps::fshr);
5420
5421 case X86::BI__builtin_ia32_shuf_f32x4_256:
5422 case X86::BI__builtin_ia32_shuf_i32x4_256:
5423 case X86::BI__builtin_ia32_shuf_f64x2_256:
5424 case X86::BI__builtin_ia32_shuf_i64x2_256:
5425 case X86::BI__builtin_ia32_shuf_f32x4:
5426 case X86::BI__builtin_ia32_shuf_i32x4:
5427 case X86::BI__builtin_ia32_shuf_f64x2:
5428 case X86::BI__builtin_ia32_shuf_i64x2: {
5429 // Destination and sources A, B all have the same type.
5430 QualType VecQT = Call->getArg(0)->getType();
5431 const auto *VecT = VecQT->castAs<VectorType>();
5432 unsigned NumElems = VecT->getNumElements();
5433 unsigned ElemBits = S.getASTContext().getTypeSize(VecT->getElementType());
5434 unsigned LaneBits = 128u;
5435 unsigned NumLanes = (NumElems * ElemBits) / LaneBits;
5436 unsigned NumElemsPerLane = LaneBits / ElemBits;
5437
5439 S, OpPC, Call,
5440 [NumLanes, NumElemsPerLane](unsigned DstIdx, unsigned ShuffleMask) {
5441 // DstIdx determines source. ShuffleMask selects lane in source.
5442 unsigned BitsPerElem = NumLanes / 2;
5443 unsigned IndexMask = (1u << BitsPerElem) - 1;
5444 unsigned Lane = DstIdx / NumElemsPerLane;
5445 unsigned SrcIdx = (Lane < NumLanes / 2) ? 0 : 1;
5446 unsigned BitIdx = BitsPerElem * Lane;
5447 unsigned SrcLaneIdx = (ShuffleMask >> BitIdx) & IndexMask;
5448 unsigned ElemInLane = DstIdx % NumElemsPerLane;
5449 unsigned IdxToPick = SrcLaneIdx * NumElemsPerLane + ElemInLane;
5450 return std::pair<unsigned, int>{SrcIdx, IdxToPick};
5451 });
5452 }
5453
5454 case X86::BI__builtin_ia32_insertf32x4_256:
5455 case X86::BI__builtin_ia32_inserti32x4_256:
5456 case X86::BI__builtin_ia32_insertf64x2_256:
5457 case X86::BI__builtin_ia32_inserti64x2_256:
5458 case X86::BI__builtin_ia32_insertf32x4:
5459 case X86::BI__builtin_ia32_inserti32x4:
5460 case X86::BI__builtin_ia32_insertf64x2_512:
5461 case X86::BI__builtin_ia32_inserti64x2_512:
5462 case X86::BI__builtin_ia32_insertf32x8:
5463 case X86::BI__builtin_ia32_inserti32x8:
5464 case X86::BI__builtin_ia32_insertf64x4:
5465 case X86::BI__builtin_ia32_inserti64x4:
5466 case X86::BI__builtin_ia32_vinsertf128_ps256:
5467 case X86::BI__builtin_ia32_vinsertf128_pd256:
5468 case X86::BI__builtin_ia32_vinsertf128_si256:
5469 case X86::BI__builtin_ia32_insert128i256:
5470 return interp__builtin_x86_insert_subvector(S, OpPC, Call, BuiltinID);
5471
5472 case clang::X86::BI__builtin_ia32_vcvtps2ph:
5473 case clang::X86::BI__builtin_ia32_vcvtps2ph256:
5474 return interp__builtin_ia32_vcvtps2ph(S, OpPC, Call);
5475
5476 case X86::BI__builtin_ia32_vec_ext_v4hi:
5477 case X86::BI__builtin_ia32_vec_ext_v16qi:
5478 case X86::BI__builtin_ia32_vec_ext_v8hi:
5479 case X86::BI__builtin_ia32_vec_ext_v4si:
5480 case X86::BI__builtin_ia32_vec_ext_v2di:
5481 case X86::BI__builtin_ia32_vec_ext_v32qi:
5482 case X86::BI__builtin_ia32_vec_ext_v16hi:
5483 case X86::BI__builtin_ia32_vec_ext_v8si:
5484 case X86::BI__builtin_ia32_vec_ext_v4di:
5485 case X86::BI__builtin_ia32_vec_ext_v4sf:
5486 return interp__builtin_vec_ext(S, OpPC, Call, BuiltinID);
5487
5488 case X86::BI__builtin_ia32_vec_set_v4hi:
5489 case X86::BI__builtin_ia32_vec_set_v16qi:
5490 case X86::BI__builtin_ia32_vec_set_v8hi:
5491 case X86::BI__builtin_ia32_vec_set_v4si:
5492 case X86::BI__builtin_ia32_vec_set_v2di:
5493 case X86::BI__builtin_ia32_vec_set_v32qi:
5494 case X86::BI__builtin_ia32_vec_set_v16hi:
5495 case X86::BI__builtin_ia32_vec_set_v8si:
5496 case X86::BI__builtin_ia32_vec_set_v4di:
5497 return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
5498
5499 case X86::BI__builtin_ia32_cvtb2mask128:
5500 case X86::BI__builtin_ia32_cvtb2mask256:
5501 case X86::BI__builtin_ia32_cvtb2mask512:
5502 case X86::BI__builtin_ia32_cvtw2mask128:
5503 case X86::BI__builtin_ia32_cvtw2mask256:
5504 case X86::BI__builtin_ia32_cvtw2mask512:
5505 case X86::BI__builtin_ia32_cvtd2mask128:
5506 case X86::BI__builtin_ia32_cvtd2mask256:
5507 case X86::BI__builtin_ia32_cvtd2mask512:
5508 case X86::BI__builtin_ia32_cvtq2mask128:
5509 case X86::BI__builtin_ia32_cvtq2mask256:
5510 case X86::BI__builtin_ia32_cvtq2mask512:
5511 return interp__builtin_ia32_cvt_vec2mask(S, OpPC, Call, BuiltinID);
5512
5513 case X86::BI__builtin_ia32_cvtsd2ss:
5514 return interp__builtin_ia32_cvtsd2ss(S, OpPC, Call, false);
5515
5516 case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
5517 return interp__builtin_ia32_cvtsd2ss(S, OpPC, Call, true);
5518
5519 case X86::BI__builtin_ia32_cvtpd2ps:
5520 case X86::BI__builtin_ia32_cvtpd2ps256:
5521 return interp__builtin_ia32_cvtpd2ps(S, OpPC, Call, false, false);
5522 case X86::BI__builtin_ia32_cvtpd2ps_mask:
5523 return interp__builtin_ia32_cvtpd2ps(S, OpPC, Call, true, false);
5524 case X86::BI__builtin_ia32_cvtpd2ps512_mask:
5525 return interp__builtin_ia32_cvtpd2ps(S, OpPC, Call, true, true);
5526
5527 case X86::BI__builtin_ia32_cmpb128_mask:
5528 case X86::BI__builtin_ia32_cmpw128_mask:
5529 case X86::BI__builtin_ia32_cmpd128_mask:
5530 case X86::BI__builtin_ia32_cmpq128_mask:
5531 case X86::BI__builtin_ia32_cmpb256_mask:
5532 case X86::BI__builtin_ia32_cmpw256_mask:
5533 case X86::BI__builtin_ia32_cmpd256_mask:
5534 case X86::BI__builtin_ia32_cmpq256_mask:
5535 case X86::BI__builtin_ia32_cmpb512_mask:
5536 case X86::BI__builtin_ia32_cmpw512_mask:
5537 case X86::BI__builtin_ia32_cmpd512_mask:
5538 case X86::BI__builtin_ia32_cmpq512_mask:
5539 return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
5540 /*IsUnsigned=*/false);
5541
5542 case X86::BI__builtin_ia32_ucmpb128_mask:
5543 case X86::BI__builtin_ia32_ucmpw128_mask:
5544 case X86::BI__builtin_ia32_ucmpd128_mask:
5545 case X86::BI__builtin_ia32_ucmpq128_mask:
5546 case X86::BI__builtin_ia32_ucmpb256_mask:
5547 case X86::BI__builtin_ia32_ucmpw256_mask:
5548 case X86::BI__builtin_ia32_ucmpd256_mask:
5549 case X86::BI__builtin_ia32_ucmpq256_mask:
5550 case X86::BI__builtin_ia32_ucmpb512_mask:
5551 case X86::BI__builtin_ia32_ucmpw512_mask:
5552 case X86::BI__builtin_ia32_ucmpd512_mask:
5553 case X86::BI__builtin_ia32_ucmpq512_mask:
5554 return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
5555 /*IsUnsigned=*/true);
5556
5557 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
5558 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
5559 case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
5561
5562 case X86::BI__builtin_ia32_pslldqi128_byteshift:
5563 case X86::BI__builtin_ia32_pslldqi256_byteshift:
5564 case X86::BI__builtin_ia32_pslldqi512_byteshift:
5565 // These SLLDQ intrinsics always operate on byte elements (8 bits).
5566 // The lane width is hardcoded to 16 to match the SIMD register size,
5567 // but the algorithm processes one byte per iteration,
5568 // so APInt(8, ...) is correct and intentional.
5570 S, OpPC, Call,
5571 [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
5572 unsigned LaneBase = (DstIdx / 16) * 16;
5573 unsigned LaneIdx = DstIdx % 16;
5574 if (LaneIdx < Shift)
5575 return std::make_pair(0, -1);
5576
5577 return std::make_pair(0,
5578 static_cast<int>(LaneBase + LaneIdx - Shift));
5579 });
5580
5581 case X86::BI__builtin_ia32_psrldqi128_byteshift:
5582 case X86::BI__builtin_ia32_psrldqi256_byteshift:
5583 case X86::BI__builtin_ia32_psrldqi512_byteshift:
5584 // These SRLDQ intrinsics always operate on byte elements (8 bits).
5585 // The lane width is hardcoded to 16 to match the SIMD register size,
5586 // but the algorithm processes one byte per iteration,
5587 // so APInt(8, ...) is correct and intentional.
5589 S, OpPC, Call,
5590 [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
5591 unsigned LaneBase = (DstIdx / 16) * 16;
5592 unsigned LaneIdx = DstIdx % 16;
5593 if (LaneIdx + Shift < 16)
5594 return std::make_pair(0,
5595 static_cast<int>(LaneBase + LaneIdx + Shift));
5596
5597 return std::make_pair(0, -1);
5598 });
5599
5600 case X86::BI__builtin_ia32_palignr128:
5601 case X86::BI__builtin_ia32_palignr256:
5602 case X86::BI__builtin_ia32_palignr512:
5604 S, OpPC, Call, [](unsigned DstIdx, unsigned Shift) {
5605 // Default to -1 → zero-fill this destination element
5606 unsigned VecIdx = 1;
5607 int ElemIdx = -1;
5608
5609 int Lane = DstIdx / 16;
5610 int Offset = DstIdx % 16;
5611
5612 // Elements come from VecB first, then VecA after the shift boundary
5613 unsigned ShiftedIdx = Offset + (Shift & 0xFF);
5614 if (ShiftedIdx < 16) { // from VecB
5615 ElemIdx = ShiftedIdx + (Lane * 16);
5616 } else if (ShiftedIdx < 32) { // from VecA
5617 VecIdx = 0;
5618 ElemIdx = (ShiftedIdx - 16) + (Lane * 16);
5619 }
5620
5621 return std::pair<unsigned, int>{VecIdx, ElemIdx};
5622 });
5623
5624 case X86::BI__builtin_ia32_alignd128:
5625 case X86::BI__builtin_ia32_alignd256:
5626 case X86::BI__builtin_ia32_alignd512:
5627 case X86::BI__builtin_ia32_alignq128:
5628 case X86::BI__builtin_ia32_alignq256:
5629 case X86::BI__builtin_ia32_alignq512: {
5630 unsigned NumElems = Call->getType()->castAs<VectorType>()->getNumElements();
5632 S, OpPC, Call, [NumElems](unsigned DstIdx, unsigned Shift) {
5633 unsigned Imm = Shift & 0xFF;
5634 unsigned EffectiveShift = Imm & (NumElems - 1);
5635 unsigned SourcePos = DstIdx + EffectiveShift;
5636 unsigned VecIdx = SourcePos < NumElems ? 1u : 0u;
5637 unsigned ElemIdx = SourcePos & (NumElems - 1);
5638 return std::pair<unsigned, int>{VecIdx, static_cast<int>(ElemIdx)};
5639 });
5640 }
5641
5642 default:
5643 S.FFDiag(S.Current->getLocation(OpPC),
5644 diag::note_invalid_subexpr_in_const_expr)
5645 << S.Current->getRange(OpPC);
5646
5647 return false;
5648 }
5649
5650 llvm_unreachable("Unhandled builtin ID");
5651}
5652
// Evaluates an OffsetOfExpr (__builtin_offsetof) to a byte offset.
// Walks every component of \p E (fields, array subscripts, base classes),
// accumulating the offset, and stores the final byte count in \p IntResult.
// Returns false when the offset cannot be computed (invalid record decl,
// non-array type for an array component, or a virtual base).
// NOTE(review): the leading signature line and a few body lines (e.g. the
// declarations of `Result` and the record-layout `RL`) appear to be missing
// from this extraction — confirm against the full source before editing.
5654 ArrayRef<int64_t> ArrayIndices, int64_t &IntResult) {
5656 unsigned N = E->getNumComponents();
5657 assert(N > 0);
5658
// Index into ArrayIndices; advanced once per OffsetOfNode::Array component.
5659 unsigned ArrayIndex = 0;
// Start from the type named in the offsetof expression and refine it as we
// descend through fields / array elements / bases.
5660 QualType CurrentType = E->getTypeSourceInfo()->getType();
5661 for (unsigned I = 0; I != N; ++I) {
5662 const OffsetOfNode &Node = E->getComponent(I);
5663 switch (Node.getKind()) {
5664 case OffsetOfNode::Field: {
// Member access: add the field's offset within the current record and
// continue with the field's (non-reference) type.
5665 const FieldDecl *MemberDecl = Node.getField();
5666 const auto *RD = CurrentType->getAsRecordDecl();
5667 if (!RD || RD->isInvalidDecl())
5668 return false;
5670 unsigned FieldIndex = MemberDecl->getFieldIndex();
5671 assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
5672 Result +=
5674 CurrentType = MemberDecl->getType().getNonReferenceType();
5675 break;
5676 }
5677 case OffsetOfNode::Array: {
5678 // When generating bytecode, we put all the index expressions as Sint64 on
5679 // the stack.
5680 int64_t Index = ArrayIndices[ArrayIndex];
5681 const ArrayType *AT = S.getASTContext().getAsArrayType(CurrentType);
5682 if (!AT)
5683 return false;
5684 CurrentType = AT->getElementType();
// Offset contribution is index * sizeof(element).
5685 CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(CurrentType);
5686 Result += Index * ElementSize;
5687 ++ArrayIndex;
5688 break;
5689 }
5690 case OffsetOfNode::Base: {
5691 const CXXBaseSpecifier *BaseSpec = Node.getBase();
// Virtual-base offsets are not statically known here; bail out.
5692 if (BaseSpec->isVirtual())
5693 return false;
5694
5695 // Find the layout of the class whose base we are looking into.
5696 const auto *RD = CurrentType->getAsCXXRecordDecl();
5697 if (!RD || RD->isInvalidDecl())
5698 return false;
5700
5701 // Find the base class itself.
5702 CurrentType = BaseSpec->getType();
5703 const auto *BaseRD = CurrentType->getAsCXXRecordDecl();
5704 if (!BaseRD)
5705 return false;
5706
5707 // Add the offset to the base.
5708 Result += RL.getBaseClassOffset(BaseRD);
5709 break;
5710 }
5712 llvm_unreachable("Dependent OffsetOfExpr?");
5713 }
5714 }
5715
// Hand the accumulated CharUnits back to the caller as a plain integer.
5716 IntResult = Result.getQuantity();
5717
5718 return true;
5719}
5720
// Sets the given integral value into the single field of a
// std::{strong,weak,partial}_ordering-style record pointed to by Ptr, then
// marks that field initialized.
// NOTE(review): the opening line of the definition (original line 5721) was
// dropped by the HTML extraction; per the symbol index it is
// `bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
//                                  const Pointer &Ptr, const APSInt &IntValue)`.
5722 const Pointer &Ptr, const APSInt &IntValue) {
5723
// The comparison-category records modeled here have exactly one field
// (the underlying integer value).
5724 const Record *R = Ptr.getRecord();
5725 assert(R);
5726 assert(R->getNumFields() == 1);
5727
5728 unsigned FieldOffset = R->getField(0u)->Offset;
5729 const Pointer &FieldPtr = Ptr.atField(FieldOffset);
5730 PrimType FieldT = *S.getContext().classify(FieldPtr.getType());
5731
// Store the (sign-extended) value using whatever integral PrimType the
// field classifies as.
5732 INT_TYPE_SWITCH(FieldT,
5733 FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
5734 FieldPtr.initialize();
5735 return true;
5736}
5737
5738static void zeroAll(Pointer &Dest) {
5739 const Descriptor *Desc = Dest.getFieldDesc();
5740
5741 if (Desc->isPrimitive()) {
5742 TYPE_SWITCH(Desc->getPrimType(), {
5743 Dest.deref<T>().~T();
5744 new (&Dest.deref<T>()) T();
5745 });
5746 return;
5747 }
5748
5749 if (Desc->isRecord()) {
5750 const Record *R = Desc->ElemRecord;
5751 for (const Record::Field &F : R->fields()) {
5752 Pointer FieldPtr = Dest.atField(F.Offset);
5753 zeroAll(FieldPtr);
5754 }
5755 return;
5756 }
5757
5758 if (Desc->isPrimitiveArray()) {
5759 for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
5760 TYPE_SWITCH(Desc->getPrimType(), {
5761 Dest.deref<T>().~T();
5762 new (&Dest.deref<T>()) T();
5763 });
5764 }
5765 return;
5766 }
5767
5768 if (Desc->isCompositeArray()) {
5769 for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
5770 Pointer ElemPtr = Dest.atIndex(I).narrow();
5771 zeroAll(ElemPtr);
5772 }
5773 return;
5774 }
5775}
5776
5777static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
5778 Pointer &Dest, bool Activate);
5779static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
5780 Pointer &Dest, bool Activate = false) {
5781 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
5782 const Descriptor *DestDesc = Dest.getFieldDesc();
5783
5784 auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
5785 Pointer DestField = Dest.atField(F.Offset);
5786 if (OptPrimType FT = S.Ctx.classify(F.Decl->getType())) {
5787 TYPE_SWITCH(*FT, {
5788 DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
5789 if (Src.atField(F.Offset).isInitialized())
5790 DestField.initialize();
5791 if (Activate)
5792 DestField.activate();
5793 });
5794 return true;
5795 }
5796 // Composite field.
5797 return copyComposite(S, OpPC, Src.atField(F.Offset), DestField, Activate);
5798 };
5799
5800 assert(SrcDesc->isRecord());
5801 assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
5802 const Record *R = DestDesc->ElemRecord;
5803 for (const Record::Field &F : R->fields()) {
5804 if (R->isUnion()) {
5805 // For unions, only copy the active field. Zero all others.
5806 const Pointer &SrcField = Src.atField(F.Offset);
5807 if (SrcField.isActive()) {
5808 if (!copyField(F, /*Activate=*/true))
5809 return false;
5810 } else {
5811 if (!CheckMutable(S, OpPC, Src.atField(F.Offset)))
5812 return false;
5813 Pointer DestField = Dest.atField(F.Offset);
5814 zeroAll(DestField);
5815 }
5816 } else {
5817 if (!copyField(F, Activate))
5818 return false;
5819 }
5820 }
5821
5822 for (const Record::Base &B : R->bases()) {
5823 Pointer DestBase = Dest.atField(B.Offset);
5824 if (!copyRecord(S, OpPC, Src.atField(B.Offset), DestBase, Activate))
5825 return false;
5826 }
5827
5828 Dest.initialize();
5829 return true;
5830}
5831
5832static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
5833 Pointer &Dest, bool Activate = false) {
5834 assert(Src.isLive() && Dest.isLive());
5835
5836 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
5837 const Descriptor *DestDesc = Dest.getFieldDesc();
5838
5839 assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());
5840
5841 if (DestDesc->isPrimitiveArray()) {
5842 assert(SrcDesc->isPrimitiveArray());
5843 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
5844 PrimType ET = DestDesc->getPrimType();
5845 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
5846 Pointer DestElem = Dest.atIndex(I);
5847 TYPE_SWITCH(ET, {
5848 DestElem.deref<T>() = Src.elem<T>(I);
5849 DestElem.initialize();
5850 });
5851 }
5852 return true;
5853 }
5854
5855 if (DestDesc->isCompositeArray()) {
5856 assert(SrcDesc->isCompositeArray());
5857 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
5858 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
5859 const Pointer &SrcElem = Src.atIndex(I).narrow();
5860 Pointer DestElem = Dest.atIndex(I).narrow();
5861 if (!copyComposite(S, OpPC, SrcElem, DestElem, Activate))
5862 return false;
5863 }
5864 return true;
5865 }
5866
5867 if (DestDesc->isRecord())
5868 return copyRecord(S, OpPC, Src, Dest, Activate);
5869 return Invalid(S, OpPC);
5870}
5871
5872bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
5873 return copyComposite(S, OpPC, Src, Dest);
5874}
5875
5876} // namespace interp
5877} // namespace clang
#define V(N, I)
Defines enum values for all the target-independent builtin functions.
llvm::APSInt APSInt
Definition Compiler.cpp:24
GCCTypeClass
Values returned by __builtin_classify_type, chosen to match the values produced by GCC's builtin.
CharUnits GetAlignOfExpr(const ASTContext &Ctx, const Expr *E, UnaryExprOrTypeTrait ExprKind)
GCCTypeClass EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts)
EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way as GCC.
static bool isOneByteCharacterType(QualType T)
static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal)
Attempts to detect a user writing into a piece of memory that's impossible to figure out the size of ...
uint8_t GFNIMul(uint8_t AByte, uint8_t BByte)
uint8_t GFNIAffine(uint8_t XByte, const APInt &AQword, const APSInt &Imm, bool Inverse)
TokenType getType() const
Returns the token's type, e.g.
#define X(type, name)
Definition Value.h:97
static DiagnosticBuilder Diag(DiagnosticsEngine *Diags, const LangOptions &Features, FullSourceLoc TokLoc, const char *TokBegin, const char *TokRangeBegin, const char *TokRangeEnd, unsigned DiagID)
Produce a diagnostic highlighting some portion of a literal.
#define INT_TYPE_SWITCH_NO_BOOL(Expr, B)
Definition PrimType.h:251
#define INT_TYPE_SWITCH(Expr, B)
Definition PrimType.h:232
#define TYPE_SWITCH(Expr, B)
Definition PrimType.h:211
static std::string toString(const clang::SanitizerSet &Sanitizers)
Produce a string containing comma-separated names of sanitizers in Sanitizers set.
static QualType getPointeeType(const MemRegion *R)
Enumerates target-specific builtins in their own namespaces within namespace clang.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition APValue.h:122
CharUnits & getLValueOffset()
Definition APValue.cpp:993
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
CanQualType FloatTy
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
Builtin::Context & BuiltinInfo
Definition ASTContext.h:793
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
const LangOptions & getLangOpts() const
Definition ASTContext.h:945
CanQualType CharTy
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
QualType getWCharType() const
Return the unique wchar_t type available in C++ (and available as __wchar_t as a Microsoft extension)...
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:910
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
CanQualType getCanonicalTagType(const TagDecl *TD) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
CanQualType HalfTy
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getVBaseClassOffset(const CXXRecordDecl *VBase) const
getVBaseClassOffset - Get the offset, in chars, for the given base class.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3723
QualType getElementType() const
Definition TypeBase.h:3735
std::string getQuotedName(unsigned ID) const
Return the identifier name for the specified builtin inside single quotes for a diagnostic,...
Definition Builtins.cpp:85
bool isConstantEvaluated(unsigned ID) const
Return true if this function can be constant evaluated by Clang frontend.
Definition Builtins.h:459
Represents a base class of a C++ class.
Definition DeclCXX.h:146
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition DeclCXX.h:249
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3081
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Definition CharUnits.h:201
static unsigned getMaxSizeBits(const ASTContext &Context)
Determine the maximum number of active bits that an array's size can require, which limits the maximu...
Definition Type.cpp:254
This represents one expression.
Definition Expr.h:112
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:276
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3160
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
Represents a function declaration or definition.
Definition Decl.h:2000
One of these records is kept for each identifier that is lexed.
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
std::optional< llvm::AllocTokenMode > AllocTokenMode
The allocation token mode.
std::optional< uint64_t > AllocTokenMax
Maximum number of allocation tokens (0 = target SIZE_MAX), nullopt if none set (use target SIZE_MAX).
OffsetOfExpr - [C99 7.17] - This represents an expression of the form offsetof(record-type,...
Definition Expr.h:2527
const OffsetOfNode & getComponent(unsigned Idx) const
Definition Expr.h:2574
TypeSourceInfo * getTypeSourceInfo() const
Definition Expr.h:2567
unsigned getNumComponents() const
Definition Expr.h:2582
Helper class for OffsetOfExpr.
Definition Expr.h:2421
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition Expr.h:2485
@ Array
An index into an array.
Definition Expr.h:2426
@ Identifier
A field in a dependent type, known only by its name.
Definition Expr.h:2430
@ Field
A field.
Definition Expr.h:2428
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition Expr.h:2433
Kind getKind() const
Determine what kind of offsetof node this is.
Definition Expr.h:2475
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition Expr.h:2495
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3329
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2866
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8293
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8478
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
unsigned getMaxAtomicInlineWidth() const
Return the maximum width lock-free atomic operation which can be inlined given the supported features...
Definition TargetInfo.h:858
bool isBigEndian() const
virtual int getEHDataRegisterNumber(unsigned RegNo) const
Return the register number that __builtin_eh_return_regno would return with the specified argument.
virtual bool isNan2008() const
Returns true if NaN encoding is IEEE 754-2008.
QualType getType() const
Return the type wrapped by this type source info.
Definition TypeBase.h:8275
bool isBooleanType() const
Definition TypeBase.h:9022
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2225
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition Type.cpp:2273
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isPointerType() const
Definition TypeBase.h:8530
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8936
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9179
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition Type.cpp:2435
bool isVectorType() const
Definition TypeBase.h:8669
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2320
bool isFloatingType() const
Definition Type.cpp:2304
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9112
QualType getType() const
Definition Decl.h:723
Represents a GCC generic vector type.
Definition TypeBase.h:4176
unsigned getNumElements() const
Definition TypeBase.h:4191
QualType getElementType() const
Definition TypeBase.h:4190
A memory block, either on the stack or in the heap.
Definition InterpBlock.h:44
const Descriptor * getDescriptor() const
Returns the block's descriptor.
Definition InterpBlock.h:73
bool isDynamic() const
Definition InterpBlock.h:83
Wrapper around boolean types.
Definition Boolean.h:25
static Boolean from(T Value)
Definition Boolean.h:97
Pointer into the code segment.
Definition Source.h:30
const LangOptions & getLangOpts() const
Returns the language options.
Definition Context.cpp:328
OptPrimType classify(QualType T) const
Classifies a type.
Definition Context.cpp:362
unsigned getEvalID() const
Definition Context.h:147
Manages dynamic memory allocations done during bytecode interpretation.
bool deallocate(const Expr *Source, const Block *BlockToDelete, InterpState &S)
Deallocate the given source+block combination.
std::optional< Form > getAllocationForm(const Expr *Source) const
Checks whether the allocation done at the given source is an array allocation.
Block * allocate(const Descriptor *D, unsigned EvalID, Form AllocForm)
Allocate ONE element of the given descriptor.
If a Floating is constructed from Memory, it DOES NOT OWN THAT MEMORY.
Definition Floating.h:35
void copy(const APFloat &F)
Definition Floating.h:123
llvm::FPClassTest classify() const
Definition Floating.h:154
bool isSignaling() const
Definition Floating.h:149
bool isNormal() const
Definition Floating.h:152
ComparisonCategoryResult compare(const Floating &RHS) const
Definition Floating.h:157
bool isZero() const
Definition Floating.h:144
bool isNegative() const
Definition Floating.h:143
bool isFinite() const
Definition Floating.h:151
bool isDenormal() const
Definition Floating.h:153
APFloat::fltCategory getCategory() const
Definition Floating.h:155
APFloat getAPFloat() const
Definition Floating.h:64
Base class for stack frames, shared between VM and walker.
Definition Frame.h:25
virtual const FunctionDecl * getCallee() const =0
Returns the called function's declaration.
If an IntegralAP is constructed from Memory, it DOES NOT OWN THAT MEMORY.
Definition IntegralAP.h:36
Frame storing local variables.
Definition InterpFrame.h:27
const Expr * getExpr(CodePtr PC) const
InterpFrame * Caller
The frame of the previous function.
Definition InterpFrame.h:30
SourceInfo getSource(CodePtr PC) const
Map a location to a source.
CodePtr getRetPC() const
Returns the return address of the frame.
SourceLocation getLocation(CodePtr PC) const
SourceRange getRange(CodePtr PC) const
unsigned getDepth() const
const FunctionDecl * getCallee() const override
Returns the caller.
Stack frame storing temporaries and parameters.
Definition InterpStack.h:25
T pop()
Returns the value from the top of the stack and removes it.
Definition InterpStack.h:39
void push(Tys &&...Args)
Constructs a value in place on the top of the stack.
Definition InterpStack.h:33
void discard()
Discards the top value from the stack.
Definition InterpStack.h:50
T & peek() const
Returns a reference to the value on the top of the stack.
Definition InterpStack.h:62
Interpreter context.
Definition InterpState.h:43
Expr::EvalStatus & getEvalStatus() const override
Definition InterpState.h:67
Context & getContext() const
DynamicAllocator & getAllocator()
Context & Ctx
Interpreter Context.
Floating allocFloat(const llvm::fltSemantics &Sem)
llvm::SmallVector< const Block * > InitializingBlocks
List of blocks we're currently running either constructors or destructors for.
ASTContext & getASTContext() const override
Definition InterpState.h:70
InterpStack & Stk
Temporary stack.
const VarDecl * EvaluatingDecl
Declaration we're initializing/evaluting, if any.
InterpFrame * Current
The current frame.
T allocAP(unsigned BitWidth)
const LangOptions & getLangOpts() const
Definition InterpState.h:71
StdAllocatorCaller getStdAllocatorCaller(StringRef Name) const
Program & P
Reference to the module containing all bytecode.
PrimType value_or(PrimType PT) const
Definition PrimType.h:68
A pointer to a memory block, live or dead.
Definition Pointer.h:92
Pointer narrow() const
Restricts the scope of an array element pointer.
Definition Pointer.h:189
bool isInitialized() const
Checks if an object was initialized.
Definition Pointer.cpp:441
Pointer atIndex(uint64_t Idx) const
Offsets a pointer inside an array.
Definition Pointer.h:157
bool isDummy() const
Checks if the pointer points to a dummy value.
Definition Pointer.h:552
int64_t getIndex() const
Returns the index into an array.
Definition Pointer.h:617
bool isActive() const
Checks if the object is active.
Definition Pointer.h:541
Pointer atField(unsigned Off) const
Creates a pointer to a field.
Definition Pointer.h:174
T & deref() const
Dereferences the pointer, if it's live.
Definition Pointer.h:668
unsigned getNumElems() const
Returns the number of elements.
Definition Pointer.h:601
Pointer getArray() const
Returns the parent array.
Definition Pointer.h:321
bool isUnknownSizeArray() const
Checks if the structure is an array of unknown size.
Definition Pointer.h:420
void activate() const
Activates a field.
Definition Pointer.cpp:573
bool isIntegralPointer() const
Definition Pointer.h:474
QualType getType() const
Returns the type of the innermost field.
Definition Pointer.h:341
bool isArrayElement() const
Checks if the pointer points to an array.
Definition Pointer.h:426
void initializeAllElements() const
Initialize all elements of a primitive array at once.
Definition Pointer.cpp:543
bool isLive() const
Checks if the pointer is live.
Definition Pointer.h:273
bool inArray() const
Checks if the innermost field is an array.
Definition Pointer.h:402
T & elem(unsigned I) const
Dereferences the element at index I.
Definition Pointer.h:684
Pointer getBase() const
Returns a pointer to the object of which this pointer is a field.
Definition Pointer.h:312
std::string toDiagnosticString(const ASTContext &Ctx) const
Converts the pointer to a string usable in diagnostics.
Definition Pointer.cpp:428
bool isZero() const
Checks if the pointer is null.
Definition Pointer.h:259
bool isRoot() const
Pointer points directly to a block.
Definition Pointer.h:442
const Descriptor * getDeclDesc() const
Accessor for information about the declaration site.
Definition Pointer.h:287
static bool pointToSameBlock(const Pointer &A, const Pointer &B)
Checks if both given pointers point to the same block.
Definition Pointer.cpp:649
APValue toAPValue(const ASTContext &ASTCtx) const
Converts the pointer to an APValue.
Definition Pointer.cpp:172
bool isOnePastEnd() const
Checks if the index is one past end.
Definition Pointer.h:634
uint64_t getIntegerRepresentation() const
Definition Pointer.h:144
const FieldDecl * getField() const
Returns the field information.
Definition Pointer.h:486
Pointer expand() const
Expands a pointer to the containing array, undoing narrowing.
Definition Pointer.h:224
bool isBlockPointer() const
Definition Pointer.h:473
const Block * block() const
Definition Pointer.h:607
const Descriptor * getFieldDesc() const
Accessors for information about the innermost field.
Definition Pointer.h:331
bool isVirtualBaseClass() const
Definition Pointer.h:548
bool isBaseClass() const
Checks if a structure is a base class.
Definition Pointer.h:547
size_t elemSize() const
Returns the element size of the innermost field.
Definition Pointer.h:363
bool canBeInitialized() const
If this pointer has an InlineDescriptor we can use to initialize.
Definition Pointer.h:449
Lifetime getLifetime() const
Definition Pointer.h:729
void initialize() const
Initializes a field.
Definition Pointer.cpp:492
bool isField() const
Checks if the item is a field in an object.
Definition Pointer.h:279
const Record * getRecord() const
Returns the record descriptor of a class.
Definition Pointer.h:479
Descriptor * createDescriptor(const DeclTy &D, PrimType T, const Type *SourceTy=nullptr, Descriptor::MetadataSize MDSize=std::nullopt, bool IsConst=false, bool IsTemporary=false, bool IsMutable=false, bool IsVolatile=false)
Creates a descriptor for a primitive type.
Definition Program.h:119
Structure/Class descriptor.
Definition Record.h:25
const RecordDecl * getDecl() const
Returns the underlying declaration.
Definition Record.h:53
bool isUnion() const
Checks if the record is a union.
Definition Record.h:57
const Field * getField(const FieldDecl *FD) const
Returns a field.
Definition Record.cpp:47
llvm::iterator_range< const_base_iter > bases() const
Definition Record.h:92
unsigned getNumFields() const
Definition Record.h:88
llvm::iterator_range< const_field_iter > fields() const
Definition Record.h:84
Describes the statement/declaration an opcode was generated from.
Definition Source.h:74
OptionalDiagnostic Note(SourceLocation Loc, diag::kind DiagId)
Add a note to a prior diagnostic.
Definition State.cpp:63
DiagnosticBuilder report(SourceLocation Loc, diag::kind DiagId)
Directly reports a diagnostic message.
Definition State.cpp:74
OptionalDiagnostic FFDiag(SourceLocation Loc, diag::kind DiagId=diag::note_invalid_subexpr_in_const_expr, unsigned ExtraNotes=0)
Diagnose that the evaluation could not be folded (FF => FoldFailure)
Definition State.cpp:21
OptionalDiagnostic CCEDiag(SourceLocation Loc, diag::kind DiagId=diag::note_invalid_subexpr_in_const_expr, unsigned ExtraNotes=0)
Diagnose that the evaluation does not produce a C++11 core constant expression.
Definition State.cpp:42
bool checkingPotentialConstantExpression() const
Are we checking whether the expression is a potential constant expression?
Definition State.h:99
Defines the clang::TargetInfo interface.
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition OSLog.cpp:192
std::optional< llvm::AllocTokenMetadata > getAllocTokenMetadata(QualType T, const ASTContext &Ctx)
Get the information required for construction of an allocation token ID.
QualType inferPossibleType(const CallExpr *E, const ASTContext &Ctx, const CastExpr *CastE)
Infer the possible allocated type from an allocation call expression.
static bool isNoopBuiltin(unsigned ID)
static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_ia32_shuffle_generic(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< std::pair< unsigned, int >(unsigned, unsigned)> GetSourceIndex)
static bool interp__builtin_ia32_phminposuw(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT, const APSInt &Value)
static bool interp_builtin_ia32_gfni_affine(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool Inverse)
bool readPointerToBuffer(const Context &Ctx, const Pointer &FromPtr, BitcastBuffer &Buffer, bool ReturnOnUninit)
static Floating abs(InterpState &S, const Floating &In)
static bool interp__builtin_x86_extract_vector(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool IsNumBuiltin)
static bool interp__builtin_elementwise_maxmin(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_elementwise_triop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_assume(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC, DynamicAllocator::Form AllocForm, DynamicAllocator::Form DeleteForm, const Descriptor *D, const Expr *NewExpr)
Diagnose mismatched new[]/delete or new/delete[] pairs.
Definition Interp.cpp:1116
static bool interp__builtin_ia32_shift_with_count(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APInt &, uint64_t)> ShiftOp, llvm::function_ref< APInt(const APInt &, unsigned)> OverflowOp)
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
Defined as __builtin_isnan(...), to accommodate the fact that it can take a float,...
static llvm::RoundingMode getRoundingMode(FPOptions FPO)
static bool interp__builtin_elementwise_countzeroes(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
Can be called with an integer or vector as the first and only parameter.
bool Call(InterpState &S, CodePtr OpPC, const Function *Func, uint32_t VarArgSize)
Definition Interp.cpp:1587
static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool IsNumBuiltin)
bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC, const Pointer &Ptr, const APSInt &IntValue)
Sets the given integral value to the pointer, which is of a std::{weak,partial,strong}...
static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame)
static uint64_t popToUInt64(const InterpState &S, const Expr *E)
static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
bool __atomic_always_lock_free(size_t, void const volatile*) bool __atomic_is_lock_free(size_t,...
static llvm::APSInt convertBoolVectorToInt(const Pointer &Val)
static bool interp__builtin_move(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_clz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if a pointer points to a mutable field.
Definition Interp.cpp:594
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
__builtin_is_aligned() __builtin_align_up() __builtin_align_down() The first parameter is either an i...
static bool interp__builtin_ia32_addsub(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool Activate(InterpState &S, CodePtr OpPC)
Definition Interp.h:1963
static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
(CarryIn, LHS, RHS, Result)
static bool isOneByteCharacterType(QualType T)
Determine if T is a character type for which we guarantee that sizeof(T) == 1.
static bool convertDoubleToFloatStrict(APFloat Src, Floating &Dst, InterpState &S, const Expr *DiagExpr)
static unsigned computePointerOffset(const ASTContext &ASTCtx, const Pointer &Ptr)
Compute the byte offset of Ptr in the full declaration.
static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a value can be loaded from a block.
Definition Interp.cpp:792
static bool interp__builtin_ia32_cmp_mask(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID, bool IsUnsigned)
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinOp)
static bool interp__builtin_inf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_vec_ext(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_test_op(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< bool(const APInt &A, const APInt &B)> Fn)
static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool CheckSign, const CallExpr *Call)
static bool interp__builtin_os_log_format_buffer_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E, ArrayRef< int64_t > ArrayIndices, int64_t &IntResult)
Interpret an offsetof operation.
static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is in range.
Definition Interp.cpp:519
static bool pointsToLastObject(const Pointer &Ptr)
Does Ptr point to the last subobject?
static bool interp__builtin_select(InterpState &S, CodePtr OpPC, const CallExpr *Call)
AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
llvm::APFloat APFloat
Definition Floating.h:27
static void discard(InterpStack &Stk, PrimType T)
static bool interp__builtin_select_scalar(InterpState &S, const CallExpr *Call)
Scalar variant of AVX512 predicated select: Result[i] = (Mask bit 0) ?
bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is live and accessible.
Definition Interp.cpp:414
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
Five int values followed by one floating value.
static bool interp__builtin_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp_floating_comparison(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool handleOverflow(InterpState &S, CodePtr OpPC, const T &SrcValue)
llvm::APInt APInt
Definition FixedPoint.h:19
static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate)
static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate=false)
static bool interp__builtin_c11_atomic_is_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool __c11_atomic_is_lock_free(size_t)
static void zeroAll(Pointer &Dest)
static bool interp__builtin_elementwise_int_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_x86_extract_vector_masked(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
PrimType
Enumeration of the primitive types of the VM.
Definition PrimType.h:34
static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, uint32_t BuiltinID)
Interpret a builtin function.
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_vec_set(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
__builtin_complex(Float A, float B);
static bool evalICmpImm(uint8_t Imm, const APSInt &A, const APSInt &B, bool IsUnsigned)
bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK)
Checks if a pointer is a dummy pointer.
Definition Interp.cpp:1167
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
__builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
static bool interp__builtin_ia32_cvt_vec2mask(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_pmul(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &, const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_x86_pack(InterpState &S, CodePtr, const CallExpr *E, llvm::function_ref< APInt(const APSInt &)> PackFn)
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT)
Pushes Val on the stack as the type given by QT.
static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if the array is offsetable.
Definition Interp.cpp:406
static bool interp__builtin_elementwise_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC, const InterpFrame *Frame)
static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool Error(InterpState &S, CodePtr OpPC)
Do nothing and just abort execution.
Definition Interp.h:3290
static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_movmsk_op(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
static bool interp_builtin_horizontal_fp_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APFloat(const APFloat &, const APFloat &, llvm::RoundingMode)> Fn)
static bool interp__builtin_ia32_pclmulqdq(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_elementwise_triop_fp(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APFloat(const APFloat &, const APFloat &, const APFloat &, llvm::RoundingMode)> Fn)
static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems)
static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static APSInt popToAPSInt(InterpStack &Stk, PrimType T)
static std::optional< unsigned > computeFullDescSize(const ASTContext &ASTCtx, const Descriptor *Desc)
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
First parameter to __builtin_isfpclass is the floating value, the second one is an integral value.
static bool interp__builtin_ia32_vcvtps2ph(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_ia32_multishiftqb(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_ia32_shufbitqmb_mask(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_nan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, bool Signaling)
bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest)
Copy the contents of Src into Dest.
static bool interp__builtin_elementwise_int_unaryop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &)> Fn)
constexpr bool isIntegralType(PrimType T)
Definition PrimType.h:128
static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_infer_alloc_token(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp_builtin_horizontal_int_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_ia32_cvtsd2ss(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool HasRoundingMask)
static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC, unsigned ID)
llvm::APSInt APSInt
Definition FixedPoint.h:20
static bool interp__builtin_ia32_gfni_mul(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static QualType getElemType(const Pointer &P)
static bool interp__builtin_ia32_pternlog(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool MaskZ)
static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static void swapBytes(std::byte *M, size_t N)
static bool interp__builtin_ia32_cvtpd2ps(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool IsMasked, bool HasRounding)
The JSON file list parser is used to communicate input to InstallAPI.
if(T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T -> getSizeExpr())))
ComparisonCategoryResult
An enumeration representing the possible results of a three-way comparison.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
@ AK_Read
Definition State.h:27
const FunctionProtoType * T
U cast(CodeGen::Address addr)
Definition Address.h:327
SmallVectorImpl< PartialDiagnosticAt > * Diag
Diag - If this is non-null, it will be filled in with a stack of notes indicating why evaluation fail...
Definition Expr.h:633
Track what bits have been initialized to known values and which ones have indeterminate value.
T deref(Bytes Offset) const
Dereferences the value at the given offset.
std::unique_ptr< std::byte[]> Data
A quantity in bits.
A quantity in bytes.
size_t getQuantity() const
Describes a memory block created by an allocation site.
Definition Descriptor.h:122
unsigned getNumElems() const
Returns the number of elements stored in the block.
Definition Descriptor.h:249
bool isPrimitive() const
Checks if the descriptor is of a primitive.
Definition Descriptor.h:263
QualType getElemQualType() const
bool isCompositeArray() const
Checks if the descriptor is of an array of composites.
Definition Descriptor.h:256
const ValueDecl * asValueDecl() const
Definition Descriptor.h:214
static constexpr unsigned MaxArrayElemBytes
Maximum number of bytes to be used for array elements.
Definition Descriptor.h:148
QualType getType() const
const Decl * asDecl() const
Definition Descriptor.h:210
static constexpr MetadataSize InlineDescMD
Definition Descriptor.h:144
unsigned getElemSize() const
returns the size of an element when the structure is viewed as an array.
Definition Descriptor.h:244
bool isPrimitiveArray() const
Checks if the descriptor is of an array of primitives.
Definition Descriptor.h:254
const VarDecl * asVarDecl() const
Definition Descriptor.h:218
PrimType getPrimType() const
Definition Descriptor.h:236
bool isRecord() const
Checks if the descriptor is of a record.
Definition Descriptor.h:268
const Record *const ElemRecord
Pointer to the record, if block contains records.
Definition Descriptor.h:153
const Expr * asExpr() const
Definition Descriptor.h:211
bool isArray() const
Checks if the descriptor is of an array.
Definition Descriptor.h:266
Mapping from primitive types to their representation.
Definition PrimType.h:138