// Extracted from the clang 22.0.0git documentation listing of
// clang/lib/AST/ByteCode/InterpBuiltin.cpp.
1//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
#include "Boolean.h"
#include "EvalEmitter.h"
#include "InterpHelpers.h"
#include "PrimType.h"
#include "Program.h"
#include "clang/AST/OSLog.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/AllocToken.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SipHash.h"
25
26namespace clang {
27namespace interp {
28
29[[maybe_unused]] static bool isNoopBuiltin(unsigned ID) {
30 switch (ID) {
31 case Builtin::BIas_const:
32 case Builtin::BIforward:
33 case Builtin::BIforward_like:
34 case Builtin::BImove:
35 case Builtin::BImove_if_noexcept:
36 case Builtin::BIaddressof:
37 case Builtin::BI__addressof:
38 case Builtin::BI__builtin_addressof:
39 case Builtin::BI__builtin_launder:
40 return true;
41 default:
42 return false;
43 }
44 return false;
45}
46
/// Pops and discards the top-of-stack value, whose primitive type is \p T.
static void discard(InterpStack &Stk, PrimType T) {
  TYPE_SWITCH(T, { Stk.discard<T>(); });
}
50
51static uint64_t popToUInt64(const InterpState &S, const Expr *E) {
53 return static_cast<uint64_t>(S.Stk.pop<T>()));
54}
55
57 INT_TYPE_SWITCH(T, return Stk.pop<T>().toAPSInt());
58}
59
/// Pops the value of expression \p E off the stack (classifying E's type to
/// find the primitive type) and returns it as an APSInt.
static APSInt popToAPSInt(InterpState &S, const Expr *E) {
  return popToAPSInt(S.Stk, *S.getContext().classify(E->getType()));
}
64 return popToAPSInt(S.Stk, *S.getContext().classify(T));
65}
66
67/// Check for common reasons a pointer can't be read from, which
68/// are usually not diagnosed in a builtin function.
69static bool isReadable(const Pointer &P) {
70 if (P.isDummy())
71 return false;
72 if (!P.isBlockPointer())
73 return false;
74 if (!P.isLive())
75 return false;
76 if (P.isOnePastEnd())
77 return false;
78 return true;
79}
80
81/// Pushes \p Val on the stack as the type given by \p QT.
82static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
86 assert(T);
87
88 unsigned BitWidth = S.getASTContext().getTypeSize(QT);
89
90 if (T == PT_IntAPS) {
91 auto Result = S.allocAP<IntegralAP<true>>(BitWidth);
92 Result.copy(Val);
94 return;
95 }
96
97 if (T == PT_IntAP) {
98 auto Result = S.allocAP<IntegralAP<false>>(BitWidth);
99 Result.copy(Val);
101 return;
102 }
103
105 int64_t V = Val.getSExtValue();
106 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
107 } else {
109 uint64_t V = Val.getZExtValue();
110 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
111 }
112}
113
/// Convenience overload: accepts any integer-like value (APInt, APSInt, or a
/// native integer) and forwards to the APSInt overload above.
template <typename T>
static void pushInteger(InterpState &S, T Val, QualType QT) {
  if constexpr (std::is_same_v<T, APInt>)
    // A plain APInt carries no sign; std::is_signed_v<APInt> is false, so
    // this wraps it as an unsigned APSInt.
    pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
  else if constexpr (std::is_same_v<T, APSInt>)
    pushInteger(S, Val, QT);
  else
    // Native integer: widen to an APSInt of the native bit-width, taking
    // signedness from T.
    pushInteger(S,
                APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
                             std::is_signed_v<T>),
                       !std::is_signed_v<T>),
                QT);
}
127
128static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT,
129 const APSInt &Value) {
130
131 if (ValueT == PT_IntAPS) {
132 Dest.deref<IntegralAP<true>>() =
133 S.allocAP<IntegralAP<true>>(Value.getBitWidth());
134 Dest.deref<IntegralAP<true>>().copy(Value);
135 } else if (ValueT == PT_IntAP) {
136 Dest.deref<IntegralAP<false>>() =
137 S.allocAP<IntegralAP<false>>(Value.getBitWidth());
138 Dest.deref<IntegralAP<false>>().copy(Value);
139 } else {
141 ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
142 }
143}
144
145static QualType getElemType(const Pointer &P) {
146 const Descriptor *Desc = P.getFieldDesc();
147 QualType T = Desc->getType();
148 if (Desc->isPrimitive())
149 return T;
150 if (T->isPointerType())
151 return T->castAs<PointerType>()->getPointeeType();
152 if (Desc->isArray())
153 return Desc->getElemQualType();
154 if (const auto *AT = T->getAsArrayTypeUnsafe())
155 return AT->getElementType();
156 return T;
157}
158
160 unsigned ID) {
161 if (!S.diagnosing())
162 return;
163
164 auto Loc = S.Current->getSource(OpPC);
165 if (S.getLangOpts().CPlusPlus11)
166 S.CCEDiag(Loc, diag::note_constexpr_invalid_function)
167 << /*isConstexpr=*/0 << /*isConstructor=*/0
169 else
170 S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
171}
172
173static llvm::APSInt convertBoolVectorToInt(const Pointer &Val) {
174 assert(Val.getFieldDesc()->isPrimitiveArray() &&
176 "Not a boolean vector");
177 unsigned NumElems = Val.getNumElems();
178
179 // Each element is one bit, so create an integer with NumElts bits.
180 llvm::APSInt Result(NumElems, 0);
181 for (unsigned I = 0; I != NumElems; ++I) {
182 if (Val.elem<bool>(I))
183 Result.setBit(I);
184 }
185
186 return Result;
187}
188
// Strict double -> float conversion used for X86 PD2PS/cvtsd2ss intrinsics.
// Reject NaN/Inf/Subnormal inputs and any lossy/inexact conversions.
// NOTE(review): the first line of this signature (return type, name, and the
// Src/Dst parameters) is missing from this excerpt; only the trailing
// parameters survive. From the body, Src is the double input APFloat and Dst
// is the Floating that receives the converted float — confirm upstream.
                                     InterpState &S, const Expr *DiagExpr) {
  // Infinity is rejected outright (diag argument 0 = inf).
  if (Src.isInfinity()) {
    if (S.diagnosing())
      S.CCEDiag(DiagExpr, diag::note_constexpr_float_arithmetic) << 0;
    return false;
  }
  // NaN is rejected outright (diag argument 1 = NaN).
  if (Src.isNaN()) {
    if (S.diagnosing())
      S.CCEDiag(DiagExpr, diag::note_constexpr_float_arithmetic) << 1;
    return false;
  }
  // Attempt the conversion and inspect the status bits.
  APFloat Val = Src;
  bool LosesInfo = false;
  APFloat::opStatus Status = Val.convert(
      APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven, &LosesInfo);
  // Lossy conversions and subnormal results are not allowed in strict mode.
  if (LosesInfo || Val.isDenormal()) {
    if (S.diagnosing())
      S.CCEDiag(DiagExpr, diag::note_constexpr_float_arithmetic_strict);
    return false;
  }
  // Any other non-OK status (e.g. inexact) is rejected as well.
  if (Status != APFloat::opOK) {
    if (S.diagnosing())
      S.CCEDiag(DiagExpr, diag::note_invalid_subexpr_in_const_expr);
    return false;
  }
  Dst.copy(Val);
  return true;
}
220
222 const InterpFrame *Frame,
223 const CallExpr *Call) {
224 unsigned Depth = S.Current->getDepth();
225 auto isStdCall = [](const FunctionDecl *F) -> bool {
226 return F && F->isInStdNamespace() && F->getIdentifier() &&
227 F->getIdentifier()->isStr("is_constant_evaluated");
228 };
229 const InterpFrame *Caller = Frame->Caller;
230 // The current frame is the one for __builtin_is_constant_evaluated.
231 // The one above that, potentially the one for std::is_constant_evaluated().
233 S.getEvalStatus().Diag &&
234 (Depth == 0 || (Depth == 1 && isStdCall(Frame->getCallee())))) {
235 if (Caller && isStdCall(Frame->getCallee())) {
236 const Expr *E = Caller->getExpr(Caller->getRetPC());
237 S.report(E->getExprLoc(),
238 diag::warn_is_constant_evaluated_always_true_constexpr)
239 << "std::is_constant_evaluated" << E->getSourceRange();
240 } else {
241 S.report(Call->getExprLoc(),
242 diag::warn_is_constant_evaluated_always_true_constexpr)
243 << "__builtin_is_constant_evaluated" << Call->getSourceRange();
244 }
245 }
246
248 return true;
249}
250
251// __builtin_assume(int)
253 const InterpFrame *Frame,
254 const CallExpr *Call) {
255 assert(Call->getNumArgs() == 1);
256 discard(S.Stk, *S.getContext().classify(Call->getArg(0)));
257 return true;
258}
259
261 const InterpFrame *Frame,
262 const CallExpr *Call, unsigned ID) {
263 uint64_t Limit = ~static_cast<uint64_t>(0);
264 if (ID == Builtin::BIstrncmp || ID == Builtin::BI__builtin_strncmp ||
265 ID == Builtin::BIwcsncmp || ID == Builtin::BI__builtin_wcsncmp)
266 Limit = popToUInt64(S, Call->getArg(2));
267
268 const Pointer &B = S.Stk.pop<Pointer>();
269 const Pointer &A = S.Stk.pop<Pointer>();
270 if (ID == Builtin::BIstrcmp || ID == Builtin::BIstrncmp ||
271 ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp)
272 diagnoseNonConstexprBuiltin(S, OpPC, ID);
273
274 if (Limit == 0) {
275 pushInteger(S, 0, Call->getType());
276 return true;
277 }
278
279 if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
280 return false;
281
282 if (A.isDummy() || B.isDummy())
283 return false;
284 if (!A.isBlockPointer() || !B.isBlockPointer())
285 return false;
286
287 bool IsWide = ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp ||
288 ID == Builtin::BI__builtin_wcscmp ||
289 ID == Builtin::BI__builtin_wcsncmp;
290 assert(A.getFieldDesc()->isPrimitiveArray());
291 assert(B.getFieldDesc()->isPrimitiveArray());
292
293 // Different element types shouldn't happen, but with casts they can.
295 return false;
296
297 PrimType ElemT = *S.getContext().classify(getElemType(A));
298
299 auto returnResult = [&](int V) -> bool {
300 pushInteger(S, V, Call->getType());
301 return true;
302 };
303
304 unsigned IndexA = A.getIndex();
305 unsigned IndexB = B.getIndex();
306 uint64_t Steps = 0;
307 for (;; ++IndexA, ++IndexB, ++Steps) {
308
309 if (Steps >= Limit)
310 break;
311 const Pointer &PA = A.atIndex(IndexA);
312 const Pointer &PB = B.atIndex(IndexB);
313 if (!CheckRange(S, OpPC, PA, AK_Read) ||
314 !CheckRange(S, OpPC, PB, AK_Read)) {
315 return false;
316 }
317
318 if (IsWide) {
319 INT_TYPE_SWITCH(ElemT, {
320 T CA = PA.deref<T>();
321 T CB = PB.deref<T>();
322 if (CA > CB)
323 return returnResult(1);
324 if (CA < CB)
325 return returnResult(-1);
326 if (CA.isZero() || CB.isZero())
327 return returnResult(0);
328 });
329 continue;
330 }
331
332 uint8_t CA = PA.deref<uint8_t>();
333 uint8_t CB = PB.deref<uint8_t>();
334
335 if (CA > CB)
336 return returnResult(1);
337 if (CA < CB)
338 return returnResult(-1);
339 if (CA == 0 || CB == 0)
340 return returnResult(0);
341 }
342
343 return returnResult(0);
344}
345
347 const InterpFrame *Frame,
348 const CallExpr *Call, unsigned ID) {
349 const Pointer &StrPtr = S.Stk.pop<Pointer>().expand();
350
351 if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
352 diagnoseNonConstexprBuiltin(S, OpPC, ID);
353
354 if (!CheckArray(S, OpPC, StrPtr))
355 return false;
356
357 if (!CheckLive(S, OpPC, StrPtr, AK_Read))
358 return false;
359
360 if (!CheckDummy(S, OpPC, StrPtr.block(), AK_Read))
361 return false;
362
363 if (!StrPtr.getFieldDesc()->isPrimitiveArray())
364 return false;
365
366 assert(StrPtr.getFieldDesc()->isPrimitiveArray());
367 unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();
368
369 if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
370 [[maybe_unused]] const ASTContext &AC = S.getASTContext();
371 assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
372 }
373
374 size_t Len = 0;
375 for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
376 const Pointer &ElemPtr = StrPtr.atIndex(I);
377
378 if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
379 return false;
380
381 uint32_t Val;
382 switch (ElemSize) {
383 case 1:
384 Val = ElemPtr.deref<uint8_t>();
385 break;
386 case 2:
387 Val = ElemPtr.deref<uint16_t>();
388 break;
389 case 4:
390 Val = ElemPtr.deref<uint32_t>();
391 break;
392 default:
393 llvm_unreachable("Unsupported char size");
394 }
395 if (Val == 0)
396 break;
397 }
398
399 pushInteger(S, Len, Call->getType());
400
401 return true;
402}
403
405 const InterpFrame *Frame, const CallExpr *Call,
406 bool Signaling) {
407 const Pointer &Arg = S.Stk.pop<Pointer>();
408
409 if (!CheckLoad(S, OpPC, Arg))
410 return false;
411
412 assert(Arg.getFieldDesc()->isPrimitiveArray());
413
414 // Convert the given string to an integer using StringRef's API.
415 llvm::APInt Fill;
416 std::string Str;
417 assert(Arg.getNumElems() >= 1);
418 for (unsigned I = 0;; ++I) {
419 const Pointer &Elem = Arg.atIndex(I);
420
421 if (!CheckLoad(S, OpPC, Elem))
422 return false;
423
424 if (Elem.deref<int8_t>() == 0)
425 break;
426
427 Str += Elem.deref<char>();
428 }
429
430 // Treat empty strings as if they were zero.
431 if (Str.empty())
432 Fill = llvm::APInt(32, 0);
433 else if (StringRef(Str).getAsInteger(0, Fill))
434 return false;
435
436 const llvm::fltSemantics &TargetSemantics =
438 Call->getDirectCallee()->getReturnType());
439
440 Floating Result = S.allocFloat(TargetSemantics);
442 if (Signaling)
443 Result.copy(
444 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
445 else
446 Result.copy(
447 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
448 } else {
449 // Prior to IEEE 754-2008, architectures were allowed to choose whether
450 // the first bit of their significand was set for qNaN or sNaN. MIPS chose
451 // a different encoding to what became a standard in 2008, and for pre-
452 // 2008 revisions, MIPS interpreted sNaN-2008 as qNan and qNaN-2008 as
453 // sNaN. This is now known as "legacy NaN" encoding.
454 if (Signaling)
455 Result.copy(
456 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
457 else
458 Result.copy(
459 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
460 }
461
463 return true;
464}
465
467 const InterpFrame *Frame,
468 const CallExpr *Call) {
469 const llvm::fltSemantics &TargetSemantics =
471 Call->getDirectCallee()->getReturnType());
472
473 Floating Result = S.allocFloat(TargetSemantics);
474 Result.copy(APFloat::getInf(TargetSemantics));
476 return true;
477}
478
480 const InterpFrame *Frame) {
481 const Floating &Arg2 = S.Stk.pop<Floating>();
482 const Floating &Arg1 = S.Stk.pop<Floating>();
483 Floating Result = S.allocFloat(Arg1.getSemantics());
484
485 APFloat Copy = Arg1.getAPFloat();
486 Copy.copySign(Arg2.getAPFloat());
487 Result.copy(Copy);
489
490 return true;
491}
492
494 const InterpFrame *Frame, bool IsNumBuiltin) {
495 const Floating &RHS = S.Stk.pop<Floating>();
496 const Floating &LHS = S.Stk.pop<Floating>();
497 Floating Result = S.allocFloat(LHS.getSemantics());
498
499 if (IsNumBuiltin)
500 Result.copy(llvm::minimumnum(LHS.getAPFloat(), RHS.getAPFloat()));
501 else
502 Result.copy(minnum(LHS.getAPFloat(), RHS.getAPFloat()));
504 return true;
505}
506
508 const InterpFrame *Frame, bool IsNumBuiltin) {
509 const Floating &RHS = S.Stk.pop<Floating>();
510 const Floating &LHS = S.Stk.pop<Floating>();
511 Floating Result = S.allocFloat(LHS.getSemantics());
512
513 if (IsNumBuiltin)
514 Result.copy(llvm::maximumnum(LHS.getAPFloat(), RHS.getAPFloat()));
515 else
516 Result.copy(maxnum(LHS.getAPFloat(), RHS.getAPFloat()));
518 return true;
519}
520
521/// Defined as __builtin_isnan(...), to accommodate the fact that it can
522/// take a float, double, long double, etc.
523/// But for us, that's all a Floating anyway.
525 const InterpFrame *Frame,
526 const CallExpr *Call) {
527 const Floating &Arg = S.Stk.pop<Floating>();
528
529 pushInteger(S, Arg.isNan(), Call->getType());
530 return true;
531}
532
534 const InterpFrame *Frame,
535 const CallExpr *Call) {
536 const Floating &Arg = S.Stk.pop<Floating>();
537
538 pushInteger(S, Arg.isSignaling(), Call->getType());
539 return true;
540}
541
543 const InterpFrame *Frame, bool CheckSign,
544 const CallExpr *Call) {
545 const Floating &Arg = S.Stk.pop<Floating>();
546 APFloat F = Arg.getAPFloat();
547 bool IsInf = F.isInfinity();
548
549 if (CheckSign)
550 pushInteger(S, IsInf ? (F.isNegative() ? -1 : 1) : 0, Call->getType());
551 else
552 pushInteger(S, IsInf, Call->getType());
553 return true;
554}
555
557 const InterpFrame *Frame,
558 const CallExpr *Call) {
559 const Floating &Arg = S.Stk.pop<Floating>();
560
561 pushInteger(S, Arg.isFinite(), Call->getType());
562 return true;
563}
564
566 const InterpFrame *Frame,
567 const CallExpr *Call) {
568 const Floating &Arg = S.Stk.pop<Floating>();
569
570 pushInteger(S, Arg.isNormal(), Call->getType());
571 return true;
572}
573
575 const InterpFrame *Frame,
576 const CallExpr *Call) {
577 const Floating &Arg = S.Stk.pop<Floating>();
578
579 pushInteger(S, Arg.isDenormal(), Call->getType());
580 return true;
581}
582
584 const InterpFrame *Frame,
585 const CallExpr *Call) {
586 const Floating &Arg = S.Stk.pop<Floating>();
587
588 pushInteger(S, Arg.isZero(), Call->getType());
589 return true;
590}
591
593 const InterpFrame *Frame,
594 const CallExpr *Call) {
595 const Floating &Arg = S.Stk.pop<Floating>();
596
597 pushInteger(S, Arg.isNegative(), Call->getType());
598 return true;
599}
600
602 const CallExpr *Call, unsigned ID) {
603 const Floating &RHS = S.Stk.pop<Floating>();
604 const Floating &LHS = S.Stk.pop<Floating>();
605
607 S,
608 [&] {
609 switch (ID) {
610 case Builtin::BI__builtin_isgreater:
611 return LHS > RHS;
612 case Builtin::BI__builtin_isgreaterequal:
613 return LHS >= RHS;
614 case Builtin::BI__builtin_isless:
615 return LHS < RHS;
616 case Builtin::BI__builtin_islessequal:
617 return LHS <= RHS;
618 case Builtin::BI__builtin_islessgreater: {
619 ComparisonCategoryResult Cmp = LHS.compare(RHS);
620 return Cmp == ComparisonCategoryResult::Less ||
622 }
623 case Builtin::BI__builtin_isunordered:
625 default:
626 llvm_unreachable("Unexpected builtin ID: Should be a floating point "
627 "comparison function");
628 }
629 }(),
630 Call->getType());
631 return true;
632}
633
634/// First parameter to __builtin_isfpclass is the floating value, the
635/// second one is an integral value.
637 const InterpFrame *Frame,
638 const CallExpr *Call) {
639 APSInt FPClassArg = popToAPSInt(S, Call->getArg(1));
640 const Floating &F = S.Stk.pop<Floating>();
641
642 int32_t Result = static_cast<int32_t>(
643 (F.classify() & std::move(FPClassArg)).getZExtValue());
644 pushInteger(S, Result, Call->getType());
645
646 return true;
647}
648
649/// Five int values followed by one floating value.
650/// __builtin_fpclassify(int, int, int, int, int, float)
652 const InterpFrame *Frame,
653 const CallExpr *Call) {
654 const Floating &Val = S.Stk.pop<Floating>();
655
656 PrimType IntT = *S.getContext().classify(Call->getArg(0));
657 APSInt Values[5];
658 for (unsigned I = 0; I != 5; ++I)
659 Values[4 - I] = popToAPSInt(S.Stk, IntT);
660
661 unsigned Index;
662 switch (Val.getCategory()) {
663 case APFloat::fcNaN:
664 Index = 0;
665 break;
666 case APFloat::fcInfinity:
667 Index = 1;
668 break;
669 case APFloat::fcNormal:
670 Index = Val.isDenormal() ? 3 : 2;
671 break;
672 case APFloat::fcZero:
673 Index = 4;
674 break;
675 }
676
677 // The last argument is first on the stack.
678 assert(Index <= 4);
679
680 pushInteger(S, Values[Index], Call->getType());
681 return true;
682}
683
684static inline Floating abs(InterpState &S, const Floating &In) {
685 if (!In.isNegative())
686 return In;
687
688 Floating Output = S.allocFloat(In.getSemantics());
689 APFloat New = In.getAPFloat();
690 New.changeSign();
691 Output.copy(New);
692 return Output;
693}
694
695// The C standard says "fabs raises no floating-point exceptions,
696// even if x is a signaling NaN. The returned value is independent of
697// the current rounding direction mode." Therefore constant folding can
698// proceed without regard to the floating point settings.
699// Reference, WG14 N2478 F.10.4.3
701 const InterpFrame *Frame) {
702 const Floating &Val = S.Stk.pop<Floating>();
703 S.Stk.push<Floating>(abs(S, Val));
704 return true;
705}
706
708 const InterpFrame *Frame,
709 const CallExpr *Call) {
710 APSInt Val = popToAPSInt(S, Call->getArg(0));
711 if (Val ==
712 APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
713 return false;
714 if (Val.isNegative())
715 Val.negate();
716 pushInteger(S, Val, Call->getType());
717 return true;
718}
719
721 const InterpFrame *Frame,
722 const CallExpr *Call) {
723 APSInt Val;
724 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
725 const Pointer &Arg = S.Stk.pop<Pointer>();
726 Val = convertBoolVectorToInt(Arg);
727 } else {
728 Val = popToAPSInt(S, Call->getArg(0));
729 }
730 pushInteger(S, Val.popcount(), Call->getType());
731 return true;
732}
733
735 const InterpFrame *Frame,
736 const CallExpr *Call) {
737 // This is an unevaluated call, so there are no arguments on the stack.
738 assert(Call->getNumArgs() == 1);
739 const Expr *Arg = Call->getArg(0);
740
741 GCCTypeClass ResultClass =
743 int32_t ReturnVal = static_cast<int32_t>(ResultClass);
744 pushInteger(S, ReturnVal, Call->getType());
745 return true;
746}
747
748// __builtin_expect(long, long)
749// __builtin_expect_with_probability(long, long, double)
751 const InterpFrame *Frame,
752 const CallExpr *Call) {
753 // The return value is simply the value of the first parameter.
754 // We ignore the probability.
755 unsigned NumArgs = Call->getNumArgs();
756 assert(NumArgs == 2 || NumArgs == 3);
757
758 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
759 if (NumArgs == 3)
760 S.Stk.discard<Floating>();
761 discard(S.Stk, ArgT);
762
763 APSInt Val = popToAPSInt(S.Stk, ArgT);
764 pushInteger(S, Val, Call->getType());
765 return true;
766}
767
769 const InterpFrame *Frame,
770 const CallExpr *Call) {
771#ifndef NDEBUG
772 assert(Call->getArg(0)->isLValue());
773 PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
774 assert(PtrT == PT_Ptr &&
775 "Unsupported pointer type passed to __builtin_addressof()");
776#endif
777 return true;
778}
779
781 const InterpFrame *Frame,
782 const CallExpr *Call) {
783 return Call->getDirectCallee()->isConstexpr();
784}
785
787 const InterpFrame *Frame,
788 const CallExpr *Call) {
789 APSInt Arg = popToAPSInt(S, Call->getArg(0));
790
792 Arg.getZExtValue());
793 pushInteger(S, Result, Call->getType());
794 return true;
795}
796
797// Two integral values followed by a pointer (lhs, rhs, resultOut)
799 const CallExpr *Call,
800 unsigned BuiltinOp) {
801 const Pointer &ResultPtr = S.Stk.pop<Pointer>();
802 if (ResultPtr.isDummy() || !ResultPtr.isBlockPointer())
803 return false;
804
805 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
806 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
807 APSInt RHS = popToAPSInt(S.Stk, RHST);
808 APSInt LHS = popToAPSInt(S.Stk, LHST);
809 QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
810 PrimType ResultT = *S.getContext().classify(ResultType);
811 bool Overflow;
812
814 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
815 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
816 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
817 bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
819 bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
821 uint64_t LHSSize = LHS.getBitWidth();
822 uint64_t RHSSize = RHS.getBitWidth();
823 uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
824 uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);
825
826 // Add an additional bit if the signedness isn't uniformly agreed to. We
827 // could do this ONLY if there is a signed and an unsigned that both have
828 // MaxBits, but the code to check that is pretty nasty. The issue will be
829 // caught in the shrink-to-result later anyway.
830 if (IsSigned && !AllSigned)
831 ++MaxBits;
832
833 LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
834 RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
835 Result = APSInt(MaxBits, !IsSigned);
836 }
837
838 // Find largest int.
839 switch (BuiltinOp) {
840 default:
841 llvm_unreachable("Invalid value for BuiltinOp");
842 case Builtin::BI__builtin_add_overflow:
843 case Builtin::BI__builtin_sadd_overflow:
844 case Builtin::BI__builtin_saddl_overflow:
845 case Builtin::BI__builtin_saddll_overflow:
846 case Builtin::BI__builtin_uadd_overflow:
847 case Builtin::BI__builtin_uaddl_overflow:
848 case Builtin::BI__builtin_uaddll_overflow:
849 Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
850 : LHS.uadd_ov(RHS, Overflow);
851 break;
852 case Builtin::BI__builtin_sub_overflow:
853 case Builtin::BI__builtin_ssub_overflow:
854 case Builtin::BI__builtin_ssubl_overflow:
855 case Builtin::BI__builtin_ssubll_overflow:
856 case Builtin::BI__builtin_usub_overflow:
857 case Builtin::BI__builtin_usubl_overflow:
858 case Builtin::BI__builtin_usubll_overflow:
859 Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
860 : LHS.usub_ov(RHS, Overflow);
861 break;
862 case Builtin::BI__builtin_mul_overflow:
863 case Builtin::BI__builtin_smul_overflow:
864 case Builtin::BI__builtin_smull_overflow:
865 case Builtin::BI__builtin_smulll_overflow:
866 case Builtin::BI__builtin_umul_overflow:
867 case Builtin::BI__builtin_umull_overflow:
868 case Builtin::BI__builtin_umulll_overflow:
869 Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
870 : LHS.umul_ov(RHS, Overflow);
871 break;
872 }
873
874 // In the case where multiple sizes are allowed, truncate and see if
875 // the values are the same.
876 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
877 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
878 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
879 // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
880 // since it will give us the behavior of a TruncOrSelf in the case where
881 // its parameter <= its size. We previously set Result to be at least the
882 // type-size of the result, so getTypeSize(ResultType) <= Resu
883 APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
884 Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());
885
886 if (!APSInt::isSameValue(Temp, Result))
887 Overflow = true;
888 Result = std::move(Temp);
889 }
890
891 // Write Result to ResultPtr and put Overflow on the stack.
892 assignInteger(S, ResultPtr, ResultT, Result);
893 if (ResultPtr.canBeInitialized())
894 ResultPtr.initialize();
895
896 assert(Call->getDirectCallee()->getReturnType()->isBooleanType());
897 S.Stk.push<Boolean>(Overflow);
898 return true;
899}
900
901/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
903 const InterpFrame *Frame,
904 const CallExpr *Call, unsigned BuiltinOp) {
905 const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
906 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
907 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
908 APSInt CarryIn = popToAPSInt(S.Stk, LHST);
909 APSInt RHS = popToAPSInt(S.Stk, RHST);
910 APSInt LHS = popToAPSInt(S.Stk, LHST);
911
912 if (CarryOutPtr.isDummy() || !CarryOutPtr.isBlockPointer())
913 return false;
914
915 APSInt CarryOut;
916
918 // Copy the number of bits and sign.
919 Result = LHS;
920 CarryOut = LHS;
921
922 bool FirstOverflowed = false;
923 bool SecondOverflowed = false;
924 switch (BuiltinOp) {
925 default:
926 llvm_unreachable("Invalid value for BuiltinOp");
927 case Builtin::BI__builtin_addcb:
928 case Builtin::BI__builtin_addcs:
929 case Builtin::BI__builtin_addc:
930 case Builtin::BI__builtin_addcl:
931 case Builtin::BI__builtin_addcll:
932 Result =
933 LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
934 break;
935 case Builtin::BI__builtin_subcb:
936 case Builtin::BI__builtin_subcs:
937 case Builtin::BI__builtin_subc:
938 case Builtin::BI__builtin_subcl:
939 case Builtin::BI__builtin_subcll:
940 Result =
941 LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
942 break;
943 }
944 // It is possible for both overflows to happen but CGBuiltin uses an OR so
945 // this is consistent.
946 CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);
947
948 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
949 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
950 assignInteger(S, CarryOutPtr, CarryOutT, CarryOut);
951 CarryOutPtr.initialize();
952
953 assert(Call->getType() == Call->getArg(0)->getType());
954 pushInteger(S, Result, Call->getType());
955 return true;
956}
957
959 const InterpFrame *Frame, const CallExpr *Call,
960 unsigned BuiltinOp) {
961
962 std::optional<APSInt> Fallback;
963 if (BuiltinOp == Builtin::BI__builtin_clzg && Call->getNumArgs() == 2)
964 Fallback = popToAPSInt(S, Call->getArg(1));
965
966 APSInt Val;
967 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
968 const Pointer &Arg = S.Stk.pop<Pointer>();
969 Val = convertBoolVectorToInt(Arg);
970 } else {
971 Val = popToAPSInt(S, Call->getArg(0));
972 }
973
974 // When the argument is 0, the result of GCC builtins is undefined, whereas
975 // for Microsoft intrinsics, the result is the bit-width of the argument.
976 bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
977 BuiltinOp != Builtin::BI__lzcnt &&
978 BuiltinOp != Builtin::BI__lzcnt64;
979
980 if (Val == 0) {
981 if (Fallback) {
982 pushInteger(S, *Fallback, Call->getType());
983 return true;
984 }
985
986 if (ZeroIsUndefined)
987 return false;
988 }
989
990 pushInteger(S, Val.countl_zero(), Call->getType());
991 return true;
992}
993
995 const InterpFrame *Frame, const CallExpr *Call,
996 unsigned BuiltinID) {
997 std::optional<APSInt> Fallback;
998 if (BuiltinID == Builtin::BI__builtin_ctzg && Call->getNumArgs() == 2)
999 Fallback = popToAPSInt(S, Call->getArg(1));
1000
1001 APSInt Val;
1002 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
1003 const Pointer &Arg = S.Stk.pop<Pointer>();
1004 Val = convertBoolVectorToInt(Arg);
1005 } else {
1006 Val = popToAPSInt(S, Call->getArg(0));
1007 }
1008
1009 if (Val == 0) {
1010 if (Fallback) {
1011 pushInteger(S, *Fallback, Call->getType());
1012 return true;
1013 }
1014 return false;
1015 }
1016
1017 pushInteger(S, Val.countr_zero(), Call->getType());
1018 return true;
1019}
1020
1022 const InterpFrame *Frame,
1023 const CallExpr *Call) {
1024 const APSInt &Val = popToAPSInt(S, Call->getArg(0));
1025 if (Val.getBitWidth() == 8)
1026 pushInteger(S, Val, Call->getType());
1027 else
1028 pushInteger(S, Val.byteSwap(), Call->getType());
1029 return true;
1030}
1031
1032/// bool __atomic_always_lock_free(size_t, void const volatile*)
1033/// bool __atomic_is_lock_free(size_t, void const volatile*)
1035 const InterpFrame *Frame,
1036 const CallExpr *Call,
1037 unsigned BuiltinOp) {
1038 auto returnBool = [&S](bool Value) -> bool {
1039 S.Stk.push<Boolean>(Value);
1040 return true;
1041 };
1042
1043 const Pointer &Ptr = S.Stk.pop<Pointer>();
1044 uint64_t SizeVal = popToUInt64(S, Call->getArg(0));
1045
1046 // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
1047 // of two less than or equal to the maximum inline atomic width, we know it
1048 // is lock-free. If the size isn't a power of two, or greater than the
1049 // maximum alignment where we promote atomics, we know it is not lock-free
1050 // (at least not in the sense of atomic_is_lock_free). Otherwise,
1051 // the answer can only be determined at runtime; for example, 16-byte
1052 // atomics have lock-free implementations on some, but not all,
1053 // x86-64 processors.
1054
1055 // Check power-of-two.
1056 CharUnits Size = CharUnits::fromQuantity(SizeVal);
1057 if (Size.isPowerOfTwo()) {
1058 // Check against inlining width.
1059 unsigned InlineWidthBits =
1061 if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
1062
1063 // OK, we will inline appropriately-aligned operations of this size,
1064 // and _Atomic(T) is appropriately-aligned.
1065 if (Size == CharUnits::One())
1066 return returnBool(true);
1067
1068 // Same for null pointers.
1069 assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
1070 if (Ptr.isZero())
1071 return returnBool(true);
1072
1073 if (Ptr.isIntegralPointer()) {
1074 uint64_t IntVal = Ptr.getIntegerRepresentation();
1075 if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
1076 return returnBool(true);
1077 }
1078
1079 const Expr *PtrArg = Call->getArg(1);
1080 // Otherwise, check if the type's alignment against Size.
1081 if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
1082 // Drop the potential implicit-cast to 'const volatile void*', getting
1083 // the underlying type.
1084 if (ICE->getCastKind() == CK_BitCast)
1085 PtrArg = ICE->getSubExpr();
1086 }
1087
1088 if (const auto *PtrTy = PtrArg->getType()->getAs<PointerType>()) {
1089 QualType PointeeType = PtrTy->getPointeeType();
1090 if (!PointeeType->isIncompleteType() &&
1091 S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
1092 // OK, we will inline operations on this object.
1093 return returnBool(true);
1094 }
1095 }
1096 }
1097 }
1098
1099 if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
1100 return returnBool(false);
1101
1102 return false;
1103}
1104
1105/// bool __c11_atomic_is_lock_free(size_t)
// NOTE(review): doxygen-scrape artifact — the opening signature line (1106)
// and the initializer of InlineWidthBits (1116; presumably the target's max
// atomic inline width — TODO confirm against the real source) are missing
// from this view. Code left byte-identical; comments only.
 1107 CodePtr OpPC,
 1108 const InterpFrame *Frame,
 1109 const CallExpr *Call) {
 1110 uint64_t SizeVal = popToUInt64(S, Call->getArg(0));
 1111
 1112 CharUnits Size = CharUnits::fromQuantity(SizeVal);
 1113 if (Size.isPowerOfTwo()) {
 1114 // Check against inlining width.
 1115 unsigned InlineWidthBits =
 1117 if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
 1118 S.Stk.push<Boolean>(true);
 1119 return true;
 1120 }
 1121 }
 1122
 // Unlike __atomic_always_lock_free, an unknown answer here fails the
 // constant evaluation instead of producing `false`.
 1123 return false; // returnBool(false);
 1124}
1125
1126/// __builtin_complex(float A, float B);
// NOTE(review): doxygen-scrape artifact — the opening signature line (1127)
// is missing from this view. Code left byte-identical; comments only.
// Pops the two floating operands (note reverse order: second arg first) and
// writes them as the real (index 0) and imaginary (index 1) elements of the
// complex result that stays on the stack.
 1128 const InterpFrame *Frame,
 1129 const CallExpr *Call) {
 1130 const Floating &Arg2 = S.Stk.pop<Floating>();
 1131 const Floating &Arg1 = S.Stk.pop<Floating>();
 1132 Pointer &Result = S.Stk.peek<Pointer>();
 1133
 1134 Result.elem<Floating>(0) = Arg1;
 1135 Result.elem<Floating>(1) = Arg2;
 1136 Result.initializeAllElements();
 1137
 1138 return true;
 1139}
1140
1141/// __builtin_is_aligned()
1142/// __builtin_align_up()
1143/// __builtin_align_down()
1144/// The first parameter is either an integer or a pointer.
1145/// The second parameter is the requested alignment as an integer.
// NOTE(review): doxygen-scrape artifact — the opening signature line (1146)
// and the initializer of BaseAlignment (1190; presumably the declared
// alignment of the pointee's base object — TODO confirm) are missing from
// this view. Code left byte-identical; comments only.
 1147 const InterpFrame *Frame,
 1148 const CallExpr *Call,
 1149 unsigned BuiltinOp) {
 1150 const APSInt &Alignment = popToAPSInt(S, Call->getArg(1));
 1151
 // Alignment must be a positive power of two.
 1152 if (Alignment < 0 || !Alignment.isPowerOf2()) {
 1153 S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
 1154 return false;
 1155 }
 1156 unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
 // Largest representable power of two for the first argument's width.
 1157 APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
 1158 if (APSInt::compareValues(Alignment, MaxValue) > 0) {
 1159 S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
 1160 << MaxValue << Call->getArg(0)->getType() << Alignment;
 1161 return false;
 1162 }
 1163
 1164 // The first parameter is either an integer or a pointer.
 1165 PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));
 1166
 // Integer case: pure bit arithmetic on the value.
 1167 if (isIntegralType(FirstArgT)) {
 1168 const APSInt &Src = popToAPSInt(S.Stk, FirstArgT);
 1169 APInt AlignMinusOne = Alignment.extOrTrunc(Src.getBitWidth()) - 1;
 1170 if (BuiltinOp == Builtin::BI__builtin_align_up) {
 1171 APSInt AlignedVal =
 1172 APSInt((Src + AlignMinusOne) & ~AlignMinusOne, Src.isUnsigned());
 1173 pushInteger(S, AlignedVal, Call->getType());
 1174 } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
 1175 APSInt AlignedVal = APSInt(Src & ~AlignMinusOne, Src.isUnsigned());
 1176 pushInteger(S, AlignedVal, Call->getType());
 1177 } else {
 1178 assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
 1179 S.Stk.push<Boolean>((Src & AlignMinusOne) == 0);
 1180 }
 1181 return true;
 1182 }
 1183 assert(FirstArgT == PT_Ptr);
 1184 const Pointer &Ptr = S.Stk.pop<Pointer>();
 1185 if (!Ptr.isBlockPointer())
 1186 return false;
 1187
 // Pointer case: reason from the base object's alignment plus the offset.
 1188 unsigned PtrOffset = Ptr.getIndex();
 1189 CharUnits BaseAlignment =
 1191 CharUnits PtrAlign =
 1192 BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));
 1193
 1194 if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
 1195 if (PtrAlign.getQuantity() >= Alignment) {
 1196 S.Stk.push<Boolean>(true);
 1197 return true;
 1198 }
 1199 // If the alignment is not known to be sufficient, some cases could still
 1200 // be aligned at run time. However, if the requested alignment is less or
 1201 // equal to the base alignment and the offset is not aligned, we know that
 1202 // the run-time value can never be aligned.
 1203 if (BaseAlignment.getQuantity() >= Alignment &&
 1204 PtrAlign.getQuantity() < Alignment) {
 1205 S.Stk.push<Boolean>(false);
 1206 return true;
 1207 }
 1208
 1209 S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
 1210 << Alignment;
 1211 return false;
 1212 }
 1213
 1214 assert(BuiltinOp == Builtin::BI__builtin_align_down ||
 1215 BuiltinOp == Builtin::BI__builtin_align_up);
 1216
 1217 // For align_up/align_down, we can return the same value if the alignment
 1218 // is known to be greater or equal to the requested value.
 1219 if (PtrAlign.getQuantity() >= Alignment) {
 1220 S.Stk.push<Pointer>(Ptr);
 1221 return true;
 1222 }
 1223
 1224 // The alignment could be greater than the minimum at run-time, so we cannot
 1225 // infer much about the resulting pointer value. One case is possible:
 1226 // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
 1227 // can infer the correct index if the requested alignment is smaller than
 1228 // the base alignment so we can perform the computation on the offset.
 1229 if (BaseAlignment.getQuantity() >= Alignment) {
 1230 assert(Alignment.getBitWidth() <= 64 &&
 1231 "Cannot handle > 64-bit address-space");
 1232 uint64_t Alignment64 = Alignment.getZExtValue();
 1233 CharUnits NewOffset =
 1234 CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
 1235 ? llvm::alignDown(PtrOffset, Alignment64)
 1236 : llvm::alignTo(PtrOffset, Alignment64));
 1237
 1238 S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
 1239 return true;
 1240 }
 1241
 1242 // Otherwise, we cannot constant-evaluate the result.
 1243 S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
 1244 return false;
 1245}
1246
1247/// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
// NOTE(review): doxygen-scrape artifact — the opening signature line (1248)
// is missing from this view. Code left byte-identical; comments only.
// Verifies (at compile time) that the assumption holds for the given pointer
// and, on success, re-pushes the unchanged pointer.
 1249 const InterpFrame *Frame,
 1250 const CallExpr *Call) {
 1251 assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);
 1252
 1253 std::optional<APSInt> ExtraOffset;
 1254 if (Call->getNumArgs() == 3)
 1255 ExtraOffset = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));
 1256
 1257 APSInt Alignment = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
 1258 const Pointer &Ptr = S.Stk.pop<Pointer>();
 1259
 1260 CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());
 1261
 1262 // If there is a base object, then it must have the correct alignment.
 1263 if (Ptr.isBlockPointer()) {
 1264 CharUnits BaseAlignment;
 1265 if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
 1266 BaseAlignment = S.getASTContext().getDeclAlign(VD);
 1267 else if (const auto *E = Ptr.getDeclDesc()->asExpr())
 1268 BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);
 1269
 1270 if (BaseAlignment < Align) {
 1271 S.CCEDiag(Call->getArg(0),
 1272 diag::note_constexpr_baa_insufficient_alignment)
 1273 << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
 1274 return false;
 1275 }
 1276 }
 1277
 // Check the actual offset (minus the optional extra offset) against the
 // requested alignment.
 1278 APValue AV = Ptr.toAPValue(S.getASTContext());
 1279 CharUnits AVOffset = AV.getLValueOffset();
 1280 if (ExtraOffset)
 1281 AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
 1282 if (AVOffset.alignTo(Align) != AVOffset) {
 1283 if (Ptr.isBlockPointer())
 1284 S.CCEDiag(Call->getArg(0),
 1285 diag::note_constexpr_baa_insufficient_alignment)
 1286 << 1 << AVOffset.getQuantity() << Align.getQuantity();
 1287 else
 1288 S.CCEDiag(Call->getArg(0),
 1289 diag::note_constexpr_baa_value_insufficient_alignment)
 1290 << AVOffset.getQuantity() << Align.getQuantity();
 1291 return false;
 1292 }
 1293
 1294 S.Stk.push<Pointer>(Ptr);
 1295 return true;
 1296}
1297
1298/// (CarryIn, LHS, RHS, Result)
// NOTE(review): doxygen-scrape artifact — the opening signature line (1299)
// is missing from this view. Code left byte-identical; comments only.
// Implements the x86 addcarry/subborrow builtins: computes LHS +/- RHS with
// carry/borrow-in, stores the truncated result through the 4th (pointer)
// argument, and returns the carry/borrow-out bit.
 1300 CodePtr OpPC,
 1301 const InterpFrame *Frame,
 1302 const CallExpr *Call,
 1303 unsigned BuiltinOp) {
 1304 if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
 1305 !Call->getArg(1)->getType()->isIntegerType() ||
 1306 !Call->getArg(2)->getType()->isIntegerType())
 1307 return false;
 1308
 1309 const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
 1310
 1311 APSInt RHS = popToAPSInt(S, Call->getArg(2));
 1312 APSInt LHS = popToAPSInt(S, Call->getArg(1));
 1313 APSInt CarryIn = popToAPSInt(S, Call->getArg(0));
 1314
 1315 bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
 1316 BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;
 1317
 // Compute in BitWidth+1 bits so the extra top bit is the carry/borrow-out.
 1318 unsigned BitWidth = LHS.getBitWidth();
 1319 unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
 1320 APInt ExResult =
 1321 IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
 1322 : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));
 1323
 1324 APInt Result = ExResult.extractBits(BitWidth, 0);
 1325 APSInt CarryOut =
 1326 APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);
 1327
 1328 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
 1329 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
 1330 assignInteger(S, CarryOutPtr, CarryOutT, APSInt(std::move(Result), true));
 1331
 1332 pushInteger(S, CarryOut, Call->getType());
 1333
 1334 return true;
 1335}
1336
// NOTE(review): doxygen-scrape artifact — the opening signature line and the
// lines declaring/computing `Layout` (presumably via the OSLog analysis in
// clang/AST/OSLog.h — TODO confirm) are missing from this view. Code left
// byte-identical; comments only. Pushes the computed buffer size in bytes.
 1338 CodePtr OpPC,
 1339 const InterpFrame *Frame,
 1340 const CallExpr *Call) {
 1343 pushInteger(S, Layout.size().getQuantity(), Call->getType());
 1344 return true;
 1345}
1346
// Computes the pointer-auth string discriminator: a stable SipHash of the
// string-literal argument (excluding its null terminator).
// NOTE(review): doxygen-scrape artifact — the rest of the signature (line
// 1348) is missing from this view. Code left byte-identical; comments only.
 1347 static bool
 1349 const InterpFrame *Frame,
 1350 const CallExpr *Call) {
 1351 const auto &Ptr = S.Stk.pop<Pointer>();
 1352 assert(Ptr.getFieldDesc()->isPrimitiveArray());
 1353
 1354 // This should be created for a StringLiteral, so should always hold at
 1355 // least one array element (the null terminator, which we drop below).
 1356 assert(Ptr.getFieldDesc()->getNumElems() >= 1);
 1357 StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
 1358 uint64_t Result = getPointerAuthStableSipHash(R);
 1359 pushInteger(S, Result, Call->getType());
 1360 return true;
 1361}
1362
// Evaluates __builtin_infer_alloc_token: infers the allocated type from the
// call and maps it to an allocation token via llvm::getAllocToken.
// NOTE(review): doxygen-scrape artifact — the opening signature line (1363)
// is missing from this view. Code left byte-identical; comments only.
 1364 const InterpFrame *Frame,
 1365 const CallExpr *Call) {
 1366 const ASTContext &ASTCtx = S.getASTContext();
 1367 uint64_t BitWidth = ASTCtx.getTypeSize(ASTCtx.getSizeType());
 1368 auto Mode =
 1369 ASTCtx.getLangOpts().AllocTokenMode.value_or(llvm::DefaultAllocTokenMode);
 1370 auto MaxTokensOpt = ASTCtx.getLangOpts().AllocTokenMax;
 // Default cap: the max value representable in size_t's bit width.
 1371 uint64_t MaxTokens =
 1372 MaxTokensOpt.value_or(0) ? *MaxTokensOpt : (~0ULL >> (64 - BitWidth));
 1373
 1374 // We do not read any of the arguments; discard them.
 1375 for (int I = Call->getNumArgs() - 1; I >= 0; --I)
 1376 discard(S.Stk, *S.getContext().classify(Call->getArg(I)));
 1377
 1378 // Note: Type inference from a surrounding cast is not supported in
 1379 // constexpr evaluation.
 1380 QualType AllocType = infer_alloc::inferPossibleType(Call, ASTCtx, nullptr);
 1381 if (AllocType.isNull()) {
 1382 S.CCEDiag(Call,
 1383 diag::note_constexpr_infer_alloc_token_type_inference_failed);
 1384 return false;
 1385 }
 1386
 1387 auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, ASTCtx);
 1388 if (!ATMD) {
 1389 S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_no_metadata);
 1390 return false;
 1391 }
 1392
 1393 auto MaybeToken = llvm::getAllocToken(Mode, *ATMD, MaxTokens);
 1394 if (!MaybeToken) {
 1395 S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_stateful_mode);
 1396 return false;
 1397 }
 1398
 1399 pushInteger(S, llvm::APInt(BitWidth, *MaybeToken), ASTCtx.getSizeType());
 1400 return true;
 1401}
1402
// Evaluates __builtin_operator_new inside std::allocator<T>::allocate:
// converts the byte count into an element count and creates a dynamic block.
// NOTE(review): doxygen-scrape artifact — the opening signature line and
// several continuation lines (1460, 1478, 1492, 1499, 1503, 1505 — e.g. the
// CheckArraySize width limit and allocator-call trailing arguments) are
// missing from this view. Code left byte-identical; comments only.
 1405 const CallExpr *Call) {
 1406 // A call to __operator_new is only valid within std::allocate<>::allocate.
 1407 // Walk up the call stack to find the appropriate caller and get the
 1408 // element type from it.
 1409 auto [NewCall, ElemType] = S.getStdAllocatorCaller("allocate");
 1410
 1411 if (ElemType.isNull()) {
 1412 S.FFDiag(Call, S.getLangOpts().CPlusPlus20
 1413 ? diag::note_constexpr_new_untyped
 1414 : diag::note_constexpr_new);
 1415 return false;
 1416 }
 1417 assert(NewCall);
 1418
 1419 if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
 1420 S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
 1421 << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
 1422 return false;
 1423 }
 1424
 1425 // We only care about the first parameter (the size), so discard all the
 1426 // others.
 1427 {
 1428 unsigned NumArgs = Call->getNumArgs();
 1429 assert(NumArgs >= 1);
 1430
 1431 // The std::nothrow_t arg never gets put on the stack.
 1432 if (Call->getArg(NumArgs - 1)->getType()->isNothrowT())
 1433 --NumArgs;
 1434 auto Args = ArrayRef(Call->getArgs(), Call->getNumArgs());
 1435 // First arg is needed.
 1436 Args = Args.drop_front();
 1437
 1438 // Discard the rest.
 1439 for (const Expr *Arg : Args)
 1440 discard(S.Stk, *S.getContext().classify(Arg));
 1441 }
 1442
 1443 APSInt Bytes = popToAPSInt(S, Call->getArg(0));
 1444 CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
 1445 assert(!ElemSize.isZero());
 1446 // Divide the number of bytes by sizeof(ElemType), so we get the number of
 1447 // elements we should allocate.
 1448 APInt NumElems, Remainder;
 1449 APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
 1450 APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
 1451 if (Remainder != 0) {
 1452 // This likely indicates a bug in the implementation of 'std::allocator'.
 1453 S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
 1454 << Bytes << APSInt(ElemSizeAP, true) << ElemType;
 1455 return false;
 1456 }
 1457
 1458 // NB: The same check we're using in CheckArraySize()
 1459 if (NumElems.getActiveBits() >
 1461 NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
 1462 // FIXME: NoThrow check?
 1463 const SourceInfo &Loc = S.Current->getSource(OpPC);
 1464 S.FFDiag(Loc, diag::note_constexpr_new_too_large)
 1465 << NumElems.getZExtValue();
 1466 return false;
 1467 }
 1468
 1469 if (!CheckArraySize(S, OpPC, NumElems.getZExtValue()))
 1470 return false;
 1471
 1472 bool IsArray = NumElems.ugt(1);
 1473 OptPrimType ElemT = S.getContext().classify(ElemType);
 1474 DynamicAllocator &Allocator = S.getAllocator();
 // Primitive element type: allocate an array of primitives directly.
 1475 if (ElemT) {
 1476 Block *B =
 1477 Allocator.allocate(NewCall, *ElemT, NumElems.getZExtValue(),
 1479 assert(B);
 1480 S.Stk.push<Pointer>(Pointer(B).atIndex(0));
 1481 return true;
 1482 }
 1483
 1484 assert(!ElemT);
 1485
 1486 // Composite arrays
 1487 if (IsArray) {
 1488 const Descriptor *Desc =
 1489 S.P.createDescriptor(NewCall, ElemType.getTypePtr(), std::nullopt);
 1490 Block *B =
 1491 Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
 1493 assert(B);
 1494 S.Stk.push<Pointer>(Pointer(B).atIndex(0).narrow());
 1495 return true;
 1496 }
 1497
 1498 // Records. Still allocate them as single-element arrays.
 1500 ElemType, NumElems, nullptr, ArraySizeModifier::Normal, 0);
 1501
 1502 const Descriptor *Desc = S.P.createDescriptor(NewCall, AllocType.getTypePtr(),
 1504 Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
 1506 assert(B);
 1507 S.Stk.push<Pointer>(Pointer(B).atIndex(0).narrow());
 1508 return true;
 1509}
1510
// Evaluates __builtin_operator_delete inside std::allocator<T>::deallocate:
// validates the pointer, releases the dynamic block, and checks that the
// allocation/deallocation forms match.
// NOTE(review): doxygen-scrape artifact — the opening signature line (1511),
// the condition at 1517 (guarding the early discard+fail), and a diagnostic
// continuation line (1542) are missing from this view. Code left
// byte-identical; comments only.
 1512 const InterpFrame *Frame,
 1513 const CallExpr *Call) {
 1514 const Expr *Source = nullptr;
 1515 const Block *BlockToDelete = nullptr;
 1516
 1518 S.Stk.discard<Pointer>();
 1519 return false;
 1520 }
 1521
 1522 // This is permitted only within a call to std::allocator<T>::deallocate.
 1523 if (!S.getStdAllocatorCaller("deallocate")) {
 1524 S.FFDiag(Call);
 1525 S.Stk.discard<Pointer>();
 1526 return true;
 1527 }
 1528
 1529 {
 1530 const Pointer &Ptr = S.Stk.pop<Pointer>();
 1531
 // Deallocating a null pointer is a no-op (with a CCE diagnostic).
 1532 if (Ptr.isZero()) {
 1533 S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
 1534 return true;
 1535 }
 1536
 1537 Source = Ptr.getDeclDesc()->asExpr();
 1538 BlockToDelete = Ptr.block();
 1539
 1540 if (!BlockToDelete->isDynamic()) {
 1541 S.FFDiag(Call, diag::note_constexpr_delete_not_heap_alloc)
 1543 if (const auto *D = Ptr.getFieldDesc()->asDecl())
 1544 S.Note(D->getLocation(), diag::note_declared_at);
 1545 }
 1546 }
 1547 assert(BlockToDelete);
 1548
 // Look up the allocation form BEFORE deallocating; the allocator forgets
 // the block once it is released.
 1549 DynamicAllocator &Allocator = S.getAllocator();
 1550 const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
 1551 std::optional<DynamicAllocator::Form> AllocForm =
 1552 Allocator.getAllocationForm(Source);
 1553
 1554 if (!Allocator.deallocate(Source, BlockToDelete, S)) {
 1555 // Nothing has been deallocated, this must be a double-delete.
 1556 const SourceInfo &Loc = S.Current->getSource(OpPC);
 1557 S.FFDiag(Loc, diag::note_constexpr_double_delete);
 1558 return false;
 1559 }
 1560 assert(AllocForm);
 1561
 1562 return CheckNewDeleteForms(
 1563 S, OpPC, *AllocForm, DynamicAllocator::Form::Operator, BlockDesc, Source);
1565
// Pops a floating-point argument and pushes it back unchanged.
// NOTE(review): the opening signature line (1566) is missing from this
// doxygen-scrape view, so which builtin this handles cannot be confirmed
// here — presumably an identity-like builtin (e.g. an arithmetic fence);
// verify against the real source. Code left byte-identical; comments only.
 1567 const InterpFrame *Frame,
 1568 const CallExpr *Call) {
 1569 const Floating &Arg0 = S.Stk.pop<Floating>();
 1570 S.Stk.push<Floating>(Arg0);
 1571 return true;
 1572}
1573
// Evaluates the integer __builtin_reduce_* builtins: folds all vector
// elements into one scalar (add/mul diagnose overflow; and/or/xor/min/max
// cannot overflow).
// NOTE(review): doxygen-scrape artifact — the opening signature lines and
// the INT_TYPE_SWITCH header that opens the `T`-generic lambda (closed by
// the `});` at doxygen line 1625) are missing from this view. Code left
// byte-identical; comments only.
 1575 const CallExpr *Call, unsigned ID) {
 1576 const Pointer &Arg = S.Stk.pop<Pointer>();
 1577 assert(Arg.getFieldDesc()->isPrimitiveArray());
 1578
 1579 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
 1580 assert(Call->getType() == ElemType);
 1581 PrimType ElemT = *S.getContext().classify(ElemType);
 1582 unsigned NumElems = Arg.getNumElems();
 1584
 1585 T Result = Arg.elem<T>(0);
 1586 unsigned BitWidth = Result.bitWidth();
 1587 for (unsigned I = 1; I != NumElems; ++I) {
 1588 T Elem = Arg.elem<T>(I);
 1589 T PrevResult = Result;
 1590
 1591 if (ID == Builtin::BI__builtin_reduce_add) {
 1592 if (T::add(Result, Elem, BitWidth, &Result)) {
 // Recompute the overflowing sum in a wider type for the diagnostic.
 1593 unsigned OverflowBits = BitWidth + 1;
 1594 (void)handleOverflow(S, OpPC,
 1595 (PrevResult.toAPSInt(OverflowBits) +
 1596 Elem.toAPSInt(OverflowBits)));
 1597 return false;
 1598 }
 1599 } else if (ID == Builtin::BI__builtin_reduce_mul) {
 1600 if (T::mul(Result, Elem, BitWidth, &Result)) {
 1601 unsigned OverflowBits = BitWidth * 2;
 1602 (void)handleOverflow(S, OpPC,
 1603 (PrevResult.toAPSInt(OverflowBits) *
 1604 Elem.toAPSInt(OverflowBits)));
 1605 return false;
 1606 }
 1607
 1608 } else if (ID == Builtin::BI__builtin_reduce_and) {
 1609 (void)T::bitAnd(Result, Elem, BitWidth, &Result);
 1610 } else if (ID == Builtin::BI__builtin_reduce_or) {
 1611 (void)T::bitOr(Result, Elem, BitWidth, &Result);
 1612 } else if (ID == Builtin::BI__builtin_reduce_xor) {
 1613 (void)T::bitXor(Result, Elem, BitWidth, &Result);
 1614 } else if (ID == Builtin::BI__builtin_reduce_min) {
 1615 if (Elem < Result)
 1616 Result = Elem;
 1617 } else if (ID == Builtin::BI__builtin_reduce_max) {
 1618 if (Elem > Result)
 1619 Result = Elem;
 1620 } else {
 1621 llvm_unreachable("Unhandled vector reduce builtin");
 1622 }
 1623 }
 1624 pushInteger(S, Result.toAPSInt(), Call->getType());
 1625 });
 1626
 1627 return true;
 1628}
1629
// Evaluates abs-style builtins for a scalar integer, a scalar float, or a
// vector of either.
// NOTE(review): doxygen-scrape artifact — the opening signature line (1630),
// the INT_TYPE_SWITCH header for the per-element integer case (closed by the
// `});` at doxygen line 1668), a continuation line of the APSInt
// construction (1667), and the line at 1674 (presumably initializing the
// destination's elements — TODO confirm) are missing from this view. Code
// left byte-identical; comments only.
 1631 const InterpFrame *Frame,
 1632 const CallExpr *Call,
 1633 unsigned BuiltinID) {
 1634 assert(Call->getNumArgs() == 1);
 1635 QualType Ty = Call->getArg(0)->getType();
 1636 if (Ty->isIntegerType()) {
 1637 APSInt Val = popToAPSInt(S, Call->getArg(0));
 1638 pushInteger(S, Val.abs(), Call->getType());
 1639 return true;
 1640 }
 1641
 1642 if (Ty->isFloatingType()) {
 1643 Floating Val = S.Stk.pop<Floating>();
 1644 Floating Result = abs(S, Val);
 1645 S.Stk.push<Floating>(Result);
 1646 return true;
 1647 }
 1648
 1649 // Otherwise, the argument must be a vector.
 1650 assert(Call->getArg(0)->getType()->isVectorType());
 1651 const Pointer &Arg = S.Stk.pop<Pointer>();
 1652 assert(Arg.getFieldDesc()->isPrimitiveArray());
 1653 const Pointer &Dst = S.Stk.peek<Pointer>();
 1654 assert(Dst.getFieldDesc()->isPrimitiveArray());
 1655 assert(Arg.getFieldDesc()->getNumElems() ==
 1656 Dst.getFieldDesc()->getNumElems());
 1657
 1658 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
 1659 PrimType ElemT = *S.getContext().classify(ElemType);
 1660 unsigned NumElems = Arg.getNumElems();
 1661 // we can either have a vector of integer or a vector of floating point
 1662 for (unsigned I = 0; I != NumElems; ++I) {
 1663 if (ElemType->isIntegerType()) {
 1665 Dst.elem<T>(I) = T::from(static_cast<T>(
 1666 APSInt(Arg.elem<T>(I).toAPSInt().abs(),
 1668 });
 1669 } else {
 1670 Floating Val = Arg.elem<Floating>(I);
 1671 Dst.elem<Floating>(I) = abs(S, Val);
 1672 }
 1673 }
 1675
 1676 return true;
 1677}
1678
1679/// Can be called with an integer or vector as the first and only parameter.
// NOTE(review): doxygen-scrape artifact — the opening signature line (1680)
// and the INT_TYPE_SWITCH header for the vector loop body (1742, closed by
// the `});` at doxygen line 1761) are missing from this view. Code left
// byte-identical; comments only.
// Evaluates __builtin_elementwise_clzg/ctzg; the optional second argument
// supplies the result for zero inputs (otherwise zero input is an error).
 1681 CodePtr OpPC,
 1682 const InterpFrame *Frame,
 1683 const CallExpr *Call,
 1684 unsigned BuiltinID) {
 1685 bool HasZeroArg = Call->getNumArgs() == 2;
 1686 bool IsCTTZ = BuiltinID == Builtin::BI__builtin_elementwise_ctzg;
 1687 assert(Call->getNumArgs() == 1 || HasZeroArg);
 1688 if (Call->getArg(0)->getType()->isIntegerType()) {
 1689 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
 1690 APSInt Val = popToAPSInt(S.Stk, ArgT);
 1691 std::optional<APSInt> ZeroVal;
 // Args are popped in reverse order: the top of the stack is the second
 // (zero-fallback) argument when present.
 1692 if (HasZeroArg) {
 1693 ZeroVal = Val;
 1694 Val = popToAPSInt(S.Stk, ArgT);
 1695 }
 1696
 1697 if (Val.isZero()) {
 1698 if (ZeroVal) {
 1699 pushInteger(S, *ZeroVal, Call->getType());
 1700 return true;
 1701 }
 1702 // If we haven't been provided the second argument, the result is
 1703 // undefined
 1704 S.FFDiag(S.Current->getSource(OpPC),
 1705 diag::note_constexpr_countzeroes_zero)
 1706 << /*IsTrailing=*/IsCTTZ;
 1707 return false;
 1708 }
 1709
 1710 if (BuiltinID == Builtin::BI__builtin_elementwise_clzg) {
 1711 pushInteger(S, Val.countLeadingZeros(), Call->getType());
 1712 } else {
 1713 pushInteger(S, Val.countTrailingZeros(), Call->getType());
 1714 }
 1715 return true;
 1716 }
 1717 // Otherwise, the argument must be a vector.
 1718 const ASTContext &ASTCtx = S.getASTContext();
 1719 Pointer ZeroArg;
 1720 if (HasZeroArg) {
 1721 assert(Call->getArg(1)->getType()->isVectorType() &&
 1722 ASTCtx.hasSameUnqualifiedType(Call->getArg(0)->getType(),
 1723 Call->getArg(1)->getType()));
 1724 (void)ASTCtx;
 1725 ZeroArg = S.Stk.pop<Pointer>();
 1726 assert(ZeroArg.getFieldDesc()->isPrimitiveArray());
 1727 }
 1728 assert(Call->getArg(0)->getType()->isVectorType());
 1729 const Pointer &Arg = S.Stk.pop<Pointer>();
 1730 assert(Arg.getFieldDesc()->isPrimitiveArray());
 1731 const Pointer &Dst = S.Stk.peek<Pointer>();
 1732 assert(Dst.getFieldDesc()->isPrimitiveArray());
 1733 assert(Arg.getFieldDesc()->getNumElems() ==
 1734 Dst.getFieldDesc()->getNumElems());
 1735
 1736 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
 1737 PrimType ElemT = *S.getContext().classify(ElemType);
 1738 unsigned NumElems = Arg.getNumElems();
 1739
 1740 // FIXME: Reading from uninitialized vector elements?
 1741 for (unsigned I = 0; I != NumElems; ++I) {
 1743 APInt EltVal = Arg.atIndex(I).deref<T>().toAPSInt();
 1744 if (EltVal.isZero()) {
 1745 if (HasZeroArg) {
 1746 Dst.atIndex(I).deref<T>() = ZeroArg.atIndex(I).deref<T>();
 1747 } else {
 1748 // If we haven't been provided the second argument, the result is
 1749 // undefined
 1750 S.FFDiag(S.Current->getSource(OpPC),
 1751 diag::note_constexpr_countzeroes_zero)
 1752 << /*IsTrailing=*/IsCTTZ;
 1753 return false;
 1754 }
 1755 } else if (IsCTTZ) {
 1756 Dst.atIndex(I).deref<T>() = T::from(EltVal.countTrailingZeros());
 1757 } else {
 1758 Dst.atIndex(I).deref<T>() = T::from(EltVal.countLeadingZeros());
 1759 }
 1760 Dst.atIndex(I).initialize();
 1761 });
 1762 }
 1763
 1764 return true;
 1765}
1766
// Evaluates memcpy/memmove and the wmem/__builtin_ variants: validates both
// pointers, converts the size into destination elements, rejects overlap for
// the copy (non-move) forms, performs the copy, and pushes the destination.
// NOTE(review): doxygen-scrape artifact — the opening signature line (1767)
// is missing from this view. Code left byte-identical; comments only.
 1768 const InterpFrame *Frame,
 1769 const CallExpr *Call, unsigned ID) {
 1770 assert(Call->getNumArgs() == 3);
 1771 const ASTContext &ASTCtx = S.getASTContext();
 1772 uint64_t Size = popToUInt64(S, Call->getArg(2));
 1773 Pointer SrcPtr = S.Stk.pop<Pointer>().expand();
 1774 Pointer DestPtr = S.Stk.pop<Pointer>().expand();
 1775
 // The library (non-__builtin_) spellings are not constexpr; diagnose.
 1776 if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
 1777 diagnoseNonConstexprBuiltin(S, OpPC, ID);
 1778
 1779 bool Move =
 1780 (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove ||
 1781 ID == Builtin::BI__builtin_wmemmove || ID == Builtin::BIwmemmove);
 1782 bool WChar = ID == Builtin::BIwmemcpy || ID == Builtin::BIwmemmove ||
 1783 ID == Builtin::BI__builtin_wmemcpy ||
 1784 ID == Builtin::BI__builtin_wmemmove;
 1785
 1786 // If the size is zero, we treat this as always being a valid no-op.
 1787 if (Size == 0) {
 1788 S.Stk.push<Pointer>(DestPtr);
 1789 return true;
 1790 }
 1791
 1792 if (SrcPtr.isZero() || DestPtr.isZero()) {
 1793 Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
 1794 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
 1795 << /*IsMove=*/Move << /*IsWchar=*/WChar << !SrcPtr.isZero()
 1796 << DiagPtr.toDiagnosticString(ASTCtx);
 1797 return false;
 1798 }
 1799
 1800 // Diagnose integral src/dest pointers specially.
 1801 if (SrcPtr.isIntegralPointer() || DestPtr.isIntegralPointer()) {
 1802 std::string DiagVal = "(void *)";
 1803 DiagVal += SrcPtr.isIntegralPointer()
 1804 ? std::to_string(SrcPtr.getIntegerRepresentation())
 1805 : std::to_string(DestPtr.getIntegerRepresentation());
 1806 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
 1807 << Move << WChar << DestPtr.isIntegralPointer() << DiagVal;
 1808 return false;
 1809 }
 1810
 1811 if (!isReadable(DestPtr) || !isReadable(SrcPtr))
 1812 return false;
 1813
 1814 if (DestPtr.getType()->isIncompleteType()) {
 1815 S.FFDiag(S.Current->getSource(OpPC),
 1816 diag::note_constexpr_memcpy_incomplete_type)
 1817 << Move << DestPtr.getType();
 1818 return false;
 1819 }
 1820 if (SrcPtr.getType()->isIncompleteType()) {
 1821 S.FFDiag(S.Current->getSource(OpPC),
 1822 diag::note_constexpr_memcpy_incomplete_type)
 1823 << Move << SrcPtr.getType();
 1824 return false;
 1825 }
 1826
 1827 QualType DestElemType = getElemType(DestPtr);
 1828 if (DestElemType->isIncompleteType()) {
 1829 S.FFDiag(S.Current->getSource(OpPC),
 1830 diag::note_constexpr_memcpy_incomplete_type)
 1831 << Move << DestElemType;
 1832 return false;
 1833 }
 1834
 // How many whole elements remain from the destination's current index.
 1835 size_t RemainingDestElems;
 1836 if (DestPtr.getFieldDesc()->isArray()) {
 1837 RemainingDestElems = DestPtr.isUnknownSizeArray()
 1838 ? 0
 1839 : (DestPtr.getNumElems() - DestPtr.getIndex());
 1840 } else {
 1841 RemainingDestElems = 1;
 1842 }
 1843 unsigned DestElemSize = ASTCtx.getTypeSizeInChars(DestElemType).getQuantity();
 1844
 // Wide variants take the size in wchar_t units; scale to bytes.
 1845 if (WChar) {
 1846 uint64_t WCharSize =
 1847 ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
 1848 Size *= WCharSize;
 1849 }
 1850
 1851 if (Size % DestElemSize != 0) {
 1852 S.FFDiag(S.Current->getSource(OpPC),
 1853 diag::note_constexpr_memcpy_unsupported)
 1854 << Move << WChar << 0 << DestElemType << Size << DestElemSize;
 1855 return false;
 1856 }
 1857
 1858 QualType SrcElemType = getElemType(SrcPtr);
 1859 size_t RemainingSrcElems;
 1860 if (SrcPtr.getFieldDesc()->isArray()) {
 1861 RemainingSrcElems = SrcPtr.isUnknownSizeArray()
 1862 ? 0
 1863 : (SrcPtr.getNumElems() - SrcPtr.getIndex());
 1864 } else {
 1865 RemainingSrcElems = 1;
 1866 }
 1867 unsigned SrcElemSize = ASTCtx.getTypeSizeInChars(SrcElemType).getQuantity();
 1868
 // Copying between unrelated element types would be type punning.
 1869 if (!ASTCtx.hasSameUnqualifiedType(DestElemType, SrcElemType)) {
 1870 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_type_pun)
 1871 << Move << SrcElemType << DestElemType;
 1872 return false;
 1873 }
 1874
 1875 if (!DestElemType.isTriviallyCopyableType(ASTCtx)) {
 1876 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_nontrivial)
 1877 << Move << DestElemType;
 1878 return false;
 1879 }
 1880
 1881 // Check if we have enough elements to read from and write to.
 1882 size_t RemainingDestBytes = RemainingDestElems * DestElemSize;
 1883 size_t RemainingSrcBytes = RemainingSrcElems * SrcElemSize;
 1884 if (Size > RemainingDestBytes || Size > RemainingSrcBytes) {
 1885 APInt N = APInt(64, Size / DestElemSize);
 1886 S.FFDiag(S.Current->getSource(OpPC),
 1887 diag::note_constexpr_memcpy_unsupported)
 1888 << Move << WChar << (Size > RemainingSrcBytes ? 1 : 2) << DestElemType
 1889 << toString(N, 10, /*Signed=*/false);
 1890 return false;
 1891 }
 1892
 1893 // Check for overlapping memory regions.
 1894 if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
 1895 // Remove base casts.
 1896 Pointer SrcP = SrcPtr;
 1897 while (SrcP.isBaseClass())
 1898 SrcP = SrcP.getBase();
 1899
 1900 Pointer DestP = DestPtr;
 1901 while (DestP.isBaseClass())
 1902 DestP = DestP.getBase();
 1903
 // Compare byte offsets within the shared block.
 1904 unsigned SrcIndex = SrcP.expand().getIndex() * SrcP.elemSize();
 1905 unsigned DstIndex = DestP.expand().getIndex() * DestP.elemSize();
 1906
 1907 if ((SrcIndex <= DstIndex && (SrcIndex + Size) > DstIndex) ||
 1908 (DstIndex <= SrcIndex && (DstIndex + Size) > SrcIndex)) {
 1909 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
 1910 << /*IsWChar=*/false;
 1911 return false;
 1912 }
 1913 }
 1914
 1915 assert(Size % DestElemSize == 0);
 1916 if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size).toBits()))
 1917 return false;
 1918
 1919 S.Stk.push<Pointer>(DestPtr);
 1920 return true;
 1921}
1922
1923/// Determine if T is a character type for which we guarantee that
1924/// sizeof(T) == 1.
// NOTE(review): doxygen-scrape artifact — the signature line (1925) is
// missing from this view. Covers char/signed char/unsigned char and char8_t.
 1926 return T->isCharType() || T->isChar8Type();
 1927}
1928
// Evaluates memcmp/bcmp/wmemcmp and the __builtin_ variants by serializing
// both operands into byte buffers and comparing them element by element.
// Pushes -1/0/1 like the C library functions.
// NOTE(review): doxygen-scrape artifact — the opening signature line (1929)
// and the INT_TYPE_SWITCH header for the wide-compare branch (2003, closed
// by the `});` at doxygen line 2014) are missing from this view. Code left
// byte-identical; comments only.
 1930 const InterpFrame *Frame,
 1931 const CallExpr *Call, unsigned ID) {
 1932 assert(Call->getNumArgs() == 3);
 1933 uint64_t Size = popToUInt64(S, Call->getArg(2));
 1934 const Pointer &PtrB = S.Stk.pop<Pointer>();
 1935 const Pointer &PtrA = S.Stk.pop<Pointer>();
 1936
 // The library (non-__builtin_) spellings are not constexpr; diagnose.
 1937 if (ID == Builtin::BImemcmp || ID == Builtin::BIbcmp ||
 1938 ID == Builtin::BIwmemcmp)
 1939 diagnoseNonConstexprBuiltin(S, OpPC, ID);
 1940
 1941 if (Size == 0) {
 1942 pushInteger(S, 0, Call->getType());
 1943 return true;
 1944 }
 1945
 1946 if (!PtrA.isBlockPointer() || !PtrB.isBlockPointer())
 1947 return false;
 1948
 1949 bool IsWide =
 1950 (ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);
 1951
 1952 const ASTContext &ASTCtx = S.getASTContext();
 1953 QualType ElemTypeA = getElemType(PtrA);
 1954 QualType ElemTypeB = getElemType(PtrB);
 1955 // FIXME: This is an arbitrary limitation the current constant interpreter
 1956 // had. We could remove this.
 1957 if (!IsWide && (!isOneByteCharacterType(ElemTypeA) ||
 1958 !isOneByteCharacterType(ElemTypeB))) {
 1959 S.FFDiag(S.Current->getSource(OpPC),
 1960 diag::note_constexpr_memcmp_unsupported)
 1961 << ASTCtx.BuiltinInfo.getQuotedName(ID) << PtrA.getType()
 1962 << PtrB.getType();
 1963 return false;
 1964 }
 1965
 1966 if (PtrA.isDummy() || PtrB.isDummy())
 1967 return false;
 1968
 1969 if (!CheckRange(S, OpPC, PtrA, AK_Read) ||
 1970 !CheckRange(S, OpPC, PtrB, AK_Read))
 1971 return false;
 1972
 1973 // Now, read both pointers to a buffer and compare those.
 1974 BitcastBuffer BufferA(
 1975 Bits(ASTCtx.getTypeSize(ElemTypeA) * PtrA.getNumElems()));
 1976 readPointerToBuffer(S.getContext(), PtrA, BufferA, false);
 1977 // FIXME: The swapping here is UNDOING something we do when reading the
 1978 // data into the buffer.
 1979 if (ASTCtx.getTargetInfo().isBigEndian())
 1980 swapBytes(BufferA.Data.get(), BufferA.byteSize().getQuantity());
 1981
 1982 BitcastBuffer BufferB(
 1983 Bits(ASTCtx.getTypeSize(ElemTypeB) * PtrB.getNumElems()));
 1984 readPointerToBuffer(S.getContext(), PtrB, BufferB, false);
 1985 // FIXME: The swapping here is UNDOING something we do when reading the
 1986 // data into the buffer.
 1987 if (ASTCtx.getTargetInfo().isBigEndian())
 1988 swapBytes(BufferB.Data.get(), BufferB.byteSize().getQuantity());
 1989
 1990 size_t MinBufferSize = std::min(BufferA.byteSize().getQuantity(),
 1991 BufferB.byteSize().getQuantity());
 1992
 1993 unsigned ElemSize = 1;
 1994 if (IsWide)
 1995 ElemSize = ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
 1996 // The Size given for the wide variants is in wide-char units. Convert it
 1997 // to bytes.
 1998 size_t ByteSize = Size * ElemSize;
 1999 size_t CmpSize = std::min(MinBufferSize, ByteSize);
 2000
 2001 for (size_t I = 0; I != CmpSize; I += ElemSize) {
 2002 if (IsWide) {
 2004 T A = *reinterpret_cast<T *>(BufferA.atByte(I));
 2005 T B = *reinterpret_cast<T *>(BufferB.atByte(I));
 2006 if (A < B) {
 2007 pushInteger(S, -1, Call->getType());
 2008 return true;
 2009 }
 2010 if (A > B) {
 2011 pushInteger(S, 1, Call->getType());
 2012 return true;
 2013 }
 2014 });
 2015 } else {
 2016 std::byte A = BufferA.deref<std::byte>(Bytes(I));
 2017 std::byte B = BufferB.deref<std::byte>(Bytes(I));
 2018
 2019 if (A < B) {
 2020 pushInteger(S, -1, Call->getType());
 2021 return true;
 2022 }
 2023 if (A > B) {
 2024 pushInteger(S, 1, Call->getType());
 2025 return true;
 2026 }
 2027 }
 2028 }
 2029
 2030 // We compared CmpSize bytes above. If the limiting factor was the Size
 2031 // passed, we're done and the result is equality (0).
 2032 if (ByteSize <= CmpSize) {
 2033 pushInteger(S, 0, Call->getType());
 2034 return true;
 2035 }
 2036
 2037 // However, if we read all the available bytes but were instructed to read
 2038 // even more, diagnose this as a "read of dereferenced one-past-the-end
 2039 // pointer". This is what would happen if we called CheckLoad() on every array
 2040 // element.
 2041 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_past_end)
 2042 << AK_Read << S.Current->getRange(OpPC);
 2043 return false;
 2044}
2045
2046 // __builtin_memchr(ptr, int, int)
2047 // __builtin_strchr(ptr, int)
// Shared evaluator for the memchr/strchr family, including the wide-char
// variants (wmemchr/wcschr). Pops the desired value (and, for the 3-arg
// forms, the maximum length) off the interpreter stack, scans the
// pointed-to array, and pushes a Pointer to the first match — or a null
// Pointer when no match is found. Returns false only when the access
// cannot be evaluated in a constant expression (bad pointer, failed load).
// NOTE(review): the opening signature line of this function is elided in
// this listing; only the trailing parameters are visible below.
2049 const CallExpr *Call, unsigned ID) {
// The library functions (as opposed to the __builtin_ forms) are not
// usable in a constant expression per the standard; emit the
// "non-constexpr builtin" note first.
2050 if (ID == Builtin::BImemchr || ID == Builtin::BIwcschr ||
2051 ID == Builtin::BIstrchr || ID == Builtin::BIwmemchr)
2052 diagnoseNonConstexprBuiltin(S, OpPC, ID);
2053
2054 std::optional<APSInt> MaxLength;
2055 if (Call->getNumArgs() == 3)
2056 MaxLength = popToAPSInt(S, Call->getArg(2));
2057
2058 APSInt Desired = popToAPSInt(S, Call->getArg(1));
2059 const Pointer &Ptr = S.Stk.pop<Pointer>();
2060
// A zero length means nothing is searched: the result is null without
// ever dereferencing Ptr (so even a bad pointer is acceptable here).
2061 if (MaxLength && MaxLength->isZero()) {
2062 S.Stk.push<Pointer>();
2063 return true;
2064 }
2065
2066 if (Ptr.isDummy()) {
2067 if (Ptr.getType()->isIncompleteType())
2068 S.FFDiag(S.Current->getSource(OpPC),
2069 diag::note_constexpr_ltor_incomplete_type)
2070 << Ptr.getType();
2071 return false;
2072 }
2073
2074 // Null is only okay if the given size is 0.
2075 if (Ptr.isZero()) {
2076 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_null)
2077 << AK_Read;
2078 return false;
2079 }
2080
2081 if (!Ptr.isBlockPointer())
2082 return false;
2083
2084 QualType ElemTy = Ptr.getFieldDesc()->isArray()
2085 ? Ptr.getFieldDesc()->getElemQualType()
2086 : Ptr.getFieldDesc()->getType();
2087 bool IsRawByte = ID == Builtin::BImemchr || ID == Builtin::BI__builtin_memchr;
2088
2089 // Give up on byte-oriented matching against multibyte elements.
2090 if (IsRawByte && !isOneByteCharacterType(ElemTy)) {
2091 S.FFDiag(S.Current->getSource(OpPC),
2092 diag::note_constexpr_memchr_unsupported)
2093 << S.getASTContext().BuiltinInfo.getQuotedName(ID) << ElemTy;
2094 return false;
2095 }
2096
2097 if (!isReadable(Ptr))
2098 return false;
2099
2100 if (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr) {
2101 int64_t DesiredTrunc;
// Truncate using the signedness of the target's plain 'char' so the
// round-trip comparison below is meaningful.
2102 if (S.getASTContext().CharTy->isSignedIntegerType())
2103 DesiredTrunc =
2104 Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue();
2105 else
2106 DesiredTrunc =
2107 Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
2108 // strchr compares directly to the passed integer, and therefore
2109 // always fails if given an int that is not a char.
2110 if (Desired != DesiredTrunc) {
2111 S.Stk.push<Pointer>();
2112 return true;
2113 }
2114 }
2115
2116 uint64_t DesiredVal;
2117 if (ID == Builtin::BIwmemchr || ID == Builtin::BI__builtin_wmemchr ||
2118 ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr) {
2119 // wcschr and wmemchr are given a wchar_t to look for. Just use it.
2120 DesiredVal = Desired.getZExtValue();
2121 } else {
// Narrow variants compare as unsigned char, per the memchr contract.
2122 DesiredVal = Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
2123 }
2124
// String-oriented variants stop at the terminating NUL; the mem*
// variants only stop at MaxLength.
2125 bool StopAtZero =
2126 (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr ||
2127 ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr);
2128
2129 PrimType ElemT =
2130 IsRawByte ? PT_Sint8 : *S.getContext().classify(getElemType(Ptr));
2131
2132 size_t Index = Ptr.getIndex();
2133 size_t Step = 0;
2134 for (;;) {
2135 const Pointer &ElemPtr =
2136 (Index + Step) > 0 ? Ptr.atIndex(Index + Step) : Ptr;
2137
// CheckLoad also diagnoses running off the end of the array.
2138 if (!CheckLoad(S, OpPC, ElemPtr))
2139 return false;
2140
2141 uint64_t V;
// NOTE(review): the TYPE_SWITCH opener for this statement (original
// line 2142) is not visible in this excerpt.
2143 ElemT, { V = static_cast<uint64_t>(ElemPtr.deref<T>().toUnsigned()); });
2144
2145 if (V == DesiredVal) {
2146 S.Stk.push<Pointer>(ElemPtr);
2147 return true;
2148 }
2149
2150 if (StopAtZero && V == 0)
2151 break;
2152
2153 ++Step;
2154 if (MaxLength && Step == MaxLength->getZExtValue())
2155 break;
2156 }
2157
// Not found: the result of memchr/strchr is a null pointer.
2158 S.Stk.push<Pointer>();
2159 return true;
2160}
2161
// Computes the total size in bytes of the object described by \p Desc:
// the element size times the element count for arrays, the record layout
// size for records, and the primitive's own size otherwise. Returns
// std::nullopt for descriptor kinds that are not handled here.
2162 static std::optional<unsigned> computeFullDescSize(const ASTContext &ASTCtx,
2163 const Descriptor *Desc) {
2164 if (Desc->isPrimitive())
2165 return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();
2166 if (Desc->isArray())
2167 return ASTCtx.getTypeSizeInChars(Desc->getElemQualType()).getQuantity() *
2168 Desc->getNumElems();
2169 if (Desc->isRecord()) {
2170 // Can't use Descriptor::getType() as that may return a pointer type. Look
2171 // at the decl directly.
// NOTE(review): a continuation line of this return expression (original
// line 2173, presumably the getTypeSizeInChars call) is not visible in
// this excerpt.
2172 return ASTCtx
2174 ASTCtx.getCanonicalTagType(Desc->ElemRecord->getDecl()))
2175 .getQuantity();
2176 }
2177
2178 return std::nullopt;
2179}
2180
2181 /// Compute the byte offset of \p Ptr in the full declaration.
2182 static unsigned computePointerOffset(const ASTContext &ASTCtx,
2183 const Pointer &Ptr) {
2184 unsigned Result = 0;
2185
// Walk from the designated subobject up to the root declaration,
// accumulating the byte offset contributed at each level.
2186 Pointer P = Ptr;
2187 while (P.isField() || P.isArrayElement()) {
2188 P = P.expand();
2189 const Descriptor *D = P.getFieldDesc();
2190
2191 if (P.isArrayElement()) {
// NOTE(review): the initializer of ElemSize (original line 2193,
// presumably the element-type size query) is not visible in this
// excerpt.
2192 unsigned ElemSize =
// One-past-the-end pointers sit after the last element, i.e. at
// NumElems * ElemSize.
2194 if (P.isOnePastEnd())
2195 Result += ElemSize * P.getNumElems();
2196 else
2197 Result += ElemSize * P.getIndex();
2198 P = P.expand().getArray();
2199 } else if (P.isBaseClass()) {
// Add the (virtual or non-virtual) base class offset within the
// enclosing record's layout.
2200 const auto *RD = cast<CXXRecordDecl>(D->asDecl());
2201 bool IsVirtual = Ptr.isVirtualBaseClass();
2202 P = P.getBase();
2203 const Record *BaseRecord = P.getRecord();
2204
2205 const ASTRecordLayout &Layout =
2206 ASTCtx.getASTRecordLayout(cast<CXXRecordDecl>(BaseRecord->getDecl()));
2207 if (IsVirtual)
2208 Result += Layout.getVBaseClassOffset(RD).getQuantity();
2209 else
2210 Result += Layout.getBaseClassOffset(RD).getQuantity();
2211 } else if (P.isField()) {
// Add the field's offset within its parent record, converted from
// bits to bytes.
2212 const FieldDecl *FD = P.getField();
2213 const ASTRecordLayout &Layout =
2214 ASTCtx.getASTRecordLayout(FD->getParent());
2215 unsigned FieldIndex = FD->getFieldIndex();
2216 uint64_t FieldOffset =
2217 ASTCtx.toCharUnitsFromBits(Layout.getFieldOffset(FieldIndex))
2218 .getQuantity();
2219 Result += FieldOffset;
2220 P = P.getBase();
2221 } else
2222 llvm_unreachable("Unhandled descriptor type");
2223 }
2224
2225 return Result;
2226}
2227
2228/// Does Ptr point to the last subobject?
2229static bool pointsToLastObject(const Pointer &Ptr) {
2230 Pointer P = Ptr;
2231 while (!P.isRoot()) {
2232
2233 if (P.isArrayElement()) {
2234 P = P.expand().getArray();
2235 continue;
2236 }
2237 if (P.isBaseClass()) {
2238 if (P.getRecord()->getNumFields() > 0)
2239 return false;
2240 P = P.getBase();
2241 continue;
2242 }
2243
2244 Pointer Base = P.getBase();
2245 if (const Record *R = Base.getRecord()) {
2246 assert(P.getField());
2247 if (P.getField()->getFieldIndex() != R->getNumFields() - 1)
2248 return false;
2249 }
2250 P = Base;
2251 }
2252
2253 return true;
2254}
2255
2256 /// Does Ptr point to the last object AND to a flexible array member?
2257 static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const Pointer &Ptr) {
// Classifies the pointee's array descriptor as a flexible array member
// according to the -fstrict-flex-arrays level in effect.
2258 auto isFlexibleArrayMember = [&](const Descriptor *FieldDesc) {
// NOTE(review): a line between here and the next one (original line
// 2259, presumably a `using FAMKind = ...;` alias) is not visible in
// this excerpt.
2260 FAMKind StrictFlexArraysLevel =
2261 Ctx.getLangOpts().getStrictFlexArraysLevel();
2262
// Default level: any trailing array counts as a FAM.
2263 if (StrictFlexArraysLevel == FAMKind::Default)
2264 return true;
2265
// Zero-sized trailing arrays count for every level except
// "incomplete only".
2266 unsigned NumElems = FieldDesc->getNumElems();
2267 if (NumElems == 0 && StrictFlexArraysLevel != FAMKind::IncompleteOnly)
2268 return true;
2269
// One-element trailing arrays only count at the most permissive
// strict level.
2270 if (NumElems == 1 && StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete)
2271 return true;
2272 return false;
2273 };
2274
2275 const Descriptor *FieldDesc = Ptr.getFieldDesc();
2276 if (!FieldDesc->isArray())
2277 return false;
2278
2279 return Ptr.isDummy() && pointsToLastObject(Ptr) &&
2280 isFlexibleArrayMember(FieldDesc);
2281}
2282
// Evaluator for __builtin_object_size / __builtin_dynamic_object_size:
// computes the number of bytes remaining from the pointer to the end of
// either the complete declaration or the closest enclosing subobject,
// depending on the Kind argument. Returns false when no constant answer
// can be given (the caller then falls back to the non-constant path).
// NOTE(review): the opening signature line of this function is elided in
// this listing; only the trailing parameters are visible below.
2284 const InterpFrame *Frame,
2285 const CallExpr *Call) {
2286 const ASTContext &ASTCtx = S.getASTContext();
2287 // From the GCC docs:
2288 // Kind is an integer constant from 0 to 3. If the least significant bit is
2289 // clear, objects are whole variables. If it is set, a closest surrounding
2290 // subobject is considered the object a pointer points to. The second bit
2291 // determines if maximum or minimum of remaining bytes is computed.
2292 unsigned Kind = popToUInt64(S, Call->getArg(1));
2293 assert(Kind <= 3 && "unexpected kind");
2294 bool UseFieldDesc = (Kind & 1u);
2295 bool ReportMinimum = (Kind & 2u);
2296 const Pointer &Ptr = S.Stk.pop<Pointer>();
2297
2298 if (Call->getArg(0)->HasSideEffects(ASTCtx)) {
2299 // "If there are any side effects in them, it returns (size_t) -1
2300 // for type 0 or 1 and (size_t) 0 for type 2 or 3."
2301 pushInteger(S, Kind <= 1 ? -1 : 0, Call->getType());
2302 return true;
2303 }
2304
2305 if (Ptr.isZero() || !Ptr.isBlockPointer())
2306 return false;
2307
2308 // We can't load through pointers.
2309 if (Ptr.isDummy() && Ptr.getType()->isPointerType())
2310 return false;
2311
// When the field descriptor IS the declaration descriptor, the pointer
// designates the complete object rather than a subobject.
2312 bool DetermineForCompleteObject = Ptr.getFieldDesc() == Ptr.getDeclDesc();
2313 const Descriptor *DeclDesc = Ptr.getDeclDesc();
2314 assert(DeclDesc);
2315
2316 if (!UseFieldDesc || DetermineForCompleteObject) {
2317 // Lower bound, so we can't fall back to this.
2318 if (ReportMinimum && !DetermineForCompleteObject)
2319 return false;
2320
2321 // Can't read beyond the pointer decl desc.
2322 if (!UseFieldDesc && !ReportMinimum && DeclDesc->getType()->isPointerType())
2323 return false;
2324 } else {
2325 if (isUserWritingOffTheEnd(ASTCtx, Ptr.expand())) {
2326 // If we cannot determine the size of the initial allocation, then we
2327 // can't given an accurate upper-bound. However, we are still able to give
2328 // conservative lower-bounds for Type=3.
2329 if (Kind == 1)
2330 return false;
2331 }
2332 }
2333
2334 const Descriptor *Desc = UseFieldDesc ? Ptr.getFieldDesc() : DeclDesc;
2335 assert(Desc);
2336
2337 std::optional<unsigned> FullSize = computeFullDescSize(ASTCtx, Desc);
2338 if (!FullSize)
2339 return false;
2340
// Byte offset of the pointer within the object whose size we computed;
// the result is FullSize minus this offset.
2341 unsigned ByteOffset;
2342 if (UseFieldDesc) {
2343 if (Ptr.isBaseClass())
2344 ByteOffset = computePointerOffset(ASTCtx, Ptr.getBase()) -
2345 computePointerOffset(ASTCtx, Ptr);
2346 else {
// For array elements, the offset is relative to the start of the
// enclosing array, not the start of the declaration.
2347 if (Ptr.inArray())
2348 ByteOffset =
2349 computePointerOffset(ASTCtx, Ptr) -
2350 computePointerOffset(ASTCtx, Ptr.expand().atIndex(0).narrow());
2351 else
2352 ByteOffset = 0;
2353 }
2354 } else
2355 ByteOffset = computePointerOffset(ASTCtx, Ptr);
2356
2357 assert(ByteOffset <= *FullSize);
2358 unsigned Result = *FullSize - ByteOffset;
2359
2360 pushInteger(S, Result, Call->getType());
2361 return true;
2362}
2363
// Evaluator for __builtin_is_within_lifetime (the core of C++26
// std::is_within_lifetime): pushes whether the pointee's lifetime has
// started and not yet ended. Diagnoses null pointers, one-past-the-end
// pointers, and use during the pointee's own initialization.
// NOTE(review): the opening signature line of this function is elided in
// this listing; only the trailing parameter is visible below.
2365 const CallExpr *Call) {
2366
2367 if (!S.inConstantContext())
2368 return false;
2369
2370 const Pointer &Ptr = S.Stk.pop<Pointer>();
2371
// Emits the "invalid is_within_lifetime" diagnostic; the Diag payload
// selects the reason (0 = null, 1 = one-past-end, 2 = during init).
// If the caller is std::is_within_lifetime, blame is shifted there.
2372 auto Error = [&](int Diag) {
2373 bool CalledFromStd = false;
2374 const auto *Callee = S.Current->getCallee();
2375 if (Callee && Callee->isInStdNamespace()) {
2376 const IdentifierInfo *Identifier = Callee->getIdentifier();
2377 CalledFromStd = Identifier && Identifier->isStr("is_within_lifetime");
2378 }
// NOTE(review): one arm of this conditional source-location pick
// (original line 2380) is not visible in this excerpt.
2379 S.CCEDiag(CalledFromStd
2381 : S.Current->getSource(OpPC),
2382 diag::err_invalid_is_within_lifetime)
2383 << (CalledFromStd ? "std::is_within_lifetime"
2384 : "__builtin_is_within_lifetime")
2385 << Diag;
2386 return false;
2387 };
2388
2389 if (Ptr.isZero())
2390 return Error(0);
2391 if (Ptr.isOnePastEnd())
2392 return Error(1);
2393
2394 bool Result = Ptr.getLifetime() != Lifetime::Ended;
// An inactive union member is not within its lifetime, but that is a
// valid (false) answer rather than an error — skip the access checks.
2395 if (!Ptr.isActive()) {
2396 Result = false;
2397 } else {
2398 if (!CheckLive(S, OpPC, Ptr, AK_Read))
2399 return false;
2400 if (!CheckMutable(S, OpPC, Ptr))
2401 return false;
2402 if (!CheckDummy(S, OpPC, Ptr.block(), AK_Read))
2403 return false;
2404 }
2405
2406 // Check if we're currently running an initializer.
2407 if (llvm::is_contained(S.InitializingBlocks, Ptr.block()))
2408 return Error(2);
2409 if (S.EvaluatingDecl && Ptr.getDeclDesc()->asVarDecl() == S.EvaluatingDecl)
2410 return Error(2);
2411
2412 pushInteger(S, Result, Call->getType());
2413 return true;
2414}
2415
// Generic driver for unary integer element-wise builtins: applies \p Fn
// either to a single scalar integer argument, or to every element of a
// vector argument, writing into the destination vector on the stack.
// NOTE(review): the opening signature line of this function is elided in
// this listing; only the trailing parameters are visible below.
2417 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2418 llvm::function_ref<APInt(const APSInt &)> Fn) {
2419 assert(Call->getNumArgs() == 1);
2420
2421 // Single integer case.
2422 if (!Call->getArg(0)->getType()->isVectorType()) {
2423 assert(Call->getType()->isIntegerType());
2424 APSInt Src = popToAPSInt(S, Call->getArg(0));
2425 APInt Result = Fn(Src);
// Result signedness follows the source operand's signedness.
2426 pushInteger(S, APSInt(std::move(Result), !Src.isSigned()), Call->getType());
2427 return true;
2428 }
2429
2430 // Vector case.
2431 const Pointer &Arg = S.Stk.pop<Pointer>();
2432 assert(Arg.getFieldDesc()->isPrimitiveArray());
2433 const Pointer &Dst = S.Stk.peek<Pointer>();
2434 assert(Dst.getFieldDesc()->isPrimitiveArray());
2435 assert(Arg.getFieldDesc()->getNumElems() ==
2436 Dst.getFieldDesc()->getNumElems());
2437
2438 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
2439 PrimType ElemT = *S.getContext().classify(ElemType);
2440 unsigned NumElems = Arg.getNumElems();
2441 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2442
// Apply Fn element by element.
// NOTE(review): the type-switch opener for this loop body (original
// line 2444) is not visible in this excerpt.
2443 for (unsigned I = 0; I != NumElems; ++I) {
2445 APSInt Src = Arg.elem<T>(I).toAPSInt();
2446 APInt Result = Fn(Src);
2447 Dst.elem<T>(I) = static_cast<T>(APSInt(std::move(Result), DestUnsigned));
2448 });
2449 }
2451
2452 return true;
2453}
2454
// Generic driver for binary integer element-wise builtins: applies \p Fn
// to scalar/scalar, vector/scalar, or vector/vector operands, writing
// results into the destination on the stack.
// NOTE(review): the opening signature line of this function is elided in
// this listing; only the trailing parameters are visible below.
2456 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2457 llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
2458 assert(Call->getNumArgs() == 2);
2459
2460 // Single integer case.
2461 if (!Call->getArg(0)->getType()->isVectorType()) {
2462 assert(!Call->getArg(1)->getType()->isVectorType());
// Operands are popped in reverse order of evaluation: RHS first.
2463 APSInt RHS = popToAPSInt(S, Call->getArg(1));
2464 APSInt LHS = popToAPSInt(S, Call->getArg(0));
2465 APInt Result = Fn(LHS, RHS);
2466 pushInteger(S, APSInt(std::move(Result), !LHS.isSigned()), Call->getType());
2467 return true;
2468 }
2469
2470 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2471 assert(VT->getElementType()->isIntegralOrEnumerationType());
2472 PrimType ElemT = *S.getContext().classify(VT->getElementType());
2473 unsigned NumElems = VT->getNumElements();
2474 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2475
2476 // Vector + Scalar case.
2477 if (!Call->getArg(1)->getType()->isVectorType()) {
2478 assert(Call->getArg(1)->getType()->isIntegralOrEnumerationType());
2479
2480 APSInt RHS = popToAPSInt(S, Call->getArg(1));
2481 const Pointer &LHS = S.Stk.pop<Pointer>();
2482 const Pointer &Dst = S.Stk.peek<Pointer>();
2483
// The same scalar RHS is combined with every LHS element.
// NOTE(review): the type-switch opener for this loop body (original
// line 2485) is not visible in this excerpt.
2484 for (unsigned I = 0; I != NumElems; ++I) {
2486 Dst.elem<T>(I) = static_cast<T>(
2487 APSInt(Fn(LHS.elem<T>(I).toAPSInt(), RHS), DestUnsigned));
2488 });
2489 }
2491 return true;
2492 }
2493
2494 // Vector case.
2495 assert(Call->getArg(0)->getType()->isVectorType() &&
2496 Call->getArg(1)->getType()->isVectorType());
2497 assert(VT->getElementType() ==
2498 Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
2499 assert(VT->getNumElements() ==
2500 Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
2501 assert(VT->getElementType()->isIntegralOrEnumerationType());
2502
2503 const Pointer &RHS = S.Stk.pop<Pointer>();
2504 const Pointer &LHS = S.Stk.pop<Pointer>();
2505 const Pointer &Dst = S.Stk.peek<Pointer>();
2506 for (unsigned I = 0; I != NumElems; ++I) {
2508 APSInt Elem1 = LHS.elem<T>(I).toAPSInt();
2509 APSInt Elem2 = RHS.elem<T>(I).toAPSInt();
2510 Dst.elem<T>(I) = static_cast<T>(APSInt(Fn(Elem1, Elem2), DestUnsigned));
2511 });
2512 }
2514
2515 return true;
2516}
2517
// Driver for x86 pack builtins (packsswb/packuswb-style): for each
// 128-bit lane, narrows SrcPerLane elements from LHS followed by
// SrcPerLane elements from RHS via \p PackFn into the destination,
// matching the hardware lane-interleaved layout.
// NOTE(review): the remainder of this signature (original line 2519) is
// not visible in this excerpt.
2518 static bool
2520 llvm::function_ref<APInt(const APSInt &)> PackFn) {
2521 const auto *VT0 = E->getArg(0)->getType()->castAs<VectorType>();
2522 [[maybe_unused]] const auto *VT1 =
2523 E->getArg(1)->getType()->castAs<VectorType>();
2524 assert(VT0 && VT1 && "pack builtin VT0 and VT1 must be VectorType");
2525 assert(VT0->getElementType() == VT1->getElementType() &&
2526 VT0->getNumElements() == VT1->getNumElements() &&
2527 "pack builtin VT0 and VT1 ElementType must be same");
2528
2529 const Pointer &RHS = S.Stk.pop<Pointer>();
2530 const Pointer &LHS = S.Stk.pop<Pointer>();
2531 const Pointer &Dst = S.Stk.peek<Pointer>();
2532
2533 const ASTContext &ASTCtx = S.getASTContext();
2534 unsigned SrcBits = ASTCtx.getIntWidth(VT0->getElementType());
2535 unsigned LHSVecLen = VT0->getNumElements();
// Pack operates independently per 128-bit lane.
2536 unsigned SrcPerLane = 128 / SrcBits;
2537 unsigned Lanes = LHSVecLen * SrcBits / 128;
2538
2539 PrimType SrcT = *S.getContext().classify(VT0->getElementType());
2540 PrimType DstT = *S.getContext().classify(getElemType(Dst));
2541 bool IsUnsigend = getElemType(Dst)->isUnsignedIntegerType();
2542
2543 for (unsigned Lane = 0; Lane != Lanes; ++Lane) {
2544 unsigned BaseSrc = Lane * SrcPerLane;
// Each destination lane holds twice as many (narrower) elements.
2545 unsigned BaseDst = Lane * (2 * SrcPerLane);
2546
// NOTE(review): the type-switch opener for this inner loop body
// (original line 2548) is not visible in this excerpt.
2547 for (unsigned I = 0; I != SrcPerLane; ++I) {
2549 APSInt A = LHS.elem<T>(BaseSrc + I).toAPSInt();
2550 APSInt B = RHS.elem<T>(BaseSrc + I).toAPSInt();
2551
// LHS elements fill the lane's low half, RHS the high half.
2552 assignInteger(S, Dst.atIndex(BaseDst + I), DstT,
2553 APSInt(PackFn(A), IsUnsigend));
2554 assignInteger(S, Dst.atIndex(BaseDst + SrcPerLane + I), DstT,
2555 APSInt(PackFn(B), IsUnsigend));
2556 });
2557 }
2558 }
2559
2560 Dst.initializeAllElements();
2561 return true;
2562}
2563
// Evaluator for __builtin_elementwise_max / __builtin_elementwise_min on
// integer scalars and integer vectors. Floating-point operands are not
// handled yet and make the evaluation fail (see TODO below).
// NOTE(review): the opening signature line of this function is elided in
// this listing; only the trailing parameters are visible below.
2565 const CallExpr *Call,
2566 unsigned BuiltinID) {
2567 assert(Call->getNumArgs() == 2);
2568
2569 QualType Arg0Type = Call->getArg(0)->getType();
2570
2571 // TODO: Support floating-point types.
2572 if (!(Arg0Type->isIntegerType() ||
2573 (Arg0Type->isVectorType() &&
2574 Arg0Type->castAs<VectorType>()->getElementType()->isIntegerType())))
2575 return false;
2576
// Scalar case.
2577 if (!Arg0Type->isVectorType()) {
2578 assert(!Call->getArg(1)->getType()->isVectorType());
2579 APSInt RHS = popToAPSInt(S, Call->getArg(1));
2580 APSInt LHS = popToAPSInt(S, Arg0Type);
2581 APInt Result;
2582 if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
2583 Result = std::max(LHS, RHS);
2584 } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
2585 Result = std::min(LHS, RHS);
2586 } else {
2587 llvm_unreachable("Wrong builtin ID");
2588 }
2589
2590 pushInteger(S, APSInt(Result, !LHS.isSigned()), Call->getType());
2591 return true;
2592 }
2593
2594 // Vector case.
2595 assert(Call->getArg(0)->getType()->isVectorType() &&
2596 Call->getArg(1)->getType()->isVectorType());
2597 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2598 assert(VT->getElementType() ==
2599 Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
2600 assert(VT->getNumElements() ==
2601 Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
2602 assert(VT->getElementType()->isIntegralOrEnumerationType());
2603
2604 const Pointer &RHS = S.Stk.pop<Pointer>();
2605 const Pointer &LHS = S.Stk.pop<Pointer>();
2606 const Pointer &Dst = S.Stk.peek<Pointer>();
2607 PrimType ElemT = *S.getContext().classify(VT->getElementType());
2608 unsigned NumElems = VT->getNumElements();
2609 for (unsigned I = 0; I != NumElems; ++I) {
2610 APSInt Elem1;
2611 APSInt Elem2;
// NOTE(review): the type-switch opener for these element loads
// (original line 2612) is not visible in this excerpt.
2613 Elem1 = LHS.elem<T>(I).toAPSInt();
2614 Elem2 = RHS.elem<T>(I).toAPSInt();
2615 });
2616
2617 APSInt Result;
2618 if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
2619 Result = APSInt(std::max(Elem1, Elem2),
2620 Call->getType()->isUnsignedIntegerOrEnumerationType());
2621 } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
2622 Result = APSInt(std::min(Elem1, Elem2),
2623 Call->getType()->isUnsignedIntegerOrEnumerationType());
2624 } else {
2625 llvm_unreachable("Wrong builtin ID");
2626 }
2627
2629 { Dst.elem<T>(I) = static_cast<T>(Result); });
2630 }
2631 Dst.initializeAllElements();
2632
2633 return true;
2634}
2635
// Driver for builtins that combine adjacent element pairs from both
// operands into one destination element (pmaddwd/pmaddubsw-style):
// \p Fn receives (LHS[i], LHS[i+1], RHS[i], RHS[i+1]) and produces the
// destination element at index i/2.
// NOTE(review): the opening signature line of this function is elided in
// this listing; only the trailing parameters are visible below.
2637 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2638 llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &,
2639 const APSInt &)>
2640 Fn) {
2641 assert(Call->getArg(0)->getType()->isVectorType() &&
2642 Call->getArg(1)->getType()->isVectorType());
2643 const Pointer &RHS = S.Stk.pop<Pointer>();
2644 const Pointer &LHS = S.Stk.pop<Pointer>();
2645 const Pointer &Dst = S.Stk.peek<Pointer>();
2646
2647 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2648 PrimType ElemT = *S.getContext().classify(VT->getElementType());
2649 unsigned NumElems = VT->getNumElements();
2650 const auto *DestVT = Call->getType()->castAs<VectorType>();
2651 PrimType DestElemT = *S.getContext().classify(DestVT->getElementType());
2652 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2653
// Consume two source elements per destination element.
2654 unsigned DstElem = 0;
2655 for (unsigned I = 0; I != NumElems; I += 2) {
2656 APSInt Result;
// NOTE(review): the type-switch opener for these element loads
// (original line 2657) is not visible in this excerpt.
2658 APSInt LoLHS = LHS.elem<T>(I).toAPSInt();
2659 APSInt HiLHS = LHS.elem<T>(I + 1).toAPSInt();
2660 APSInt LoRHS = RHS.elem<T>(I).toAPSInt();
2661 APSInt HiRHS = RHS.elem<T>(I + 1).toAPSInt();
2662 Result = APSInt(Fn(LoLHS, HiLHS, LoRHS, HiRHS), DestUnsigned);
2663 });
2664
2665 INT_TYPE_SWITCH_NO_BOOL(DestElemT,
2666 { Dst.elem<T>(DstElem) = static_cast<T>(Result); });
2667 ++DstElem;
2668 }
2669
2670 Dst.initializeAllElements();
2671 return true;
2672}
2673
// Driver for horizontal integer builtins (phadd/phsub-style): within
// each 128-bit lane, combines adjacent element pairs of LHS into the
// lane's low half of the destination and adjacent pairs of RHS into the
// lane's high half.
// NOTE(review): the opening signature line of this function is elided in
// this listing; only the trailing parameters are visible below.
2675 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2676 llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
2677 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2678 PrimType ElemT = *S.getContext().classify(VT->getElementType());
2679 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2680
2681 const Pointer &RHS = S.Stk.pop<Pointer>();
2682 const Pointer &LHS = S.Stk.pop<Pointer>();
2683 const Pointer &Dst = S.Stk.peek<Pointer>();
2684 unsigned NumElts = VT->getNumElements();
2685 unsigned EltBits = S.getASTContext().getIntWidth(VT->getElementType());
// The operation is lane-local: each 128-bit lane is processed on its own.
2686 unsigned EltsPerLane = 128 / EltBits;
2687 unsigned Lanes = NumElts * EltBits / 128;
2688 unsigned DestIndex = 0;
2689
2690 for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2691 unsigned LaneStart = Lane * EltsPerLane;
// First: adjacent pairs of LHS fill the lane's low destination half.
// NOTE(review): the type-switch opener for this loop body (original
// line 2693) is not visible in this excerpt.
2692 for (unsigned I = 0; I < EltsPerLane; I += 2) {
2694 APSInt Elem1 = LHS.elem<T>(LaneStart + I).toAPSInt();
2695 APSInt Elem2 = LHS.elem<T>(LaneStart + I + 1).toAPSInt();
2696 APSInt ResL = APSInt(Fn(Elem1, Elem2), DestUnsigned);
2697 Dst.elem<T>(DestIndex++) = static_cast<T>(ResL);
2698 });
2699 }
2700
// Then: adjacent pairs of RHS fill the lane's high destination half.
2701 for (unsigned I = 0; I < EltsPerLane; I += 2) {
2703 APSInt Elem1 = RHS.elem<T>(LaneStart + I).toAPSInt();
2704 APSInt Elem2 = RHS.elem<T>(LaneStart + I + 1).toAPSInt();
2705 APSInt ResR = APSInt(Fn(Elem1, Elem2), DestUnsigned);
2706 Dst.elem<T>(DestIndex++) = static_cast<T>(ResR);
2707 });
2708 }
2709 }
2710 Dst.initializeAllElements();
2711 return true;
2712}
2713
// Driver for horizontal floating-point builtins (haddps/hsubps-style):
// within each 128-bit lane, combines adjacent element pairs of LHS into
// the lane's low half and adjacent pairs of RHS into the lane's high
// half, using the call's rounding mode.
// NOTE(review): the opening signature line of this function is elided in
// this listing; only the trailing parameters are visible below.
2715 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2716 llvm::function_ref<APFloat(const APFloat &, const APFloat &,
2717 llvm::RoundingMode)>
2718 Fn) {
2719 const Pointer &RHS = S.Stk.pop<Pointer>();
2720 const Pointer &LHS = S.Stk.pop<Pointer>();
2721 const Pointer &Dst = S.Stk.peek<Pointer>();
// Honor the FP options (rounding mode) in effect at the call site.
2722 FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
2723 llvm::RoundingMode RM = getRoundingMode(FPO);
2724 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2725
2726 unsigned NumElts = VT->getNumElements();
2727 unsigned EltBits = S.getASTContext().getTypeSize(VT->getElementType());
2728 unsigned NumLanes = NumElts * EltBits / 128;
2729 unsigned NumElemsPerLane = NumElts / NumLanes;
2730 unsigned HalfElemsPerLane = NumElemsPerLane / 2;
2731
2732 for (unsigned L = 0; L != NumElts; L += NumElemsPerLane) {
2733 using T = PrimConv<PT_Float>::T;
// LHS pairs fill the lane's low destination half...
2734 for (unsigned E = 0; E != HalfElemsPerLane; ++E) {
2735 APFloat Elem1 = LHS.elem<T>(L + (2 * E) + 0).getAPFloat();
2736 APFloat Elem2 = LHS.elem<T>(L + (2 * E) + 1).getAPFloat();
2737 Dst.elem<T>(L + E) = static_cast<T>(Fn(Elem1, Elem2, RM));
2738 }
// ...and RHS pairs fill the lane's high destination half.
2739 for (unsigned E = 0; E != HalfElemsPerLane; ++E) {
2740 APFloat Elem1 = RHS.elem<T>(L + (2 * E) + 0).getAPFloat();
2741 APFloat Elem2 = RHS.elem<T>(L + (2 * E) + 1).getAPFloat();
2742 Dst.elem<T>(L + E + HalfElemsPerLane) =
2743 static_cast<T>(Fn(Elem1, Elem2, RM));
2744 }
2745 }
2746 Dst.initializeAllElements();
2747 return true;
2748}
2749
// Evaluator for x86 addsub builtins (addsubps/addsubpd-style).
// NOTE(review): the opening signature line of this function is elided in
// this listing; only the trailing parameter is visible below.
2751 const CallExpr *Call) {
2752 // Addsub: alternates between subtraction and addition
2753 // Result[i] = (i % 2 == 0) ? (a[i] - b[i]) : (a[i] + b[i])
2754 const Pointer &RHS = S.Stk.pop<Pointer>();
2755 const Pointer &LHS = S.Stk.pop<Pointer>();
2756 const Pointer &Dst = S.Stk.peek<Pointer>();
// Honor the FP options (rounding mode) in effect at the call site.
2757 FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
2758 llvm::RoundingMode RM = getRoundingMode(FPO);
2759 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2760 unsigned NumElems = VT->getNumElements();
2761
2762 using T = PrimConv<PT_Float>::T;
2763 for (unsigned I = 0; I != NumElems; ++I) {
// LElem is modified in place by subtract()/add() below.
2764 APFloat LElem = LHS.elem<T>(I).getAPFloat();
2765 APFloat RElem = RHS.elem<T>(I).getAPFloat();
2766 if (I % 2 == 0) {
2767 // Even indices: subtract
2768 LElem.subtract(RElem, RM);
2769 } else {
2770 // Odd indices: add
2771 LElem.add(RElem, RM);
2772 }
2773 Dst.elem<T>(I) = static_cast<T>(LElem);
2774 }
2775 Dst.initializeAllElements();
2776 return true;
2777}
2778
// Evaluator for the x86 PCLMULQDQ family: per 128-bit lane, performs a
// carry-less (GF(2)) multiplication of one 64-bit half of each operand,
// selected by the imm8 control byte, producing a 128-bit lane result.
// NOTE(review): the opening signature line of this function is elided in
// this listing; only the trailing parameter is visible below.
2780 const CallExpr *Call) {
2781 // PCLMULQDQ: carry-less multiplication of selected 64-bit halves
2782 // imm8 bit 0: selects lower (0) or upper (1) 64 bits of first operand
2783 // imm8 bit 4: selects lower (0) or upper (1) 64 bits of second operand
2784 assert(Call->getArg(0)->getType()->isVectorType() &&
2785 Call->getArg(1)->getType()->isVectorType());
2786
2787 // Extract imm8 argument
2788 APSInt Imm8 = popToAPSInt(S, Call->getArg(2));
2789 bool SelectUpperA = (Imm8 & 0x01) != 0;
2790 bool SelectUpperB = (Imm8 & 0x10) != 0;
2791
2792 const Pointer &RHS = S.Stk.pop<Pointer>();
2793 const Pointer &LHS = S.Stk.pop<Pointer>();
2794 const Pointer &Dst = S.Stk.peek<Pointer>();
2795
2796 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2797 PrimType ElemT = *S.getContext().classify(VT->getElementType());
2798 unsigned NumElems = VT->getNumElements();
2799 const auto *DestVT = Call->getType()->castAs<VectorType>();
2800 PrimType DestElemT = *S.getContext().classify(DestVT->getElementType());
2801 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2802
2803 // Process each 128-bit lane (2 elements at a time)
2804 for (unsigned Lane = 0; Lane < NumElems; Lane += 2) {
2805 APSInt A0, A1, B0, B1;
// NOTE(review): the type-switch opener for these element loads
// (original line 2806) is not visible in this excerpt.
2807 A0 = LHS.elem<T>(Lane + 0).toAPSInt();
2808 A1 = LHS.elem<T>(Lane + 1).toAPSInt();
2809 B0 = RHS.elem<T>(Lane + 0).toAPSInt();
2810 B1 = RHS.elem<T>(Lane + 1).toAPSInt();
2811 });
2812
2813 // Select the appropriate 64-bit values based on imm8
2814 APInt A = SelectUpperA ? A1 : A0;
2815 APInt B = SelectUpperB ? B1 : B0;
2816
2817 // Extend both operands to 128 bits for carry-less multiplication
2818 APInt A128 = A.zext(128);
2819 APInt B128 = B.zext(128);
2820
2821 // Use APIntOps::clmul for carry-less multiplication
2822 APInt Result = llvm::APIntOps::clmul(A128, B128);
2823
2824 // Split the 128-bit result into two 64-bit halves
2825 APSInt ResultLow(Result.extractBits(64, 0), DestUnsigned);
2826 APSInt ResultHigh(Result.extractBits(64, 64), DestUnsigned);
2827
2828 INT_TYPE_SWITCH_NO_BOOL(DestElemT, {
2829 Dst.elem<T>(Lane + 0) = static_cast<T>(ResultLow);
2830 Dst.elem<T>(Lane + 1) = static_cast<T>(ResultHigh);
2831 });
2832 }
2833
2834 Dst.initializeAllElements();
2835 return true;
2836}
2837
// Driver for ternary floating-point builtins (fma-style): applies \p Fn
// to three scalar Floatings, or element-wise to three same-shaped float
// vectors, using the call's rounding mode.
// NOTE(review): the opening signature line of this function is elided in
// this listing; only the trailing parameters are visible below.
2839 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2840 llvm::function_ref<APFloat(const APFloat &, const APFloat &,
2841 const APFloat &, llvm::RoundingMode)>
2842 Fn) {
2843 assert(Call->getNumArgs() == 3);
2844
2845 FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
2846 llvm::RoundingMode RM = getRoundingMode(FPO);
2847 QualType Arg1Type = Call->getArg(0)->getType();
2848 QualType Arg2Type = Call->getArg(1)->getType();
2849 QualType Arg3Type = Call->getArg(2)->getType();
2850
2851 // Non-vector floating point types.
2852 if (!Arg1Type->isVectorType()) {
2853 assert(!Arg2Type->isVectorType());
2854 assert(!Arg3Type->isVectorType());
2855 (void)Arg2Type;
2856 (void)Arg3Type;
2857
// Operands pop in reverse order: Z (arg 2) was pushed last.
2858 const Floating &Z = S.Stk.pop<Floating>();
2859 const Floating &Y = S.Stk.pop<Floating>();
2860 const Floating &X = S.Stk.pop<Floating>();
2861 APFloat F = Fn(X.getAPFloat(), Y.getAPFloat(), Z.getAPFloat(), RM);
// Allocate a Floating with the operand's semantics to hold the result.
2862 Floating Result = S.allocFloat(X.getSemantics());
2863 Result.copy(F);
2864 S.Stk.push<Floating>(Result);
2865 return true;
2866 }
2867
2868 // Vector type.
2869 assert(Arg1Type->isVectorType() && Arg2Type->isVectorType() &&
2870 Arg3Type->isVectorType());
2871
2872 const VectorType *VecTy = Arg1Type->castAs<VectorType>();
2873 QualType ElemQT = VecTy->getElementType();
2874 unsigned NumElems = VecTy->getNumElements();
2875
2876 assert(ElemQT == Arg2Type->castAs<VectorType>()->getElementType() &&
2877 ElemQT == Arg3Type->castAs<VectorType>()->getElementType());
2878 assert(NumElems == Arg2Type->castAs<VectorType>()->getNumElements() &&
2879 NumElems == Arg3Type->castAs<VectorType>()->getNumElements());
2880 assert(ElemQT->isRealFloatingType());
2881 (void)ElemQT;
2882
2883 const Pointer &VZ = S.Stk.pop<Pointer>();
2884 const Pointer &VY = S.Stk.pop<Pointer>();
2885 const Pointer &VX = S.Stk.pop<Pointer>();
2886 const Pointer &Dst = S.Stk.peek<Pointer>();
2887 for (unsigned I = 0; I != NumElems; ++I) {
2888 using T = PrimConv<PT_Float>::T;
2889 APFloat X = VX.elem<T>(I).getAPFloat();
2890 APFloat Y = VY.elem<T>(I).getAPFloat();
2891 APFloat Z = VZ.elem<T>(I).getAPFloat();
2892 APFloat F = Fn(X, Y, Z, RM);
2893 Dst.elem<Floating>(I) = Floating(F);
2894 }
// NOTE(review): a line after the loop (original line 2895, presumably
// Dst.initializeAllElements()) is not visible in this excerpt.
2896 return true;
2897}
2898
2899 /// AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
// Each bit I of the integer mask chooses element I from LHS (bit set)
// or RHS (bit clear). Handles both float and integer element vectors.
// NOTE(review): the opening signature line of this function (original
// line 2900) is elided in this listing.
2901 const CallExpr *Call) {
2902 const Pointer &RHS = S.Stk.pop<Pointer>();
2903 const Pointer &LHS = S.Stk.pop<Pointer>();
2904 APSInt Mask = popToAPSInt(S, Call->getArg(0));
2905 const Pointer &Dst = S.Stk.peek<Pointer>();
2906
2907 assert(LHS.getNumElems() == RHS.getNumElems());
2908 assert(LHS.getNumElems() == Dst.getNumElems());
2909 unsigned NumElems = LHS.getNumElems();
2910 PrimType ElemT = LHS.getFieldDesc()->getPrimType();
2911 PrimType DstElemT = Dst.getFieldDesc()->getPrimType();
2912
2913 for (unsigned I = 0; I != NumElems; ++I) {
2914 if (ElemT == PT_Float) {
2915 assert(DstElemT == PT_Float);
2916 Dst.elem<Floating>(I) =
2917 Mask[I] ? LHS.elem<Floating>(I) : RHS.elem<Floating>(I);
2918 } else {
// Integer path: widen to APSInt for the select, then narrow to the
// destination element type (which may differ from the source type).
2919 APSInt Elem;
2920 INT_TYPE_SWITCH(ElemT, {
2921 Elem = Mask[I] ? LHS.elem<T>(I).toAPSInt() : RHS.elem<T>(I).toAPSInt();
2922 });
2923 INT_TYPE_SWITCH_NO_BOOL(DstElemT,
2924 { Dst.elem<T>(I) = static_cast<T>(Elem); });
2925 }
2926 }
// NOTE(review): a line after the loop (original line 2927, presumably
// Dst.initializeAllElements()) is not visible in this excerpt.
2928
2929 return true;
2930}
2931
2932 /// Scalar variant of AVX512 predicated select:
2933 /// Result[i] = (Mask bit 0) ? LHS[i] : RHS[i], but only element 0 may change.
2934 /// All other elements are taken from RHS.
// NOTE(review): the opening signature line of this function (original
// line 2935) is elided in this listing; only the trailing parameter is
// visible below. Here A is the "LHS" and W the "RHS" of the doc above.
2936 const CallExpr *Call) {
2937 unsigned N =
2938 Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements();
2939
2940 const Pointer &W = S.Stk.pop<Pointer>();
2941 const Pointer &A = S.Stk.pop<Pointer>();
2942 APSInt U = popToAPSInt(S, Call->getArg(0));
2943 const Pointer &Dst = S.Stk.peek<Pointer>();
2944
// Only bit 0 of the mask matters for the scalar variant.
2945 bool TakeA0 = U.getZExtValue() & 1ULL;
2946
// Copy W into the destination (skipping element 0 when it comes from A),
// then overwrite element 0 from A if selected.
2947 for (unsigned I = TakeA0; I != N; ++I)
2948 Dst.elem<Floating>(I) = W.elem<Floating>(I);
2949 if (TakeA0)
2950 Dst.elem<Floating>(0) = A.elem<Floating>(0);
2951
// NOTE(review): a line before the return (original line 2952, presumably
// Dst.initializeAllElements()) is not visible in this excerpt.
2953 return true;
2954}
2955
// Driver for x86 vector test builtins (ptest/vtestps-style): widens both
// operands into single APInts — full element values for integer vectors,
// just the sign bits for float vectors — and pushes the boolean result
// of \p Fn applied to the two wide values.
// NOTE(review): the opening signature line of this function (original
// line 2956) is elided in this listing.
2957 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2958 llvm::function_ref<bool(const APInt &A, const APInt &B)> Fn) {
2959 const Pointer &RHS = S.Stk.pop<Pointer>();
2960 const Pointer &LHS = S.Stk.pop<Pointer>();
2961
2962 assert(LHS.getNumElems() == RHS.getNumElems());
2963
2964 unsigned SourceLen = LHS.getNumElems();
2965 QualType ElemQT = getElemType(LHS);
2966 OptPrimType ElemPT = S.getContext().classify(ElemQT);
2967 unsigned LaneWidth = S.getASTContext().getTypeSize(ElemQT);
2968
// Accumulators holding the whole vector as one wide integer each.
2969 APInt AWide(LaneWidth * SourceLen, 0);
2970 APInt BWide(LaneWidth * SourceLen, 0);
2971
2972 for (unsigned I = 0; I != SourceLen; ++I) {
2973 APInt ALane;
2974 APInt BLane;
2975
2976 if (ElemQT->isIntegerType()) { // Get value.
2977 INT_TYPE_SWITCH_NO_BOOL(*ElemPT, {
2978 ALane = LHS.elem<T>(I).toAPSInt();
2979 BLane = RHS.elem<T>(I).toAPSInt();
2980 });
2981 } else if (ElemQT->isFloatingType()) { // Get only sign bit.
2982 using T = PrimConv<PT_Float>::T;
2983 ALane = LHS.elem<T>(I).getAPFloat().bitcastToAPInt().isNegative();
2984 BLane = RHS.elem<T>(I).getAPFloat().bitcastToAPInt().isNegative();
2985 } else { // Must be integer or floating type.
2986 return false;
2987 }
2988 AWide.insertBits(ALane, I * LaneWidth);
2989 BWide.insertBits(BLane, I * LaneWidth);
2990 }
2991 pushInteger(S, Fn(AWide, BWide), Call->getType());
2992 return true;
2993}
2994
// movmsk-style helper (signature line missing from this extraction): gathers
// the sign/MSB of each source element into bit I of an integer result.
 2996 const CallExpr *Call) {
 2997 assert(Call->getNumArgs() == 1);
 2998
 2999 const Pointer &Source = S.Stk.pop<Pointer>();
 3000
 3001 unsigned SourceLen = Source.getNumElems();
 3002 QualType ElemQT = getElemType(Source);
 3003 OptPrimType ElemT = S.getContext().classify(ElemQT);
 3004 unsigned ResultLen =
 3005 S.getASTContext().getTypeSize(Call->getType()); // Always 32-bit integer.
 3006 APInt Result(ResultLen, 0);
 3007
 3008 for (unsigned I = 0; I != SourceLen; ++I) {
 3009 APInt Elem;
// Float elements are bitcast so isNegative() below reads the IEEE sign bit.
 3010 if (ElemQT->isIntegerType()) {
 3011 INT_TYPE_SWITCH_NO_BOOL(*ElemT, { Elem = Source.elem<T>(I).toAPSInt(); });
 3012 } else if (ElemQT->isRealFloatingType()) {
 3013 using T = PrimConv<PT_Float>::T;
 3014 Elem = Source.elem<T>(I).getAPFloat().bitcastToAPInt();
 3015 } else {
 3016 return false;
 3017 }
 3018 Result.setBitVal(I, Elem.isNegative());
 3019 }
 3020 pushInteger(S, Result, Call->getType());
 3021 return true;
 3022}
3023
// Generic three-operand elementwise helper (signature line missing from this
// extraction). Handles three shapes: scalar+scalar+scalar,
// vector+vector+scalar, and vector+vector+vector, applying Fn per element.
 3025 InterpState &S, CodePtr OpPC, const CallExpr *Call,
 3026 llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &)>
 3027 Fn) {
 3028 assert(Call->getNumArgs() == 3);
 3029
 3030 QualType Arg0Type = Call->getArg(0)->getType();
 3031 QualType Arg2Type = Call->getArg(2)->getType();
 3032 // Non-vector integer types.
 3033 if (!Arg0Type->isVectorType()) {
 3034 const APSInt &Op2 = popToAPSInt(S, Arg2Type);
 3035 const APSInt &Op1 = popToAPSInt(S, Call->getArg(1));
 3036 const APSInt &Op0 = popToAPSInt(S, Arg0Type);
// Result signedness follows Op0, not the call's return type, in this path.
 3037 APSInt Result = APSInt(Fn(Op0, Op1, Op2), Op0.isUnsigned());
 3038 pushInteger(S, Result, Call->getType());
 3039 return true;
 3040 }
 3041
 3042 const auto *VecT = Arg0Type->castAs<VectorType>();
 3043 PrimType ElemT = *S.getContext().classify(VecT->getElementType());
 3044 unsigned NumElems = VecT->getNumElements();
 3045 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
 3046
 3047 // Vector + Vector + Scalar case.
 3048 if (!Arg2Type->isVectorType()) {
 3049 APSInt Op2 = popToAPSInt(S, Arg2Type);
 3050
 3051 const Pointer &Op1 = S.Stk.pop<Pointer>();
 3052 const Pointer &Op0 = S.Stk.pop<Pointer>();
 3053 const Pointer &Dst = S.Stk.peek<Pointer>();
 3054 for (unsigned I = 0; I != NumElems; ++I) {
// (Extraction gap: source line 3055 is missing — presumably the
// INT_TYPE_SWITCH opener matching the "});" below; verify upstream.)
 3056 Dst.elem<T>(I) = static_cast<T>(APSInt(
 3057 Fn(Op0.elem<T>(I).toAPSInt(), Op1.elem<T>(I).toAPSInt(), Op2),
 3058 DestUnsigned));
 3059 });
 3060 }
// (Extraction gap: source line 3061 is missing — likely the call marking
// Dst's elements initialized; verify upstream.)
 3062
 3063 return true;
 3064 }
 3065
 3066 // Vector type.
 3067 const Pointer &Op2 = S.Stk.pop<Pointer>();
 3068 const Pointer &Op1 = S.Stk.pop<Pointer>();
 3069 const Pointer &Op0 = S.Stk.pop<Pointer>();
 3070 const Pointer &Dst = S.Stk.peek<Pointer>();
 3071 for (unsigned I = 0; I != NumElems; ++I) {
 3072 APSInt Val0, Val1, Val2;
// (Extraction gap: source line 3073 missing — presumably a type-switch opener.)
 3074 Val0 = Op0.elem<T>(I).toAPSInt();
 3075 Val1 = Op1.elem<T>(I).toAPSInt();
 3076 Val2 = Op2.elem<T>(I).toAPSInt();
 3077 });
 3078 APSInt Result = APSInt(Fn(Val0, Val1, Val2), Val0.isUnsigned());
// (Extraction gap: source line 3079 missing — presumably a type-switch opener.)
 3080 { Dst.elem<T>(I) = static_cast<T>(Result); });
 3081 }
// (Extraction gap: source line 3082 missing — likely Dst initialization call.)
 3083
 3084 return true;
 3085}
3086
// Extract one lane (a DstElems-sized slice) from a wider source vector
// (signature line missing from this extraction). The immediate selects the
// lane, wrapped modulo the number of lanes.
 3088 const CallExpr *Call,
 3089 unsigned ID) {
 3090 assert(Call->getNumArgs() == 2);
 3091
 3092 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
 3093 uint64_t Index = ImmAPS.getZExtValue();
 3094
 3095 const Pointer &Src = S.Stk.pop<Pointer>();
 3096 if (!Src.getFieldDesc()->isPrimitiveArray())
 3097 return false;
 3098
 3099 const Pointer &Dst = S.Stk.peek<Pointer>();
 3100 if (!Dst.getFieldDesc()->isPrimitiveArray())
 3101 return false;
 3102
 3103 unsigned SrcElems = Src.getNumElems();
 3104 unsigned DstElems = Dst.getNumElems();
 3105
// assumes DstElems evenly divides SrcElems (lane structure) — TODO confirm.
 3106 unsigned NumLanes = SrcElems / DstElems;
 3107 unsigned Lane = static_cast<unsigned>(Index % NumLanes);
 3108 unsigned ExtractPos = Lane * DstElems;
 3109
 3110 PrimType ElemT = Src.getFieldDesc()->getPrimType();
 3111
 3112 TYPE_SWITCH(ElemT, {
 3113 for (unsigned I = 0; I != DstElems; ++I) {
 3114 Dst.elem<T>(I) = Src.elem<T>(ExtractPos + I);
 3115 }
 3116 });
 3117
// (Extraction gap: source line 3118 missing — likely Dst initialization call.)
 3119 return true;
 3120}
3121
// Masked lane extract (signature line missing from this extraction):
// Dst[i] = Mask[i] ? Src[lane base + i] : Merge[i].
 3123 CodePtr OpPC,
 3124 const CallExpr *Call,
 3125 unsigned ID) {
 3126 assert(Call->getNumArgs() == 4);
 3127
// Pop in reverse of argument order: mask, merge vector, lane immediate, source.
 3128 APSInt MaskAPS = popToAPSInt(S, Call->getArg(3));
 3129 const Pointer &Merge = S.Stk.pop<Pointer>();
 3130 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
 3131 const Pointer &Src = S.Stk.pop<Pointer>();
 3132
 3133 if (!Src.getFieldDesc()->isPrimitiveArray() ||
 3134 !Merge.getFieldDesc()->isPrimitiveArray())
 3135 return false;
 3136
 3137 const Pointer &Dst = S.Stk.peek<Pointer>();
 3138 if (!Dst.getFieldDesc()->isPrimitiveArray())
 3139 return false;
 3140
 3141 unsigned SrcElems = Src.getNumElems();
 3142 unsigned DstElems = Dst.getNumElems();
 3143
 3144 unsigned NumLanes = SrcElems / DstElems;
 3145 unsigned Lane = static_cast<unsigned>(ImmAPS.getZExtValue() % NumLanes);
 3146 unsigned Base = Lane * DstElems;
 3147
 3148 PrimType ElemT = Src.getFieldDesc()->getPrimType();
 3149
 3150 TYPE_SWITCH(ElemT, {
 3151 for (unsigned I = 0; I != DstElems; ++I) {
 3152 if (MaskAPS[I])
 3153 Dst.elem<T>(I) = Src.elem<T>(Base + I);
 3154 else
 3155 Dst.elem<T>(I) = Merge.elem<T>(I);
 3156 }
 3157 });
 3158
// (Extraction gap: source line 3159 missing — likely Dst initialization call.)
 3160 return true;
 3161}
3162
// Insert a sub-vector into one lane of a wider base vector (signature line
// missing from this extraction): copy BaseVec into Dst, then overwrite the
// lane selected by the immediate with SubVec.
 3163 const CallExpr *Call,
 3164 unsigned ID) {
 3165 assert(Call->getNumArgs() == 3);
 3166
 3167 APSInt ImmAPS = popToAPSInt(S, Call->getArg(2));
 3168 uint64_t Index = ImmAPS.getZExtValue();
 3169
 3170 const Pointer &SubVec = S.Stk.pop<Pointer>();
 3171 if (!SubVec.getFieldDesc()->isPrimitiveArray())
 3172 return false;
 3173
 3174 const Pointer &BaseVec = S.Stk.pop<Pointer>();
 3175 if (!BaseVec.getFieldDesc()->isPrimitiveArray())
 3176 return false;
 3177
 3178 const Pointer &Dst = S.Stk.peek<Pointer>();
 3179
 3180 unsigned BaseElements = BaseVec.getNumElems();
 3181 unsigned SubElements = SubVec.getNumElems();
 3182
 3183 assert(SubElements != 0 && BaseElements != 0 &&
 3184 (BaseElements % SubElements) == 0);
 3185
 3186 unsigned NumLanes = BaseElements / SubElements;
 3187 unsigned Lane = static_cast<unsigned>(Index % NumLanes);
 3188 unsigned InsertPos = Lane * SubElements;
 3189
 3190 PrimType ElemT = BaseVec.getFieldDesc()->getPrimType();
 3191
 3192 TYPE_SWITCH(ElemT, {
 3193 for (unsigned I = 0; I != BaseElements; ++I)
 3194 Dst.elem<T>(I) = BaseVec.elem<T>(I);
 3195 for (unsigned I = 0; I != SubElements; ++I)
 3196 Dst.elem<T>(InsertPos + I) = SubVec.elem<T>(I);
 3197 });
 3198
// (Extraction gap: source line 3199/3200 missing — likely Dst initialization.)
 3201 return true;
 3202}
3203
// phminposuw-style builtin (signature line missing from this extraction):
// finds the unsigned minimum element and its index; writes the minimum to
// element 0, the index to element 1, and zeros all remaining elements.
 3205 const CallExpr *Call) {
 3206 assert(Call->getNumArgs() == 1);
 3207
 3208 const Pointer &Source = S.Stk.pop<Pointer>();
 3209 const Pointer &Dest = S.Stk.peek<Pointer>();
 3210
 3211 unsigned SourceLen = Source.getNumElems();
 3212 QualType ElemQT = getElemType(Source);
 3213 OptPrimType ElemT = S.getContext().classify(ElemQT);
 3214 unsigned ElemBitWidth = S.getASTContext().getTypeSize(ElemQT);
 3215
 3216 bool DestUnsigned = Call->getCallReturnType(S.getASTContext())
 3217 ->castAs<VectorType>()
 3218 ->getElementType()
// (Extraction gap: source line 3219 missing — presumably
// ->isUnsignedIntegerOrEnumerationType(); verify upstream.)
 3220
 3221 INT_TYPE_SWITCH_NO_BOOL(*ElemT, {
 3222 APSInt MinIndex(ElemBitWidth, DestUnsigned);
 3223 APSInt MinVal = Source.elem<T>(0).toAPSInt();
 3224
// Strict ugt comparison: on ties the earliest index wins.
 3225 for (unsigned I = 1; I != SourceLen; ++I) {
 3226 APSInt Val = Source.elem<T>(I).toAPSInt();
 3227 if (MinVal.ugt(Val)) {
 3228 MinVal = Val;
 3229 MinIndex = I;
 3230 }
 3231 }
 3232
 3233 Dest.elem<T>(0) = static_cast<T>(MinVal);
 3234 Dest.elem<T>(1) = static_cast<T>(MinIndex);
 3235 for (unsigned I = 2; I != SourceLen; ++I) {
 3236 Dest.elem<T>(I) = static_cast<T>(APSInt(ElemBitWidth, DestUnsigned));
 3237 }
 3238 });
 3239 Dest.initializeAllElements();
 3240 return true;
 3241}
3242
// AVX-512 vpternlog-style builtin (signature line missing from this
// extraction): per lane, each result bit is looked up in the 8-bit truth
// table Imm using the corresponding bits of A, B, C as a 3-bit index.
// MaskZ selects zero-masking (vs. merge-from-A) for masked-off lanes.
 3244 const CallExpr *Call, bool MaskZ) {
 3245 assert(Call->getNumArgs() == 5);
 3246
 3247 APInt U = popToAPSInt(S, Call->getArg(4)); // Lane mask
 3248 APInt Imm = popToAPSInt(S, Call->getArg(3)); // Ternary truth table
 3249 const Pointer &C = S.Stk.pop<Pointer>();
 3250 const Pointer &B = S.Stk.pop<Pointer>();
 3251 const Pointer &A = S.Stk.pop<Pointer>();
 3252 const Pointer &Dst = S.Stk.peek<Pointer>();
 3253
 3254 unsigned DstLen = A.getNumElems();
 3255 QualType ElemQT = getElemType(A);
 3256 OptPrimType ElemT = S.getContext().classify(ElemQT);
 3257 unsigned LaneWidth = S.getASTContext().getTypeSize(ElemQT);
 3258 bool DstUnsigned = ElemQT->isUnsignedIntegerOrEnumerationType();
 3259
 3260 INT_TYPE_SWITCH_NO_BOOL(*ElemT, {
 3261 for (unsigned I = 0; I != DstLen; ++I) {
 3262 APInt ALane = A.elem<T>(I).toAPSInt();
 3263 APInt BLane = B.elem<T>(I).toAPSInt();
 3264 APInt CLane = C.elem<T>(I).toAPSInt();
 3265 APInt RLane(LaneWidth, 0);
 3266 if (U[I]) { // If lane not masked, compute ternary logic.
 3267 for (unsigned Bit = 0; Bit != LaneWidth; ++Bit) {
 3268 unsigned ABit = ALane[Bit];
 3269 unsigned BBit = BLane[Bit];
 3270 unsigned CBit = CLane[Bit];
// A is the most-significant selector bit, matching the pternlog encoding.
 3271 unsigned Idx = (ABit << 2) | (BBit << 1) | (CBit);
 3272 RLane.setBitVal(Bit, Imm[Idx]);
 3273 }
 3274 Dst.elem<T>(I) = static_cast<T>(APSInt(RLane, DstUnsigned));
 3275 } else if (MaskZ) { // If zero masked, zero the lane.
 3276 Dst.elem<T>(I) = static_cast<T>(APSInt(RLane, DstUnsigned));
 3277 } else { // Just masked, put in A lane.
 3278 Dst.elem<T>(I) = static_cast<T>(APSInt(ALane, DstUnsigned));
 3279 }
 3280 }
 3281 });
 3282 Dst.initializeAllElements();
 3283 return true;
 3284}
3285
// Extract a single element from a vector by immediate index (signature line
// missing from this extraction). The index is wrapped with a power-of-two
// mask (NumElems - 1) — assumes vector lengths are powers of two.
 3287 const CallExpr *Call, unsigned ID) {
 3288 assert(Call->getNumArgs() == 2);
 3289
 3290 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
 3291 const Pointer &Vec = S.Stk.pop<Pointer>();
 3292 if (!Vec.getFieldDesc()->isPrimitiveArray())
 3293 return false;
 3294
 3295 unsigned NumElems = Vec.getNumElems();
 3296 unsigned Index =
 3297 static_cast<unsigned>(ImmAPS.getZExtValue() & (NumElems - 1));
 3298
 3299 PrimType ElemT = Vec.getFieldDesc()->getPrimType();
 3300 // FIXME(#161685): Replace float+int split with a numeric-only type switch
 3301 if (ElemT == PT_Float) {
 3302 S.Stk.push<Floating>(Vec.elem<Floating>(Index));
 3303 return true;
 3304 }
// (Extraction gap: source line 3305 missing — presumably the integer
// type-switch opener matching the "});" below; verify upstream.)
 3306 APSInt V = Vec.elem<T>(Index).toAPSInt();
 3307 pushInteger(S, V, Call->getType());
 3308 });
 3309
 3310 return true;
 3311}
3312
// Insert a scalar into one element of a vector by immediate index (signature
// line missing from this extraction): copy Base into Dst, then overwrite the
// selected element. Index wraps with NumElems - 1 (power-of-two assumption).
 3314 const CallExpr *Call, unsigned ID) {
 3315 assert(Call->getNumArgs() == 3);
 3316
 3317 APSInt ImmAPS = popToAPSInt(S, Call->getArg(2));
 3318 APSInt ValAPS = popToAPSInt(S, Call->getArg(1));
 3319
 3320 const Pointer &Base = S.Stk.pop<Pointer>();
 3321 if (!Base.getFieldDesc()->isPrimitiveArray())
 3322 return false;
 3323
 3324 const Pointer &Dst = S.Stk.peek<Pointer>();
 3325
 3326 unsigned NumElems = Base.getNumElems();
 3327 unsigned Index =
 3328 static_cast<unsigned>(ImmAPS.getZExtValue() & (NumElems - 1));
 3329
 3330 PrimType ElemT = Base.getFieldDesc()->getPrimType();
// (Extraction gap: source line 3331 missing — presumably a type-switch opener.)
 3332 for (unsigned I = 0; I != NumElems; ++I)
 3333 Dst.elem<T>(I) = Base.elem<T>(I);
 3334 Dst.elem<T>(Index) = static_cast<T>(ValAPS);
 3335 });
 3336
// (Extraction gap: source line 3337 missing — likely Dst initialization call.)
 3338 return true;
 3339}
3340
3341static bool evalICmpImm(uint8_t Imm, const APSInt &A, const APSInt &B,
3342 bool IsUnsigned) {
3343 switch (Imm & 0x7) {
3344 case 0x00: // _MM_CMPINT_EQ
3345 return (A == B);
3346 case 0x01: // _MM_CMPINT_LT
3347 return IsUnsigned ? A.ult(B) : A.slt(B);
3348 case 0x02: // _MM_CMPINT_LE
3349 return IsUnsigned ? A.ule(B) : A.sle(B);
3350 case 0x03: // _MM_CMPINT_FALSE
3351 return false;
3352 case 0x04: // _MM_CMPINT_NE
3353 return (A != B);
3354 case 0x05: // _MM_CMPINT_NLT
3355 return IsUnsigned ? A.ugt(B) : A.sgt(B);
3356 case 0x06: // _MM_CMPINT_NLE
3357 return IsUnsigned ? A.uge(B) : A.sge(B);
3358 case 0x07: // _MM_CMPINT_TRUE
3359 return true;
3360 default:
3361 llvm_unreachable("Invalid Op");
3362 }
3363}
3364
// Masked AVX-512 integer compare (signature line missing from this
// extraction): result bit I is set iff the write-mask bit is set AND the
// per-element predicate (decoded by evalICmpImm) holds.
 3366 const CallExpr *Call, unsigned ID,
 3367 bool IsUnsigned) {
 3368 assert(Call->getNumArgs() == 4);
 3369
 3370 APSInt Mask = popToAPSInt(S, Call->getArg(3));
 3371 APSInt Opcode = popToAPSInt(S, Call->getArg(2));
 3372 unsigned CmpOp = static_cast<unsigned>(Opcode.getZExtValue());
 3373 const Pointer &RHS = S.Stk.pop<Pointer>();
 3374 const Pointer &LHS = S.Stk.pop<Pointer>();
 3375
 3376 assert(LHS.getNumElems() == RHS.getNumElems());
 3377
 3378 APInt RetMask = APInt::getZero(LHS.getNumElems());
 3379 unsigned VectorLen = LHS.getNumElems();
 3380 PrimType ElemT = LHS.getFieldDesc()->getPrimType();
 3381
 3382 for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
 3383 APSInt A, B;
// (Extraction gap: source line 3384 missing — presumably the type-switch
// opener matching the "});" below; verify upstream.)
 3385 A = LHS.elem<T>(ElemNum).toAPSInt();
 3386 B = RHS.elem<T>(ElemNum).toAPSInt();
 3387 });
 3388 RetMask.setBitVal(ElemNum,
 3389 Mask[ElemNum] && evalICmpImm(CmpOp, A, B, IsUnsigned));
 3390 }
 3391 pushInteger(S, RetMask, Call->getType());
 3392 return true;
 3393}
3394
// vpconflict-style builtin (signature line missing from this extraction):
// for each element I, bit J (J < I) of the result element is set iff
// Src[I] == Src[J]. Quadratic over the vector length, which is small.
 3396 const CallExpr *Call) {
 3397 assert(Call->getNumArgs() == 1);
 3398
 3399 QualType Arg0Type = Call->getArg(0)->getType();
 3400 const auto *VecT = Arg0Type->castAs<VectorType>();
 3401 PrimType ElemT = *S.getContext().classify(VecT->getElementType());
 3402 unsigned NumElems = VecT->getNumElements();
 3403 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
 3404 const Pointer &Src = S.Stk.pop<Pointer>();
 3405 const Pointer &Dst = S.Stk.peek<Pointer>();
 3406
 3407 for (unsigned I = 0; I != NumElems; ++I) {
// (Extraction gap: source line 3408 missing — presumably a type-switch opener.)
 3409 APSInt ElemI = Src.elem<T>(I).toAPSInt();
 3410 APInt ConflictMask(ElemI.getBitWidth(), 0);
 3411 for (unsigned J = 0; J != I; ++J) {
 3412 APSInt ElemJ = Src.elem<T>(J).toAPSInt();
 3413 ConflictMask.setBitVal(J, ElemI == ElemJ);
 3414 }
 3415 Dst.elem<T>(I) = static_cast<T>(APSInt(ConflictMask, DestUnsigned));
 3416 });
 3417 }
// (Extraction gap: source line 3418 missing — likely Dst initialization call.)
 3419 return true;
 3420}
3421
// Collects the most-significant bit of each integer element into bit I of an
// integer mask result (signature line missing from this extraction).
 3423 const CallExpr *Call,
 3424 unsigned ID) {
 3425 assert(Call->getNumArgs() == 1);
 3426
 3427 const Pointer &Vec = S.Stk.pop<Pointer>();
 3428 unsigned RetWidth = S.getASTContext().getIntWidth(Call->getType());
 3429 APInt RetMask(RetWidth, 0);
 3430
 3431 unsigned VectorLen = Vec.getNumElems();
 3432 PrimType ElemT = Vec.getFieldDesc()->getPrimType();
 3433
 3434 for (unsigned ElemNum = 0; ElemNum != VectorLen; ++ElemNum) {
 3435 APSInt A;
 3436 INT_TYPE_SWITCH_NO_BOOL(ElemT, { A = Vec.elem<T>(ElemNum).toAPSInt(); });
// APInt::operator[] reads a single bit; index BitWidth-1 is the sign/MSB.
 3437 unsigned MSB = A[A.getBitWidth() - 1];
 3438 RetMask.setBitVal(ElemNum, MSB);
 3439 }
 3440 pushInteger(S, RetMask, Call->getType());
 3441 return true;
 3442}
3443
// Broadcast an integer mask into a vector (signature line missing from this
// extraction): element I becomes all-ones when mask bit I is set, else zero.
// Note: the result vector is peeked (the caller's result slot), not popped.
 3445 const CallExpr *Call,
 3446 unsigned ID) {
 3447 assert(Call->getNumArgs() == 1);
 3448
 3449 APSInt Mask = popToAPSInt(S, Call->getArg(0));
 3450
 3451 const Pointer &Vec = S.Stk.peek<Pointer>();
 3452 unsigned NumElems = Vec.getNumElems();
 3453 PrimType ElemT = Vec.getFieldDesc()->getPrimType();
 3454
 3455 for (unsigned I = 0; I != NumElems; ++I) {
 3456 bool BitSet = Mask[I];
 3457
// (Extraction gap: source line 3458 missing — presumably the type-switch
// macro name whose argument list continues on the next line.)
 3459 ElemT, { Vec.elem<T>(I) = BitSet ? T::from(-1) : T::from(0); });
 3460 }
 3461
// (Extraction gap: source line 3462 missing — likely Vec initialization call.)
 3463
 3464 return true;
 3465}
3466
// cvtsd2ss-style builtin (signature line missing from this extraction):
// converts element 0 from double to float (honoring the mask in the
// rounding/mask form), copying the remaining lanes from A.
 3467 const CallExpr *Call,
 3468 bool HasRoundingMask) {
 3469 APSInt Rounding, MaskInt;
 3470 Pointer Src, B, A;
 3471
 3472 if (HasRoundingMask) {
 3473 assert(Call->getNumArgs() == 5);
 3474 Rounding = popToAPSInt(S, Call->getArg(4));
 3475 MaskInt = popToAPSInt(S, Call->getArg(3));
 3476 Src = S.Stk.pop<Pointer>();
 3477 B = S.Stk.pop<Pointer>();
 3478 A = S.Stk.pop<Pointer>();
 3479 if (!CheckLoad(S, OpPC, A) || !CheckLoad(S, OpPC, B) ||
 3480 !CheckLoad(S, OpPC, Src))
 3481 return false;
 3482 } else {
 3483 assert(Call->getNumArgs() == 2);
 3484 B = S.Stk.pop<Pointer>();
 3485 A = S.Stk.pop<Pointer>();
 3486 if (!CheckLoad(S, OpPC, A) || !CheckLoad(S, OpPC, B))
 3487 return false;
 3488 }
 3489
 3490 const auto *DstVTy = Call->getType()->castAs<VectorType>();
 3491 unsigned NumElems = DstVTy->getNumElements();
 3492 const Pointer &Dst = S.Stk.peek<Pointer>();
 3493
 3494 // Copy all elements except lane 0 (overwritten below) from A to Dst.
 3495 for (unsigned I = 1; I != NumElems; ++I)
 3496 Dst.elem<Floating>(I) = A.elem<Floating>(I);
 3497
 3498 // Convert element 0 from double to float, or use Src if masked off.
 3499 if (!HasRoundingMask || (MaskInt.getZExtValue() & 0x1)) {
 3500 assert(S.getASTContext().FloatTy == DstVTy->getElementType() &&
 3501 "cvtsd2ss requires float element type in destination vector");
 3502
 3503 Floating Conv = S.allocFloat(
 3504 S.getASTContext().getFloatTypeSemantics(DstVTy->getElementType()));
 3505 APFloat SrcVal = B.elem<Floating>(0).getAPFloat();
// Fails (returns false) when the narrowing conversion is inexact and cannot
// be constant-evaluated — see convertDoubleToFloatStrict.
 3506 if (!convertDoubleToFloatStrict(SrcVal, Conv, S, Call))
 3507 return false;
 3508 Dst.elem<Floating>(0) = Conv;
 3509 } else {
 3510 Dst.elem<Floating>(0) = Src.elem<Floating>(0);
 3511 }
 3512
// (Extraction gap: source line 3513/3514 missing — likely Dst initialization.)
 3515 return true;
 3516}
3517
// cvtpd2ps-style builtin (signature line missing from this extraction):
// converts each source double to float, with optional mask/pass-through and
// an optional (unused-here) rounding operand. Extra destination lanes are
// zero-filled in the unmasked form.
 3519 const CallExpr *Call, bool IsMasked,
 3520 bool HasRounding) {
 3521
 3522 APSInt MaskVal;
 3523 Pointer PassThrough;
 3524 Pointer Src;
 3525 APSInt Rounding;
 3526
 3527 if (IsMasked) {
 3528 // Pop in reverse order.
 3529 if (HasRounding) {
 3530 Rounding = popToAPSInt(S, Call->getArg(3));
 3531 MaskVal = popToAPSInt(S, Call->getArg(2));
 3532 PassThrough = S.Stk.pop<Pointer>();
 3533 Src = S.Stk.pop<Pointer>();
 3534 } else {
 3535 MaskVal = popToAPSInt(S, Call->getArg(2));
 3536 PassThrough = S.Stk.pop<Pointer>();
 3537 Src = S.Stk.pop<Pointer>();
 3538 }
 3539
 3540 if (!CheckLoad(S, OpPC, PassThrough))
 3541 return false;
 3542 } else {
 3543 // Pop source only.
 3544 Src = S.Stk.pop<Pointer>();
 3545 }
 3546
 3547 if (!CheckLoad(S, OpPC, Src))
 3548 return false;
 3549
 3550 const auto *RetVTy = Call->getType()->castAs<VectorType>();
 3551 unsigned RetElems = RetVTy->getNumElements();
 3552 unsigned SrcElems = Src.getNumElems();
 3553 const Pointer &Dst = S.Stk.peek<Pointer>();
 3554
 3555 // Initialize destination with passthrough or zeros.
 3556 for (unsigned I = 0; I != RetElems; ++I)
 3557 if (IsMasked)
 3558 Dst.elem<Floating>(I) = PassThrough.elem<Floating>(I);
 3559 else
 3560 Dst.elem<Floating>(I) = Floating(APFloat(0.0f));
 3561
 3562 assert(S.getASTContext().FloatTy == RetVTy->getElementType() &&
 3563 "cvtpd2ps requires float element type in return vector");
 3564
 3565 // Convert double to float for enabled elements (only process source elements
 3566 // that exist).
 3567 for (unsigned I = 0; I != SrcElems; ++I) {
 3568 if (IsMasked && !MaskVal[I])
 3569 continue;
 3570
 3571 APFloat SrcVal = Src.elem<Floating>(I).getAPFloat();
 3572
 3573 Floating Conv = S.allocFloat(
 3574 S.getASTContext().getFloatTypeSemantics(RetVTy->getElementType()));
 3575 if (!convertDoubleToFloatStrict(SrcVal, Conv, S, Call))
 3576 return false;
 3577 Dst.elem<Floating>(I) = Conv;
 3578 }
 3579
// (Extraction gap: source line 3580 missing — likely Dst initialization call.)
 3581 return true;
 3582}
3583
// Generic shuffle helper (signature line missing from this extraction):
// GetSourceIndex maps (destination index, per-element or immediate shuffle
// mask) to (source vector 0/1, source element index); a negative source
// index zeroes the destination element. Supports single- and two-operand
// forms, with either an integer immediate or a vector mask.
 3585 InterpState &S, CodePtr OpPC, const CallExpr *Call,
 3586 llvm::function_ref<std::pair<unsigned, int>(unsigned, unsigned)>
 3587 GetSourceIndex) {
 3588
 3589 assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);
 3590
 3591 unsigned ShuffleMask = 0;
 3592 Pointer A, MaskVector, B;
 3593 bool IsVectorMask = false;
 3594 bool IsSingleOperand = (Call->getNumArgs() == 2);
 3595
 3596 if (IsSingleOperand) {
 3597 QualType MaskType = Call->getArg(1)->getType();
 3598 if (MaskType->isVectorType()) {
 3599 IsVectorMask = true;
 3600 MaskVector = S.Stk.pop<Pointer>();
 3601 A = S.Stk.pop<Pointer>();
// Single-operand form: both "sources" alias the same vector.
 3602 B = A;
 3603 } else if (MaskType->isIntegerType()) {
 3604 ShuffleMask = popToAPSInt(S, Call->getArg(1)).getZExtValue();
 3605 A = S.Stk.pop<Pointer>();
 3606 B = A;
 3607 } else {
 3608 return false;
 3609 }
 3610 } else {
 3611 QualType Arg2Type = Call->getArg(2)->getType();
 3612 if (Arg2Type->isVectorType()) {
 3613 IsVectorMask = true;
 3614 B = S.Stk.pop<Pointer>();
 3615 MaskVector = S.Stk.pop<Pointer>();
 3616 A = S.Stk.pop<Pointer>();
 3617 } else if (Arg2Type->isIntegerType()) {
 3618 ShuffleMask = popToAPSInt(S, Call->getArg(2)).getZExtValue();
 3619 B = S.Stk.pop<Pointer>();
 3620 A = S.Stk.pop<Pointer>();
 3621 } else {
 3622 return false;
 3623 }
 3624 }
 3625
 3626 QualType Arg0Type = Call->getArg(0)->getType();
 3627 const auto *VecT = Arg0Type->castAs<VectorType>();
 3628 PrimType ElemT = *S.getContext().classify(VecT->getElementType());
 3629 unsigned NumElems = VecT->getNumElements();
 3630
 3631 const Pointer &Dst = S.Stk.peek<Pointer>();
 3632
 3633 PrimType MaskElemT = PT_Uint32;
 3634 if (IsVectorMask) {
 3635 QualType Arg1Type = Call->getArg(1)->getType();
 3636 const auto *MaskVecT = Arg1Type->castAs<VectorType>();
 3637 QualType MaskElemType = MaskVecT->getElementType();
 3638 MaskElemT = *S.getContext().classify(MaskElemType);
 3639 }
 3640
 3641 for (unsigned DstIdx = 0; DstIdx != NumElems; ++DstIdx) {
// With a vector mask, the shuffle selector is re-read for every element.
 3642 if (IsVectorMask) {
 3643 INT_TYPE_SWITCH(MaskElemT, {
 3644 ShuffleMask = static_cast<unsigned>(MaskVector.elem<T>(DstIdx));
 3645 });
 3646 }
 3647
 3648 auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);
 3649
 3650 if (SrcIdx < 0) {
 3651 // Zero out this element
 3652 if (ElemT == PT_Float) {
 3653 Dst.elem<Floating>(DstIdx) = Floating(
 3654 S.getASTContext().getFloatTypeSemantics(VecT->getElementType()));
 3655 } else {
 3656 INT_TYPE_SWITCH_NO_BOOL(ElemT, { Dst.elem<T>(DstIdx) = T::from(0); });
 3657 }
 3658 } else {
 3659 const Pointer &Src = (SrcVecIdx == 0) ? A : B;
 3660 TYPE_SWITCH(ElemT, { Dst.elem<T>(DstIdx) = Src.elem<T>(SrcIdx); });
 3661 }
 3662 }
// (Extraction gap: source line 3663 missing — likely Dst initialization call.)
 3664
 3665 return true;
 3666}
3667
// psll/psrl-style shift helper (signature line missing from this
// extraction): the shift amount is the low 64 bits of the count vector;
// counts >= the element width invoke OverflowOp (e.g. zero or sign-fill).
 3669 InterpState &S, CodePtr OpPC, const CallExpr *Call,
 3670 llvm::function_ref<APInt(const APInt &, uint64_t)> ShiftOp,
 3671 llvm::function_ref<APInt(const APInt &, unsigned)> OverflowOp) {
 3672
 3673 assert(Call->getNumArgs() == 2);
 3674
 3675 const Pointer &Count = S.Stk.pop<Pointer>();
 3676 const Pointer &Source = S.Stk.pop<Pointer>();
 3677
 3678 QualType SourceType = Call->getArg(0)->getType();
 3679 QualType CountType = Call->getArg(1)->getType();
 3680 assert(SourceType->isVectorType() && CountType->isVectorType());
 3681
 3682 const auto *SourceVecT = SourceType->castAs<VectorType>();
 3683 const auto *CountVecT = CountType->castAs<VectorType>();
 3684 PrimType SourceElemT = *S.getContext().classify(SourceVecT->getElementType());
 3685 PrimType CountElemT = *S.getContext().classify(CountVecT->getElementType());
 3686
 3687 const Pointer &Dst = S.Stk.peek<Pointer>();
 3688
 3689 unsigned DestEltWidth =
 3690 S.getASTContext().getTypeSize(SourceVecT->getElementType());
 3691 bool IsDestUnsigned = SourceVecT->getElementType()->isUnsignedIntegerType();
 3692 unsigned DestLen = SourceVecT->getNumElements();
 3693 unsigned CountEltWidth =
 3694 S.getASTContext().getTypeSize(CountVecT->getElementType());
 3695 unsigned NumBitsInQWord = 64;
 3696 unsigned NumCountElts = NumBitsInQWord / CountEltWidth;
 3697
// Reassemble the low 64 bits of the count vector into a single scalar.
 3698 uint64_t CountLQWord = 0;
 3699 for (unsigned EltIdx = 0; EltIdx != NumCountElts; ++EltIdx) {
 3700 uint64_t Elt = 0;
 3701 INT_TYPE_SWITCH(CountElemT,
 3702 { Elt = static_cast<uint64_t>(Count.elem<T>(EltIdx)); });
 3703 CountLQWord |= (Elt << (EltIdx * CountEltWidth));
 3704 }
 3705
 3706 for (unsigned EltIdx = 0; EltIdx != DestLen; ++EltIdx) {
 3707 APSInt Elt;
 3708 INT_TYPE_SWITCH(SourceElemT, { Elt = Source.elem<T>(EltIdx).toAPSInt(); });
 3709
 3710 APInt Result;
 3711 if (CountLQWord < DestEltWidth) {
 3712 Result = ShiftOp(Elt, CountLQWord);
 3713 } else {
 3714 Result = OverflowOp(Elt, DestEltWidth);
 3715 }
 3716 if (IsDestUnsigned) {
 3717 INT_TYPE_SWITCH(SourceElemT, {
 3718 Dst.elem<T>(EltIdx) = T::from(Result.getZExtValue());
 3719 });
 3720 } else {
 3721 INT_TYPE_SWITCH(SourceElemT, {
 3722 Dst.elem<T>(EltIdx) = T::from(Result.getSExtValue());
 3723 });
 3724 }
 3725 }
 3726
// (Extraction gap: source line 3727 missing — likely Dst initialization call.)
 3728 return true;
 3729}
3730
// vpshufbitqmb-style builtin (signature line missing from this extraction):
// per byte, selects one bit out of the containing 64-bit source group using
// the low 6 bits of the shuffle-mask byte; the result is gated by ZeroMask.
 3732 const CallExpr *Call) {
 3733
 3734 assert(Call->getNumArgs() == 3);
 3735
 3736 QualType SourceType = Call->getArg(0)->getType();
 3737 QualType ShuffleMaskType = Call->getArg(1)->getType();
 3738 QualType ZeroMaskType = Call->getArg(2)->getType();
 3739 if (!SourceType->isVectorType() || !ShuffleMaskType->isVectorType() ||
 3740 !ZeroMaskType->isIntegerType()) {
 3741 return false;
 3742 }
 3743
 3744 Pointer Source, ShuffleMask;
 3745 APSInt ZeroMask = popToAPSInt(S, Call->getArg(2));
 3746 ShuffleMask = S.Stk.pop<Pointer>();
 3747 Source = S.Stk.pop<Pointer>();
 3748
 3749 const auto *SourceVecT = SourceType->castAs<VectorType>();
 3750 const auto *ShuffleMaskVecT = ShuffleMaskType->castAs<VectorType>();
 3751 assert(SourceVecT->getNumElements() == ShuffleMaskVecT->getNumElements());
 3752 assert(ZeroMask.getBitWidth() == SourceVecT->getNumElements());
 3753
 3754 PrimType SourceElemT = *S.getContext().classify(SourceVecT->getElementType());
 3755 PrimType ShuffleMaskElemT =
 3756 *S.getContext().classify(ShuffleMaskVecT->getElementType());
 3757
 3758 unsigned NumBytesInQWord = 8;
 3759 unsigned NumBitsInByte = 8;
 3760 unsigned NumBytes = SourceVecT->getNumElements();
 3761 unsigned NumQWords = NumBytes / NumBytesInQWord;
 3762 unsigned RetWidth = ZeroMask.getBitWidth();
 3763 APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true);
 3764
 3765 for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) {
// First reassemble this group's 8 source bytes into one 64-bit value.
 3766 APInt SourceQWord(64, 0);
 3767 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
 3768 uint64_t Byte = 0;
 3769 INT_TYPE_SWITCH(SourceElemT, {
 3770 Byte = static_cast<uint64_t>(
 3771 Source.elem<T>(QWordId * NumBytesInQWord + ByteIdx));
 3772 });
 3773 SourceQWord.insertBits(APInt(8, Byte & 0xFF), ByteIdx * NumBitsInByte);
 3774 }
 3775
 3776 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
 3777 unsigned SelIdx = QWordId * NumBytesInQWord + ByteIdx;
 3778 unsigned M = 0;
 3779 INT_TYPE_SWITCH(ShuffleMaskElemT, {
 3780 M = static_cast<unsigned>(ShuffleMask.elem<T>(SelIdx)) & 0x3F;
 3781 });
 3782
// RetMask bits not covered by ZeroMask stay 0 (RetMask starts zeroed).
 3783 if (ZeroMask[SelIdx]) {
 3784 RetMask.setBitVal(SelIdx, SourceQWord[M]);
 3785 }
 3786 }
 3787 }
 3788
 3789 pushInteger(S, RetMask, Call->getType());
 3790 return true;
 3791}
3792
// vcvtps2ph-style builtin (signature line missing from this extraction):
// converts each source float to IEEE half using the rounding mode from the
// immediate; the MXCSR mode (imm bit 2) is only allowed when the conversion
// is exact under strict FP rules.
 3794 const CallExpr *Call) {
 3795 // Arguments are: vector of floats, rounding immediate
 3796 assert(Call->getNumArgs() == 2);
 3797
 3798 APSInt Imm = popToAPSInt(S, Call->getArg(1));
 3799 const Pointer &Src = S.Stk.pop<Pointer>();
 3800 const Pointer &Dst = S.Stk.peek<Pointer>();
 3801
 3802 assert(Src.getFieldDesc()->isPrimitiveArray());
 3803 assert(Dst.getFieldDesc()->isPrimitiveArray());
 3804
 3805 const auto *SrcVTy = Call->getArg(0)->getType()->castAs<VectorType>();
 3806 unsigned SrcNumElems = SrcVTy->getNumElements();
 3807 const auto *DstVTy = Call->getType()->castAs<VectorType>();
 3808 unsigned DstNumElems = DstVTy->getNumElements();
 3809
 3810 const llvm::fltSemantics &HalfSem =
// (Extraction gap: source line 3811 missing — presumably the initializer
// fetching the half-float semantics from the ASTContext; verify upstream.)
 3812
 3813 // imm[2] == 1 means use MXCSR rounding mode.
 3814 // In that case, we can only evaluate if the conversion is exact.
 3815 int ImmVal = Imm.getZExtValue();
 3816 bool UseMXCSR = (ImmVal & 4) != 0;
 3817 bool IsFPConstrained =
 3818 Call->getFPFeaturesInEffect(S.getASTContext().getLangOpts())
 3819 .isFPConstrained();
 3820
 3821 llvm::RoundingMode RM;
 3822 if (!UseMXCSR) {
 3823 switch (ImmVal & 3) {
 3824 case 0:
 3825 RM = llvm::RoundingMode::NearestTiesToEven;
 3826 break;
 3827 case 1:
 3828 RM = llvm::RoundingMode::TowardNegative;
 3829 break;
 3830 case 2:
 3831 RM = llvm::RoundingMode::TowardPositive;
 3832 break;
 3833 case 3:
 3834 RM = llvm::RoundingMode::TowardZero;
 3835 break;
 3836 default:
 3837 llvm_unreachable("Invalid immediate rounding mode");
 3838 }
 3839 } else {
 3840 // For MXCSR, we must check for exactness. We can use any rounding mode
 3841 // for the trial conversion since the result is the same if it's exact.
 3842 RM = llvm::RoundingMode::NearestTiesToEven;
 3843 }
 3844
 3845 QualType DstElemQT = Dst.getFieldDesc()->getElemQualType();
 3846 PrimType DstElemT = *S.getContext().classify(DstElemQT);
 3847
 3848 for (unsigned I = 0; I != SrcNumElems; ++I) {
 3849 Floating SrcVal = Src.elem<Floating>(I);
 3850 APFloat DstVal = SrcVal.getAPFloat();
 3851
 3852 bool LostInfo;
 3853 APFloat::opStatus St = DstVal.convert(HalfSem, RM, &LostInfo);
 3854
// Dynamic (MXCSR) rounding with an inexact result is not constant-evaluable.
 3855 if (UseMXCSR && IsFPConstrained && St != APFloat::opOK) {
 3856 S.FFDiag(S.Current->getSource(OpPC),
 3857 diag::note_constexpr_dynamic_rounding);
 3858 return false;
 3859 }
 3860
 3861 INT_TYPE_SWITCH_NO_BOOL(DstElemT, {
 3862 // Convert the destination value's bit pattern to an unsigned integer,
 3863 // then reconstruct the element using the target type's 'from' method.
 3864 uint64_t RawBits = DstVal.bitcastToAPInt().getZExtValue();
 3865 Dst.elem<T>(I) = T::from(RawBits);
 3866 });
 3867 }
 3868
 3869 // Zero out remaining elements if the destination has more elements
 3870 // (e.g., vcvtps2ph converting 4 floats to 8 shorts).
 3871 if (DstNumElems > SrcNumElems) {
 3872 for (unsigned I = SrcNumElems; I != DstNumElems; ++I) {
 3873 INT_TYPE_SWITCH_NO_BOOL(DstElemT, { Dst.elem<T>(I) = T::from(0); });
 3874 }
 3875 }
 3876
 3877 Dst.initializeAllElements();
 3878 return true;
 3879}
3880
// vpmultishiftqb-style builtin (signature line missing from this
// extraction): for each destination byte, extracts 8 bits from the
// corresponding 64-bit group of B, starting at the (wrapping) bit offset
// given by the control byte in A.
 3882 const CallExpr *Call) {
 3883 assert(Call->getNumArgs() == 2);
 3884
 3885 QualType ATy = Call->getArg(0)->getType();
 3886 QualType BTy = Call->getArg(1)->getType();
 3887 if (!ATy->isVectorType() || !BTy->isVectorType()) {
 3888 return false;
 3889 }
 3890
 3891 const Pointer &BPtr = S.Stk.pop<Pointer>();
 3892 const Pointer &APtr = S.Stk.pop<Pointer>();
 3893 const auto *AVecT = ATy->castAs<VectorType>();
 3894 assert(AVecT->getNumElements() ==
 3895 BTy->castAs<VectorType>()->getNumElements());
 3896
 3897 PrimType ElemT = *S.getContext().classify(AVecT->getElementType());
 3898
 3899 unsigned NumBytesInQWord = 8;
 3900 unsigned NumBitsInByte = 8;
 3901 unsigned NumBytes = AVecT->getNumElements();
 3902 unsigned NumQWords = NumBytes / NumBytesInQWord;
 3903 const Pointer &Dst = S.Stk.peek<Pointer>();
 3904
 3905 for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) {
// Reassemble this group's bytes of B into a 64-bit value.
 3906 APInt BQWord(64, 0);
 3907 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
 3908 unsigned Idx = QWordId * NumBytesInQWord + ByteIdx;
 3909 INT_TYPE_SWITCH(ElemT, {
 3910 uint64_t Byte = static_cast<uint64_t>(BPtr.elem<T>(Idx));
 3911 BQWord.insertBits(APInt(8, Byte & 0xFF), ByteIdx * NumBitsInByte);
 3912 });
 3913 }
 3914
 3915 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
 3916 unsigned Idx = QWordId * NumBytesInQWord + ByteIdx;
 3917 uint64_t Ctrl = 0;
// (Extraction gap: source line 3918 missing — presumably the type-switch
// macro name whose argument list continues on the next line.)
 3919 ElemT, { Ctrl = static_cast<uint64_t>(APtr.elem<T>(Idx)) & 0x3F; });
 3920
// Bit offsets wrap around within the 64-bit group ((Ctrl + BitIdx) & 0x3F).
 3921 APInt Byte(8, 0);
 3922 for (unsigned BitIdx = 0; BitIdx != NumBitsInByte; ++BitIdx) {
 3923 Byte.setBitVal(BitIdx, BQWord[(Ctrl + BitIdx) & 0x3F]);
 3924 }
 3925 INT_TYPE_SWITCH(ElemT,
 3926 { Dst.elem<T>(Idx) = T::from(Byte.getZExtValue()); });
 3927 }
 3928 }
 3929
// (Extraction gap: source line 3930 missing — likely Dst initialization call.)
 3931
 3932 return true;
 3933}
3934
// GFNI affine transform (gf2p8affineqb / inverse variant; signature line
// missing from this extraction): per 64-bit group, computes the GF(2) matrix
// product A*X plus the constant byte Imm for each byte of X, delegating the
// per-byte math to GFNIAffine.
 3936 const CallExpr *Call,
 3937 bool Inverse) {
 3938 assert(Call->getNumArgs() == 3);
 3939 QualType XType = Call->getArg(0)->getType();
 3940 QualType AType = Call->getArg(1)->getType();
 3941 QualType ImmType = Call->getArg(2)->getType();
 3942 if (!XType->isVectorType() || !AType->isVectorType() ||
 3943 !ImmType->isIntegerType()) {
 3944 return false;
 3945 }
 3946
 3947 Pointer X, A;
 3948 APSInt Imm = popToAPSInt(S, Call->getArg(2));
 3949 A = S.Stk.pop<Pointer>();
 3950 X = S.Stk.pop<Pointer>();
 3951
 3952 const Pointer &Dst = S.Stk.peek<Pointer>();
 3953 const auto *AVecT = AType->castAs<VectorType>();
 3954 assert(XType->castAs<VectorType>()->getNumElements() ==
 3955 AVecT->getNumElements());
 3956 unsigned NumBytesInQWord = 8;
 3957 unsigned NumBytes = AVecT->getNumElements();
 3958 unsigned NumBitsInQWord = 64;
 3959 unsigned NumQWords = NumBytes / NumBytesInQWord;
 3960 unsigned NumBitsInByte = 8;
 3961 PrimType AElemT = *S.getContext().classify(AVecT->getElementType());
 3962
 3963 // computing A*X + Imm
 3964 for (unsigned QWordIdx = 0; QWordIdx != NumQWords; ++QWordIdx) {
 3965 // Extract the QWords from X, A
 3966 APInt XQWord(NumBitsInQWord, 0);
 3967 APInt AQWord(NumBitsInQWord, 0);
 3968 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
 3969 unsigned Idx = QWordIdx * NumBytesInQWord + ByteIdx;
 3970 uint8_t XByte;
 3971 uint8_t AByte;
 3972 INT_TYPE_SWITCH(AElemT, {
 3973 XByte = static_cast<uint8_t>(X.elem<T>(Idx));
 3974 AByte = static_cast<uint8_t>(A.elem<T>(Idx));
 3975 });
 3976
 3977 XQWord.insertBits(APInt(NumBitsInByte, XByte), ByteIdx * NumBitsInByte);
 3978 AQWord.insertBits(APInt(NumBitsInByte, AByte), ByteIdx * NumBitsInByte);
 3979 }
 3980
 3981 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
 3982 unsigned Idx = QWordIdx * NumBytesInQWord + ByteIdx;
// Re-extract the byte of X for this position; the whole AQWord (the 8x8
// bit matrix for this group) is handed to GFNIAffine.
 3983 uint8_t XByte =
 3984 XQWord.lshr(ByteIdx * NumBitsInByte).getLoBits(8).getZExtValue();
 3985 INT_TYPE_SWITCH(AElemT, {
 3986 Dst.elem<T>(Idx) = T::from(GFNIAffine(XByte, AQWord, Imm, Inverse));
 3987 });
 3988 }
 3989 }
 3990 Dst.initializeAllElements();
 3991 return true;
 3992}
3993
3995 const CallExpr *Call) {
// NOTE(review): the opening of this signature (return type, function name,
// and the leading parameters -- presumably `static bool
// interp__builtin_ia32_...(InterpState &S, CodePtr OpPC,`) is not visible
// in this listing; confirm against the full source file.
//
// Constant-evaluates a two-operand, element-wise vector builtin: for every
// element index i, Dst[i] = GFNIMul(A[i], B[i]), where GFNIMul is a helper
// defined elsewhere in this file (not visible here; presumably a GF(2^8)
// multiply matching the GFNI gf2p8mulb semantics -- TODO confirm).
// Returns true on success, false when either argument is not a vector.
3996 assert(Call->getNumArgs() == 2);
3997
// Both arguments must be vector-typed; otherwise bail out and let the
// caller diagnose.
3998 QualType AType = Call->getArg(0)->getType();
3999 QualType BType = Call->getArg(1)->getType();
4000 if (!AType->isVectorType() || !BType->isVectorType()) {
4001 return false;
4002 }
4003
// Pop the argument vectors in reverse push order: B (arg 1) first,
// then A (arg 0).
4004 Pointer A, B;
4005 B = S.Stk.pop<Pointer>();
4006 A = S.Stk.pop<Pointer>();
4007
// The destination is peeked, not popped: it remains on the stack as the
// call's result value.
4008 const Pointer &Dst = S.Stk.peek<Pointer>();
4009 const auto *AVecT = AType->castAs<VectorType>();
// Both vectors must have the same element count (checked only in asserts
// builds).
4010 assert(AVecT->getNumElements() ==
4011 BType->castAs<VectorType>()->getNumElements());
4012
4013 PrimType AElemT = *S.getContext().classify(AVecT->getElementType());
4014 unsigned NumBytes = A.getNumElems();
4015
// Element-wise multiply: each element is narrowed to a uint8_t before
// being fed to GFNIMul, and the 8-bit product is widened back into the
// vector's element type.
4016 for (unsigned ByteIdx = 0; ByteIdx != NumBytes; ++ByteIdx) {
4017 uint8_t AByte, BByte;
4018 INT_TYPE_SWITCH(AElemT, {
4019 AByte = static_cast<uint8_t>(A.elem<T>(ByteIdx));
4020 BByte = static_cast<uint8_t>(B.elem<T>(ByteIdx));
4021 Dst.elem<T>(ByteIdx) = T::from(GFNIMul(AByte, BByte));
4022 });
4023 }
4024
// Mark every destination element initialized so subsequent reads of the
// result are valid in the byte-code interpreter.
4025 Dst.initializeAllElements();
4026 return true;
4027}
4028
4030 uint32_t BuiltinID) {
4031 if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
4032 return Invalid(S, OpPC);
4033
4034 const InterpFrame *Frame = S.Current;
4035 switch (BuiltinID) {
4036 case Builtin::BI__builtin_is_constant_evaluated:
4038
4039 case Builtin::BI__builtin_assume:
4040 case Builtin::BI__assume:
4041 return interp__builtin_assume(S, OpPC, Frame, Call);
4042
4043 case Builtin::BI__builtin_strcmp:
4044 case Builtin::BIstrcmp:
4045 case Builtin::BI__builtin_strncmp:
4046 case Builtin::BIstrncmp:
4047 case Builtin::BI__builtin_wcsncmp:
4048 case Builtin::BIwcsncmp:
4049 case Builtin::BI__builtin_wcscmp:
4050 case Builtin::BIwcscmp:
4051 return interp__builtin_strcmp(S, OpPC, Frame, Call, BuiltinID);
4052
4053 case Builtin::BI__builtin_strlen:
4054 case Builtin::BIstrlen:
4055 case Builtin::BI__builtin_wcslen:
4056 case Builtin::BIwcslen:
4057 return interp__builtin_strlen(S, OpPC, Frame, Call, BuiltinID);
4058
4059 case Builtin::BI__builtin_nan:
4060 case Builtin::BI__builtin_nanf:
4061 case Builtin::BI__builtin_nanl:
4062 case Builtin::BI__builtin_nanf16:
4063 case Builtin::BI__builtin_nanf128:
4064 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/false);
4065
4066 case Builtin::BI__builtin_nans:
4067 case Builtin::BI__builtin_nansf:
4068 case Builtin::BI__builtin_nansl:
4069 case Builtin::BI__builtin_nansf16:
4070 case Builtin::BI__builtin_nansf128:
4071 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/true);
4072
4073 case Builtin::BI__builtin_huge_val:
4074 case Builtin::BI__builtin_huge_valf:
4075 case Builtin::BI__builtin_huge_vall:
4076 case Builtin::BI__builtin_huge_valf16:
4077 case Builtin::BI__builtin_huge_valf128:
4078 case Builtin::BI__builtin_inf:
4079 case Builtin::BI__builtin_inff:
4080 case Builtin::BI__builtin_infl:
4081 case Builtin::BI__builtin_inff16:
4082 case Builtin::BI__builtin_inff128:
4083 return interp__builtin_inf(S, OpPC, Frame, Call);
4084
4085 case Builtin::BI__builtin_copysign:
4086 case Builtin::BI__builtin_copysignf:
4087 case Builtin::BI__builtin_copysignl:
4088 case Builtin::BI__builtin_copysignf128:
4089 return interp__builtin_copysign(S, OpPC, Frame);
4090
4091 case Builtin::BI__builtin_fmin:
4092 case Builtin::BI__builtin_fminf:
4093 case Builtin::BI__builtin_fminl:
4094 case Builtin::BI__builtin_fminf16:
4095 case Builtin::BI__builtin_fminf128:
4096 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/false);
4097
4098 case Builtin::BI__builtin_fminimum_num:
4099 case Builtin::BI__builtin_fminimum_numf:
4100 case Builtin::BI__builtin_fminimum_numl:
4101 case Builtin::BI__builtin_fminimum_numf16:
4102 case Builtin::BI__builtin_fminimum_numf128:
4103 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/true);
4104
4105 case Builtin::BI__builtin_fmax:
4106 case Builtin::BI__builtin_fmaxf:
4107 case Builtin::BI__builtin_fmaxl:
4108 case Builtin::BI__builtin_fmaxf16:
4109 case Builtin::BI__builtin_fmaxf128:
4110 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/false);
4111
4112 case Builtin::BI__builtin_fmaximum_num:
4113 case Builtin::BI__builtin_fmaximum_numf:
4114 case Builtin::BI__builtin_fmaximum_numl:
4115 case Builtin::BI__builtin_fmaximum_numf16:
4116 case Builtin::BI__builtin_fmaximum_numf128:
4117 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/true);
4118
4119 case Builtin::BI__builtin_isnan:
4120 return interp__builtin_isnan(S, OpPC, Frame, Call);
4121
4122 case Builtin::BI__builtin_issignaling:
4123 return interp__builtin_issignaling(S, OpPC, Frame, Call);
4124
4125 case Builtin::BI__builtin_isinf:
4126 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/false, Call);
4127
4128 case Builtin::BI__builtin_isinf_sign:
4129 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/true, Call);
4130
4131 case Builtin::BI__builtin_isfinite:
4132 return interp__builtin_isfinite(S, OpPC, Frame, Call);
4133
4134 case Builtin::BI__builtin_isnormal:
4135 return interp__builtin_isnormal(S, OpPC, Frame, Call);
4136
4137 case Builtin::BI__builtin_issubnormal:
4138 return interp__builtin_issubnormal(S, OpPC, Frame, Call);
4139
4140 case Builtin::BI__builtin_iszero:
4141 return interp__builtin_iszero(S, OpPC, Frame, Call);
4142
4143 case Builtin::BI__builtin_signbit:
4144 case Builtin::BI__builtin_signbitf:
4145 case Builtin::BI__builtin_signbitl:
4146 return interp__builtin_signbit(S, OpPC, Frame, Call);
4147
4148 case Builtin::BI__builtin_isgreater:
4149 case Builtin::BI__builtin_isgreaterequal:
4150 case Builtin::BI__builtin_isless:
4151 case Builtin::BI__builtin_islessequal:
4152 case Builtin::BI__builtin_islessgreater:
4153 case Builtin::BI__builtin_isunordered:
4154 return interp_floating_comparison(S, OpPC, Call, BuiltinID);
4155
4156 case Builtin::BI__builtin_isfpclass:
4157 return interp__builtin_isfpclass(S, OpPC, Frame, Call);
4158
4159 case Builtin::BI__builtin_fpclassify:
4160 return interp__builtin_fpclassify(S, OpPC, Frame, Call);
4161
4162 case Builtin::BI__builtin_fabs:
4163 case Builtin::BI__builtin_fabsf:
4164 case Builtin::BI__builtin_fabsl:
4165 case Builtin::BI__builtin_fabsf128:
4166 return interp__builtin_fabs(S, OpPC, Frame);
4167
4168 case Builtin::BI__builtin_abs:
4169 case Builtin::BI__builtin_labs:
4170 case Builtin::BI__builtin_llabs:
4171 return interp__builtin_abs(S, OpPC, Frame, Call);
4172
4173 case Builtin::BI__builtin_popcount:
4174 case Builtin::BI__builtin_popcountl:
4175 case Builtin::BI__builtin_popcountll:
4176 case Builtin::BI__builtin_popcountg:
4177 case Builtin::BI__popcnt16: // Microsoft variants of popcount
4178 case Builtin::BI__popcnt:
4179 case Builtin::BI__popcnt64:
4180 return interp__builtin_popcount(S, OpPC, Frame, Call);
4181
4182 case Builtin::BI__builtin_parity:
4183 case Builtin::BI__builtin_parityl:
4184 case Builtin::BI__builtin_parityll:
4186 S, OpPC, Call, [](const APSInt &Val) {
4187 return APInt(Val.getBitWidth(), Val.popcount() % 2);
4188 });
4189 case Builtin::BI__builtin_clrsb:
4190 case Builtin::BI__builtin_clrsbl:
4191 case Builtin::BI__builtin_clrsbll:
4193 S, OpPC, Call, [](const APSInt &Val) {
4194 return APInt(Val.getBitWidth(),
4195 Val.getBitWidth() - Val.getSignificantBits());
4196 });
4197 case Builtin::BI__builtin_bitreverse8:
4198 case Builtin::BI__builtin_bitreverse16:
4199 case Builtin::BI__builtin_bitreverse32:
4200 case Builtin::BI__builtin_bitreverse64:
4202 S, OpPC, Call, [](const APSInt &Val) { return Val.reverseBits(); });
4203
4204 case Builtin::BI__builtin_classify_type:
4205 return interp__builtin_classify_type(S, OpPC, Frame, Call);
4206
4207 case Builtin::BI__builtin_expect:
4208 case Builtin::BI__builtin_expect_with_probability:
4209 return interp__builtin_expect(S, OpPC, Frame, Call);
4210
4211 case Builtin::BI__builtin_rotateleft8:
4212 case Builtin::BI__builtin_rotateleft16:
4213 case Builtin::BI__builtin_rotateleft32:
4214 case Builtin::BI__builtin_rotateleft64:
4215 case Builtin::BI_rotl8: // Microsoft variants of rotate left
4216 case Builtin::BI_rotl16:
4217 case Builtin::BI_rotl:
4218 case Builtin::BI_lrotl:
4219 case Builtin::BI_rotl64:
4221 S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) {
4222 return Value.rotl(Amount);
4223 });
4224
4225 case Builtin::BI__builtin_rotateright8:
4226 case Builtin::BI__builtin_rotateright16:
4227 case Builtin::BI__builtin_rotateright32:
4228 case Builtin::BI__builtin_rotateright64:
4229 case Builtin::BI_rotr8: // Microsoft variants of rotate right
4230 case Builtin::BI_rotr16:
4231 case Builtin::BI_rotr:
4232 case Builtin::BI_lrotr:
4233 case Builtin::BI_rotr64:
4235 S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) {
4236 return Value.rotr(Amount);
4237 });
4238
4239 case Builtin::BI__builtin_ffs:
4240 case Builtin::BI__builtin_ffsl:
4241 case Builtin::BI__builtin_ffsll:
4243 S, OpPC, Call, [](const APSInt &Val) {
4244 return APInt(Val.getBitWidth(),
4245 Val.isZero() ? 0u : Val.countTrailingZeros() + 1u);
4246 });
4247
4248 case Builtin::BIaddressof:
4249 case Builtin::BI__addressof:
4250 case Builtin::BI__builtin_addressof:
4251 assert(isNoopBuiltin(BuiltinID));
4252 return interp__builtin_addressof(S, OpPC, Frame, Call);
4253
4254 case Builtin::BIas_const:
4255 case Builtin::BIforward:
4256 case Builtin::BIforward_like:
4257 case Builtin::BImove:
4258 case Builtin::BImove_if_noexcept:
4259 assert(isNoopBuiltin(BuiltinID));
4260 return interp__builtin_move(S, OpPC, Frame, Call);
4261
4262 case Builtin::BI__builtin_eh_return_data_regno:
4264
4265 case Builtin::BI__builtin_launder:
4266 assert(isNoopBuiltin(BuiltinID));
4267 return true;
4268
4269 case Builtin::BI__builtin_add_overflow:
4270 case Builtin::BI__builtin_sub_overflow:
4271 case Builtin::BI__builtin_mul_overflow:
4272 case Builtin::BI__builtin_sadd_overflow:
4273 case Builtin::BI__builtin_uadd_overflow:
4274 case Builtin::BI__builtin_uaddl_overflow:
4275 case Builtin::BI__builtin_uaddll_overflow:
4276 case Builtin::BI__builtin_usub_overflow:
4277 case Builtin::BI__builtin_usubl_overflow:
4278 case Builtin::BI__builtin_usubll_overflow:
4279 case Builtin::BI__builtin_umul_overflow:
4280 case Builtin::BI__builtin_umull_overflow:
4281 case Builtin::BI__builtin_umulll_overflow:
4282 case Builtin::BI__builtin_saddl_overflow:
4283 case Builtin::BI__builtin_saddll_overflow:
4284 case Builtin::BI__builtin_ssub_overflow:
4285 case Builtin::BI__builtin_ssubl_overflow:
4286 case Builtin::BI__builtin_ssubll_overflow:
4287 case Builtin::BI__builtin_smul_overflow:
4288 case Builtin::BI__builtin_smull_overflow:
4289 case Builtin::BI__builtin_smulll_overflow:
4290 return interp__builtin_overflowop(S, OpPC, Call, BuiltinID);
4291
4292 case Builtin::BI__builtin_addcb:
4293 case Builtin::BI__builtin_addcs:
4294 case Builtin::BI__builtin_addc:
4295 case Builtin::BI__builtin_addcl:
4296 case Builtin::BI__builtin_addcll:
4297 case Builtin::BI__builtin_subcb:
4298 case Builtin::BI__builtin_subcs:
4299 case Builtin::BI__builtin_subc:
4300 case Builtin::BI__builtin_subcl:
4301 case Builtin::BI__builtin_subcll:
4302 return interp__builtin_carryop(S, OpPC, Frame, Call, BuiltinID);
4303
4304 case Builtin::BI__builtin_clz:
4305 case Builtin::BI__builtin_clzl:
4306 case Builtin::BI__builtin_clzll:
4307 case Builtin::BI__builtin_clzs:
4308 case Builtin::BI__builtin_clzg:
4309 case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
4310 case Builtin::BI__lzcnt:
4311 case Builtin::BI__lzcnt64:
4312 return interp__builtin_clz(S, OpPC, Frame, Call, BuiltinID);
4313
4314 case Builtin::BI__builtin_ctz:
4315 case Builtin::BI__builtin_ctzl:
4316 case Builtin::BI__builtin_ctzll:
4317 case Builtin::BI__builtin_ctzs:
4318 case Builtin::BI__builtin_ctzg:
4319 return interp__builtin_ctz(S, OpPC, Frame, Call, BuiltinID);
4320
4321 case Builtin::BI__builtin_elementwise_clzg:
4322 case Builtin::BI__builtin_elementwise_ctzg:
4324 BuiltinID);
4325 case Builtin::BI__builtin_bswapg:
4326 case Builtin::BI__builtin_bswap16:
4327 case Builtin::BI__builtin_bswap32:
4328 case Builtin::BI__builtin_bswap64:
4329 return interp__builtin_bswap(S, OpPC, Frame, Call);
4330
4331 case Builtin::BI__atomic_always_lock_free:
4332 case Builtin::BI__atomic_is_lock_free:
4333 return interp__builtin_atomic_lock_free(S, OpPC, Frame, Call, BuiltinID);
4334
4335 case Builtin::BI__c11_atomic_is_lock_free:
4337
4338 case Builtin::BI__builtin_complex:
4339 return interp__builtin_complex(S, OpPC, Frame, Call);
4340
4341 case Builtin::BI__builtin_is_aligned:
4342 case Builtin::BI__builtin_align_up:
4343 case Builtin::BI__builtin_align_down:
4344 return interp__builtin_is_aligned_up_down(S, OpPC, Frame, Call, BuiltinID);
4345
4346 case Builtin::BI__builtin_assume_aligned:
4347 return interp__builtin_assume_aligned(S, OpPC, Frame, Call);
4348
4349 case clang::X86::BI__builtin_ia32_bextr_u32:
4350 case clang::X86::BI__builtin_ia32_bextr_u64:
4351 case clang::X86::BI__builtin_ia32_bextri_u32:
4352 case clang::X86::BI__builtin_ia32_bextri_u64:
4354 S, OpPC, Call, [](const APSInt &Val, const APSInt &Idx) {
4355 unsigned BitWidth = Val.getBitWidth();
4356 uint64_t Shift = Idx.extractBitsAsZExtValue(8, 0);
4357 uint64_t Length = Idx.extractBitsAsZExtValue(8, 8);
4358 if (Length > BitWidth) {
4359 Length = BitWidth;
4360 }
4361
4362 // Handle out of bounds cases.
4363 if (Length == 0 || Shift >= BitWidth)
4364 return APInt(BitWidth, 0);
4365
4366 uint64_t Result = Val.getZExtValue() >> Shift;
4367 Result &= llvm::maskTrailingOnes<uint64_t>(Length);
4368 return APInt(BitWidth, Result);
4369 });
4370
4371 case clang::X86::BI__builtin_ia32_bzhi_si:
4372 case clang::X86::BI__builtin_ia32_bzhi_di:
4374 S, OpPC, Call, [](const APSInt &Val, const APSInt &Idx) {
4375 unsigned BitWidth = Val.getBitWidth();
4376 uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);
4377 APSInt Result = Val;
4378
4379 if (Index < BitWidth)
4380 Result.clearHighBits(BitWidth - Index);
4381
4382 return Result;
4383 });
4384
4385 case clang::X86::BI__builtin_ia32_ktestcqi:
4386 case clang::X86::BI__builtin_ia32_ktestchi:
4387 case clang::X86::BI__builtin_ia32_ktestcsi:
4388 case clang::X86::BI__builtin_ia32_ktestcdi:
4390 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4391 return APInt(sizeof(unsigned char) * 8, (~A & B) == 0);
4392 });
4393
4394 case clang::X86::BI__builtin_ia32_ktestzqi:
4395 case clang::X86::BI__builtin_ia32_ktestzhi:
4396 case clang::X86::BI__builtin_ia32_ktestzsi:
4397 case clang::X86::BI__builtin_ia32_ktestzdi:
4399 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4400 return APInt(sizeof(unsigned char) * 8, (A & B) == 0);
4401 });
4402
4403 case clang::X86::BI__builtin_ia32_kortestcqi:
4404 case clang::X86::BI__builtin_ia32_kortestchi:
4405 case clang::X86::BI__builtin_ia32_kortestcsi:
4406 case clang::X86::BI__builtin_ia32_kortestcdi:
4408 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4409 return APInt(sizeof(unsigned char) * 8, ~(A | B) == 0);
4410 });
4411
4412 case clang::X86::BI__builtin_ia32_kortestzqi:
4413 case clang::X86::BI__builtin_ia32_kortestzhi:
4414 case clang::X86::BI__builtin_ia32_kortestzsi:
4415 case clang::X86::BI__builtin_ia32_kortestzdi:
4417 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4418 return APInt(sizeof(unsigned char) * 8, (A | B) == 0);
4419 });
4420
4421 case clang::X86::BI__builtin_ia32_kshiftliqi:
4422 case clang::X86::BI__builtin_ia32_kshiftlihi:
4423 case clang::X86::BI__builtin_ia32_kshiftlisi:
4424 case clang::X86::BI__builtin_ia32_kshiftlidi:
4426 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4427 unsigned Amt = RHS.getZExtValue() & 0xFF;
4428 if (Amt >= LHS.getBitWidth())
4429 return APInt::getZero(LHS.getBitWidth());
4430 return LHS.shl(Amt);
4431 });
4432
4433 case clang::X86::BI__builtin_ia32_kshiftriqi:
4434 case clang::X86::BI__builtin_ia32_kshiftrihi:
4435 case clang::X86::BI__builtin_ia32_kshiftrisi:
4436 case clang::X86::BI__builtin_ia32_kshiftridi:
4438 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4439 unsigned Amt = RHS.getZExtValue() & 0xFF;
4440 if (Amt >= LHS.getBitWidth())
4441 return APInt::getZero(LHS.getBitWidth());
4442 return LHS.lshr(Amt);
4443 });
4444
4445 case clang::X86::BI__builtin_ia32_lzcnt_u16:
4446 case clang::X86::BI__builtin_ia32_lzcnt_u32:
4447 case clang::X86::BI__builtin_ia32_lzcnt_u64:
4449 S, OpPC, Call, [](const APSInt &Src) {
4450 return APInt(Src.getBitWidth(), Src.countLeadingZeros());
4451 });
4452
4453 case clang::X86::BI__builtin_ia32_tzcnt_u16:
4454 case clang::X86::BI__builtin_ia32_tzcnt_u32:
4455 case clang::X86::BI__builtin_ia32_tzcnt_u64:
4457 S, OpPC, Call, [](const APSInt &Src) {
4458 return APInt(Src.getBitWidth(), Src.countTrailingZeros());
4459 });
4460
4461 case clang::X86::BI__builtin_ia32_pdep_si:
4462 case clang::X86::BI__builtin_ia32_pdep_di:
4464 S, OpPC, Call, [](const APSInt &Val, const APSInt &Mask) {
4465 unsigned BitWidth = Val.getBitWidth();
4466 APInt Result = APInt::getZero(BitWidth);
4467
4468 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
4469 if (Mask[I])
4470 Result.setBitVal(I, Val[P++]);
4471 }
4472
4473 return Result;
4474 });
4475
4476 case clang::X86::BI__builtin_ia32_pext_si:
4477 case clang::X86::BI__builtin_ia32_pext_di:
4479 S, OpPC, Call, [](const APSInt &Val, const APSInt &Mask) {
4480 unsigned BitWidth = Val.getBitWidth();
4481 APInt Result = APInt::getZero(BitWidth);
4482
4483 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
4484 if (Mask[I])
4485 Result.setBitVal(P++, Val[I]);
4486 }
4487
4488 return Result;
4489 });
4490
4491 case clang::X86::BI__builtin_ia32_addcarryx_u32:
4492 case clang::X86::BI__builtin_ia32_addcarryx_u64:
4493 case clang::X86::BI__builtin_ia32_subborrow_u32:
4494 case clang::X86::BI__builtin_ia32_subborrow_u64:
4496 BuiltinID);
4497
4498 case Builtin::BI__builtin_os_log_format_buffer_size:
4500
4501 case Builtin::BI__builtin_ptrauth_string_discriminator:
4503
4504 case Builtin::BI__builtin_infer_alloc_token:
4506
4507 case Builtin::BI__noop:
4508 pushInteger(S, 0, Call->getType());
4509 return true;
4510
4511 case Builtin::BI__builtin_operator_new:
4512 return interp__builtin_operator_new(S, OpPC, Frame, Call);
4513
4514 case Builtin::BI__builtin_operator_delete:
4515 return interp__builtin_operator_delete(S, OpPC, Frame, Call);
4516
4517 case Builtin::BI__arithmetic_fence:
4519
4520 case Builtin::BI__builtin_reduce_add:
4521 case Builtin::BI__builtin_reduce_mul:
4522 case Builtin::BI__builtin_reduce_and:
4523 case Builtin::BI__builtin_reduce_or:
4524 case Builtin::BI__builtin_reduce_xor:
4525 case Builtin::BI__builtin_reduce_min:
4526 case Builtin::BI__builtin_reduce_max:
4527 return interp__builtin_vector_reduce(S, OpPC, Call, BuiltinID);
4528
4529 case Builtin::BI__builtin_elementwise_popcount:
4531 S, OpPC, Call, [](const APSInt &Src) {
4532 return APInt(Src.getBitWidth(), Src.popcount());
4533 });
4534 case Builtin::BI__builtin_elementwise_bitreverse:
4536 S, OpPC, Call, [](const APSInt &Src) { return Src.reverseBits(); });
4537
4538 case Builtin::BI__builtin_elementwise_abs:
4539 return interp__builtin_elementwise_abs(S, OpPC, Frame, Call, BuiltinID);
4540
4541 case Builtin::BI__builtin_memcpy:
4542 case Builtin::BImemcpy:
4543 case Builtin::BI__builtin_wmemcpy:
4544 case Builtin::BIwmemcpy:
4545 case Builtin::BI__builtin_memmove:
4546 case Builtin::BImemmove:
4547 case Builtin::BI__builtin_wmemmove:
4548 case Builtin::BIwmemmove:
4549 return interp__builtin_memcpy(S, OpPC, Frame, Call, BuiltinID);
4550
4551 case Builtin::BI__builtin_memcmp:
4552 case Builtin::BImemcmp:
4553 case Builtin::BI__builtin_bcmp:
4554 case Builtin::BIbcmp:
4555 case Builtin::BI__builtin_wmemcmp:
4556 case Builtin::BIwmemcmp:
4557 return interp__builtin_memcmp(S, OpPC, Frame, Call, BuiltinID);
4558
4559 case Builtin::BImemchr:
4560 case Builtin::BI__builtin_memchr:
4561 case Builtin::BIstrchr:
4562 case Builtin::BI__builtin_strchr:
4563 case Builtin::BIwmemchr:
4564 case Builtin::BI__builtin_wmemchr:
4565 case Builtin::BIwcschr:
4566 case Builtin::BI__builtin_wcschr:
4567 case Builtin::BI__builtin_char_memchr:
4568 return interp__builtin_memchr(S, OpPC, Call, BuiltinID);
4569
4570 case Builtin::BI__builtin_object_size:
4571 case Builtin::BI__builtin_dynamic_object_size:
4572 return interp__builtin_object_size(S, OpPC, Frame, Call);
4573
4574 case Builtin::BI__builtin_is_within_lifetime:
4576
4577 case Builtin::BI__builtin_elementwise_add_sat:
4579 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4580 return LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS);
4581 });
4582
4583 case Builtin::BI__builtin_elementwise_sub_sat:
4585 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4586 return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
4587 });
4588 case X86::BI__builtin_ia32_extract128i256:
4589 case X86::BI__builtin_ia32_vextractf128_pd256:
4590 case X86::BI__builtin_ia32_vextractf128_ps256:
4591 case X86::BI__builtin_ia32_vextractf128_si256:
4592 return interp__builtin_x86_extract_vector(S, OpPC, Call, BuiltinID);
4593
4594 case X86::BI__builtin_ia32_extractf32x4_256_mask:
4595 case X86::BI__builtin_ia32_extractf32x4_mask:
4596 case X86::BI__builtin_ia32_extractf32x8_mask:
4597 case X86::BI__builtin_ia32_extractf64x2_256_mask:
4598 case X86::BI__builtin_ia32_extractf64x2_512_mask:
4599 case X86::BI__builtin_ia32_extractf64x4_mask:
4600 case X86::BI__builtin_ia32_extracti32x4_256_mask:
4601 case X86::BI__builtin_ia32_extracti32x4_mask:
4602 case X86::BI__builtin_ia32_extracti32x8_mask:
4603 case X86::BI__builtin_ia32_extracti64x2_256_mask:
4604 case X86::BI__builtin_ia32_extracti64x2_512_mask:
4605 case X86::BI__builtin_ia32_extracti64x4_mask:
4606 return interp__builtin_x86_extract_vector_masked(S, OpPC, Call, BuiltinID);
4607
4608 case clang::X86::BI__builtin_ia32_pmulhrsw128:
4609 case clang::X86::BI__builtin_ia32_pmulhrsw256:
4610 case clang::X86::BI__builtin_ia32_pmulhrsw512:
4612 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4613 return (llvm::APIntOps::mulsExtended(LHS, RHS).ashr(14) + 1)
4614 .extractBits(16, 1);
4615 });
4616
4617 case clang::X86::BI__builtin_ia32_movmskps:
4618 case clang::X86::BI__builtin_ia32_movmskpd:
4619 case clang::X86::BI__builtin_ia32_pmovmskb128:
4620 case clang::X86::BI__builtin_ia32_pmovmskb256:
4621 case clang::X86::BI__builtin_ia32_movmskps256:
4622 case clang::X86::BI__builtin_ia32_movmskpd256: {
4623 return interp__builtin_ia32_movmsk_op(S, OpPC, Call);
4624 }
4625
4626 case X86::BI__builtin_ia32_psignb128:
4627 case X86::BI__builtin_ia32_psignb256:
4628 case X86::BI__builtin_ia32_psignw128:
4629 case X86::BI__builtin_ia32_psignw256:
4630 case X86::BI__builtin_ia32_psignd128:
4631 case X86::BI__builtin_ia32_psignd256:
4633 S, OpPC, Call, [](const APInt &AElem, const APInt &BElem) {
4634 if (BElem.isZero())
4635 return APInt::getZero(AElem.getBitWidth());
4636 if (BElem.isNegative())
4637 return -AElem;
4638 return AElem;
4639 });
4640
4641 case clang::X86::BI__builtin_ia32_pavgb128:
4642 case clang::X86::BI__builtin_ia32_pavgw128:
4643 case clang::X86::BI__builtin_ia32_pavgb256:
4644 case clang::X86::BI__builtin_ia32_pavgw256:
4645 case clang::X86::BI__builtin_ia32_pavgb512:
4646 case clang::X86::BI__builtin_ia32_pavgw512:
4648 llvm::APIntOps::avgCeilU);
4649
4650 case clang::X86::BI__builtin_ia32_pmaddubsw128:
4651 case clang::X86::BI__builtin_ia32_pmaddubsw256:
4652 case clang::X86::BI__builtin_ia32_pmaddubsw512:
4654 S, OpPC, Call,
4655 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4656 const APSInt &HiRHS) {
4657 unsigned BitWidth = 2 * LoLHS.getBitWidth();
4658 return (LoLHS.zext(BitWidth) * LoRHS.sext(BitWidth))
4659 .sadd_sat((HiLHS.zext(BitWidth) * HiRHS.sext(BitWidth)));
4660 });
4661
4662 case clang::X86::BI__builtin_ia32_pmaddwd128:
4663 case clang::X86::BI__builtin_ia32_pmaddwd256:
4664 case clang::X86::BI__builtin_ia32_pmaddwd512:
4666 S, OpPC, Call,
4667 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4668 const APSInt &HiRHS) {
4669 unsigned BitWidth = 2 * LoLHS.getBitWidth();
4670 return (LoLHS.sext(BitWidth) * LoRHS.sext(BitWidth)) +
4671 (HiLHS.sext(BitWidth) * HiRHS.sext(BitWidth));
4672 });
4673
4674 case clang::X86::BI__builtin_ia32_pmulhuw128:
4675 case clang::X86::BI__builtin_ia32_pmulhuw256:
4676 case clang::X86::BI__builtin_ia32_pmulhuw512:
4678 llvm::APIntOps::mulhu);
4679
4680 case clang::X86::BI__builtin_ia32_pmulhw128:
4681 case clang::X86::BI__builtin_ia32_pmulhw256:
4682 case clang::X86::BI__builtin_ia32_pmulhw512:
4684 llvm::APIntOps::mulhs);
4685
4686 case clang::X86::BI__builtin_ia32_psllv2di:
4687 case clang::X86::BI__builtin_ia32_psllv4di:
4688 case clang::X86::BI__builtin_ia32_psllv4si:
4689 case clang::X86::BI__builtin_ia32_psllv8di:
4690 case clang::X86::BI__builtin_ia32_psllv8hi:
4691 case clang::X86::BI__builtin_ia32_psllv8si:
4692 case clang::X86::BI__builtin_ia32_psllv16hi:
4693 case clang::X86::BI__builtin_ia32_psllv16si:
4694 case clang::X86::BI__builtin_ia32_psllv32hi:
4695 case clang::X86::BI__builtin_ia32_psllwi128:
4696 case clang::X86::BI__builtin_ia32_psllwi256:
4697 case clang::X86::BI__builtin_ia32_psllwi512:
4698 case clang::X86::BI__builtin_ia32_pslldi128:
4699 case clang::X86::BI__builtin_ia32_pslldi256:
4700 case clang::X86::BI__builtin_ia32_pslldi512:
4701 case clang::X86::BI__builtin_ia32_psllqi128:
4702 case clang::X86::BI__builtin_ia32_psllqi256:
4703 case clang::X86::BI__builtin_ia32_psllqi512:
4705 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4706 if (RHS.uge(LHS.getBitWidth())) {
4707 return APInt::getZero(LHS.getBitWidth());
4708 }
4709 return LHS.shl(RHS.getZExtValue());
4710 });
4711
4712 case clang::X86::BI__builtin_ia32_psrav4si:
4713 case clang::X86::BI__builtin_ia32_psrav8di:
4714 case clang::X86::BI__builtin_ia32_psrav8hi:
4715 case clang::X86::BI__builtin_ia32_psrav8si:
4716 case clang::X86::BI__builtin_ia32_psrav16hi:
4717 case clang::X86::BI__builtin_ia32_psrav16si:
4718 case clang::X86::BI__builtin_ia32_psrav32hi:
4719 case clang::X86::BI__builtin_ia32_psravq128:
4720 case clang::X86::BI__builtin_ia32_psravq256:
4721 case clang::X86::BI__builtin_ia32_psrawi128:
4722 case clang::X86::BI__builtin_ia32_psrawi256:
4723 case clang::X86::BI__builtin_ia32_psrawi512:
4724 case clang::X86::BI__builtin_ia32_psradi128:
4725 case clang::X86::BI__builtin_ia32_psradi256:
4726 case clang::X86::BI__builtin_ia32_psradi512:
4727 case clang::X86::BI__builtin_ia32_psraqi128:
4728 case clang::X86::BI__builtin_ia32_psraqi256:
4729 case clang::X86::BI__builtin_ia32_psraqi512:
4731 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4732 if (RHS.uge(LHS.getBitWidth())) {
4733 return LHS.ashr(LHS.getBitWidth() - 1);
4734 }
4735 return LHS.ashr(RHS.getZExtValue());
4736 });
4737
4738 case clang::X86::BI__builtin_ia32_psrlv2di:
4739 case clang::X86::BI__builtin_ia32_psrlv4di:
4740 case clang::X86::BI__builtin_ia32_psrlv4si:
4741 case clang::X86::BI__builtin_ia32_psrlv8di:
4742 case clang::X86::BI__builtin_ia32_psrlv8hi:
4743 case clang::X86::BI__builtin_ia32_psrlv8si:
4744 case clang::X86::BI__builtin_ia32_psrlv16hi:
4745 case clang::X86::BI__builtin_ia32_psrlv16si:
4746 case clang::X86::BI__builtin_ia32_psrlv32hi:
4747 case clang::X86::BI__builtin_ia32_psrlwi128:
4748 case clang::X86::BI__builtin_ia32_psrlwi256:
4749 case clang::X86::BI__builtin_ia32_psrlwi512:
4750 case clang::X86::BI__builtin_ia32_psrldi128:
4751 case clang::X86::BI__builtin_ia32_psrldi256:
4752 case clang::X86::BI__builtin_ia32_psrldi512:
4753 case clang::X86::BI__builtin_ia32_psrlqi128:
4754 case clang::X86::BI__builtin_ia32_psrlqi256:
4755 case clang::X86::BI__builtin_ia32_psrlqi512:
4757 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4758 if (RHS.uge(LHS.getBitWidth())) {
4759 return APInt::getZero(LHS.getBitWidth());
4760 }
4761 return LHS.lshr(RHS.getZExtValue());
4762 });
4763 case clang::X86::BI__builtin_ia32_packsswb128:
4764 case clang::X86::BI__builtin_ia32_packsswb256:
4765 case clang::X86::BI__builtin_ia32_packsswb512:
4766 case clang::X86::BI__builtin_ia32_packssdw128:
4767 case clang::X86::BI__builtin_ia32_packssdw256:
4768 case clang::X86::BI__builtin_ia32_packssdw512:
4769 return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) {
4770 return APInt(Src).truncSSat(Src.getBitWidth() / 2);
4771 });
4772 case clang::X86::BI__builtin_ia32_packusdw128:
4773 case clang::X86::BI__builtin_ia32_packusdw256:
4774 case clang::X86::BI__builtin_ia32_packusdw512:
4775 case clang::X86::BI__builtin_ia32_packuswb128:
4776 case clang::X86::BI__builtin_ia32_packuswb256:
4777 case clang::X86::BI__builtin_ia32_packuswb512:
4778 return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) {
4779 unsigned DstBits = Src.getBitWidth() / 2;
4780 if (Src.isNegative())
4781 return APInt::getZero(DstBits);
4782 if (Src.isIntN(DstBits))
4783 return APInt(Src).trunc(DstBits);
4784 return APInt::getAllOnes(DstBits);
4785 });
4786
4787 case clang::X86::BI__builtin_ia32_selectss_128:
4788 case clang::X86::BI__builtin_ia32_selectsd_128:
4789 case clang::X86::BI__builtin_ia32_selectsh_128:
4790 case clang::X86::BI__builtin_ia32_selectsbf_128:
4792 case clang::X86::BI__builtin_ia32_vprotbi:
4793 case clang::X86::BI__builtin_ia32_vprotdi:
4794 case clang::X86::BI__builtin_ia32_vprotqi:
4795 case clang::X86::BI__builtin_ia32_vprotwi:
4796 case clang::X86::BI__builtin_ia32_prold128:
4797 case clang::X86::BI__builtin_ia32_prold256:
4798 case clang::X86::BI__builtin_ia32_prold512:
4799 case clang::X86::BI__builtin_ia32_prolq128:
4800 case clang::X86::BI__builtin_ia32_prolq256:
4801 case clang::X86::BI__builtin_ia32_prolq512:
4803 S, OpPC, Call,
4804 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotl(RHS); });
4805
4806 case clang::X86::BI__builtin_ia32_prord128:
4807 case clang::X86::BI__builtin_ia32_prord256:
4808 case clang::X86::BI__builtin_ia32_prord512:
4809 case clang::X86::BI__builtin_ia32_prorq128:
4810 case clang::X86::BI__builtin_ia32_prorq256:
4811 case clang::X86::BI__builtin_ia32_prorq512:
4813 S, OpPC, Call,
4814 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotr(RHS); });
4815
4816 case Builtin::BI__builtin_elementwise_max:
4817 case Builtin::BI__builtin_elementwise_min:
4818 return interp__builtin_elementwise_maxmin(S, OpPC, Call, BuiltinID);
4819
4820 case clang::X86::BI__builtin_ia32_phaddw128:
4821 case clang::X86::BI__builtin_ia32_phaddw256:
4822 case clang::X86::BI__builtin_ia32_phaddd128:
4823 case clang::X86::BI__builtin_ia32_phaddd256:
4825 S, OpPC, Call,
4826 [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
4827 case clang::X86::BI__builtin_ia32_phaddsw128:
4828 case clang::X86::BI__builtin_ia32_phaddsw256:
4830 S, OpPC, Call,
4831 [](const APSInt &LHS, const APSInt &RHS) { return LHS.sadd_sat(RHS); });
4832 case clang::X86::BI__builtin_ia32_phsubw128:
4833 case clang::X86::BI__builtin_ia32_phsubw256:
4834 case clang::X86::BI__builtin_ia32_phsubd128:
4835 case clang::X86::BI__builtin_ia32_phsubd256:
4837 S, OpPC, Call,
4838 [](const APSInt &LHS, const APSInt &RHS) { return LHS - RHS; });
4839 case clang::X86::BI__builtin_ia32_phsubsw128:
4840 case clang::X86::BI__builtin_ia32_phsubsw256:
4842 S, OpPC, Call,
4843 [](const APSInt &LHS, const APSInt &RHS) { return LHS.ssub_sat(RHS); });
4844 case clang::X86::BI__builtin_ia32_haddpd:
4845 case clang::X86::BI__builtin_ia32_haddps:
4846 case clang::X86::BI__builtin_ia32_haddpd256:
4847 case clang::X86::BI__builtin_ia32_haddps256:
4849 S, OpPC, Call,
4850 [](const APFloat &LHS, const APFloat &RHS, llvm::RoundingMode RM) {
4851 APFloat F = LHS;
4852 F.add(RHS, RM);
4853 return F;
4854 });
4855 case clang::X86::BI__builtin_ia32_hsubpd:
4856 case clang::X86::BI__builtin_ia32_hsubps:
4857 case clang::X86::BI__builtin_ia32_hsubpd256:
4858 case clang::X86::BI__builtin_ia32_hsubps256:
4860 S, OpPC, Call,
4861 [](const APFloat &LHS, const APFloat &RHS, llvm::RoundingMode RM) {
4862 APFloat F = LHS;
4863 F.subtract(RHS, RM);
4864 return F;
4865 });
4866 case clang::X86::BI__builtin_ia32_addsubpd:
4867 case clang::X86::BI__builtin_ia32_addsubps:
4868 case clang::X86::BI__builtin_ia32_addsubpd256:
4869 case clang::X86::BI__builtin_ia32_addsubps256:
4870 return interp__builtin_ia32_addsub(S, OpPC, Call);
4871
4872 case clang::X86::BI__builtin_ia32_pmuldq128:
4873 case clang::X86::BI__builtin_ia32_pmuldq256:
4874 case clang::X86::BI__builtin_ia32_pmuldq512:
4876 S, OpPC, Call,
4877 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4878 const APSInt &HiRHS) {
4879 return llvm::APIntOps::mulsExtended(LoLHS, LoRHS);
4880 });
4881
4882 case clang::X86::BI__builtin_ia32_pmuludq128:
4883 case clang::X86::BI__builtin_ia32_pmuludq256:
4884 case clang::X86::BI__builtin_ia32_pmuludq512:
4886 S, OpPC, Call,
4887 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4888 const APSInt &HiRHS) {
4889 return llvm::APIntOps::muluExtended(LoLHS, LoRHS);
4890 });
4891
4892 case clang::X86::BI__builtin_ia32_pclmulqdq128:
4893 case clang::X86::BI__builtin_ia32_pclmulqdq256:
4894 case clang::X86::BI__builtin_ia32_pclmulqdq512:
4895 return interp__builtin_ia32_pclmulqdq(S, OpPC, Call);
4896
4897 case Builtin::BI__builtin_elementwise_fma:
4899 S, OpPC, Call,
4900 [](const APFloat &X, const APFloat &Y, const APFloat &Z,
4901 llvm::RoundingMode RM) {
4902 APFloat F = X;
4903 F.fusedMultiplyAdd(Y, Z, RM);
4904 return F;
4905 });
4906
4907 case X86::BI__builtin_ia32_vpmadd52luq128:
4908 case X86::BI__builtin_ia32_vpmadd52luq256:
4909 case X86::BI__builtin_ia32_vpmadd52luq512:
4911 S, OpPC, Call, [](const APSInt &A, const APSInt &B, const APSInt &C) {
4912 return A + (B.trunc(52) * C.trunc(52)).zext(64);
4913 });
4914 case X86::BI__builtin_ia32_vpmadd52huq128:
4915 case X86::BI__builtin_ia32_vpmadd52huq256:
4916 case X86::BI__builtin_ia32_vpmadd52huq512:
4918 S, OpPC, Call, [](const APSInt &A, const APSInt &B, const APSInt &C) {
4919 return A + llvm::APIntOps::mulhu(B.trunc(52), C.trunc(52)).zext(64);
4920 });
4921
4922 case X86::BI__builtin_ia32_vpshldd128:
4923 case X86::BI__builtin_ia32_vpshldd256:
4924 case X86::BI__builtin_ia32_vpshldd512:
4925 case X86::BI__builtin_ia32_vpshldq128:
4926 case X86::BI__builtin_ia32_vpshldq256:
4927 case X86::BI__builtin_ia32_vpshldq512:
4928 case X86::BI__builtin_ia32_vpshldw128:
4929 case X86::BI__builtin_ia32_vpshldw256:
4930 case X86::BI__builtin_ia32_vpshldw512:
4932 S, OpPC, Call,
4933 [](const APSInt &Hi, const APSInt &Lo, const APSInt &Amt) {
4934 return llvm::APIntOps::fshl(Hi, Lo, Amt);
4935 });
4936
4937 case X86::BI__builtin_ia32_vpshrdd128:
4938 case X86::BI__builtin_ia32_vpshrdd256:
4939 case X86::BI__builtin_ia32_vpshrdd512:
4940 case X86::BI__builtin_ia32_vpshrdq128:
4941 case X86::BI__builtin_ia32_vpshrdq256:
4942 case X86::BI__builtin_ia32_vpshrdq512:
4943 case X86::BI__builtin_ia32_vpshrdw128:
4944 case X86::BI__builtin_ia32_vpshrdw256:
4945 case X86::BI__builtin_ia32_vpshrdw512:
4946 // NOTE: Reversed Hi/Lo operands.
4948 S, OpPC, Call,
4949 [](const APSInt &Lo, const APSInt &Hi, const APSInt &Amt) {
4950 return llvm::APIntOps::fshr(Hi, Lo, Amt);
4951 });
4952 case X86::BI__builtin_ia32_vpconflictsi_128:
4953 case X86::BI__builtin_ia32_vpconflictsi_256:
4954 case X86::BI__builtin_ia32_vpconflictsi_512:
4955 case X86::BI__builtin_ia32_vpconflictdi_128:
4956 case X86::BI__builtin_ia32_vpconflictdi_256:
4957 case X86::BI__builtin_ia32_vpconflictdi_512:
4958 return interp__builtin_ia32_vpconflict(S, OpPC, Call);
4959 case clang::X86::BI__builtin_ia32_blendpd:
4960 case clang::X86::BI__builtin_ia32_blendpd256:
4961 case clang::X86::BI__builtin_ia32_blendps:
4962 case clang::X86::BI__builtin_ia32_blendps256:
4963 case clang::X86::BI__builtin_ia32_pblendw128:
4964 case clang::X86::BI__builtin_ia32_pblendw256:
4965 case clang::X86::BI__builtin_ia32_pblendd128:
4966 case clang::X86::BI__builtin_ia32_pblendd256:
4968 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4969 // Bit index for mask.
4970 unsigned MaskBit = (ShuffleMask >> (DstIdx % 8)) & 0x1;
4971 unsigned SrcVecIdx = MaskBit ? 1 : 0; // 1 = TrueVec, 0 = FalseVec
4972 return std::pair<unsigned, int>{SrcVecIdx, static_cast<int>(DstIdx)};
4973 });
4974
4975
4976
4977 case clang::X86::BI__builtin_ia32_blendvpd:
4978 case clang::X86::BI__builtin_ia32_blendvpd256:
4979 case clang::X86::BI__builtin_ia32_blendvps:
4980 case clang::X86::BI__builtin_ia32_blendvps256:
4982 S, OpPC, Call,
4983 [](const APFloat &F, const APFloat &T, const APFloat &C,
4984 llvm::RoundingMode) { return C.isNegative() ? T : F; });
4985
4986 case clang::X86::BI__builtin_ia32_pblendvb128:
4987 case clang::X86::BI__builtin_ia32_pblendvb256:
4989 S, OpPC, Call, [](const APSInt &F, const APSInt &T, const APSInt &C) {
4990 return ((APInt)C).isNegative() ? T : F;
4991 });
4992 case X86::BI__builtin_ia32_ptestz128:
4993 case X86::BI__builtin_ia32_ptestz256:
4994 case X86::BI__builtin_ia32_vtestzps:
4995 case X86::BI__builtin_ia32_vtestzps256:
4996 case X86::BI__builtin_ia32_vtestzpd:
4997 case X86::BI__builtin_ia32_vtestzpd256:
4999 S, OpPC, Call,
5000 [](const APInt &A, const APInt &B) { return (A & B) == 0; });
5001 case X86::BI__builtin_ia32_ptestc128:
5002 case X86::BI__builtin_ia32_ptestc256:
5003 case X86::BI__builtin_ia32_vtestcps:
5004 case X86::BI__builtin_ia32_vtestcps256:
5005 case X86::BI__builtin_ia32_vtestcpd:
5006 case X86::BI__builtin_ia32_vtestcpd256:
5008 S, OpPC, Call,
5009 [](const APInt &A, const APInt &B) { return (~A & B) == 0; });
5010 case X86::BI__builtin_ia32_ptestnzc128:
5011 case X86::BI__builtin_ia32_ptestnzc256:
5012 case X86::BI__builtin_ia32_vtestnzcps:
5013 case X86::BI__builtin_ia32_vtestnzcps256:
5014 case X86::BI__builtin_ia32_vtestnzcpd:
5015 case X86::BI__builtin_ia32_vtestnzcpd256:
5017 S, OpPC, Call, [](const APInt &A, const APInt &B) {
5018 return ((A & B) != 0) && ((~A & B) != 0);
5019 });
5020 case X86::BI__builtin_ia32_selectb_128:
5021 case X86::BI__builtin_ia32_selectb_256:
5022 case X86::BI__builtin_ia32_selectb_512:
5023 case X86::BI__builtin_ia32_selectw_128:
5024 case X86::BI__builtin_ia32_selectw_256:
5025 case X86::BI__builtin_ia32_selectw_512:
5026 case X86::BI__builtin_ia32_selectd_128:
5027 case X86::BI__builtin_ia32_selectd_256:
5028 case X86::BI__builtin_ia32_selectd_512:
5029 case X86::BI__builtin_ia32_selectq_128:
5030 case X86::BI__builtin_ia32_selectq_256:
5031 case X86::BI__builtin_ia32_selectq_512:
5032 case X86::BI__builtin_ia32_selectph_128:
5033 case X86::BI__builtin_ia32_selectph_256:
5034 case X86::BI__builtin_ia32_selectph_512:
5035 case X86::BI__builtin_ia32_selectpbf_128:
5036 case X86::BI__builtin_ia32_selectpbf_256:
5037 case X86::BI__builtin_ia32_selectpbf_512:
5038 case X86::BI__builtin_ia32_selectps_128:
5039 case X86::BI__builtin_ia32_selectps_256:
5040 case X86::BI__builtin_ia32_selectps_512:
5041 case X86::BI__builtin_ia32_selectpd_128:
5042 case X86::BI__builtin_ia32_selectpd_256:
5043 case X86::BI__builtin_ia32_selectpd_512:
5044 return interp__builtin_select(S, OpPC, Call);
5045
5046 case X86::BI__builtin_ia32_shufps:
5047 case X86::BI__builtin_ia32_shufps256:
5048 case X86::BI__builtin_ia32_shufps512:
5050 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5051 unsigned NumElemPerLane = 4;
5052 unsigned NumSelectableElems = NumElemPerLane / 2;
5053 unsigned BitsPerElem = 2;
5054 unsigned IndexMask = 0x3;
5055 unsigned MaskBits = 8;
5056 unsigned Lane = DstIdx / NumElemPerLane;
5057 unsigned ElemInLane = DstIdx % NumElemPerLane;
5058 unsigned LaneOffset = Lane * NumElemPerLane;
5059 unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
5060 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
5061 unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
5062 return std::pair<unsigned, int>{SrcIdx,
5063 static_cast<int>(LaneOffset + Index)};
5064 });
5065 case X86::BI__builtin_ia32_shufpd:
5066 case X86::BI__builtin_ia32_shufpd256:
5067 case X86::BI__builtin_ia32_shufpd512:
5069 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5070 unsigned NumElemPerLane = 2;
5071 unsigned NumSelectableElems = NumElemPerLane / 2;
5072 unsigned BitsPerElem = 1;
5073 unsigned IndexMask = 0x1;
5074 unsigned MaskBits = 8;
5075 unsigned Lane = DstIdx / NumElemPerLane;
5076 unsigned ElemInLane = DstIdx % NumElemPerLane;
5077 unsigned LaneOffset = Lane * NumElemPerLane;
5078 unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
5079 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
5080 unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
5081 return std::pair<unsigned, int>{SrcIdx,
5082 static_cast<int>(LaneOffset + Index)};
5083 });
5084
5085 case X86::BI__builtin_ia32_vgf2p8affineinvqb_v16qi:
5086 case X86::BI__builtin_ia32_vgf2p8affineinvqb_v32qi:
5087 case X86::BI__builtin_ia32_vgf2p8affineinvqb_v64qi:
5088 return interp_builtin_ia32_gfni_affine(S, OpPC, Call, true);
5089 case X86::BI__builtin_ia32_vgf2p8affineqb_v16qi:
5090 case X86::BI__builtin_ia32_vgf2p8affineqb_v32qi:
5091 case X86::BI__builtin_ia32_vgf2p8affineqb_v64qi:
5092 return interp_builtin_ia32_gfni_affine(S, OpPC, Call, false);
5093
5094 case X86::BI__builtin_ia32_vgf2p8mulb_v16qi:
5095 case X86::BI__builtin_ia32_vgf2p8mulb_v32qi:
5096 case X86::BI__builtin_ia32_vgf2p8mulb_v64qi:
5097 return interp__builtin_ia32_gfni_mul(S, OpPC, Call);
5098
5099 case X86::BI__builtin_ia32_insertps128:
5101 S, OpPC, Call, [](unsigned DstIdx, unsigned Mask) {
5102 // Bits [3:0]: zero mask - if bit is set, zero this element
5103 if ((Mask & (1 << DstIdx)) != 0) {
5104 return std::pair<unsigned, int>{0, -1};
5105 }
5106 // Bits [7:6]: select element from source vector Y (0-3)
5107 // Bits [5:4]: select destination position (0-3)
5108 unsigned SrcElem = (Mask >> 6) & 0x3;
5109 unsigned DstElem = (Mask >> 4) & 0x3;
5110 if (DstIdx == DstElem) {
5111 // Insert element from source vector (B) at this position
5112 return std::pair<unsigned, int>{1, static_cast<int>(SrcElem)};
5113 } else {
5114 // Copy from destination vector (A)
5115 return std::pair<unsigned, int>{0, static_cast<int>(DstIdx)};
5116 }
5117 });
5118 case X86::BI__builtin_ia32_permvarsi256:
5119 case X86::BI__builtin_ia32_permvarsf256:
5120 case X86::BI__builtin_ia32_permvardf512:
5121 case X86::BI__builtin_ia32_permvardi512:
5122 case X86::BI__builtin_ia32_permvarhi128:
5124 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5125 int Offset = ShuffleMask & 0x7;
5126 return std::pair<unsigned, int>{0, Offset};
5127 });
5128 case X86::BI__builtin_ia32_permvarqi128:
5129 case X86::BI__builtin_ia32_permvarhi256:
5130 case X86::BI__builtin_ia32_permvarsi512:
5131 case X86::BI__builtin_ia32_permvarsf512:
5133 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5134 int Offset = ShuffleMask & 0xF;
5135 return std::pair<unsigned, int>{0, Offset};
5136 });
5137 case X86::BI__builtin_ia32_permvardi256:
5138 case X86::BI__builtin_ia32_permvardf256:
5140 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5141 int Offset = ShuffleMask & 0x3;
5142 return std::pair<unsigned, int>{0, Offset};
5143 });
5144 case X86::BI__builtin_ia32_permvarqi256:
5145 case X86::BI__builtin_ia32_permvarhi512:
5147 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5148 int Offset = ShuffleMask & 0x1F;
5149 return std::pair<unsigned, int>{0, Offset};
5150 });
5151 case X86::BI__builtin_ia32_permvarqi512:
5153 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5154 int Offset = ShuffleMask & 0x3F;
5155 return std::pair<unsigned, int>{0, Offset};
5156 });
5157 case X86::BI__builtin_ia32_vpermi2varq128:
5158 case X86::BI__builtin_ia32_vpermi2varpd128:
5160 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5161 int Offset = ShuffleMask & 0x1;
5162 unsigned SrcIdx = (ShuffleMask >> 1) & 0x1;
5163 return std::pair<unsigned, int>{SrcIdx, Offset};
5164 });
5165 case X86::BI__builtin_ia32_vpermi2vard128:
5166 case X86::BI__builtin_ia32_vpermi2varps128:
5167 case X86::BI__builtin_ia32_vpermi2varq256:
5168 case X86::BI__builtin_ia32_vpermi2varpd256:
5170 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5171 int Offset = ShuffleMask & 0x3;
5172 unsigned SrcIdx = (ShuffleMask >> 2) & 0x1;
5173 return std::pair<unsigned, int>{SrcIdx, Offset};
5174 });
5175 case X86::BI__builtin_ia32_vpermi2varhi128:
5176 case X86::BI__builtin_ia32_vpermi2vard256:
5177 case X86::BI__builtin_ia32_vpermi2varps256:
5178 case X86::BI__builtin_ia32_vpermi2varq512:
5179 case X86::BI__builtin_ia32_vpermi2varpd512:
5181 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5182 int Offset = ShuffleMask & 0x7;
5183 unsigned SrcIdx = (ShuffleMask >> 3) & 0x1;
5184 return std::pair<unsigned, int>{SrcIdx, Offset};
5185 });
5186 case X86::BI__builtin_ia32_vpermi2varqi128:
5187 case X86::BI__builtin_ia32_vpermi2varhi256:
5188 case X86::BI__builtin_ia32_vpermi2vard512:
5189 case X86::BI__builtin_ia32_vpermi2varps512:
5191 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5192 int Offset = ShuffleMask & 0xF;
5193 unsigned SrcIdx = (ShuffleMask >> 4) & 0x1;
5194 return std::pair<unsigned, int>{SrcIdx, Offset};
5195 });
5196 case X86::BI__builtin_ia32_vpermi2varqi256:
5197 case X86::BI__builtin_ia32_vpermi2varhi512:
5199 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5200 int Offset = ShuffleMask & 0x1F;
5201 unsigned SrcIdx = (ShuffleMask >> 5) & 0x1;
5202 return std::pair<unsigned, int>{SrcIdx, Offset};
5203 });
5204 case X86::BI__builtin_ia32_vpermi2varqi512:
5206 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5207 int Offset = ShuffleMask & 0x3F;
5208 unsigned SrcIdx = (ShuffleMask >> 6) & 0x1;
5209 return std::pair<unsigned, int>{SrcIdx, Offset};
5210 });
5211 case X86::BI__builtin_ia32_vperm2f128_pd256:
5212 case X86::BI__builtin_ia32_vperm2f128_ps256:
5213 case X86::BI__builtin_ia32_vperm2f128_si256:
5214 case X86::BI__builtin_ia32_permti256: {
5215 unsigned NumElements =
5216 Call->getArg(0)->getType()->castAs<VectorType>()->getNumElements();
5217 unsigned PreservedBitsCnt = NumElements >> 2;
5219 S, OpPC, Call,
5220 [PreservedBitsCnt](unsigned DstIdx, unsigned ShuffleMask) {
5221 unsigned ControlBitsCnt = DstIdx >> PreservedBitsCnt << 2;
5222 unsigned ControlBits = ShuffleMask >> ControlBitsCnt;
5223
5224 if (ControlBits & 0b1000)
5225 return std::make_pair(0u, -1);
5226
5227 unsigned SrcVecIdx = (ControlBits & 0b10) >> 1;
5228 unsigned PreservedBitsMask = (1 << PreservedBitsCnt) - 1;
5229 int SrcIdx = ((ControlBits & 0b1) << PreservedBitsCnt) |
5230 (DstIdx & PreservedBitsMask);
5231 return std::make_pair(SrcVecIdx, SrcIdx);
5232 });
5233 }
5234 case X86::BI__builtin_ia32_pshufb128:
5235 case X86::BI__builtin_ia32_pshufb256:
5236 case X86::BI__builtin_ia32_pshufb512:
5238 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5239 uint8_t Ctlb = static_cast<uint8_t>(ShuffleMask);
5240 if (Ctlb & 0x80)
5241 return std::make_pair(0, -1);
5242
5243 unsigned LaneBase = (DstIdx / 16) * 16;
5244 unsigned SrcOffset = Ctlb & 0x0F;
5245 unsigned SrcIdx = LaneBase + SrcOffset;
5246 return std::make_pair(0, static_cast<int>(SrcIdx));
5247 });
5248
5249 case X86::BI__builtin_ia32_pshuflw:
5250 case X86::BI__builtin_ia32_pshuflw256:
5251 case X86::BI__builtin_ia32_pshuflw512:
5253 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5254 unsigned LaneBase = (DstIdx / 8) * 8;
5255 unsigned LaneIdx = DstIdx % 8;
5256 if (LaneIdx < 4) {
5257 unsigned Sel = (ShuffleMask >> (2 * LaneIdx)) & 0x3;
5258 return std::make_pair(0, static_cast<int>(LaneBase + Sel));
5259 }
5260
5261 return std::make_pair(0, static_cast<int>(DstIdx));
5262 });
5263
5264 case X86::BI__builtin_ia32_pshufhw:
5265 case X86::BI__builtin_ia32_pshufhw256:
5266 case X86::BI__builtin_ia32_pshufhw512:
5268 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5269 unsigned LaneBase = (DstIdx / 8) * 8;
5270 unsigned LaneIdx = DstIdx % 8;
5271 if (LaneIdx >= 4) {
5272 unsigned Sel = (ShuffleMask >> (2 * (LaneIdx - 4))) & 0x3;
5273 return std::make_pair(0, static_cast<int>(LaneBase + 4 + Sel));
5274 }
5275
5276 return std::make_pair(0, static_cast<int>(DstIdx));
5277 });
5278
5279 case X86::BI__builtin_ia32_pshufd:
5280 case X86::BI__builtin_ia32_pshufd256:
5281 case X86::BI__builtin_ia32_pshufd512:
5282 case X86::BI__builtin_ia32_vpermilps:
5283 case X86::BI__builtin_ia32_vpermilps256:
5284 case X86::BI__builtin_ia32_vpermilps512:
5286 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5287 unsigned LaneBase = (DstIdx / 4) * 4;
5288 unsigned LaneIdx = DstIdx % 4;
5289 unsigned Sel = (ShuffleMask >> (2 * LaneIdx)) & 0x3;
5290 return std::make_pair(0, static_cast<int>(LaneBase + Sel));
5291 });
5292
5293 case X86::BI__builtin_ia32_vpermilvarpd:
5294 case X86::BI__builtin_ia32_vpermilvarpd256:
5295 case X86::BI__builtin_ia32_vpermilvarpd512:
5297 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5298 unsigned NumElemPerLane = 2;
5299 unsigned Lane = DstIdx / NumElemPerLane;
5300 unsigned Offset = ShuffleMask & 0b10 ? 1 : 0;
5301 return std::make_pair(
5302 0, static_cast<int>(Lane * NumElemPerLane + Offset));
5303 });
5304
5305 case X86::BI__builtin_ia32_vpermilvarps:
5306 case X86::BI__builtin_ia32_vpermilvarps256:
5307 case X86::BI__builtin_ia32_vpermilvarps512:
5309 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5310 unsigned NumElemPerLane = 4;
5311 unsigned Lane = DstIdx / NumElemPerLane;
5312 unsigned Offset = ShuffleMask & 0b11;
5313 return std::make_pair(
5314 0, static_cast<int>(Lane * NumElemPerLane + Offset));
5315 });
5316
5317 case X86::BI__builtin_ia32_vpermilpd:
5318 case X86::BI__builtin_ia32_vpermilpd256:
5319 case X86::BI__builtin_ia32_vpermilpd512:
5321 S, OpPC, Call, [](unsigned DstIdx, unsigned Control) {
5322 unsigned NumElemPerLane = 2;
5323 unsigned BitsPerElem = 1;
5324 unsigned MaskBits = 8;
5325 unsigned IndexMask = 0x1;
5326 unsigned Lane = DstIdx / NumElemPerLane;
5327 unsigned LaneOffset = Lane * NumElemPerLane;
5328 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
5329 unsigned Index = (Control >> BitIndex) & IndexMask;
5330 return std::make_pair(0, static_cast<int>(LaneOffset + Index));
5331 });
5332
5333 case X86::BI__builtin_ia32_permdf256:
5334 case X86::BI__builtin_ia32_permdi256:
5336 S, OpPC, Call, [](unsigned DstIdx, unsigned Control) {
5337 // permute4x64 operates on 4 64-bit elements
5338 // For element i (0-3), extract bits [2*i+1:2*i] from Control
5339 unsigned Index = (Control >> (2 * DstIdx)) & 0x3;
5340 return std::make_pair(0, static_cast<int>(Index));
5341 });
5342
5343 case X86::BI__builtin_ia32_vpmultishiftqb128:
5344 case X86::BI__builtin_ia32_vpmultishiftqb256:
5345 case X86::BI__builtin_ia32_vpmultishiftqb512:
5346 return interp__builtin_ia32_multishiftqb(S, OpPC, Call);
5347 case X86::BI__builtin_ia32_kandqi:
5348 case X86::BI__builtin_ia32_kandhi:
5349 case X86::BI__builtin_ia32_kandsi:
5350 case X86::BI__builtin_ia32_kanddi:
5352 S, OpPC, Call,
5353 [](const APSInt &LHS, const APSInt &RHS) { return LHS & RHS; });
5354
5355 case X86::BI__builtin_ia32_kandnqi:
5356 case X86::BI__builtin_ia32_kandnhi:
5357 case X86::BI__builtin_ia32_kandnsi:
5358 case X86::BI__builtin_ia32_kandndi:
5360 S, OpPC, Call,
5361 [](const APSInt &LHS, const APSInt &RHS) { return ~LHS & RHS; });
5362
5363 case X86::BI__builtin_ia32_korqi:
5364 case X86::BI__builtin_ia32_korhi:
5365 case X86::BI__builtin_ia32_korsi:
5366 case X86::BI__builtin_ia32_kordi:
5368 S, OpPC, Call,
5369 [](const APSInt &LHS, const APSInt &RHS) { return LHS | RHS; });
5370
5371 case X86::BI__builtin_ia32_kxnorqi:
5372 case X86::BI__builtin_ia32_kxnorhi:
5373 case X86::BI__builtin_ia32_kxnorsi:
5374 case X86::BI__builtin_ia32_kxnordi:
5376 S, OpPC, Call,
5377 [](const APSInt &LHS, const APSInt &RHS) { return ~(LHS ^ RHS); });
5378
5379 case X86::BI__builtin_ia32_kxorqi:
5380 case X86::BI__builtin_ia32_kxorhi:
5381 case X86::BI__builtin_ia32_kxorsi:
5382 case X86::BI__builtin_ia32_kxordi:
5384 S, OpPC, Call,
5385 [](const APSInt &LHS, const APSInt &RHS) { return LHS ^ RHS; });
5386
5387 case X86::BI__builtin_ia32_knotqi:
5388 case X86::BI__builtin_ia32_knothi:
5389 case X86::BI__builtin_ia32_knotsi:
5390 case X86::BI__builtin_ia32_knotdi:
5392 S, OpPC, Call, [](const APSInt &Src) { return ~Src; });
5393
5394 case X86::BI__builtin_ia32_kaddqi:
5395 case X86::BI__builtin_ia32_kaddhi:
5396 case X86::BI__builtin_ia32_kaddsi:
5397 case X86::BI__builtin_ia32_kadddi:
5399 S, OpPC, Call,
5400 [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
5401
5402 case X86::BI__builtin_ia32_kmovb:
5403 case X86::BI__builtin_ia32_kmovw:
5404 case X86::BI__builtin_ia32_kmovd:
5405 case X86::BI__builtin_ia32_kmovq:
5407 S, OpPC, Call, [](const APSInt &Src) { return Src; });
5408
5409 case X86::BI__builtin_ia32_kunpckhi:
5410 case X86::BI__builtin_ia32_kunpckdi:
5411 case X86::BI__builtin_ia32_kunpcksi:
5413 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
5414 // Generic kunpack: extract lower half of each operand and concatenate
5415 // Result = A[HalfWidth-1:0] concat B[HalfWidth-1:0]
5416 unsigned BW = A.getBitWidth();
5417 return APSInt(A.trunc(BW / 2).concat(B.trunc(BW / 2)),
5418 A.isUnsigned());
5419 });
5420
5421 case X86::BI__builtin_ia32_phminposuw128:
5422 return interp__builtin_ia32_phminposuw(S, OpPC, Call);
5423
5424 case X86::BI__builtin_ia32_psraq128:
5425 case X86::BI__builtin_ia32_psraq256:
5426 case X86::BI__builtin_ia32_psraq512:
5427 case X86::BI__builtin_ia32_psrad128:
5428 case X86::BI__builtin_ia32_psrad256:
5429 case X86::BI__builtin_ia32_psrad512:
5430 case X86::BI__builtin_ia32_psraw128:
5431 case X86::BI__builtin_ia32_psraw256:
5432 case X86::BI__builtin_ia32_psraw512:
5434 S, OpPC, Call,
5435 [](const APInt &Elt, uint64_t Count) { return Elt.ashr(Count); },
5436 [](const APInt &Elt, unsigned Width) { return Elt.ashr(Width - 1); });
5437
5438 case X86::BI__builtin_ia32_psllq128:
5439 case X86::BI__builtin_ia32_psllq256:
5440 case X86::BI__builtin_ia32_psllq512:
5441 case X86::BI__builtin_ia32_pslld128:
5442 case X86::BI__builtin_ia32_pslld256:
5443 case X86::BI__builtin_ia32_pslld512:
5444 case X86::BI__builtin_ia32_psllw128:
5445 case X86::BI__builtin_ia32_psllw256:
5446 case X86::BI__builtin_ia32_psllw512:
5448 S, OpPC, Call,
5449 [](const APInt &Elt, uint64_t Count) { return Elt.shl(Count); },
5450 [](const APInt &Elt, unsigned Width) { return APInt::getZero(Width); });
5451
5452 case X86::BI__builtin_ia32_psrlq128:
5453 case X86::BI__builtin_ia32_psrlq256:
5454 case X86::BI__builtin_ia32_psrlq512:
5455 case X86::BI__builtin_ia32_psrld128:
5456 case X86::BI__builtin_ia32_psrld256:
5457 case X86::BI__builtin_ia32_psrld512:
5458 case X86::BI__builtin_ia32_psrlw128:
5459 case X86::BI__builtin_ia32_psrlw256:
5460 case X86::BI__builtin_ia32_psrlw512:
5462 S, OpPC, Call,
5463 [](const APInt &Elt, uint64_t Count) { return Elt.lshr(Count); },
5464 [](const APInt &Elt, unsigned Width) { return APInt::getZero(Width); });
5465
5466 case X86::BI__builtin_ia32_pternlogd128_mask:
5467 case X86::BI__builtin_ia32_pternlogd256_mask:
5468 case X86::BI__builtin_ia32_pternlogd512_mask:
5469 case X86::BI__builtin_ia32_pternlogq128_mask:
5470 case X86::BI__builtin_ia32_pternlogq256_mask:
5471 case X86::BI__builtin_ia32_pternlogq512_mask:
5472 return interp__builtin_ia32_pternlog(S, OpPC, Call, /*MaskZ=*/false);
5473 case X86::BI__builtin_ia32_pternlogd128_maskz:
5474 case X86::BI__builtin_ia32_pternlogd256_maskz:
5475 case X86::BI__builtin_ia32_pternlogd512_maskz:
5476 case X86::BI__builtin_ia32_pternlogq128_maskz:
5477 case X86::BI__builtin_ia32_pternlogq256_maskz:
5478 case X86::BI__builtin_ia32_pternlogq512_maskz:
5479 return interp__builtin_ia32_pternlog(S, OpPC, Call, /*MaskZ=*/true);
5480 case Builtin::BI__builtin_elementwise_fshl:
5482 llvm::APIntOps::fshl);
5483 case Builtin::BI__builtin_elementwise_fshr:
5485 llvm::APIntOps::fshr);
5486
5487 case X86::BI__builtin_ia32_shuf_f32x4_256:
5488 case X86::BI__builtin_ia32_shuf_i32x4_256:
5489 case X86::BI__builtin_ia32_shuf_f64x2_256:
5490 case X86::BI__builtin_ia32_shuf_i64x2_256:
5491 case X86::BI__builtin_ia32_shuf_f32x4:
5492 case X86::BI__builtin_ia32_shuf_i32x4:
5493 case X86::BI__builtin_ia32_shuf_f64x2:
5494 case X86::BI__builtin_ia32_shuf_i64x2: {
5495 // Destination and sources A, B all have the same type.
5496 QualType VecQT = Call->getArg(0)->getType();
5497 const auto *VecT = VecQT->castAs<VectorType>();
5498 unsigned NumElems = VecT->getNumElements();
5499 unsigned ElemBits = S.getASTContext().getTypeSize(VecT->getElementType());
5500 unsigned LaneBits = 128u;
5501 unsigned NumLanes = (NumElems * ElemBits) / LaneBits;
5502 unsigned NumElemsPerLane = LaneBits / ElemBits;
5503
5505 S, OpPC, Call,
5506 [NumLanes, NumElemsPerLane](unsigned DstIdx, unsigned ShuffleMask) {
5507 // DstIdx determines source. ShuffleMask selects lane in source.
5508 unsigned BitsPerElem = NumLanes / 2;
5509 unsigned IndexMask = (1u << BitsPerElem) - 1;
5510 unsigned Lane = DstIdx / NumElemsPerLane;
5511 unsigned SrcIdx = (Lane < NumLanes / 2) ? 0 : 1;
5512 unsigned BitIdx = BitsPerElem * Lane;
5513 unsigned SrcLaneIdx = (ShuffleMask >> BitIdx) & IndexMask;
5514 unsigned ElemInLane = DstIdx % NumElemsPerLane;
5515 unsigned IdxToPick = SrcLaneIdx * NumElemsPerLane + ElemInLane;
5516 return std::pair<unsigned, int>{SrcIdx, IdxToPick};
5517 });
5518 }
5519
5520 case X86::BI__builtin_ia32_insertf32x4_256:
5521 case X86::BI__builtin_ia32_inserti32x4_256:
5522 case X86::BI__builtin_ia32_insertf64x2_256:
5523 case X86::BI__builtin_ia32_inserti64x2_256:
5524 case X86::BI__builtin_ia32_insertf32x4:
5525 case X86::BI__builtin_ia32_inserti32x4:
5526 case X86::BI__builtin_ia32_insertf64x2_512:
5527 case X86::BI__builtin_ia32_inserti64x2_512:
5528 case X86::BI__builtin_ia32_insertf32x8:
5529 case X86::BI__builtin_ia32_inserti32x8:
5530 case X86::BI__builtin_ia32_insertf64x4:
5531 case X86::BI__builtin_ia32_inserti64x4:
5532 case X86::BI__builtin_ia32_vinsertf128_ps256:
5533 case X86::BI__builtin_ia32_vinsertf128_pd256:
5534 case X86::BI__builtin_ia32_vinsertf128_si256:
5535 case X86::BI__builtin_ia32_insert128i256:
5536 return interp__builtin_x86_insert_subvector(S, OpPC, Call, BuiltinID);
5537
5538 case clang::X86::BI__builtin_ia32_vcvtps2ph:
5539 case clang::X86::BI__builtin_ia32_vcvtps2ph256:
5540 return interp__builtin_ia32_vcvtps2ph(S, OpPC, Call);
5541
5542 case X86::BI__builtin_ia32_vec_ext_v4hi:
5543 case X86::BI__builtin_ia32_vec_ext_v16qi:
5544 case X86::BI__builtin_ia32_vec_ext_v8hi:
5545 case X86::BI__builtin_ia32_vec_ext_v4si:
5546 case X86::BI__builtin_ia32_vec_ext_v2di:
5547 case X86::BI__builtin_ia32_vec_ext_v32qi:
5548 case X86::BI__builtin_ia32_vec_ext_v16hi:
5549 case X86::BI__builtin_ia32_vec_ext_v8si:
5550 case X86::BI__builtin_ia32_vec_ext_v4di:
5551 case X86::BI__builtin_ia32_vec_ext_v4sf:
5552 return interp__builtin_vec_ext(S, OpPC, Call, BuiltinID);
5553
5554 case X86::BI__builtin_ia32_vec_set_v4hi:
5555 case X86::BI__builtin_ia32_vec_set_v16qi:
5556 case X86::BI__builtin_ia32_vec_set_v8hi:
5557 case X86::BI__builtin_ia32_vec_set_v4si:
5558 case X86::BI__builtin_ia32_vec_set_v2di:
5559 case X86::BI__builtin_ia32_vec_set_v32qi:
5560 case X86::BI__builtin_ia32_vec_set_v16hi:
5561 case X86::BI__builtin_ia32_vec_set_v8si:
5562 case X86::BI__builtin_ia32_vec_set_v4di:
5563 return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
5564
5565 case X86::BI__builtin_ia32_cvtb2mask128:
5566 case X86::BI__builtin_ia32_cvtb2mask256:
5567 case X86::BI__builtin_ia32_cvtb2mask512:
5568 case X86::BI__builtin_ia32_cvtw2mask128:
5569 case X86::BI__builtin_ia32_cvtw2mask256:
5570 case X86::BI__builtin_ia32_cvtw2mask512:
5571 case X86::BI__builtin_ia32_cvtd2mask128:
5572 case X86::BI__builtin_ia32_cvtd2mask256:
5573 case X86::BI__builtin_ia32_cvtd2mask512:
5574 case X86::BI__builtin_ia32_cvtq2mask128:
5575 case X86::BI__builtin_ia32_cvtq2mask256:
5576 case X86::BI__builtin_ia32_cvtq2mask512:
5577 return interp__builtin_ia32_cvt_vec2mask(S, OpPC, Call, BuiltinID);
5578
5579 case X86::BI__builtin_ia32_cvtmask2b128:
5580 case X86::BI__builtin_ia32_cvtmask2b256:
5581 case X86::BI__builtin_ia32_cvtmask2b512:
5582 case X86::BI__builtin_ia32_cvtmask2w128:
5583 case X86::BI__builtin_ia32_cvtmask2w256:
5584 case X86::BI__builtin_ia32_cvtmask2w512:
5585 case X86::BI__builtin_ia32_cvtmask2d128:
5586 case X86::BI__builtin_ia32_cvtmask2d256:
5587 case X86::BI__builtin_ia32_cvtmask2d512:
5588 case X86::BI__builtin_ia32_cvtmask2q128:
5589 case X86::BI__builtin_ia32_cvtmask2q256:
5590 case X86::BI__builtin_ia32_cvtmask2q512:
5591 return interp__builtin_ia32_cvt_mask2vec(S, OpPC, Call, BuiltinID);
5592
5593 case X86::BI__builtin_ia32_cvtsd2ss:
5594 return interp__builtin_ia32_cvtsd2ss(S, OpPC, Call, false);
5595
5596 case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
5597 return interp__builtin_ia32_cvtsd2ss(S, OpPC, Call, true);
5598
5599 case X86::BI__builtin_ia32_cvtpd2ps:
5600 case X86::BI__builtin_ia32_cvtpd2ps256:
5601 return interp__builtin_ia32_cvtpd2ps(S, OpPC, Call, false, false);
5602 case X86::BI__builtin_ia32_cvtpd2ps_mask:
5603 return interp__builtin_ia32_cvtpd2ps(S, OpPC, Call, true, false);
5604 case X86::BI__builtin_ia32_cvtpd2ps512_mask:
5605 return interp__builtin_ia32_cvtpd2ps(S, OpPC, Call, true, true);
5606
5607 case X86::BI__builtin_ia32_cmpb128_mask:
5608 case X86::BI__builtin_ia32_cmpw128_mask:
5609 case X86::BI__builtin_ia32_cmpd128_mask:
5610 case X86::BI__builtin_ia32_cmpq128_mask:
5611 case X86::BI__builtin_ia32_cmpb256_mask:
5612 case X86::BI__builtin_ia32_cmpw256_mask:
5613 case X86::BI__builtin_ia32_cmpd256_mask:
5614 case X86::BI__builtin_ia32_cmpq256_mask:
5615 case X86::BI__builtin_ia32_cmpb512_mask:
5616 case X86::BI__builtin_ia32_cmpw512_mask:
5617 case X86::BI__builtin_ia32_cmpd512_mask:
5618 case X86::BI__builtin_ia32_cmpq512_mask:
5619 return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
5620 /*IsUnsigned=*/false);
5621
5622 case X86::BI__builtin_ia32_ucmpb128_mask:
5623 case X86::BI__builtin_ia32_ucmpw128_mask:
5624 case X86::BI__builtin_ia32_ucmpd128_mask:
5625 case X86::BI__builtin_ia32_ucmpq128_mask:
5626 case X86::BI__builtin_ia32_ucmpb256_mask:
5627 case X86::BI__builtin_ia32_ucmpw256_mask:
5628 case X86::BI__builtin_ia32_ucmpd256_mask:
5629 case X86::BI__builtin_ia32_ucmpq256_mask:
5630 case X86::BI__builtin_ia32_ucmpb512_mask:
5631 case X86::BI__builtin_ia32_ucmpw512_mask:
5632 case X86::BI__builtin_ia32_ucmpd512_mask:
5633 case X86::BI__builtin_ia32_ucmpq512_mask:
5634 return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
5635 /*IsUnsigned=*/true);
5636
5637 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
5638 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
5639 case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
5641
5642 case X86::BI__builtin_ia32_pslldqi128_byteshift:
5643 case X86::BI__builtin_ia32_pslldqi256_byteshift:
5644 case X86::BI__builtin_ia32_pslldqi512_byteshift:
5645 // These SLLDQ intrinsics always operate on byte elements (8 bits).
5646 // The lane width is hardcoded to 16 to match the SIMD register size,
5647 // but the algorithm processes one byte per iteration,
5648 // so APInt(8, ...) is correct and intentional.
5650 S, OpPC, Call,
5651 [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
5652 unsigned LaneBase = (DstIdx / 16) * 16;
5653 unsigned LaneIdx = DstIdx % 16;
5654 if (LaneIdx < Shift)
5655 return std::make_pair(0, -1);
5656
5657 return std::make_pair(0,
5658 static_cast<int>(LaneBase + LaneIdx - Shift));
5659 });
5660
5661 case X86::BI__builtin_ia32_psrldqi128_byteshift:
5662 case X86::BI__builtin_ia32_psrldqi256_byteshift:
5663 case X86::BI__builtin_ia32_psrldqi512_byteshift:
5664 // These SRLDQ intrinsics always operate on byte elements (8 bits).
5665 // The lane width is hardcoded to 16 to match the SIMD register size,
5666 // but the algorithm processes one byte per iteration,
5667 // so APInt(8, ...) is correct and intentional.
5669 S, OpPC, Call,
5670 [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
5671 unsigned LaneBase = (DstIdx / 16) * 16;
5672 unsigned LaneIdx = DstIdx % 16;
5673 if (LaneIdx + Shift < 16)
5674 return std::make_pair(0,
5675 static_cast<int>(LaneBase + LaneIdx + Shift));
5676
5677 return std::make_pair(0, -1);
5678 });
5679
5680 case X86::BI__builtin_ia32_palignr128:
5681 case X86::BI__builtin_ia32_palignr256:
5682 case X86::BI__builtin_ia32_palignr512:
5684 S, OpPC, Call, [](unsigned DstIdx, unsigned Shift) {
5685 // Default to -1 → zero-fill this destination element
5686 unsigned VecIdx = 1;
5687 int ElemIdx = -1;
5688
5689 int Lane = DstIdx / 16;
5690 int Offset = DstIdx % 16;
5691
5692 // Elements come from VecB first, then VecA after the shift boundary
5693 unsigned ShiftedIdx = Offset + (Shift & 0xFF);
5694 if (ShiftedIdx < 16) { // from VecB
5695 ElemIdx = ShiftedIdx + (Lane * 16);
5696 } else if (ShiftedIdx < 32) { // from VecA
5697 VecIdx = 0;
5698 ElemIdx = (ShiftedIdx - 16) + (Lane * 16);
5699 }
5700
5701 return std::pair<unsigned, int>{VecIdx, ElemIdx};
5702 });
5703
5704 case X86::BI__builtin_ia32_alignd128:
5705 case X86::BI__builtin_ia32_alignd256:
5706 case X86::BI__builtin_ia32_alignd512:
5707 case X86::BI__builtin_ia32_alignq128:
5708 case X86::BI__builtin_ia32_alignq256:
5709 case X86::BI__builtin_ia32_alignq512: {
5710 unsigned NumElems = Call->getType()->castAs<VectorType>()->getNumElements();
5712 S, OpPC, Call, [NumElems](unsigned DstIdx, unsigned Shift) {
5713 unsigned Imm = Shift & 0xFF;
5714 unsigned EffectiveShift = Imm & (NumElems - 1);
5715 unsigned SourcePos = DstIdx + EffectiveShift;
5716 unsigned VecIdx = SourcePos < NumElems ? 1u : 0u;
5717 unsigned ElemIdx = SourcePos & (NumElems - 1);
5718 return std::pair<unsigned, int>{VecIdx, static_cast<int>(ElemIdx)};
5719 });
5720 }
5721
5722 default:
5723 S.FFDiag(S.Current->getLocation(OpPC),
5724 diag::note_invalid_subexpr_in_const_expr)
5725 << S.Current->getRange(OpPC);
5726
5727 return false;
5728 }
5729
5730 llvm_unreachable("Unhandled builtin ID");
5731}
5732
5734 ArrayRef<int64_t> ArrayIndices, int64_t &IntResult) {
5736 unsigned N = E->getNumComponents();
5737 assert(N > 0);
5738
5739 unsigned ArrayIndex = 0;
5740 QualType CurrentType = E->getTypeSourceInfo()->getType();
5741 for (unsigned I = 0; I != N; ++I) {
5742 const OffsetOfNode &Node = E->getComponent(I);
5743 switch (Node.getKind()) {
5744 case OffsetOfNode::Field: {
5745 const FieldDecl *MemberDecl = Node.getField();
5746 const auto *RD = CurrentType->getAsRecordDecl();
5747 if (!RD || RD->isInvalidDecl())
5748 return false;
5750 unsigned FieldIndex = MemberDecl->getFieldIndex();
5751 assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
5752 Result +=
5754 CurrentType = MemberDecl->getType().getNonReferenceType();
5755 break;
5756 }
5757 case OffsetOfNode::Array: {
5758 // When generating bytecode, we put all the index expressions as Sint64 on
5759 // the stack.
5760 int64_t Index = ArrayIndices[ArrayIndex];
5761 const ArrayType *AT = S.getASTContext().getAsArrayType(CurrentType);
5762 if (!AT)
5763 return false;
5764 CurrentType = AT->getElementType();
5765 CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(CurrentType);
5766 Result += Index * ElementSize;
5767 ++ArrayIndex;
5768 break;
5769 }
5770 case OffsetOfNode::Base: {
5771 const CXXBaseSpecifier *BaseSpec = Node.getBase();
5772 if (BaseSpec->isVirtual())
5773 return false;
5774
5775 // Find the layout of the class whose base we are looking into.
5776 const auto *RD = CurrentType->getAsCXXRecordDecl();
5777 if (!RD || RD->isInvalidDecl())
5778 return false;
5780
5781 // Find the base class itself.
5782 CurrentType = BaseSpec->getType();
5783 const auto *BaseRD = CurrentType->getAsCXXRecordDecl();
5784 if (!BaseRD)
5785 return false;
5786
5787 // Add the offset to the base.
5788 Result += RL.getBaseClassOffset(BaseRD);
5789 break;
5790 }
5792 llvm_unreachable("Dependent OffsetOfExpr?");
5793 }
5794 }
5795
5796 IntResult = Result.getQuantity();
5797
5798 return true;
5799}
5800
5802 const Pointer &Ptr, const APSInt &IntValue) {
5803
5804 const Record *R = Ptr.getRecord();
5805 assert(R);
5806 assert(R->getNumFields() == 1);
5807
5808 unsigned FieldOffset = R->getField(0u)->Offset;
5809 const Pointer &FieldPtr = Ptr.atField(FieldOffset);
5810 PrimType FieldT = *S.getContext().classify(FieldPtr.getType());
5811
5812 INT_TYPE_SWITCH(FieldT,
5813 FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
5814 FieldPtr.initialize();
5815 return true;
5816}
5817
5818static void zeroAll(Pointer &Dest) {
5819 const Descriptor *Desc = Dest.getFieldDesc();
5820
5821 if (Desc->isPrimitive()) {
5822 TYPE_SWITCH(Desc->getPrimType(), {
5823 Dest.deref<T>().~T();
5824 new (&Dest.deref<T>()) T();
5825 });
5826 return;
5827 }
5828
5829 if (Desc->isRecord()) {
5830 const Record *R = Desc->ElemRecord;
5831 for (const Record::Field &F : R->fields()) {
5832 Pointer FieldPtr = Dest.atField(F.Offset);
5833 zeroAll(FieldPtr);
5834 }
5835 return;
5836 }
5837
5838 if (Desc->isPrimitiveArray()) {
5839 for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
5840 TYPE_SWITCH(Desc->getPrimType(), {
5841 Dest.deref<T>().~T();
5842 new (&Dest.deref<T>()) T();
5843 });
5844 }
5845 return;
5846 }
5847
5848 if (Desc->isCompositeArray()) {
5849 for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
5850 Pointer ElemPtr = Dest.atIndex(I).narrow();
5851 zeroAll(ElemPtr);
5852 }
5853 return;
5854 }
5855}
5856
5857static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
5858 Pointer &Dest, bool Activate);
5859static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
5860 Pointer &Dest, bool Activate = false) {
5861 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
5862 const Descriptor *DestDesc = Dest.getFieldDesc();
5863
5864 auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
5865 Pointer DestField = Dest.atField(F.Offset);
5866 if (OptPrimType FT = S.Ctx.classify(F.Decl->getType())) {
5867 TYPE_SWITCH(*FT, {
5868 DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
5869 if (Src.atField(F.Offset).isInitialized())
5870 DestField.initialize();
5871 if (Activate)
5872 DestField.activate();
5873 });
5874 return true;
5875 }
5876 // Composite field.
5877 return copyComposite(S, OpPC, Src.atField(F.Offset), DestField, Activate);
5878 };
5879
5880 assert(SrcDesc->isRecord());
5881 assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
5882 const Record *R = DestDesc->ElemRecord;
5883 for (const Record::Field &F : R->fields()) {
5884 if (R->isUnion()) {
5885 // For unions, only copy the active field. Zero all others.
5886 const Pointer &SrcField = Src.atField(F.Offset);
5887 if (SrcField.isActive()) {
5888 if (!copyField(F, /*Activate=*/true))
5889 return false;
5890 } else {
5891 if (!CheckMutable(S, OpPC, Src.atField(F.Offset)))
5892 return false;
5893 Pointer DestField = Dest.atField(F.Offset);
5894 zeroAll(DestField);
5895 }
5896 } else {
5897 if (!copyField(F, Activate))
5898 return false;
5899 }
5900 }
5901
5902 for (const Record::Base &B : R->bases()) {
5903 Pointer DestBase = Dest.atField(B.Offset);
5904 if (!copyRecord(S, OpPC, Src.atField(B.Offset), DestBase, Activate))
5905 return false;
5906 }
5907
5908 Dest.initialize();
5909 return true;
5910}
5911
5912static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
5913 Pointer &Dest, bool Activate = false) {
5914 assert(Src.isLive() && Dest.isLive());
5915
5916 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
5917 const Descriptor *DestDesc = Dest.getFieldDesc();
5918
5919 assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());
5920
5921 if (DestDesc->isPrimitiveArray()) {
5922 assert(SrcDesc->isPrimitiveArray());
5923 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
5924 PrimType ET = DestDesc->getPrimType();
5925 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
5926 Pointer DestElem = Dest.atIndex(I);
5927 TYPE_SWITCH(ET, {
5928 DestElem.deref<T>() = Src.elem<T>(I);
5929 DestElem.initialize();
5930 });
5931 }
5932 return true;
5933 }
5934
5935 if (DestDesc->isCompositeArray()) {
5936 assert(SrcDesc->isCompositeArray());
5937 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
5938 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
5939 const Pointer &SrcElem = Src.atIndex(I).narrow();
5940 Pointer DestElem = Dest.atIndex(I).narrow();
5941 if (!copyComposite(S, OpPC, SrcElem, DestElem, Activate))
5942 return false;
5943 }
5944 return true;
5945 }
5946
5947 if (DestDesc->isRecord())
5948 return copyRecord(S, OpPC, Src, Dest, Activate);
5949 return Invalid(S, OpPC);
5950}
5951
5952bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
5953 return copyComposite(S, OpPC, Src, Dest);
5954}
5955
5956} // namespace interp
5957} // namespace clang
#define V(N, I)
Defines enum values for all the target-independent builtin functions.
llvm::APSInt APSInt
Definition Compiler.cpp:24
GCCTypeClass
Values returned by __builtin_classify_type, chosen to match the values produced by GCC's builtin.
CharUnits GetAlignOfExpr(const ASTContext &Ctx, const Expr *E, UnaryExprOrTypeTrait ExprKind)
GCCTypeClass EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts)
EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way as GCC.
static bool isOneByteCharacterType(QualType T)
static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal)
Attempts to detect a user writing into a piece of memory that's impossible to figure out the size of ...
uint8_t GFNIMul(uint8_t AByte, uint8_t BByte)
uint8_t GFNIAffine(uint8_t XByte, const APInt &AQword, const APSInt &Imm, bool Inverse)
TokenType getType() const
Returns the token's type, e.g.
#define X(type, name)
Definition Value.h:97
static DiagnosticBuilder Diag(DiagnosticsEngine *Diags, const LangOptions &Features, FullSourceLoc TokLoc, const char *TokBegin, const char *TokRangeBegin, const char *TokRangeEnd, unsigned DiagID)
Produce a diagnostic highlighting some portion of a literal.
#define INT_TYPE_SWITCH_NO_BOOL(Expr, B)
Definition PrimType.h:251
#define INT_TYPE_SWITCH(Expr, B)
Definition PrimType.h:232
#define TYPE_SWITCH(Expr, B)
Definition PrimType.h:211
static std::string toString(const clang::SanitizerSet &Sanitizers)
Produce a string containing comma-separated names of sanitizers in Sanitizers set.
static QualType getPointeeType(const MemRegion *R)
Enumerates target-specific builtins in their own namespaces within namespace clang.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition APValue.h:122
CharUnits & getLValueOffset()
Definition APValue.cpp:993
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
CanQualType FloatTy
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
Builtin::Context & BuiltinInfo
Definition ASTContext.h:792
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
const LangOptions & getLangOpts() const
Definition ASTContext.h:944
CanQualType CharTy
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
QualType getWCharType() const
Return the unique wchar_t type available in C++ (and available as __wchar_t as a Microsoft extension)...
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:909
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
CanQualType getCanonicalTagType(const TagDecl *TD) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
CanQualType HalfTy
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getVBaseClassOffset(const CXXRecordDecl *VBase) const
getVBaseClassOffset - Get the offset, in chars, for the given base class.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3723
QualType getElementType() const
Definition TypeBase.h:3735
std::string getQuotedName(unsigned ID) const
Return the identifier name for the specified builtin inside single quotes for a diagnostic,...
Definition Builtins.cpp:85
bool isConstantEvaluated(unsigned ID) const
Return true if this function can be constant evaluated by Clang frontend.
Definition Builtins.h:459
Represents a base class of a C++ class.
Definition DeclCXX.h:146
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition DeclCXX.h:249
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2943
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3147
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Definition CharUnits.h:201
static unsigned getMaxSizeBits(const ASTContext &Context)
Determine the maximum number of active bits that an array's size can require, which limits the maximu...
Definition Type.cpp:255
This represents one expression.
Definition Expr.h:112
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:276
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3160
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
Represents a function declaration or definition.
Definition Decl.h:2000
One of these records is kept for each identifier that is lexed.
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
std::optional< llvm::AllocTokenMode > AllocTokenMode
The allocation token mode.
std::optional< uint64_t > AllocTokenMax
Maximum number of allocation tokens (0 = target SIZE_MAX), nullopt if none set (use target SIZE_MAX).
OffsetOfExpr - [C99 7.17] - This represents an expression of the form offsetof(record-type,...
Definition Expr.h:2527
const OffsetOfNode & getComponent(unsigned Idx) const
Definition Expr.h:2574
TypeSourceInfo * getTypeSourceInfo() const
Definition Expr.h:2567
unsigned getNumComponents() const
Definition Expr.h:2582
Helper class for OffsetOfExpr.
Definition Expr.h:2421
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition Expr.h:2485
@ Array
An index into an array.
Definition Expr.h:2426
@ Identifier
A field in a dependent type, known only by its name.
Definition Expr.h:2430
@ Field
A field.
Definition Expr.h:2428
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition Expr.h:2433
Kind getKind() const
Determine what kind of offsetof node this is.
Definition Expr.h:2475
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition Expr.h:2495
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3329
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2867
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8292
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8477
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
unsigned getMaxAtomicInlineWidth() const
Return the maximum width lock-free atomic operation which can be inlined given the supported features...
Definition TargetInfo.h:858
bool isBigEndian() const
virtual int getEHDataRegisterNumber(unsigned RegNo) const
Return the register number that __builtin_eh_return_regno would return with the specified argument.
virtual bool isNan2008() const
Returns true if NaN encoding is IEEE 754-2008.
QualType getType() const
Return the type wrapped by this type source info.
Definition TypeBase.h:8274
bool isBooleanType() const
Definition TypeBase.h:9021
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2226
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition Type.cpp:2274
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isPointerType() const
Definition TypeBase.h:8529
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8935
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9178
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition Type.cpp:2436
bool isVectorType() const
Definition TypeBase.h:8668
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2321
bool isFloatingType() const
Definition Type.cpp:2305
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9111
QualType getType() const
Definition Decl.h:723
Represents a GCC generic vector type.
Definition TypeBase.h:4176
unsigned getNumElements() const
Definition TypeBase.h:4191
QualType getElementType() const
Definition TypeBase.h:4190
A memory block, either on the stack or in the heap.
Definition InterpBlock.h:44
const Descriptor * getDescriptor() const
Returns the block's descriptor.
Definition InterpBlock.h:73
bool isDynamic() const
Definition InterpBlock.h:83
Wrapper around boolean types.
Definition Boolean.h:25
static Boolean from(T Value)
Definition Boolean.h:97
Pointer into the code segment.
Definition Source.h:30
const LangOptions & getLangOpts() const
Returns the language options.
Definition Context.cpp:330
OptPrimType classify(QualType T) const
Classifies a type.
Definition Context.cpp:364
unsigned getEvalID() const
Definition Context.h:147
Manages dynamic memory allocations done during bytecode interpretation.
bool deallocate(const Expr *Source, const Block *BlockToDelete, InterpState &S)
Deallocate the given source+block combination.
std::optional< Form > getAllocationForm(const Expr *Source) const
Checks whether the allocation done at the given source is an array allocation.
Block * allocate(const Descriptor *D, unsigned EvalID, Form AllocForm)
Allocate ONE element of the given descriptor.
If a Floating is constructed from Memory, it DOES NOT OWN THAT MEMORY.
Definition Floating.h:35
void copy(const APFloat &F)
Definition Floating.h:123
llvm::FPClassTest classify() const
Definition Floating.h:154
bool isSignaling() const
Definition Floating.h:149
bool isNormal() const
Definition Floating.h:152
ComparisonCategoryResult compare(const Floating &RHS) const
Definition Floating.h:157
bool isZero() const
Definition Floating.h:144
bool isNegative() const
Definition Floating.h:143
bool isFinite() const
Definition Floating.h:151
bool isDenormal() const
Definition Floating.h:153
APFloat::fltCategory getCategory() const
Definition Floating.h:155
APFloat getAPFloat() const
Definition Floating.h:64
Base class for stack frames, shared between VM and walker.
Definition Frame.h:25
virtual const FunctionDecl * getCallee() const =0
Returns the called function's declaration.
If an IntegralAP is constructed from Memory, it DOES NOT OWN THAT MEMORY.
Definition IntegralAP.h:36
Frame storing local variables.
Definition InterpFrame.h:27
const Expr * getExpr(CodePtr PC) const
InterpFrame * Caller
The frame of the previous function.
Definition InterpFrame.h:30
SourceInfo getSource(CodePtr PC) const
Map a location to a source.
CodePtr getRetPC() const
Returns the return address of the frame.
SourceLocation getLocation(CodePtr PC) const
SourceRange getRange(CodePtr PC) const
unsigned getDepth() const
const FunctionDecl * getCallee() const override
Returns the called function's declaration.
Stack frame storing temporaries and parameters.
Definition InterpStack.h:25
T pop()
Returns the value from the top of the stack and removes it.
Definition InterpStack.h:39
void push(Tys &&...Args)
Constructs a value in place on the top of the stack.
Definition InterpStack.h:33
void discard()
Discards the top value from the stack.
Definition InterpStack.h:50
T & peek() const
Returns a reference to the value on the top of the stack.
Definition InterpStack.h:62
Interpreter context.
Definition InterpState.h:43
Expr::EvalStatus & getEvalStatus() const override
Definition InterpState.h:67
Context & getContext() const
DynamicAllocator & getAllocator()
Context & Ctx
Interpreter Context.
Floating allocFloat(const llvm::fltSemantics &Sem)
llvm::SmallVector< const Block * > InitializingBlocks
List of blocks we're currently running either constructors or destructors for.
ASTContext & getASTContext() const override
Definition InterpState.h:70
InterpStack & Stk
Temporary stack.
const VarDecl * EvaluatingDecl
Declaration we're initializing/evaluting, if any.
InterpFrame * Current
The current frame.
T allocAP(unsigned BitWidth)
const LangOptions & getLangOpts() const
Definition InterpState.h:71
StdAllocatorCaller getStdAllocatorCaller(StringRef Name) const
Program & P
Reference to the module containing all bytecode.
PrimType value_or(PrimType PT) const
Definition PrimType.h:68
A pointer to a memory block, live or dead.
Definition Pointer.h:92
Pointer narrow() const
Restricts the scope of an array element pointer.
Definition Pointer.h:189
bool isInitialized() const
Checks if an object was initialized.
Definition Pointer.cpp:447
Pointer atIndex(uint64_t Idx) const
Offsets a pointer inside an array.
Definition Pointer.h:157
bool isDummy() const
Checks if the pointer points to a dummy value.
Definition Pointer.h:553
int64_t getIndex() const
Returns the index into an array.
Definition Pointer.h:618
bool isActive() const
Checks if the object is active.
Definition Pointer.h:542
Pointer atField(unsigned Off) const
Creates a pointer to a field.
Definition Pointer.h:174
T & deref() const
Dereferences the pointer, if it's live.
Definition Pointer.h:669
unsigned getNumElems() const
Returns the number of elements.
Definition Pointer.h:602
Pointer getArray() const
Returns the parent array.
Definition Pointer.h:321
bool isUnknownSizeArray() const
Checks if the structure is an array of unknown size.
Definition Pointer.h:421
void activate() const
Activates a field.
Definition Pointer.cpp:570
bool isIntegralPointer() const
Definition Pointer.h:475
QualType getType() const
Returns the type of the innermost field.
Definition Pointer.h:341
bool isArrayElement() const
Checks if the pointer points to an array.
Definition Pointer.h:427
void initializeAllElements() const
Initialize all elements of a primitive array at once.
Definition Pointer.cpp:546
bool isLive() const
Checks if the pointer is live.
Definition Pointer.h:273
bool inArray() const
Checks if the innermost field is an array.
Definition Pointer.h:403
T & elem(unsigned I) const
Dereferences the element at index I.
Definition Pointer.h:685
Pointer getBase() const
Returns a pointer to the object of which this pointer is a field.
Definition Pointer.h:312
std::string toDiagnosticString(const ASTContext &Ctx) const
Converts the pointer to a string usable in diagnostics.
Definition Pointer.cpp:434
bool isZero() const
Checks if the pointer is null.
Definition Pointer.h:259
bool isRoot() const
Pointer points directly to a block.
Definition Pointer.h:443
const Descriptor * getDeclDesc() const
Accessor for information about the declaration site.
Definition Pointer.h:287
static bool pointToSameBlock(const Pointer &A, const Pointer &B)
Checks if both given pointers point to the same block.
Definition Pointer.cpp:646
APValue toAPValue(const ASTContext &ASTCtx) const
Converts the pointer to an APValue.
Definition Pointer.cpp:172
bool isOnePastEnd() const
Checks if the index is one past end.
Definition Pointer.h:635
uint64_t getIntegerRepresentation() const
Definition Pointer.h:144
const FieldDecl * getField() const
Returns the field information.
Definition Pointer.h:487
Pointer expand() const
Expands a pointer to the containing array, undoing narrowing.
Definition Pointer.h:224
bool isBlockPointer() const
Definition Pointer.h:474
const Block * block() const
Definition Pointer.h:608
const Descriptor * getFieldDesc() const
Accessors for information about the innermost field.
Definition Pointer.h:331
bool isVirtualBaseClass() const
Definition Pointer.h:549
bool isBaseClass() const
Checks if a structure is a base class.
Definition Pointer.h:548
size_t elemSize() const
Returns the element size of the innermost field.
Definition Pointer.h:364
bool canBeInitialized() const
If this pointer has an InlineDescriptor we can use to initialize.
Definition Pointer.h:450
Lifetime getLifetime() const
Definition Pointer.h:730
void initialize() const
Initializes a field.
Definition Pointer.cpp:498
bool isField() const
Checks if the item is a field in an object.
Definition Pointer.h:279
const Record * getRecord() const
Returns the record descriptor of a class.
Definition Pointer.h:480
Descriptor * createDescriptor(const DeclTy &D, PrimType T, const Type *SourceTy=nullptr, Descriptor::MetadataSize MDSize=std::nullopt, bool IsConst=false, bool IsTemporary=false, bool IsMutable=false, bool IsVolatile=false)
Creates a descriptor for a primitive type.
Definition Program.h:119
Structure/Class descriptor.
Definition Record.h:25
const RecordDecl * getDecl() const
Returns the underlying declaration.
Definition Record.h:53
bool isUnion() const
Checks if the record is a union.
Definition Record.h:57
const Field * getField(unsigned I) const
Definition Record.h:81
llvm::iterator_range< const_base_iter > bases() const
Definition Record.h:86
unsigned getNumFields() const
Definition Record.h:80
llvm::iterator_range< const_field_iter > fields() const
Definition Record.h:76
Describes the statement/declaration an opcode was generated from.
Definition Source.h:74
OptionalDiagnostic Note(SourceLocation Loc, diag::kind DiagId)
Add a note to a prior diagnostic.
Definition State.cpp:63
DiagnosticBuilder report(SourceLocation Loc, diag::kind DiagId)
Directly reports a diagnostic message.
Definition State.cpp:74
OptionalDiagnostic FFDiag(SourceLocation Loc, diag::kind DiagId=diag::note_invalid_subexpr_in_const_expr, unsigned ExtraNotes=0)
Diagnose that the evaluation could not be folded (FF => FoldFailure)
Definition State.cpp:21
OptionalDiagnostic CCEDiag(SourceLocation Loc, diag::kind DiagId=diag::note_invalid_subexpr_in_const_expr, unsigned ExtraNotes=0)
Diagnose that the evaluation does not produce a C++11 core constant expression.
Definition State.cpp:42
bool checkingPotentialConstantExpression() const
Are we checking whether the expression is a potential constant expression?
Definition State.h:99
Defines the clang::TargetInfo interface.
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition OSLog.cpp:192
std::optional< llvm::AllocTokenMetadata > getAllocTokenMetadata(QualType T, const ASTContext &Ctx)
Get the information required for construction of an allocation token ID.
QualType inferPossibleType(const CallExpr *E, const ASTContext &Ctx, const CastExpr *CastE)
Infer the possible allocated type from an allocation call expression.
static bool isNoopBuiltin(unsigned ID)
static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_ia32_shuffle_generic(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< std::pair< unsigned, int >(unsigned, unsigned)> GetSourceIndex)
static bool interp__builtin_ia32_phminposuw(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT, const APSInt &Value)
static bool interp_builtin_ia32_gfni_affine(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool Inverse)
bool readPointerToBuffer(const Context &Ctx, const Pointer &FromPtr, BitcastBuffer &Buffer, bool ReturnOnUninit)
static Floating abs(InterpState &S, const Floating &In)
static bool interp__builtin_x86_extract_vector(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool IsNumBuiltin)
static bool interp__builtin_elementwise_maxmin(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_elementwise_triop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_assume(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC, DynamicAllocator::Form AllocForm, DynamicAllocator::Form DeleteForm, const Descriptor *D, const Expr *NewExpr)
Diagnose mismatched new[]/delete or new/delete[] pairs.
Definition Interp.cpp:1147
static bool interp__builtin_ia32_shift_with_count(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APInt &, uint64_t)> ShiftOp, llvm::function_ref< APInt(const APInt &, unsigned)> OverflowOp)
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
Defined as __builtin_isnan(...), to accommodate the fact that it can take a float,...
static llvm::RoundingMode getRoundingMode(FPOptions FPO)
static bool interp__builtin_elementwise_countzeroes(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
Can be called with an integer or vector as the first and only parameter.
bool Call(InterpState &S, CodePtr OpPC, const Function *Func, uint32_t VarArgSize)
Definition Interp.cpp:1618
static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool IsNumBuiltin)
bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC, const Pointer &Ptr, const APSInt &IntValue)
Sets the given integral value to the pointer, which is of a std::{weak,partial,strong}...
static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame)
static uint64_t popToUInt64(const InterpState &S, const Expr *E)
static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
bool __atomic_always_lock_free(size_t, void const volatile*) bool __atomic_is_lock_free(size_t,...
static llvm::APSInt convertBoolVectorToInt(const Pointer &Val)
static bool interp__builtin_move(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_clz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if a pointer points to a mutable field.
Definition Interp.cpp:621
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
__builtin_is_aligned() __builtin_align_up() __builtin_align_down() The first parameter is either an i...
static bool interp__builtin_ia32_addsub(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool Activate(InterpState &S, CodePtr OpPC)
Definition Interp.h:1996
static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
(CarryIn, LHS, RHS, Result)
static bool isOneByteCharacterType(QualType T)
Determine if T is a character type for which we guarantee that sizeof(T) == 1.
static bool convertDoubleToFloatStrict(APFloat Src, Floating &Dst, InterpState &S, const Expr *DiagExpr)
static unsigned computePointerOffset(const ASTContext &ASTCtx, const Pointer &Ptr)
Compute the byte offset of Ptr in the full declaration.
static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a value can be loaded from a block.
Definition Interp.cpp:823
static bool interp__builtin_ia32_cmp_mask(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID, bool IsUnsigned)
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinOp)
static bool isReadable(const Pointer &P)
Check for common reasons a pointer can't be read from, which are usually not diagnosed in a builtin f...
static bool interp__builtin_inf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_vec_ext(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_test_op(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< bool(const APInt &A, const APInt &B)> Fn)
static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool CheckSign, const CallExpr *Call)
static bool interp__builtin_os_log_format_buffer_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E, ArrayRef< int64_t > ArrayIndices, int64_t &IntResult)
Interpret an offsetof operation.
static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is in range.
Definition Interp.cpp:546
static bool pointsToLastObject(const Pointer &Ptr)
Does Ptr point to the last subobject?
static bool interp__builtin_select(InterpState &S, CodePtr OpPC, const CallExpr *Call)
AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
llvm::APFloat APFloat
Definition Floating.h:27
static void discard(InterpStack &Stk, PrimType T)
static bool interp__builtin_select_scalar(InterpState &S, const CallExpr *Call)
Scalar variant of AVX512 predicated select: Result[i] = (Mask bit 0) ?
bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is live and accessible.
Definition Interp.cpp:441
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
Five int values followed by one floating value.
static bool interp__builtin_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp_floating_comparison(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool handleOverflow(InterpState &S, CodePtr OpPC, const T &SrcValue)
llvm::APInt APInt
Definition FixedPoint.h:19
static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate)
static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate=false)
static bool interp__builtin_c11_atomic_is_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool __c11_atomic_is_lock_free(size_t)
static void zeroAll(Pointer &Dest)
static bool interp__builtin_elementwise_int_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_x86_extract_vector_masked(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_cvt_mask2vec(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
PrimType
Enumeration of the primitive types of the VM.
Definition PrimType.h:34
static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, uint32_t BuiltinID)
Interpret a builtin function.
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_vec_set(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
__builtin_complex(Float A, float B);
static bool evalICmpImm(uint8_t Imm, const APSInt &A, const APSInt &B, bool IsUnsigned)
bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK)
Checks if a pointer is a dummy pointer.
Definition Interp.cpp:1198
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
__builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
static bool interp__builtin_ia32_cvt_vec2mask(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_pmul(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &, const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_x86_pack(InterpState &S, CodePtr, const CallExpr *E, llvm::function_ref< APInt(const APSInt &)> PackFn)
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT)
Pushes Val on the stack as the type given by QT.
static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if the array is offsetable.
Definition Interp.cpp:433
static bool interp__builtin_elementwise_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC, const InterpFrame *Frame)
static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool Error(InterpState &S, CodePtr OpPC)
Do nothing and just abort execution.
Definition Interp.h:3323
static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_movmsk_op(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
static bool interp_builtin_horizontal_fp_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APFloat(const APFloat &, const APFloat &, llvm::RoundingMode)> Fn)
static bool interp__builtin_ia32_pclmulqdq(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_elementwise_triop_fp(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APFloat(const APFloat &, const APFloat &, const APFloat &, llvm::RoundingMode)> Fn)
static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems)
static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static APSInt popToAPSInt(InterpStack &Stk, PrimType T)
static std::optional< unsigned > computeFullDescSize(const ASTContext &ASTCtx, const Descriptor *Desc)
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
First parameter to __builtin_isfpclass is the floating value, the second one is an integral value.
static bool interp__builtin_ia32_vcvtps2ph(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_ia32_multishiftqb(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_ia32_shufbitqmb_mask(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_nan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, bool Signaling)
bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest)
Copy the contents of Src into Dest.
static bool interp__builtin_elementwise_int_unaryop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &)> Fn)
constexpr bool isIntegralType(PrimType T)
Definition PrimType.h:128
static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_infer_alloc_token(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp_builtin_horizontal_int_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_ia32_cvtsd2ss(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool HasRoundingMask)
static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC, unsigned ID)
llvm::APSInt APSInt
Definition FixedPoint.h:20
static bool interp__builtin_ia32_gfni_mul(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static QualType getElemType(const Pointer &P)
static bool interp__builtin_ia32_pternlog(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool MaskZ)
static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static void swapBytes(std::byte *M, size_t N)
static bool interp__builtin_ia32_cvtpd2ps(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool IsMasked, bool HasRounding)
The JSON file list parser is used to communicate input to InstallAPI.
if(T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T -> getSizeExpr())))
ComparisonCategoryResult
An enumeration representing the possible results of a three-way comparison.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
@ AK_Read
Definition State.h:27
const FunctionProtoType * T
U cast(CodeGen::Address addr)
Definition Address.h:327
SmallVectorImpl< PartialDiagnosticAt > * Diag
Diag - If this is non-null, it will be filled in with a stack of notes indicating why evaluation fail...
Definition Expr.h:633
Track what bits have been initialized to known values and which ones have indeterminate value.
T deref(Bytes Offset) const
Dereferences the value at the given offset.
std::unique_ptr< std::byte[]> Data
A quantity in bits.
A quantity in bytes.
size_t getQuantity() const
Describes a memory block created by an allocation site.
Definition Descriptor.h:121
unsigned getNumElems() const
Returns the number of elements stored in the block.
Definition Descriptor.h:249
bool isPrimitive() const
Checks if the descriptor is of a primitive.
Definition Descriptor.h:263
QualType getElemQualType() const
bool isCompositeArray() const
Checks if the descriptor is of an array of composites.
Definition Descriptor.h:256
const ValueDecl * asValueDecl() const
Definition Descriptor.h:214
static constexpr unsigned MaxArrayElemBytes
Maximum number of bytes to be used for array elements.
Definition Descriptor.h:147
QualType getType() const
const Decl * asDecl() const
Definition Descriptor.h:210
static constexpr MetadataSize InlineDescMD
Definition Descriptor.h:143
unsigned getElemSize() const
returns the size of an element when the structure is viewed as an array.
Definition Descriptor.h:244
bool isPrimitiveArray() const
Checks if the descriptor is of an array of primitives.
Definition Descriptor.h:254
const VarDecl * asVarDecl() const
Definition Descriptor.h:218
PrimType getPrimType() const
Definition Descriptor.h:236
bool isRecord() const
Checks if the descriptor is of a record.
Definition Descriptor.h:268
const Record *const ElemRecord
Pointer to the record, if block contains records.
Definition Descriptor.h:152
const Expr * asExpr() const
Definition Descriptor.h:211
bool isArray() const
Checks if the descriptor is of an array.
Definition Descriptor.h:266
Mapping from primitive types to their representation.
Definition PrimType.h:138