clang 22.0.0git
InterpBuiltin.cpp
Go to the documentation of this file.
1//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
9#include "Boolean.h"
10#include "EvalEmitter.h"
12#include "InterpHelpers.h"
13#include "PrimType.h"
14#include "Program.h"
16#include "clang/AST/OSLog.h"
21#include "llvm/ADT/StringExtras.h"
22#include "llvm/Support/AllocToken.h"
23#include "llvm/Support/ErrorHandling.h"
24#include "llvm/Support/SipHash.h"
25
26namespace clang {
27namespace interp {
28
29[[maybe_unused]] static bool isNoopBuiltin(unsigned ID) {
30 switch (ID) {
31 case Builtin::BIas_const:
32 case Builtin::BIforward:
33 case Builtin::BIforward_like:
34 case Builtin::BImove:
35 case Builtin::BImove_if_noexcept:
36 case Builtin::BIaddressof:
37 case Builtin::BI__addressof:
38 case Builtin::BI__builtin_addressof:
39 case Builtin::BI__builtin_launder:
40 return true;
41 default:
42 return false;
43 }
44 return false;
45}
46
/// Pops and discards the value of primitive type \p T from the top of the
/// stack. TYPE_SWITCH dispatches over all primitive types, binding each
/// candidate type to `T` inside the braces.
static void discard(InterpStack &Stk, PrimType T) {
  TYPE_SWITCH(T, { Stk.discard<T>(); });
}
50
52 INT_TYPE_SWITCH(T, return Stk.pop<T>().toAPSInt());
53}
54
/// Convenience overload: pops the stack value corresponding to \p E's type
/// and returns it as an APSInt. Assumes \p E has a classifiable (primitive
/// integral) type; the classify() result is dereferenced unconditionally.
static APSInt popToAPSInt(InterpState &S, const Expr *E) {
  return popToAPSInt(S.Stk, *S.getContext().classify(E->getType()));
}
59 return popToAPSInt(S.Stk, *S.getContext().classify(T));
60}
61
62/// Pushes \p Val on the stack as the type given by \p QT.
63static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
67 assert(T);
68
69 unsigned BitWidth = S.getASTContext().getTypeSize(QT);
70
71 if (T == PT_IntAPS) {
72 auto Result = S.allocAP<IntegralAP<true>>(BitWidth);
73 Result.copy(Val);
75 return;
76 }
77
78 if (T == PT_IntAP) {
79 auto Result = S.allocAP<IntegralAP<false>>(BitWidth);
80 Result.copy(Val);
82 return;
83 }
84
86 int64_t V = Val.getSExtValue();
87 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
88 } else {
90 uint64_t V = Val.getZExtValue();
91 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
92 }
93}
94
95template <typename T>
96static void pushInteger(InterpState &S, T Val, QualType QT) {
97 if constexpr (std::is_same_v<T, APInt>)
98 pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
99 else if constexpr (std::is_same_v<T, APSInt>)
100 pushInteger(S, Val, QT);
101 else
102 pushInteger(S,
103 APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
104 std::is_signed_v<T>),
105 !std::is_signed_v<T>),
106 QT);
107}
108
109static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT,
110 const APSInt &Value) {
111
112 if (ValueT == PT_IntAPS) {
113 Dest.deref<IntegralAP<true>>() =
114 S.allocAP<IntegralAP<true>>(Value.getBitWidth());
115 Dest.deref<IntegralAP<true>>().copy(Value);
116 } else if (ValueT == PT_IntAP) {
117 Dest.deref<IntegralAP<false>>() =
118 S.allocAP<IntegralAP<false>>(Value.getBitWidth());
119 Dest.deref<IntegralAP<false>>().copy(Value);
120 } else {
122 ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
123 }
124}
125
126static QualType getElemType(const Pointer &P) {
127 const Descriptor *Desc = P.getFieldDesc();
128 QualType T = Desc->getType();
129 if (Desc->isPrimitive())
130 return T;
131 if (T->isPointerType())
132 return T->getAs<PointerType>()->getPointeeType();
133 if (Desc->isArray())
134 return Desc->getElemQualType();
135 if (const auto *AT = T->getAsArrayTypeUnsafe())
136 return AT->getElementType();
137 return T;
138}
139
141 unsigned ID) {
142 if (!S.diagnosing())
143 return;
144
145 auto Loc = S.Current->getSource(OpPC);
146 if (S.getLangOpts().CPlusPlus11)
147 S.CCEDiag(Loc, diag::note_constexpr_invalid_function)
148 << /*isConstexpr=*/0 << /*isConstructor=*/0
150 else
151 S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
152}
153
154static llvm::APSInt convertBoolVectorToInt(const Pointer &Val) {
155 assert(Val.getFieldDesc()->isPrimitiveArray() &&
157 "Not a boolean vector");
158 unsigned NumElems = Val.getNumElems();
159
160 // Each element is one bit, so create an integer with NumElts bits.
161 llvm::APSInt Result(NumElems, 0);
162 for (unsigned I = 0; I != NumElems; ++I) {
163 if (Val.elem<bool>(I))
164 Result.setBit(I);
165 }
166
167 return Result;
168}
169
171 const InterpFrame *Frame,
172 const CallExpr *Call) {
173 unsigned Depth = S.Current->getDepth();
174 auto isStdCall = [](const FunctionDecl *F) -> bool {
175 return F && F->isInStdNamespace() && F->getIdentifier() &&
176 F->getIdentifier()->isStr("is_constant_evaluated");
177 };
178 const InterpFrame *Caller = Frame->Caller;
179 // The current frame is the one for __builtin_is_constant_evaluated.
180 // The one above that, potentially the one for std::is_constant_evaluated().
182 S.getEvalStatus().Diag &&
183 (Depth == 0 || (Depth == 1 && isStdCall(Frame->getCallee())))) {
184 if (Caller && isStdCall(Frame->getCallee())) {
185 const Expr *E = Caller->getExpr(Caller->getRetPC());
186 S.report(E->getExprLoc(),
187 diag::warn_is_constant_evaluated_always_true_constexpr)
188 << "std::is_constant_evaluated" << E->getSourceRange();
189 } else {
190 S.report(Call->getExprLoc(),
191 diag::warn_is_constant_evaluated_always_true_constexpr)
192 << "__builtin_is_constant_evaluated" << Call->getSourceRange();
193 }
194 }
195
197 return true;
198}
199
200// __builtin_assume(int)
202 const InterpFrame *Frame,
203 const CallExpr *Call) {
204 assert(Call->getNumArgs() == 1);
205 discard(S.Stk, *S.getContext().classify(Call->getArg(0)));
206 return true;
207}
208
210 const InterpFrame *Frame,
211 const CallExpr *Call, unsigned ID) {
212 uint64_t Limit = ~static_cast<uint64_t>(0);
213 if (ID == Builtin::BIstrncmp || ID == Builtin::BI__builtin_strncmp ||
214 ID == Builtin::BIwcsncmp || ID == Builtin::BI__builtin_wcsncmp)
215 Limit = popToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)))
216 .getZExtValue();
217
218 const Pointer &B = S.Stk.pop<Pointer>();
219 const Pointer &A = S.Stk.pop<Pointer>();
220 if (ID == Builtin::BIstrcmp || ID == Builtin::BIstrncmp ||
221 ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp)
222 diagnoseNonConstexprBuiltin(S, OpPC, ID);
223
224 if (Limit == 0) {
225 pushInteger(S, 0, Call->getType());
226 return true;
227 }
228
229 if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
230 return false;
231
232 if (A.isDummy() || B.isDummy())
233 return false;
234 if (!A.isBlockPointer() || !B.isBlockPointer())
235 return false;
236
237 bool IsWide = ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp ||
238 ID == Builtin::BI__builtin_wcscmp ||
239 ID == Builtin::BI__builtin_wcsncmp;
240 assert(A.getFieldDesc()->isPrimitiveArray());
241 assert(B.getFieldDesc()->isPrimitiveArray());
242
243 // Different element types shouldn't happen, but with casts they can.
245 return false;
246
247 PrimType ElemT = *S.getContext().classify(getElemType(A));
248
249 auto returnResult = [&](int V) -> bool {
250 pushInteger(S, V, Call->getType());
251 return true;
252 };
253
254 unsigned IndexA = A.getIndex();
255 unsigned IndexB = B.getIndex();
256 uint64_t Steps = 0;
257 for (;; ++IndexA, ++IndexB, ++Steps) {
258
259 if (Steps >= Limit)
260 break;
261 const Pointer &PA = A.atIndex(IndexA);
262 const Pointer &PB = B.atIndex(IndexB);
263 if (!CheckRange(S, OpPC, PA, AK_Read) ||
264 !CheckRange(S, OpPC, PB, AK_Read)) {
265 return false;
266 }
267
268 if (IsWide) {
269 INT_TYPE_SWITCH(ElemT, {
270 T CA = PA.deref<T>();
271 T CB = PB.deref<T>();
272 if (CA > CB)
273 return returnResult(1);
274 if (CA < CB)
275 return returnResult(-1);
276 if (CA.isZero() || CB.isZero())
277 return returnResult(0);
278 });
279 continue;
280 }
281
282 uint8_t CA = PA.deref<uint8_t>();
283 uint8_t CB = PB.deref<uint8_t>();
284
285 if (CA > CB)
286 return returnResult(1);
287 if (CA < CB)
288 return returnResult(-1);
289 if (CA == 0 || CB == 0)
290 return returnResult(0);
291 }
292
293 return returnResult(0);
294}
295
297 const InterpFrame *Frame,
298 const CallExpr *Call, unsigned ID) {
299 const Pointer &StrPtr = S.Stk.pop<Pointer>();
300
301 if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
302 diagnoseNonConstexprBuiltin(S, OpPC, ID);
303
304 if (!CheckArray(S, OpPC, StrPtr))
305 return false;
306
307 if (!CheckLive(S, OpPC, StrPtr, AK_Read))
308 return false;
309
310 if (!CheckDummy(S, OpPC, StrPtr.block(), AK_Read))
311 return false;
312
313 if (!StrPtr.getFieldDesc()->isPrimitiveArray())
314 return false;
315
316 assert(StrPtr.getFieldDesc()->isPrimitiveArray());
317 unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();
318
319 if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
320 [[maybe_unused]] const ASTContext &AC = S.getASTContext();
321 assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
322 }
323
324 size_t Len = 0;
325 for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
326 const Pointer &ElemPtr = StrPtr.atIndex(I);
327
328 if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
329 return false;
330
331 uint32_t Val;
332 switch (ElemSize) {
333 case 1:
334 Val = ElemPtr.deref<uint8_t>();
335 break;
336 case 2:
337 Val = ElemPtr.deref<uint16_t>();
338 break;
339 case 4:
340 Val = ElemPtr.deref<uint32_t>();
341 break;
342 default:
343 llvm_unreachable("Unsupported char size");
344 }
345 if (Val == 0)
346 break;
347 }
348
349 pushInteger(S, Len, Call->getType());
350
351 return true;
352}
353
355 const InterpFrame *Frame, const CallExpr *Call,
356 bool Signaling) {
357 const Pointer &Arg = S.Stk.pop<Pointer>();
358
359 if (!CheckLoad(S, OpPC, Arg))
360 return false;
361
362 assert(Arg.getFieldDesc()->isPrimitiveArray());
363
364 // Convert the given string to an integer using StringRef's API.
365 llvm::APInt Fill;
366 std::string Str;
367 assert(Arg.getNumElems() >= 1);
368 for (unsigned I = 0;; ++I) {
369 const Pointer &Elem = Arg.atIndex(I);
370
371 if (!CheckLoad(S, OpPC, Elem))
372 return false;
373
374 if (Elem.deref<int8_t>() == 0)
375 break;
376
377 Str += Elem.deref<char>();
378 }
379
380 // Treat empty strings as if they were zero.
381 if (Str.empty())
382 Fill = llvm::APInt(32, 0);
383 else if (StringRef(Str).getAsInteger(0, Fill))
384 return false;
385
386 const llvm::fltSemantics &TargetSemantics =
388 Call->getDirectCallee()->getReturnType());
389
390 Floating Result = S.allocFloat(TargetSemantics);
392 if (Signaling)
393 Result.copy(
394 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
395 else
396 Result.copy(
397 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
398 } else {
399 // Prior to IEEE 754-2008, architectures were allowed to choose whether
400 // the first bit of their significand was set for qNaN or sNaN. MIPS chose
401 // a different encoding to what became a standard in 2008, and for pre-
402 // 2008 revisions, MIPS interpreted sNaN-2008 as qNan and qNaN-2008 as
403 // sNaN. This is now known as "legacy NaN" encoding.
404 if (Signaling)
405 Result.copy(
406 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
407 else
408 Result.copy(
409 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
410 }
411
413 return true;
414}
415
417 const InterpFrame *Frame,
418 const CallExpr *Call) {
419 const llvm::fltSemantics &TargetSemantics =
421 Call->getDirectCallee()->getReturnType());
422
423 Floating Result = S.allocFloat(TargetSemantics);
424 Result.copy(APFloat::getInf(TargetSemantics));
426 return true;
427}
428
430 const InterpFrame *Frame) {
431 const Floating &Arg2 = S.Stk.pop<Floating>();
432 const Floating &Arg1 = S.Stk.pop<Floating>();
433 Floating Result = S.allocFloat(Arg1.getSemantics());
434
435 APFloat Copy = Arg1.getAPFloat();
436 Copy.copySign(Arg2.getAPFloat());
437 Result.copy(Copy);
439
440 return true;
441}
442
444 const InterpFrame *Frame, bool IsNumBuiltin) {
445 const Floating &RHS = S.Stk.pop<Floating>();
446 const Floating &LHS = S.Stk.pop<Floating>();
447 Floating Result = S.allocFloat(LHS.getSemantics());
448
449 if (IsNumBuiltin)
450 Result.copy(llvm::minimumnum(LHS.getAPFloat(), RHS.getAPFloat()));
451 else
452 Result.copy(minnum(LHS.getAPFloat(), RHS.getAPFloat()));
454 return true;
455}
456
458 const InterpFrame *Frame, bool IsNumBuiltin) {
459 const Floating &RHS = S.Stk.pop<Floating>();
460 const Floating &LHS = S.Stk.pop<Floating>();
461 Floating Result = S.allocFloat(LHS.getSemantics());
462
463 if (IsNumBuiltin)
464 Result.copy(llvm::maximumnum(LHS.getAPFloat(), RHS.getAPFloat()));
465 else
466 Result.copy(maxnum(LHS.getAPFloat(), RHS.getAPFloat()));
468 return true;
469}
470
471/// Defined as __builtin_isnan(...), to accommodate the fact that it can
472/// take a float, double, long double, etc.
473/// But for us, that's all a Floating anyway.
475 const InterpFrame *Frame,
476 const CallExpr *Call) {
477 const Floating &Arg = S.Stk.pop<Floating>();
478
479 pushInteger(S, Arg.isNan(), Call->getType());
480 return true;
481}
482
484 const InterpFrame *Frame,
485 const CallExpr *Call) {
486 const Floating &Arg = S.Stk.pop<Floating>();
487
488 pushInteger(S, Arg.isSignaling(), Call->getType());
489 return true;
490}
491
493 const InterpFrame *Frame, bool CheckSign,
494 const CallExpr *Call) {
495 const Floating &Arg = S.Stk.pop<Floating>();
496 APFloat F = Arg.getAPFloat();
497 bool IsInf = F.isInfinity();
498
499 if (CheckSign)
500 pushInteger(S, IsInf ? (F.isNegative() ? -1 : 1) : 0, Call->getType());
501 else
502 pushInteger(S, IsInf, Call->getType());
503 return true;
504}
505
507 const InterpFrame *Frame,
508 const CallExpr *Call) {
509 const Floating &Arg = S.Stk.pop<Floating>();
510
511 pushInteger(S, Arg.isFinite(), Call->getType());
512 return true;
513}
514
516 const InterpFrame *Frame,
517 const CallExpr *Call) {
518 const Floating &Arg = S.Stk.pop<Floating>();
519
520 pushInteger(S, Arg.isNormal(), Call->getType());
521 return true;
522}
523
525 const InterpFrame *Frame,
526 const CallExpr *Call) {
527 const Floating &Arg = S.Stk.pop<Floating>();
528
529 pushInteger(S, Arg.isDenormal(), Call->getType());
530 return true;
531}
532
534 const InterpFrame *Frame,
535 const CallExpr *Call) {
536 const Floating &Arg = S.Stk.pop<Floating>();
537
538 pushInteger(S, Arg.isZero(), Call->getType());
539 return true;
540}
541
543 const InterpFrame *Frame,
544 const CallExpr *Call) {
545 const Floating &Arg = S.Stk.pop<Floating>();
546
547 pushInteger(S, Arg.isNegative(), Call->getType());
548 return true;
549}
550
552 const CallExpr *Call, unsigned ID) {
553 const Floating &RHS = S.Stk.pop<Floating>();
554 const Floating &LHS = S.Stk.pop<Floating>();
555
557 S,
558 [&] {
559 switch (ID) {
560 case Builtin::BI__builtin_isgreater:
561 return LHS > RHS;
562 case Builtin::BI__builtin_isgreaterequal:
563 return LHS >= RHS;
564 case Builtin::BI__builtin_isless:
565 return LHS < RHS;
566 case Builtin::BI__builtin_islessequal:
567 return LHS <= RHS;
568 case Builtin::BI__builtin_islessgreater: {
569 ComparisonCategoryResult Cmp = LHS.compare(RHS);
570 return Cmp == ComparisonCategoryResult::Less ||
572 }
573 case Builtin::BI__builtin_isunordered:
575 default:
576 llvm_unreachable("Unexpected builtin ID: Should be a floating point "
577 "comparison function");
578 }
579 }(),
580 Call->getType());
581 return true;
582}
583
584/// First parameter to __builtin_isfpclass is the floating value, the
585/// second one is an integral value.
587 const InterpFrame *Frame,
588 const CallExpr *Call) {
589 APSInt FPClassArg = popToAPSInt(S, Call->getArg(1));
590 const Floating &F = S.Stk.pop<Floating>();
591
592 int32_t Result = static_cast<int32_t>(
593 (F.classify() & std::move(FPClassArg)).getZExtValue());
594 pushInteger(S, Result, Call->getType());
595
596 return true;
597}
598
599/// Five int values followed by one floating value.
600/// __builtin_fpclassify(int, int, int, int, int, float)
602 const InterpFrame *Frame,
603 const CallExpr *Call) {
604 const Floating &Val = S.Stk.pop<Floating>();
605
606 PrimType IntT = *S.getContext().classify(Call->getArg(0));
607 APSInt Values[5];
608 for (unsigned I = 0; I != 5; ++I)
609 Values[4 - I] = popToAPSInt(S.Stk, IntT);
610
611 unsigned Index;
612 switch (Val.getCategory()) {
613 case APFloat::fcNaN:
614 Index = 0;
615 break;
616 case APFloat::fcInfinity:
617 Index = 1;
618 break;
619 case APFloat::fcNormal:
620 Index = Val.isDenormal() ? 3 : 2;
621 break;
622 case APFloat::fcZero:
623 Index = 4;
624 break;
625 }
626
627 // The last argument is first on the stack.
628 assert(Index <= 4);
629
630 pushInteger(S, Values[Index], Call->getType());
631 return true;
632}
633
634static inline Floating abs(InterpState &S, const Floating &In) {
635 if (!In.isNegative())
636 return In;
637
638 Floating Output = S.allocFloat(In.getSemantics());
639 APFloat New = In.getAPFloat();
640 New.changeSign();
641 Output.copy(New);
642 return Output;
643}
644
645// The C standard says "fabs raises no floating-point exceptions,
646// even if x is a signaling NaN. The returned value is independent of
647// the current rounding direction mode." Therefore constant folding can
648// proceed without regard to the floating point settings.
649// Reference, WG14 N2478 F.10.4.3
651 const InterpFrame *Frame) {
652 const Floating &Val = S.Stk.pop<Floating>();
653 S.Stk.push<Floating>(abs(S, Val));
654 return true;
655}
656
658 const InterpFrame *Frame,
659 const CallExpr *Call) {
660 APSInt Val = popToAPSInt(S, Call->getArg(0));
661 if (Val ==
662 APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
663 return false;
664 if (Val.isNegative())
665 Val.negate();
666 pushInteger(S, Val, Call->getType());
667 return true;
668}
669
671 const InterpFrame *Frame,
672 const CallExpr *Call) {
673 APSInt Val;
674 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
675 const Pointer &Arg = S.Stk.pop<Pointer>();
676 Val = convertBoolVectorToInt(Arg);
677 } else {
678 Val = popToAPSInt(S, Call->getArg(0));
679 }
680 pushInteger(S, Val.popcount(), Call->getType());
681 return true;
682}
683
685 const InterpFrame *Frame,
686 const CallExpr *Call) {
687 // This is an unevaluated call, so there are no arguments on the stack.
688 assert(Call->getNumArgs() == 1);
689 const Expr *Arg = Call->getArg(0);
690
691 GCCTypeClass ResultClass =
693 int32_t ReturnVal = static_cast<int32_t>(ResultClass);
694 pushInteger(S, ReturnVal, Call->getType());
695 return true;
696}
697
698// __builtin_expect(long, long)
699// __builtin_expect_with_probability(long, long, double)
701 const InterpFrame *Frame,
702 const CallExpr *Call) {
703 // The return value is simply the value of the first parameter.
704 // We ignore the probability.
705 unsigned NumArgs = Call->getNumArgs();
706 assert(NumArgs == 2 || NumArgs == 3);
707
708 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
709 if (NumArgs == 3)
710 S.Stk.discard<Floating>();
711 discard(S.Stk, ArgT);
712
713 APSInt Val = popToAPSInt(S.Stk, ArgT);
714 pushInteger(S, Val, Call->getType());
715 return true;
716}
717
719 const InterpFrame *Frame,
720 const CallExpr *Call) {
721#ifndef NDEBUG
722 assert(Call->getArg(0)->isLValue());
723 PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
724 assert(PtrT == PT_Ptr &&
725 "Unsupported pointer type passed to __builtin_addressof()");
726#endif
727 return true;
728}
729
731 const InterpFrame *Frame,
732 const CallExpr *Call) {
733 return Call->getDirectCallee()->isConstexpr();
734}
735
737 const InterpFrame *Frame,
738 const CallExpr *Call) {
739 APSInt Arg = popToAPSInt(S, Call->getArg(0));
740
742 Arg.getZExtValue());
743 pushInteger(S, Result, Call->getType());
744 return true;
745}
746
747// Two integral values followed by a pointer (lhs, rhs, resultOut)
749 const CallExpr *Call,
750 unsigned BuiltinOp) {
751 const Pointer &ResultPtr = S.Stk.pop<Pointer>();
752 if (ResultPtr.isDummy() || !ResultPtr.isBlockPointer())
753 return false;
754
755 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
756 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
757 APSInt RHS = popToAPSInt(S.Stk, RHST);
758 APSInt LHS = popToAPSInt(S.Stk, LHST);
759 QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
760 PrimType ResultT = *S.getContext().classify(ResultType);
761 bool Overflow;
762
764 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
765 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
766 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
767 bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
769 bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
771 uint64_t LHSSize = LHS.getBitWidth();
772 uint64_t RHSSize = RHS.getBitWidth();
773 uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
774 uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);
775
776 // Add an additional bit if the signedness isn't uniformly agreed to. We
777 // could do this ONLY if there is a signed and an unsigned that both have
778 // MaxBits, but the code to check that is pretty nasty. The issue will be
779 // caught in the shrink-to-result later anyway.
780 if (IsSigned && !AllSigned)
781 ++MaxBits;
782
783 LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
784 RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
785 Result = APSInt(MaxBits, !IsSigned);
786 }
787
788 // Find largest int.
789 switch (BuiltinOp) {
790 default:
791 llvm_unreachable("Invalid value for BuiltinOp");
792 case Builtin::BI__builtin_add_overflow:
793 case Builtin::BI__builtin_sadd_overflow:
794 case Builtin::BI__builtin_saddl_overflow:
795 case Builtin::BI__builtin_saddll_overflow:
796 case Builtin::BI__builtin_uadd_overflow:
797 case Builtin::BI__builtin_uaddl_overflow:
798 case Builtin::BI__builtin_uaddll_overflow:
799 Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
800 : LHS.uadd_ov(RHS, Overflow);
801 break;
802 case Builtin::BI__builtin_sub_overflow:
803 case Builtin::BI__builtin_ssub_overflow:
804 case Builtin::BI__builtin_ssubl_overflow:
805 case Builtin::BI__builtin_ssubll_overflow:
806 case Builtin::BI__builtin_usub_overflow:
807 case Builtin::BI__builtin_usubl_overflow:
808 case Builtin::BI__builtin_usubll_overflow:
809 Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
810 : LHS.usub_ov(RHS, Overflow);
811 break;
812 case Builtin::BI__builtin_mul_overflow:
813 case Builtin::BI__builtin_smul_overflow:
814 case Builtin::BI__builtin_smull_overflow:
815 case Builtin::BI__builtin_smulll_overflow:
816 case Builtin::BI__builtin_umul_overflow:
817 case Builtin::BI__builtin_umull_overflow:
818 case Builtin::BI__builtin_umulll_overflow:
819 Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
820 : LHS.umul_ov(RHS, Overflow);
821 break;
822 }
823
824 // In the case where multiple sizes are allowed, truncate and see if
825 // the values are the same.
826 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
827 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
828 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
829 // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
830 // since it will give us the behavior of a TruncOrSelf in the case where
831 // its parameter <= its size. We previously set Result to be at least the
832 // type-size of the result, so getTypeSize(ResultType) <= Resu
833 APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
834 Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());
835
836 if (!APSInt::isSameValue(Temp, Result))
837 Overflow = true;
838 Result = std::move(Temp);
839 }
840
841 // Write Result to ResultPtr and put Overflow on the stack.
842 assignInteger(S, ResultPtr, ResultT, Result);
843 if (ResultPtr.canBeInitialized())
844 ResultPtr.initialize();
845
846 assert(Call->getDirectCallee()->getReturnType()->isBooleanType());
847 S.Stk.push<Boolean>(Overflow);
848 return true;
849}
850
851/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
853 const InterpFrame *Frame,
854 const CallExpr *Call, unsigned BuiltinOp) {
855 const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
856 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
857 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
858 APSInt CarryIn = popToAPSInt(S.Stk, LHST);
859 APSInt RHS = popToAPSInt(S.Stk, RHST);
860 APSInt LHS = popToAPSInt(S.Stk, LHST);
861
862 if (CarryOutPtr.isDummy() || !CarryOutPtr.isBlockPointer())
863 return false;
864
865 APSInt CarryOut;
866
868 // Copy the number of bits and sign.
869 Result = LHS;
870 CarryOut = LHS;
871
872 bool FirstOverflowed = false;
873 bool SecondOverflowed = false;
874 switch (BuiltinOp) {
875 default:
876 llvm_unreachable("Invalid value for BuiltinOp");
877 case Builtin::BI__builtin_addcb:
878 case Builtin::BI__builtin_addcs:
879 case Builtin::BI__builtin_addc:
880 case Builtin::BI__builtin_addcl:
881 case Builtin::BI__builtin_addcll:
882 Result =
883 LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
884 break;
885 case Builtin::BI__builtin_subcb:
886 case Builtin::BI__builtin_subcs:
887 case Builtin::BI__builtin_subc:
888 case Builtin::BI__builtin_subcl:
889 case Builtin::BI__builtin_subcll:
890 Result =
891 LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
892 break;
893 }
894 // It is possible for both overflows to happen but CGBuiltin uses an OR so
895 // this is consistent.
896 CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);
897
898 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
899 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
900 assignInteger(S, CarryOutPtr, CarryOutT, CarryOut);
901 CarryOutPtr.initialize();
902
903 assert(Call->getType() == Call->getArg(0)->getType());
904 pushInteger(S, Result, Call->getType());
905 return true;
906}
907
909 const InterpFrame *Frame, const CallExpr *Call,
910 unsigned BuiltinOp) {
911
912 std::optional<APSInt> Fallback;
913 if (BuiltinOp == Builtin::BI__builtin_clzg && Call->getNumArgs() == 2)
914 Fallback = popToAPSInt(S, Call->getArg(1));
915
916 APSInt Val;
917 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
918 const Pointer &Arg = S.Stk.pop<Pointer>();
919 Val = convertBoolVectorToInt(Arg);
920 } else {
921 Val = popToAPSInt(S, Call->getArg(0));
922 }
923
924 // When the argument is 0, the result of GCC builtins is undefined, whereas
925 // for Microsoft intrinsics, the result is the bit-width of the argument.
926 bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
927 BuiltinOp != Builtin::BI__lzcnt &&
928 BuiltinOp != Builtin::BI__lzcnt64;
929
930 if (Val == 0) {
931 if (Fallback) {
932 pushInteger(S, *Fallback, Call->getType());
933 return true;
934 }
935
936 if (ZeroIsUndefined)
937 return false;
938 }
939
940 pushInteger(S, Val.countl_zero(), Call->getType());
941 return true;
942}
943
945 const InterpFrame *Frame, const CallExpr *Call,
946 unsigned BuiltinID) {
947 std::optional<APSInt> Fallback;
948 if (BuiltinID == Builtin::BI__builtin_ctzg && Call->getNumArgs() == 2)
949 Fallback = popToAPSInt(S, Call->getArg(1));
950
951 APSInt Val;
952 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
953 const Pointer &Arg = S.Stk.pop<Pointer>();
954 Val = convertBoolVectorToInt(Arg);
955 } else {
956 Val = popToAPSInt(S, Call->getArg(0));
957 }
958
959 if (Val == 0) {
960 if (Fallback) {
961 pushInteger(S, *Fallback, Call->getType());
962 return true;
963 }
964 return false;
965 }
966
967 pushInteger(S, Val.countr_zero(), Call->getType());
968 return true;
969}
970
972 const InterpFrame *Frame,
973 const CallExpr *Call) {
974 const APSInt &Val = popToAPSInt(S, Call->getArg(0));
975 assert(Val.getActiveBits() <= 64);
976
977 pushInteger(S, Val.byteSwap(), Call->getType());
978 return true;
979}
980
981/// bool __atomic_always_lock_free(size_t, void const volatile*)
982/// bool __atomic_is_lock_free(size_t, void const volatile*)
984 const InterpFrame *Frame,
985 const CallExpr *Call,
986 unsigned BuiltinOp) {
987 auto returnBool = [&S](bool Value) -> bool {
988 S.Stk.push<Boolean>(Value);
989 return true;
990 };
991
992 const Pointer &Ptr = S.Stk.pop<Pointer>();
993 const APSInt &SizeVal = popToAPSInt(S, Call->getArg(0));
994
995 // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
996 // of two less than or equal to the maximum inline atomic width, we know it
997 // is lock-free. If the size isn't a power of two, or greater than the
998 // maximum alignment where we promote atomics, we know it is not lock-free
999 // (at least not in the sense of atomic_is_lock_free). Otherwise,
1000 // the answer can only be determined at runtime; for example, 16-byte
1001 // atomics have lock-free implementations on some, but not all,
1002 // x86-64 processors.
1003
1004 // Check power-of-two.
1005 CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
1006 if (Size.isPowerOfTwo()) {
1007 // Check against inlining width.
1008 unsigned InlineWidthBits =
1010 if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
1011
1012 // OK, we will inline appropriately-aligned operations of this size,
1013 // and _Atomic(T) is appropriately-aligned.
1014 if (Size == CharUnits::One())
1015 return returnBool(true);
1016
1017 // Same for null pointers.
1018 assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
1019 if (Ptr.isZero())
1020 return returnBool(true);
1021
1022 if (Ptr.isIntegralPointer()) {
1023 uint64_t IntVal = Ptr.getIntegerRepresentation();
1024 if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
1025 return returnBool(true);
1026 }
1027
1028 const Expr *PtrArg = Call->getArg(1);
1029 // Otherwise, check if the type's alignment against Size.
1030 if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
1031 // Drop the potential implicit-cast to 'const volatile void*', getting
1032 // the underlying type.
1033 if (ICE->getCastKind() == CK_BitCast)
1034 PtrArg = ICE->getSubExpr();
1035 }
1036
1037 if (const auto *PtrTy = PtrArg->getType()->getAs<PointerType>()) {
1038 QualType PointeeType = PtrTy->getPointeeType();
1039 if (!PointeeType->isIncompleteType() &&
1040 S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
1041 // OK, we will inline operations on this object.
1042 return returnBool(true);
1043 }
1044 }
1045 }
1046 }
1047
1048 if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
1049 return returnBool(false);
1050
1051 return false;
1052}
1053
1054/// bool __c11_atomic_is_lock_free(size_t)
1056 CodePtr OpPC,
1057 const InterpFrame *Frame,
1058 const CallExpr *Call) {
1059 const APSInt &SizeVal = popToAPSInt(S, Call->getArg(0));
1060
1061 CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
1062 if (Size.isPowerOfTwo()) {
1063 // Check against inlining width.
1064 unsigned InlineWidthBits =
1066 if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
1067 S.Stk.push<Boolean>(true);
1068 return true;
1069 }
1070 }
1071
1072 return false; // returnBool(false);
1073}
1074
1075/// __builtin_complex(Float A, float B);
1077 const InterpFrame *Frame,
1078 const CallExpr *Call) {
1079 const Floating &Arg2 = S.Stk.pop<Floating>();
1080 const Floating &Arg1 = S.Stk.pop<Floating>();
1081 Pointer &Result = S.Stk.peek<Pointer>();
1082
1083 Result.elem<Floating>(0) = Arg1;
1084 Result.elem<Floating>(1) = Arg2;
1085 Result.initializeAllElements();
1086
1087 return true;
1088}
1089
1090/// __builtin_is_aligned()
1091/// __builtin_align_up()
1092/// __builtin_align_down()
1093/// The first parameter is either an integer or a pointer.
1094/// The second parameter is the requested alignment as an integer.
1096 const InterpFrame *Frame,
1097 const CallExpr *Call,
1098 unsigned BuiltinOp) {
1099 const APSInt &Alignment = popToAPSInt(S, Call->getArg(1));
1100
1101 if (Alignment < 0 || !Alignment.isPowerOf2()) {
1102 S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
1103 return false;
1104 }
1105 unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
1106 APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
1107 if (APSInt::compareValues(Alignment, MaxValue) > 0) {
1108 S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
1109 << MaxValue << Call->getArg(0)->getType() << Alignment;
1110 return false;
1111 }
1112
1113 // The first parameter is either an integer or a pointer.
1114 PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));
1115
1116 if (isIntegralType(FirstArgT)) {
1117 const APSInt &Src = popToAPSInt(S.Stk, FirstArgT);
1118 APInt AlignMinusOne = Alignment.extOrTrunc(Src.getBitWidth()) - 1;
1119 if (BuiltinOp == Builtin::BI__builtin_align_up) {
1120 APSInt AlignedVal =
1121 APSInt((Src + AlignMinusOne) & ~AlignMinusOne, Src.isUnsigned());
1122 pushInteger(S, AlignedVal, Call->getType());
1123 } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
1124 APSInt AlignedVal = APSInt(Src & ~AlignMinusOne, Src.isUnsigned());
1125 pushInteger(S, AlignedVal, Call->getType());
1126 } else {
1127 assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
1128 S.Stk.push<Boolean>((Src & AlignMinusOne) == 0);
1129 }
1130 return true;
1131 }
1132 assert(FirstArgT == PT_Ptr);
1133 const Pointer &Ptr = S.Stk.pop<Pointer>();
1134 if (!Ptr.isBlockPointer())
1135 return false;
1136
1137 unsigned PtrOffset = Ptr.getIndex();
1138 CharUnits BaseAlignment =
1140 CharUnits PtrAlign =
1141 BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));
1142
1143 if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
1144 if (PtrAlign.getQuantity() >= Alignment) {
1145 S.Stk.push<Boolean>(true);
1146 return true;
1147 }
1148 // If the alignment is not known to be sufficient, some cases could still
1149 // be aligned at run time. However, if the requested alignment is less or
1150 // equal to the base alignment and the offset is not aligned, we know that
1151 // the run-time value can never be aligned.
1152 if (BaseAlignment.getQuantity() >= Alignment &&
1153 PtrAlign.getQuantity() < Alignment) {
1154 S.Stk.push<Boolean>(false);
1155 return true;
1156 }
1157
1158 S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
1159 << Alignment;
1160 return false;
1161 }
1162
1163 assert(BuiltinOp == Builtin::BI__builtin_align_down ||
1164 BuiltinOp == Builtin::BI__builtin_align_up);
1165
1166 // For align_up/align_down, we can return the same value if the alignment
1167 // is known to be greater or equal to the requested value.
1168 if (PtrAlign.getQuantity() >= Alignment) {
1169 S.Stk.push<Pointer>(Ptr);
1170 return true;
1171 }
1172
1173 // The alignment could be greater than the minimum at run-time, so we cannot
1174 // infer much about the resulting pointer value. One case is possible:
1175 // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
1176 // can infer the correct index if the requested alignment is smaller than
1177 // the base alignment so we can perform the computation on the offset.
1178 if (BaseAlignment.getQuantity() >= Alignment) {
1179 assert(Alignment.getBitWidth() <= 64 &&
1180 "Cannot handle > 64-bit address-space");
1181 uint64_t Alignment64 = Alignment.getZExtValue();
1182 CharUnits NewOffset =
1183 CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
1184 ? llvm::alignDown(PtrOffset, Alignment64)
1185 : llvm::alignTo(PtrOffset, Alignment64));
1186
1187 S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
1188 return true;
1189 }
1190
1191 // Otherwise, we cannot constant-evaluate the result.
1192 S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
1193 return false;
1194}
1195
1196/// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
1198 const InterpFrame *Frame,
1199 const CallExpr *Call) {
1200 assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);
1201
1202 std::optional<APSInt> ExtraOffset;
1203 if (Call->getNumArgs() == 3)
1204 ExtraOffset = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));
1205
1206 APSInt Alignment = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
1207 const Pointer &Ptr = S.Stk.pop<Pointer>();
1208
1209 CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());
1210
1211 // If there is a base object, then it must have the correct alignment.
1212 if (Ptr.isBlockPointer()) {
1213 CharUnits BaseAlignment;
1214 if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
1215 BaseAlignment = S.getASTContext().getDeclAlign(VD);
1216 else if (const auto *E = Ptr.getDeclDesc()->asExpr())
1217 BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);
1218
1219 if (BaseAlignment < Align) {
1220 S.CCEDiag(Call->getArg(0),
1221 diag::note_constexpr_baa_insufficient_alignment)
1222 << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
1223 return false;
1224 }
1225 }
1226
1227 APValue AV = Ptr.toAPValue(S.getASTContext());
1228 CharUnits AVOffset = AV.getLValueOffset();
1229 if (ExtraOffset)
1230 AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
1231 if (AVOffset.alignTo(Align) != AVOffset) {
1232 if (Ptr.isBlockPointer())
1233 S.CCEDiag(Call->getArg(0),
1234 diag::note_constexpr_baa_insufficient_alignment)
1235 << 1 << AVOffset.getQuantity() << Align.getQuantity();
1236 else
1237 S.CCEDiag(Call->getArg(0),
1238 diag::note_constexpr_baa_value_insufficient_alignment)
1239 << AVOffset.getQuantity() << Align.getQuantity();
1240 return false;
1241 }
1242
1243 S.Stk.push<Pointer>(Ptr);
1244 return true;
1245}
1246
1247/// (CarryIn, LHS, RHS, Result)
1249 CodePtr OpPC,
1250 const InterpFrame *Frame,
1251 const CallExpr *Call,
1252 unsigned BuiltinOp) {
1253 if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
1254 !Call->getArg(1)->getType()->isIntegerType() ||
1255 !Call->getArg(2)->getType()->isIntegerType())
1256 return false;
1257
1258 const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
1259
1260 APSInt RHS = popToAPSInt(S, Call->getArg(2));
1261 APSInt LHS = popToAPSInt(S, Call->getArg(1));
1262 APSInt CarryIn = popToAPSInt(S, Call->getArg(0));
1263
1264 bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
1265 BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;
1266
1267 unsigned BitWidth = LHS.getBitWidth();
1268 unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
1269 APInt ExResult =
1270 IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
1271 : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));
1272
1273 APInt Result = ExResult.extractBits(BitWidth, 0);
1274 APSInt CarryOut =
1275 APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);
1276
1277 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
1278 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
1279 assignInteger(S, CarryOutPtr, CarryOutT, APSInt(std::move(Result), true));
1280
1281 pushInteger(S, CarryOut, Call->getType());
1282
1283 return true;
1284}
1285
1287 CodePtr OpPC,
1288 const InterpFrame *Frame,
1289 const CallExpr *Call) {
1292 pushInteger(S, Layout.size().getQuantity(), Call->getType());
1293 return true;
1294}
1295
1296static bool
1298 const InterpFrame *Frame,
1299 const CallExpr *Call) {
1300 const auto &Ptr = S.Stk.pop<Pointer>();
1301 assert(Ptr.getFieldDesc()->isPrimitiveArray());
1302
1303 // This should be created for a StringLiteral, so should alway shold at least
1304 // one array element.
1305 assert(Ptr.getFieldDesc()->getNumElems() >= 1);
1306 StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
1307 uint64_t Result = getPointerAuthStableSipHash(R);
1308 pushInteger(S, Result, Call->getType());
1309 return true;
1310}
1311
1313 const InterpFrame *Frame,
1314 const CallExpr *Call) {
1315 const ASTContext &ASTCtx = S.getASTContext();
1316 uint64_t BitWidth = ASTCtx.getTypeSize(ASTCtx.getSizeType());
1317 auto Mode =
1318 ASTCtx.getLangOpts().AllocTokenMode.value_or(llvm::DefaultAllocTokenMode);
1319 uint64_t MaxTokens =
1320 ASTCtx.getLangOpts().AllocTokenMax.value_or(~0ULL >> (64 - BitWidth));
1321
1322 // We do not read any of the arguments; discard them.
1323 for (int I = Call->getNumArgs() - 1; I >= 0; --I)
1324 discard(S.Stk, *S.getContext().classify(Call->getArg(I)));
1325
1326 // Note: Type inference from a surrounding cast is not supported in
1327 // constexpr evaluation.
1328 QualType AllocType = infer_alloc::inferPossibleType(Call, ASTCtx, nullptr);
1329 if (AllocType.isNull()) {
1330 S.CCEDiag(Call,
1331 diag::note_constexpr_infer_alloc_token_type_inference_failed);
1332 return false;
1333 }
1334
1335 auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, ASTCtx);
1336 if (!ATMD) {
1337 S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_no_metadata);
1338 return false;
1339 }
1340
1341 auto MaybeToken = llvm::getAllocToken(Mode, *ATMD, MaxTokens);
1342 if (!MaybeToken) {
1343 S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_stateful_mode);
1344 return false;
1345 }
1346
1347 pushInteger(S, llvm::APInt(BitWidth, *MaybeToken), ASTCtx.getSizeType());
1348 return true;
1349}
1350
1352 const InterpFrame *Frame,
1353 const CallExpr *Call) {
1354 // A call to __operator_new is only valid within std::allocate<>::allocate.
1355 // Walk up the call stack to find the appropriate caller and get the
1356 // element type from it.
1357 auto [NewCall, ElemType] = S.getStdAllocatorCaller("allocate");
1358
1359 if (ElemType.isNull()) {
1360 S.FFDiag(Call, S.getLangOpts().CPlusPlus20
1361 ? diag::note_constexpr_new_untyped
1362 : diag::note_constexpr_new);
1363 return false;
1364 }
1365 assert(NewCall);
1366
1367 if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
1368 S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
1369 << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
1370 return false;
1371 }
1372
1373 // We only care about the first parameter (the size), so discard all the
1374 // others.
1375 {
1376 unsigned NumArgs = Call->getNumArgs();
1377 assert(NumArgs >= 1);
1378
1379 // The std::nothrow_t arg never gets put on the stack.
1380 if (Call->getArg(NumArgs - 1)->getType()->isNothrowT())
1381 --NumArgs;
1382 auto Args = ArrayRef(Call->getArgs(), Call->getNumArgs());
1383 // First arg is needed.
1384 Args = Args.drop_front();
1385
1386 // Discard the rest.
1387 for (const Expr *Arg : Args)
1388 discard(S.Stk, *S.getContext().classify(Arg));
1389 }
1390
1391 APSInt Bytes = popToAPSInt(S, Call->getArg(0));
1392 CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
1393 assert(!ElemSize.isZero());
1394 // Divide the number of bytes by sizeof(ElemType), so we get the number of
1395 // elements we should allocate.
1396 APInt NumElems, Remainder;
1397 APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
1398 APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
1399 if (Remainder != 0) {
1400 // This likely indicates a bug in the implementation of 'std::allocator'.
1401 S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
1402 << Bytes << APSInt(ElemSizeAP, true) << ElemType;
1403 return false;
1404 }
1405
1406 // NB: The same check we're using in CheckArraySize()
1407 if (NumElems.getActiveBits() >
1409 NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
1410 // FIXME: NoThrow check?
1411 const SourceInfo &Loc = S.Current->getSource(OpPC);
1412 S.FFDiag(Loc, diag::note_constexpr_new_too_large)
1413 << NumElems.getZExtValue();
1414 return false;
1415 }
1416
1417 if (!CheckArraySize(S, OpPC, NumElems.getZExtValue()))
1418 return false;
1419
1420 bool IsArray = NumElems.ugt(1);
1421 OptPrimType ElemT = S.getContext().classify(ElemType);
1422 DynamicAllocator &Allocator = S.getAllocator();
1423 if (ElemT) {
1424 Block *B =
1425 Allocator.allocate(NewCall, *ElemT, NumElems.getZExtValue(),
1427 assert(B);
1428 S.Stk.push<Pointer>(Pointer(B).atIndex(0));
1429 return true;
1430 }
1431
1432 assert(!ElemT);
1433
1434 // Composite arrays
1435 if (IsArray) {
1436 const Descriptor *Desc =
1437 S.P.createDescriptor(NewCall, ElemType.getTypePtr(), std::nullopt);
1438 Block *B =
1439 Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
1441 assert(B);
1442 S.Stk.push<Pointer>(Pointer(B).atIndex(0));
1443 return true;
1444 }
1445
1446 // Records. Still allocate them as single-element arrays.
1448 ElemType, NumElems, nullptr, ArraySizeModifier::Normal, 0);
1449
1450 const Descriptor *Desc = S.P.createDescriptor(NewCall, AllocType.getTypePtr(),
1452 Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
1454 assert(B);
1455 S.Stk.push<Pointer>(Pointer(B).atIndex(0).narrow());
1456 return true;
1457}
1458
1460 const InterpFrame *Frame,
1461 const CallExpr *Call) {
1462 const Expr *Source = nullptr;
1463 const Block *BlockToDelete = nullptr;
1464
1466 S.Stk.discard<Pointer>();
1467 return false;
1468 }
1469
1470 // This is permitted only within a call to std::allocator<T>::deallocate.
1471 if (!S.getStdAllocatorCaller("deallocate")) {
1472 S.FFDiag(Call);
1473 S.Stk.discard<Pointer>();
1474 return true;
1475 }
1476
1477 {
1478 const Pointer &Ptr = S.Stk.pop<Pointer>();
1479
1480 if (Ptr.isZero()) {
1481 S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
1482 return true;
1483 }
1484
1485 Source = Ptr.getDeclDesc()->asExpr();
1486 BlockToDelete = Ptr.block();
1487
1488 if (!BlockToDelete->isDynamic()) {
1489 S.FFDiag(Call, diag::note_constexpr_delete_not_heap_alloc)
1491 if (const auto *D = Ptr.getFieldDesc()->asDecl())
1492 S.Note(D->getLocation(), diag::note_declared_at);
1493 }
1494 }
1495 assert(BlockToDelete);
1496
1497 DynamicAllocator &Allocator = S.getAllocator();
1498 const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
1499 std::optional<DynamicAllocator::Form> AllocForm =
1500 Allocator.getAllocationForm(Source);
1501
1502 if (!Allocator.deallocate(Source, BlockToDelete, S)) {
1503 // Nothing has been deallocated, this must be a double-delete.
1504 const SourceInfo &Loc = S.Current->getSource(OpPC);
1505 S.FFDiag(Loc, diag::note_constexpr_double_delete);
1506 return false;
1507 }
1508 assert(AllocForm);
1509
1510 return CheckNewDeleteForms(
1511 S, OpPC, *AllocForm, DynamicAllocator::Form::Operator, BlockDesc, Source);
1512}
1513
// NOTE(review): the `static bool interp__builtin_...(` signature line
// (orig. line 1514) was dropped by the doc extraction, so this definition's
// name is not visible from here. The body is an identity operation: it pops
// the single Floating argument, pushes it back unchanged, and reports
// success. Presumably this implements a builtin that returns its float
// argument as-is -- confirm the name and intent against the full file.
1515 const InterpFrame *Frame,
1516 const CallExpr *Call) {
// Pop the one float argument and return it unmodified.
1517 const Floating &Arg0 = S.Stk.pop<Floating>();
1518 S.Stk.push<Floating>(Arg0);
1519 return true;
1520}
1521
1523 const CallExpr *Call, unsigned ID) {
1524 const Pointer &Arg = S.Stk.pop<Pointer>();
1525 assert(Arg.getFieldDesc()->isPrimitiveArray());
1526
1527 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1528 assert(Call->getType() == ElemType);
1529 PrimType ElemT = *S.getContext().classify(ElemType);
1530 unsigned NumElems = Arg.getNumElems();
1531
1533 T Result = Arg.elem<T>(0);
1534 unsigned BitWidth = Result.bitWidth();
1535 for (unsigned I = 1; I != NumElems; ++I) {
1536 T Elem = Arg.elem<T>(I);
1537 T PrevResult = Result;
1538
1539 if (ID == Builtin::BI__builtin_reduce_add) {
1540 if (T::add(Result, Elem, BitWidth, &Result)) {
1541 unsigned OverflowBits = BitWidth + 1;
1542 (void)handleOverflow(S, OpPC,
1543 (PrevResult.toAPSInt(OverflowBits) +
1544 Elem.toAPSInt(OverflowBits)));
1545 return false;
1546 }
1547 } else if (ID == Builtin::BI__builtin_reduce_mul) {
1548 if (T::mul(Result, Elem, BitWidth, &Result)) {
1549 unsigned OverflowBits = BitWidth * 2;
1550 (void)handleOverflow(S, OpPC,
1551 (PrevResult.toAPSInt(OverflowBits) *
1552 Elem.toAPSInt(OverflowBits)));
1553 return false;
1554 }
1555
1556 } else if (ID == Builtin::BI__builtin_reduce_and) {
1557 (void)T::bitAnd(Result, Elem, BitWidth, &Result);
1558 } else if (ID == Builtin::BI__builtin_reduce_or) {
1559 (void)T::bitOr(Result, Elem, BitWidth, &Result);
1560 } else if (ID == Builtin::BI__builtin_reduce_xor) {
1561 (void)T::bitXor(Result, Elem, BitWidth, &Result);
1562 } else if (ID == Builtin::BI__builtin_reduce_min) {
1563 if (Elem < Result)
1564 Result = Elem;
1565 } else if (ID == Builtin::BI__builtin_reduce_max) {
1566 if (Elem > Result)
1567 Result = Elem;
1568 } else {
1569 llvm_unreachable("Unhandled vector reduce builtin");
1570 }
1571 }
1572 pushInteger(S, Result.toAPSInt(), Call->getType());
1573 });
1574
1575 return true;
1576}
1577
1579 const InterpFrame *Frame,
1580 const CallExpr *Call,
1581 unsigned BuiltinID) {
1582 assert(Call->getNumArgs() == 1);
1583 QualType Ty = Call->getArg(0)->getType();
1584 if (Ty->isIntegerType()) {
1585 APSInt Val = popToAPSInt(S, Call->getArg(0));
1586 pushInteger(S, Val.abs(), Call->getType());
1587 return true;
1588 }
1589
1590 if (Ty->isFloatingType()) {
1591 Floating Val = S.Stk.pop<Floating>();
1592 Floating Result = abs(S, Val);
1593 S.Stk.push<Floating>(Result);
1594 return true;
1595 }
1596
1597 // Otherwise, the argument must be a vector.
1598 assert(Call->getArg(0)->getType()->isVectorType());
1599 const Pointer &Arg = S.Stk.pop<Pointer>();
1600 assert(Arg.getFieldDesc()->isPrimitiveArray());
1601 const Pointer &Dst = S.Stk.peek<Pointer>();
1602 assert(Dst.getFieldDesc()->isPrimitiveArray());
1603 assert(Arg.getFieldDesc()->getNumElems() ==
1604 Dst.getFieldDesc()->getNumElems());
1605
1606 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1607 PrimType ElemT = *S.getContext().classify(ElemType);
1608 unsigned NumElems = Arg.getNumElems();
1609 // we can either have a vector of integer or a vector of floating point
1610 for (unsigned I = 0; I != NumElems; ++I) {
1611 if (ElemType->isIntegerType()) {
1613 Dst.elem<T>(I) = T::from(static_cast<T>(
1614 APSInt(Arg.elem<T>(I).toAPSInt().abs(),
1616 });
1617 } else {
1618 Floating Val = Arg.elem<Floating>(I);
1619 Dst.elem<Floating>(I) = abs(S, Val);
1620 }
1621 }
1623
1624 return true;
1625}
1626
1627/// Can be called with an integer or vector as the first and only parameter.
1629 const InterpFrame *Frame,
1630 const CallExpr *Call,
1631 unsigned BuiltinID) {
1632 assert(Call->getNumArgs() == 1);
1633 if (Call->getArg(0)->getType()->isIntegerType()) {
1634 APSInt Val = popToAPSInt(S, Call->getArg(0));
1635
1636 if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) {
1637 pushInteger(S, Val.popcount(), Call->getType());
1638 } else {
1639 pushInteger(S, Val.reverseBits(), Call->getType());
1640 }
1641 return true;
1642 }
1643 // Otherwise, the argument must be a vector.
1644 assert(Call->getArg(0)->getType()->isVectorType());
1645 const Pointer &Arg = S.Stk.pop<Pointer>();
1646 assert(Arg.getFieldDesc()->isPrimitiveArray());
1647 const Pointer &Dst = S.Stk.peek<Pointer>();
1648 assert(Dst.getFieldDesc()->isPrimitiveArray());
1649 assert(Arg.getFieldDesc()->getNumElems() ==
1650 Dst.getFieldDesc()->getNumElems());
1651
1652 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1653 PrimType ElemT = *S.getContext().classify(ElemType);
1654 unsigned NumElems = Arg.getNumElems();
1655
1656 // FIXME: Reading from uninitialized vector elements?
1657 for (unsigned I = 0; I != NumElems; ++I) {
1659 if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) {
1660 Dst.elem<T>(I) = T::from(Arg.elem<T>(I).toAPSInt().popcount());
1661 } else {
1662 Dst.elem<T>(I) =
1663 T::from(Arg.elem<T>(I).toAPSInt().reverseBits().getZExtValue());
1664 }
1665 });
1666 }
1668
1669 return true;
1670}
1671
1672/// Can be called with an integer or vector as the first and only parameter.
1674 CodePtr OpPC,
1675 const InterpFrame *Frame,
1676 const CallExpr *Call,
1677 unsigned BuiltinID) {
1678 bool HasZeroArg = Call->getNumArgs() == 2;
1679 bool IsCTTZ = BuiltinID == Builtin::BI__builtin_elementwise_ctzg;
1680 assert(Call->getNumArgs() == 1 || HasZeroArg);
1681 if (Call->getArg(0)->getType()->isIntegerType()) {
1682 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
1683 APSInt Val = popToAPSInt(S.Stk, ArgT);
1684 std::optional<APSInt> ZeroVal;
1685 if (HasZeroArg) {
1686 ZeroVal = Val;
1687 Val = popToAPSInt(S.Stk, ArgT);
1688 }
1689
1690 if (Val.isZero()) {
1691 if (ZeroVal) {
1692 pushInteger(S, *ZeroVal, Call->getType());
1693 return true;
1694 }
1695 // If we haven't been provided the second argument, the result is
1696 // undefined
1697 S.FFDiag(S.Current->getSource(OpPC),
1698 diag::note_constexpr_countzeroes_zero)
1699 << /*IsTrailing=*/IsCTTZ;
1700 return false;
1701 }
1702
1703 if (BuiltinID == Builtin::BI__builtin_elementwise_clzg) {
1704 pushInteger(S, Val.countLeadingZeros(), Call->getType());
1705 } else {
1706 pushInteger(S, Val.countTrailingZeros(), Call->getType());
1707 }
1708 return true;
1709 }
1710 // Otherwise, the argument must be a vector.
1711 const ASTContext &ASTCtx = S.getASTContext();
1712 Pointer ZeroArg;
1713 if (HasZeroArg) {
1714 assert(Call->getArg(1)->getType()->isVectorType() &&
1715 ASTCtx.hasSameUnqualifiedType(Call->getArg(0)->getType(),
1716 Call->getArg(1)->getType()));
1717 (void)ASTCtx;
1718 ZeroArg = S.Stk.pop<Pointer>();
1719 assert(ZeroArg.getFieldDesc()->isPrimitiveArray());
1720 }
1721 assert(Call->getArg(0)->getType()->isVectorType());
1722 const Pointer &Arg = S.Stk.pop<Pointer>();
1723 assert(Arg.getFieldDesc()->isPrimitiveArray());
1724 const Pointer &Dst = S.Stk.peek<Pointer>();
1725 assert(Dst.getFieldDesc()->isPrimitiveArray());
1726 assert(Arg.getFieldDesc()->getNumElems() ==
1727 Dst.getFieldDesc()->getNumElems());
1728
1729 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1730 PrimType ElemT = *S.getContext().classify(ElemType);
1731 unsigned NumElems = Arg.getNumElems();
1732
1733 // FIXME: Reading from uninitialized vector elements?
1734 for (unsigned I = 0; I != NumElems; ++I) {
1736 APInt EltVal = Arg.atIndex(I).deref<T>().toAPSInt();
1737 if (EltVal.isZero()) {
1738 if (HasZeroArg) {
1739 Dst.atIndex(I).deref<T>() = ZeroArg.atIndex(I).deref<T>();
1740 } else {
1741 // If we haven't been provided the second argument, the result is
1742 // undefined
1743 S.FFDiag(S.Current->getSource(OpPC),
1744 diag::note_constexpr_countzeroes_zero)
1745 << /*IsTrailing=*/IsCTTZ;
1746 return false;
1747 }
1748 } else if (IsCTTZ) {
1749 Dst.atIndex(I).deref<T>() = T::from(EltVal.countTrailingZeros());
1750 } else {
1751 Dst.atIndex(I).deref<T>() = T::from(EltVal.countLeadingZeros());
1752 }
1753 Dst.atIndex(I).initialize();
1754 });
1755 }
1756
1757 return true;
1758}
1759
1761 const InterpFrame *Frame,
1762 const CallExpr *Call, unsigned ID) {
1763 assert(Call->getNumArgs() == 3);
1764 const ASTContext &ASTCtx = S.getASTContext();
1765 APSInt Size = popToAPSInt(S, Call->getArg(2));
1766 const Pointer SrcPtr = S.Stk.pop<Pointer>();
1767 const Pointer DestPtr = S.Stk.pop<Pointer>();
1768
1769 assert(!Size.isSigned() && "memcpy and friends take an unsigned size");
1770
1771 if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
1772 diagnoseNonConstexprBuiltin(S, OpPC, ID);
1773
1774 bool Move =
1775 (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove ||
1776 ID == Builtin::BI__builtin_wmemmove || ID == Builtin::BIwmemmove);
1777 bool WChar = ID == Builtin::BIwmemcpy || ID == Builtin::BIwmemmove ||
1778 ID == Builtin::BI__builtin_wmemcpy ||
1779 ID == Builtin::BI__builtin_wmemmove;
1780
1781 // If the size is zero, we treat this as always being a valid no-op.
1782 if (Size.isZero()) {
1783 S.Stk.push<Pointer>(DestPtr);
1784 return true;
1785 }
1786
1787 if (SrcPtr.isZero() || DestPtr.isZero()) {
1788 Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
1789 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
1790 << /*IsMove=*/Move << /*IsWchar=*/WChar << !SrcPtr.isZero()
1791 << DiagPtr.toDiagnosticString(ASTCtx);
1792 return false;
1793 }
1794
1795 // Diagnose integral src/dest pointers specially.
1796 if (SrcPtr.isIntegralPointer() || DestPtr.isIntegralPointer()) {
1797 std::string DiagVal = "(void *)";
1798 DiagVal += SrcPtr.isIntegralPointer()
1799 ? std::to_string(SrcPtr.getIntegerRepresentation())
1800 : std::to_string(DestPtr.getIntegerRepresentation());
1801 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
1802 << Move << WChar << DestPtr.isIntegralPointer() << DiagVal;
1803 return false;
1804 }
1805
1806 // Can't read from dummy pointers.
1807 if (DestPtr.isDummy() || SrcPtr.isDummy())
1808 return false;
1809
1810 if (DestPtr.getType()->isIncompleteType()) {
1811 S.FFDiag(S.Current->getSource(OpPC),
1812 diag::note_constexpr_memcpy_incomplete_type)
1813 << Move << DestPtr.getType();
1814 return false;
1815 }
1816 if (SrcPtr.getType()->isIncompleteType()) {
1817 S.FFDiag(S.Current->getSource(OpPC),
1818 diag::note_constexpr_memcpy_incomplete_type)
1819 << Move << SrcPtr.getType();
1820 return false;
1821 }
1822
1823 QualType DestElemType = getElemType(DestPtr);
1824 if (DestElemType->isIncompleteType()) {
1825 S.FFDiag(S.Current->getSource(OpPC),
1826 diag::note_constexpr_memcpy_incomplete_type)
1827 << Move << DestElemType;
1828 return false;
1829 }
1830
1831 size_t RemainingDestElems;
1832 if (DestPtr.getFieldDesc()->isArray()) {
1833 RemainingDestElems = DestPtr.isUnknownSizeArray()
1834 ? 0
1835 : (DestPtr.getNumElems() - DestPtr.getIndex());
1836 } else {
1837 RemainingDestElems = 1;
1838 }
1839 unsigned DestElemSize = ASTCtx.getTypeSizeInChars(DestElemType).getQuantity();
1840
1841 if (WChar) {
1842 uint64_t WCharSize =
1843 ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
1844 Size *= APSInt(APInt(Size.getBitWidth(), WCharSize, /*IsSigned=*/false),
1845 /*IsUnsigend=*/true);
1846 }
1847
1848 if (Size.urem(DestElemSize) != 0) {
1849 S.FFDiag(S.Current->getSource(OpPC),
1850 diag::note_constexpr_memcpy_unsupported)
1851 << Move << WChar << 0 << DestElemType << Size << DestElemSize;
1852 return false;
1853 }
1854
1855 QualType SrcElemType = getElemType(SrcPtr);
1856 size_t RemainingSrcElems;
1857 if (SrcPtr.getFieldDesc()->isArray()) {
1858 RemainingSrcElems = SrcPtr.isUnknownSizeArray()
1859 ? 0
1860 : (SrcPtr.getNumElems() - SrcPtr.getIndex());
1861 } else {
1862 RemainingSrcElems = 1;
1863 }
1864 unsigned SrcElemSize = ASTCtx.getTypeSizeInChars(SrcElemType).getQuantity();
1865
1866 if (!ASTCtx.hasSameUnqualifiedType(DestElemType, SrcElemType)) {
1867 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_type_pun)
1868 << Move << SrcElemType << DestElemType;
1869 return false;
1870 }
1871
1872 if (!DestElemType.isTriviallyCopyableType(ASTCtx)) {
1873 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_nontrivial)
1874 << Move << DestElemType;
1875 return false;
1876 }
1877
1878 // Check if we have enough elements to read from and write to.
1879 size_t RemainingDestBytes = RemainingDestElems * DestElemSize;
1880 size_t RemainingSrcBytes = RemainingSrcElems * SrcElemSize;
1881 if (Size.ugt(RemainingDestBytes) || Size.ugt(RemainingSrcBytes)) {
1882 APInt N = Size.udiv(DestElemSize);
1883 S.FFDiag(S.Current->getSource(OpPC),
1884 diag::note_constexpr_memcpy_unsupported)
1885 << Move << WChar << (Size.ugt(RemainingSrcBytes) ? 1 : 2)
1886 << DestElemType << toString(N, 10, /*Signed=*/false);
1887 return false;
1888 }
1889
1890 // Check for overlapping memory regions.
1891 if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
1892 // Remove base casts.
1893 Pointer SrcP = SrcPtr;
1894 while (SrcP.isBaseClass())
1895 SrcP = SrcP.getBase();
1896
1897 Pointer DestP = DestPtr;
1898 while (DestP.isBaseClass())
1899 DestP = DestP.getBase();
1900
1901 unsigned SrcIndex = SrcP.expand().getIndex() * SrcP.elemSize();
1902 unsigned DstIndex = DestP.expand().getIndex() * DestP.elemSize();
1903 unsigned N = Size.getZExtValue();
1904
1905 if ((SrcIndex <= DstIndex && (SrcIndex + N) > DstIndex) ||
1906 (DstIndex <= SrcIndex && (DstIndex + N) > SrcIndex)) {
1907 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
1908 << /*IsWChar=*/false;
1909 return false;
1910 }
1911 }
1912
1913 assert(Size.getZExtValue() % DestElemSize == 0);
1914 if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size.getZExtValue()).toBits()))
1915 return false;
1916
1917 S.Stk.push<Pointer>(DestPtr);
1918 return true;
1919}
1920
1921/// Determine if T is a character type for which we guarantee that
1922/// sizeof(T) == 1.
1924 return T->isCharType() || T->isChar8Type();
1925}
1926
1928 const InterpFrame *Frame,
1929 const CallExpr *Call, unsigned ID) {
1930 assert(Call->getNumArgs() == 3);
1931 const APSInt &Size = popToAPSInt(S, Call->getArg(2));
1932 const Pointer &PtrB = S.Stk.pop<Pointer>();
1933 const Pointer &PtrA = S.Stk.pop<Pointer>();
1934
1935 if (ID == Builtin::BImemcmp || ID == Builtin::BIbcmp ||
1936 ID == Builtin::BIwmemcmp)
1937 diagnoseNonConstexprBuiltin(S, OpPC, ID);
1938
1939 if (Size.isZero()) {
1940 pushInteger(S, 0, Call->getType());
1941 return true;
1942 }
1943
1944 if (!PtrA.isBlockPointer() || !PtrB.isBlockPointer())
1945 return false;
1946
1947 bool IsWide =
1948 (ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);
1949
1950 const ASTContext &ASTCtx = S.getASTContext();
1951 QualType ElemTypeA = getElemType(PtrA);
1952 QualType ElemTypeB = getElemType(PtrB);
1953 // FIXME: This is an arbitrary limitation the current constant interpreter
1954 // had. We could remove this.
1955 if (!IsWide && (!isOneByteCharacterType(ElemTypeA) ||
1956 !isOneByteCharacterType(ElemTypeB))) {
1957 S.FFDiag(S.Current->getSource(OpPC),
1958 diag::note_constexpr_memcmp_unsupported)
1959 << ASTCtx.BuiltinInfo.getQuotedName(ID) << PtrA.getType()
1960 << PtrB.getType();
1961 return false;
1962 }
1963
1964 if (PtrA.isDummy() || PtrB.isDummy())
1965 return false;
1966
1967 // Now, read both pointers to a buffer and compare those.
1968 BitcastBuffer BufferA(
1969 Bits(ASTCtx.getTypeSize(ElemTypeA) * PtrA.getNumElems()));
1970 readPointerToBuffer(S.getContext(), PtrA, BufferA, false);
1971 // FIXME: The swapping here is UNDOING something we do when reading the
1972 // data into the buffer.
1973 if (ASTCtx.getTargetInfo().isBigEndian())
1974 swapBytes(BufferA.Data.get(), BufferA.byteSize().getQuantity());
1975
1976 BitcastBuffer BufferB(
1977 Bits(ASTCtx.getTypeSize(ElemTypeB) * PtrB.getNumElems()));
1978 readPointerToBuffer(S.getContext(), PtrB, BufferB, false);
1979 // FIXME: The swapping here is UNDOING something we do when reading the
1980 // data into the buffer.
1981 if (ASTCtx.getTargetInfo().isBigEndian())
1982 swapBytes(BufferB.Data.get(), BufferB.byteSize().getQuantity());
1983
1984 size_t MinBufferSize = std::min(BufferA.byteSize().getQuantity(),
1985 BufferB.byteSize().getQuantity());
1986
1987 unsigned ElemSize = 1;
1988 if (IsWide)
1989 ElemSize = ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
1990 // The Size given for the wide variants is in wide-char units. Convert it
1991 // to bytes.
1992 size_t ByteSize = Size.getZExtValue() * ElemSize;
1993 size_t CmpSize = std::min(MinBufferSize, ByteSize);
1994
1995 for (size_t I = 0; I != CmpSize; I += ElemSize) {
1996 if (IsWide) {
1998 T A = *reinterpret_cast<T *>(BufferA.Data.get() + I);
1999 T B = *reinterpret_cast<T *>(BufferB.Data.get() + I);
2000 if (A < B) {
2001 pushInteger(S, -1, Call->getType());
2002 return true;
2003 }
2004 if (A > B) {
2005 pushInteger(S, 1, Call->getType());
2006 return true;
2007 }
2008 });
2009 } else {
2010 std::byte A = BufferA.Data[I];
2011 std::byte B = BufferB.Data[I];
2012
2013 if (A < B) {
2014 pushInteger(S, -1, Call->getType());
2015 return true;
2016 }
2017 if (A > B) {
2018 pushInteger(S, 1, Call->getType());
2019 return true;
2020 }
2021 }
2022 }
2023
2024 // We compared CmpSize bytes above. If the limiting factor was the Size
2025 // passed, we're done and the result is equality (0).
2026 if (ByteSize <= CmpSize) {
2027 pushInteger(S, 0, Call->getType());
2028 return true;
2029 }
2030
2031 // However, if we read all the available bytes but were instructed to read
2032 // even more, diagnose this as a "read of dereferenced one-past-the-end
2033 // pointer". This is what would happen if we called CheckLoad() on every array
2034 // element.
2035 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_past_end)
2036 << AK_Read << S.Current->getRange(OpPC);
2037 return false;
2038}
2039
2040// __builtin_memchr(ptr, int, int)
2041// __builtin_strchr(ptr, int)
// Shared constant-evaluator for the memchr/strchr/wmemchr/wcschr family:
// scans the pointed-to array for a desired value and pushes a Pointer to the
// first match, or a null Pointer when no match is found.
// (The signature's first line is elided in this listing; the parameters
// visible here are the CallExpr and the builtin ID.)
2043                                 const CallExpr *Call, unsigned ID) {
  // The plain library functions are not constexpr; diagnose but keep going
  // so the __builtin_ forms can still be evaluated.
2044  if (ID == Builtin::BImemchr || ID == Builtin::BIwcschr ||
2045      ID == Builtin::BIstrchr || ID == Builtin::BIwmemchr)
2046    diagnoseNonConstexprBuiltin(S, OpPC, ID);
2047
  // Only the three-argument (memchr-style) forms carry a length.
2048  std::optional<APSInt> MaxLength;
2049  if (Call->getNumArgs() == 3)
2050    MaxLength = popToAPSInt(S, Call->getArg(2));
2051
2052  APSInt Desired = popToAPSInt(S, Call->getArg(1));
2053  const Pointer &Ptr = S.Stk.pop<Pointer>();
2054
  // Searching zero bytes trivially finds nothing: push a null Pointer.
2055  if (MaxLength && MaxLength->isZero()) {
2056    S.Stk.push<Pointer>();
2057    return true;
2058  }
2059
2060  if (Ptr.isDummy()) {
2061    if (Ptr.getType()->isIncompleteType())
2062      S.FFDiag(S.Current->getSource(OpPC),
2063               diag::note_constexpr_ltor_incomplete_type)
2064          << Ptr.getType();
2065    return false;
2066  }
2067
2068  // Null is only okay if the given size is 0.
2069  if (Ptr.isZero()) {
2070    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_null)
2071        << AK_Read;
2072    return false;
2073  }
2074
2075  QualType ElemTy = Ptr.getFieldDesc()->isArray()
2076                        ? Ptr.getFieldDesc()->getElemQualType()
2077                        : Ptr.getFieldDesc()->getType();
2078  bool IsRawByte = ID == Builtin::BImemchr || ID == Builtin::BI__builtin_memchr;
2079
2080  // Give up on byte-oriented matching against multibyte elements.
2081  if (IsRawByte && !isOneByteCharacterType(ElemTy)) {
2082    S.FFDiag(S.Current->getSource(OpPC),
2083             diag::note_constexpr_memchr_unsupported)
2084        << S.getASTContext().BuiltinInfo.getQuotedName(ID) << ElemTy;
2085    return false;
2086  }
2087
2088  if (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr) {
2089    int64_t DesiredTrunc;
    // Truncate to char width using the signedness of the target's char type.
2090    if (S.getASTContext().CharTy->isSignedIntegerType())
2091      DesiredTrunc =
2092          Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue();
2093    else
2094      DesiredTrunc =
2095          Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
2096    // strchr compares directly to the passed integer, and therefore
2097    // always fails if given an int that is not a char.
2098    if (Desired != DesiredTrunc) {
2099      S.Stk.push<Pointer>();
2100      return true;
2101    }
2102  }
2103
2104  uint64_t DesiredVal;
2105  if (ID == Builtin::BIwmemchr || ID == Builtin::BI__builtin_wmemchr ||
2106      ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr) {
2107    // wcschr and wmemchr are given a wchar_t to look for. Just use it.
2108    DesiredVal = Desired.getZExtValue();
2109  } else {
2110    DesiredVal = Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
2111  }
2112
  // The str/wcs variants stop at the terminating null; the mem variants
  // only stop at MaxLength.
2113  bool StopAtZero =
2114      (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr ||
2115       ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr);
2116
2117  PrimType ElemT =
2118      IsRawByte ? PT_Sint8 : *S.getContext().classify(getElemType(Ptr));
2119
  // Walk forward one element at a time; CheckLoad diagnoses reads past the
  // end of the object, which terminates evaluation.
2120  size_t Index = Ptr.getIndex();
2121  size_t Step = 0;
2122  for (;;) {
2123    const Pointer &ElemPtr =
2124        (Index + Step) > 0 ? Ptr.atIndex(Index + Step) : Ptr;
2125
2126    if (!CheckLoad(S, OpPC, ElemPtr))
2127      return false;
2128
    // Read the current element as an unsigned value for comparison (the
    // INT_TYPE_SWITCH invocation line is elided in this listing).
2129    uint64_t V;
2131        ElemT, { V = static_cast<uint64_t>(ElemPtr.deref<T>().toUnsigned()); });
2132
2133    if (V == DesiredVal) {
2134      S.Stk.push<Pointer>(ElemPtr);
2135      return true;
2136    }
2137
2138    if (StopAtZero && V == 0)
2139      break;
2140
2141    ++Step;
2142    if (MaxLength && Step == MaxLength->getZExtValue())
2143      break;
2144  }
2145
  // No match: push a null Pointer.
2146  S.Stk.push<Pointer>();
2147  return true;
2148}
2149
// Byte size of the complete object described by \p Desc (primitive, array,
// or record), or std::nullopt when it cannot be determined from the
// descriptor alone.
2150static std::optional<unsigned> computeFullDescSize(const ASTContext &ASTCtx,
2151                                                   const Descriptor *Desc) {
2152  if (Desc->isPrimitive())
2153    return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();
2154  if (Desc->isArray())
2155    return ASTCtx.getTypeSizeInChars(Desc->getElemQualType()).getQuantity() *
2156           Desc->getNumElems();
2157  if (Desc->isRecord()) {
2158    // Can't use Descriptor::getType() as that may return a pointer type. Look
2159    // at the decl directly.
    // (The getTypeSizeInChars( call line is elided in this listing.)
2160    return ASTCtx
2162                ASTCtx.getCanonicalTagType(Desc->ElemRecord->getDecl()))
2163        .getQuantity();
2164  }
2165
2166  return std::nullopt;
2167}
2168
2169/// Compute the byte offset of \p Ptr in the full declaration.
///
/// Walks upward from \p Ptr to the declaration root and sums the byte offset
/// contributed at each level: array index * element size, base-class offset
/// from the record layout, or field offset from the record layout.
2170static unsigned computePointerOffset(const ASTContext &ASTCtx,
2171                                     const Pointer &Ptr) {
2172  unsigned Result = 0;
2173
2174  Pointer P = Ptr;
2175  while (P.isField() || P.isArrayElement()) {
2176    P = P.expand();
2177    const Descriptor *D = P.getFieldDesc();
2178
2179    if (P.isArrayElement()) {
      // (The element-size computation line is elided in this listing.)
2180      unsigned ElemSize =
      // A one-past-the-end pointer contributes the full array size.
2182      if (P.isOnePastEnd())
2183        Result += ElemSize * P.getNumElems();
2184      else
2185        Result += ElemSize * P.getIndex();
2186      P = P.expand().getArray();
2187    } else if (P.isBaseClass()) {
2188      const auto *RD = cast<CXXRecordDecl>(D->asDecl());
2189      bool IsVirtual = Ptr.isVirtualBaseClass();
2190      P = P.getBase();
2191      const Record *BaseRecord = P.getRecord();
2192
      // Base-class offset comes from the enclosing record's layout.
2193      const ASTRecordLayout &Layout =
2194          ASTCtx.getASTRecordLayout(cast<CXXRecordDecl>(BaseRecord->getDecl()));
2195      if (IsVirtual)
2196        Result += Layout.getVBaseClassOffset(RD).getQuantity();
2197      else
2198        Result += Layout.getBaseClassOffset(RD).getQuantity();
2199    } else if (P.isField()) {
2200      const FieldDecl *FD = P.getField();
2201      const ASTRecordLayout &Layout =
2202          ASTCtx.getASTRecordLayout(FD->getParent());
2203      unsigned FieldIndex = FD->getFieldIndex();
      // Field offsets in the layout are in bits; convert to bytes.
2204      uint64_t FieldOffset =
2205          ASTCtx.toCharUnitsFromBits(Layout.getFieldOffset(FieldIndex))
2206              .getQuantity();
2207      Result += FieldOffset;
2208      P = P.getBase();
2209    } else
2210      llvm_unreachable("Unhandled descriptor type");
2211  }
2212
2213  return Result;
2214}
2215
2216/// Does Ptr point to the last subobject?
2217static bool pointsToLastObject(const Pointer &Ptr) {
2218 Pointer P = Ptr;
2219 while (!P.isRoot()) {
2220
2221 if (P.isArrayElement()) {
2222 P = P.expand().getArray();
2223 continue;
2224 }
2225 if (P.isBaseClass()) {
2226 if (P.getRecord()->getNumFields() > 0)
2227 return false;
2228 P = P.getBase();
2229 continue;
2230 }
2231
2232 Pointer Base = P.getBase();
2233 if (const Record *R = Base.getRecord()) {
2234 assert(P.getField());
2235 if (P.getField()->getFieldIndex() != R->getNumFields() - 1)
2236 return false;
2237 }
2238 P = Base;
2239 }
2240
2241 return true;
2242}
2243
2244/// Does Ptr point to the last object AND to a flexible array member?
///
/// The lambda below implements the -fstrict-flex-arrays levels: FAMKind is
/// the strict-flex-arrays level kind from LangOptions (its `using` alias
/// line is elided in this listing).
2245static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const Pointer &Ptr) {
2246  auto isFlexibleArrayMember = [&](const Descriptor *FieldDesc) {
2248    FAMKind StrictFlexArraysLevel =
2249        Ctx.getLangOpts().getStrictFlexArraysLevel();
2250
    // Default: any trailing array counts as a flexible array member.
2251    if (StrictFlexArraysLevel == FAMKind::Default)
2252      return true;
2253
    // Zero-sized trailing arrays count unless restricted to incomplete only.
2254    unsigned NumElems = FieldDesc->getNumElems();
2255    if (NumElems == 0 && StrictFlexArraysLevel != FAMKind::IncompleteOnly)
2256      return true;
2257
    // One-element trailing arrays count only at the most permissive level.
2258    if (NumElems == 1 && StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete)
2259      return true;
2260    return false;
2261  };
2262
2263  const Descriptor *FieldDesc = Ptr.getFieldDesc();
2264  if (!FieldDesc->isArray())
2265    return false;
2266
2267  return Ptr.isDummy() && pointsToLastObject(Ptr) &&
2268         isFlexibleArrayMember(FieldDesc);
2269}
2270
// Evaluator for a __builtin_object_size-style builtin (see the GCC docs
// quoted below for the meaning of the Kind argument). Pushes the number of
// remaining bytes from the pointer to the end of the (sub)object, or returns
// false when the size cannot be determined at compile time.
// (The signature's first line is elided in this listing.)
2272                                        const InterpFrame *Frame,
2273                                        const CallExpr *Call) {
2274  const ASTContext &ASTCtx = S.getASTContext();
2275  // From the GCC docs:
2276  // Kind is an integer constant from 0 to 3. If the least significant bit is
2277  // clear, objects are whole variables. If it is set, a closest surrounding
2278  // subobject is considered the object a pointer points to. The second bit
2279  // determines if maximum or minimum of remaining bytes is computed.
2280  unsigned Kind = popToAPSInt(S, Call->getArg(1)).getZExtValue();
2281  assert(Kind <= 3 && "unexpected kind");
2282  bool UseFieldDesc = (Kind & 1u);
2283  bool ReportMinimum = (Kind & 2u);
2284  const Pointer &Ptr = S.Stk.pop<Pointer>();
2285
2286  if (Call->getArg(0)->HasSideEffects(ASTCtx)) {
2287    // "If there are any side effects in them, it returns (size_t) -1
2288    // for type 0 or 1 and (size_t) 0 for type 2 or 3."
2289    pushInteger(S, Kind <= 1 ? -1 : 0, Call->getType());
2290    return true;
2291  }
2292
2293  if (Ptr.isZero() || !Ptr.isBlockPointer())
2294    return false;
2295
2296  // We can't load through pointers.
2297  if (Ptr.isDummy() && Ptr.getType()->isPointerType())
2298    return false;
2299
  // Field desc == decl desc means Ptr addresses the whole declaration
  // rather than a subobject of it.
2300  bool DetermineForCompleteObject = Ptr.getFieldDesc() == Ptr.getDeclDesc();
2301  const Descriptor *DeclDesc = Ptr.getDeclDesc();
2302  assert(DeclDesc);
2303
2304  if (!UseFieldDesc || DetermineForCompleteObject) {
2305    // Lower bound, so we can't fall back to this.
2306    if (ReportMinimum && !DetermineForCompleteObject)
2307      return false;
2308
2309    // Can't read beyond the pointer decl desc.
2310    if (!UseFieldDesc && !ReportMinimum && DeclDesc->getType()->isPointerType())
2311      return false;
2312  } else {
2313    if (isUserWritingOffTheEnd(ASTCtx, Ptr.expand())) {
2314      // If we cannot determine the size of the initial allocation, then we
2315      // can't given an accurate upper-bound. However, we are still able to give
2316      // conservative lower-bounds for Type=3.
2317      if (Kind == 1)
2318        return false;
2319    }
2320  }
2321
2322  const Descriptor *Desc = UseFieldDesc ? Ptr.getFieldDesc() : DeclDesc;
2323  assert(Desc);
2324
2325  std::optional<unsigned> FullSize = computeFullDescSize(ASTCtx, Desc);
2326  if (!FullSize)
2327    return false;
2328
  // Offset of Ptr within the object whose size we computed; the result is
  // FullSize - ByteOffset (bytes remaining after the pointer).
2329  unsigned ByteOffset;
2330  if (UseFieldDesc) {
2331    if (Ptr.isBaseClass())
2332      ByteOffset = computePointerOffset(ASTCtx, Ptr.getBase()) -
2333                   computePointerOffset(ASTCtx, Ptr);
2334    else {
      // For array elements, measure from the start of the array.
2335      if (Ptr.inArray())
2336        ByteOffset =
2337            computePointerOffset(ASTCtx, Ptr) -
2338            computePointerOffset(ASTCtx, Ptr.expand().atIndex(0).narrow());
2339      else
2340        ByteOffset = 0;
2341    }
2342  } else
2343    ByteOffset = computePointerOffset(ASTCtx, Ptr);
2344
2345  assert(ByteOffset <= *FullSize);
2346  unsigned Result = *FullSize - ByteOffset;
2347
2348  pushInteger(S, Result, Call->getType());
2349  return true;
2350}
2351
// Evaluator for __builtin_is_within_lifetime (also reached via
// std::is_within_lifetime, as the diagnostic text below shows). Pushes a
// boolean, or fails with a diagnostic for null, one-past-the-end, or
// pointers into an object still being initialized.
// (The signature's first line is elided in this listing.)
2353                                               const CallExpr *Call) {
2354
2355  if (!S.inConstantContext())
2356    return false;
2357
2358  const Pointer &Ptr = S.Stk.pop<Pointer>();
2359
  // Emits err_invalid_is_within_lifetime, naming std::is_within_lifetime
  // when we were called from the std wrapper. Diag selects the reason:
  // 0 = null, 1 = one-past-the-end, 2 = during initialization.
2360  auto Error = [&](int Diag) {
2361    bool CalledFromStd = false;
2362    const auto *Callee = S.Current->getCallee();
2363    if (Callee && Callee->isInStdNamespace()) {
2364      const IdentifierInfo *Identifier = Callee->getIdentifier();
2365      CalledFromStd = Identifier && Identifier->isStr("is_within_lifetime");
2366    }
2367    S.CCEDiag(CalledFromStd
2369                  : S.Current->getSource(OpPC),
2370              diag::err_invalid_is_within_lifetime)
2371        << (CalledFromStd ? "std::is_within_lifetime"
2372                          : "__builtin_is_within_lifetime")
2373        << Diag;
2374    return false;
2375  };
2376
2377  if (Ptr.isZero())
2378    return Error(0);
2379  if (Ptr.isOnePastEnd())
2380    return Error(1);
2381
  // Inactive union members report false without further checking; active
  // pointers must still pass the usual access checks.
2382  bool Result = Ptr.getLifetime() != Lifetime::Ended;
2383  if (!Ptr.isActive()) {
2384    Result = false;
2385  } else {
2386    if (!CheckLive(S, OpPC, Ptr, AK_Read))
2387      return false;
2388    if (!CheckMutable(S, OpPC, Ptr))
2389      return false;
2390    if (!CheckDummy(S, OpPC, Ptr.block(), AK_Read))
2391      return false;
2392  }
2393
2394  // Check if we're currently running an initializer.
2395  if (llvm::is_contained(S.InitializingBlocks, Ptr.block()))
2396    return Error(2);
2397  if (S.EvaluatingDecl && Ptr.getDeclDesc()->asVarDecl() == S.EvaluatingDecl)
2398    return Error(2);
2399
2400  pushInteger(S, Result, Call->getType());
2401  return true;
2402}
2403
// Applies \p Fn to a single integer argument and pushes the result with the
// call's type. Vector arguments are not handled yet (returns false, i.e.
// not constant-evaluable here).
// (The signature's first line is elided in this listing.)
2405    InterpState &S, CodePtr OpPC, const CallExpr *Call,
2406    llvm::function_ref<APInt(const APSInt &)> Fn) {
2407  assert(Call->getNumArgs() == 1);
2408  assert(Call->getType()->isIntegerType());
2409
2410  // Single integer case.
2411  if (!Call->getArg(0)->getType()->isVectorType()) {
2412    APSInt Src = popToAPSInt(S, Call->getArg(0));
2413    APInt Result = Fn(Src);
    // Result keeps the source's signedness (APSInt's ctor takes IsUnsigned).
2414    pushInteger(S, APSInt(std::move(Result), !Src.isSigned()), Call->getType());
2415    return true;
2416  }
2417
2418  // TODO: Add vector integer handling.
2419  return false;
2420}
2421
// Applies the two-operand integer callback \p Fn for a builtin in one of
// three shapes: scalar+scalar (result pushed as an integer), vector+scalar
// (scalar broadcast across elements), and vector+vector (elementwise).
// (The signature's first line and some INT_TYPE_SWITCH_NO_BOOL invocation
// lines are elided in this listing.)
2423    InterpState &S, CodePtr OpPC, const CallExpr *Call,
2424    llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
2425  assert(Call->getNumArgs() == 2);
2426
2427  // Single integer case.
2428  if (!Call->getArg(0)->getType()->isVectorType()) {
2429    assert(!Call->getArg(1)->getType()->isVectorType());
    // Pop in reverse of push order: RHS first.
2430    APSInt RHS = popToAPSInt(S, Call->getArg(1));
2431    APSInt LHS = popToAPSInt(S, Call->getArg(0));
2432    APInt Result = Fn(LHS, RHS);
2433    pushInteger(S, APSInt(std::move(Result), !LHS.isSigned()), Call->getType());
2434    return true;
2435  }
2436
2437  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2438  assert(VT->getElementType()->isIntegralOrEnumerationType());
2439  PrimType ElemT = *S.getContext().classify(VT->getElementType());
2440  unsigned NumElems = VT->getNumElements();
2441  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2442
2443  // Vector + Scalar case.
2444  if (!Call->getArg(1)->getType()->isVectorType()) {
2445    assert(Call->getArg(1)->getType()->isIntegralOrEnumerationType());
2446
2447    APSInt RHS = popToAPSInt(S, Call->getArg(1));
2448    const Pointer &LHS = S.Stk.pop<Pointer>();
2449    const Pointer &Dst = S.Stk.peek<Pointer>();
2450
2451    for (unsigned I = 0; I != NumElems; ++I) {
2453        Dst.elem<T>(I) = static_cast<T>(
2454            APSInt(Fn(LHS.elem<T>(I).toAPSInt(), RHS), DestUnsigned));
2455      });
2456    }
2458    return true;
2459  }
2460
2461  // Vector case.
2462  assert(Call->getArg(0)->getType()->isVectorType() &&
2463         Call->getArg(1)->getType()->isVectorType());
2464  assert(VT->getElementType() ==
2465         Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
2466  assert(VT->getNumElements() ==
2467         Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
2468  assert(VT->getElementType()->isIntegralOrEnumerationType());
2469
2470  const Pointer &RHS = S.Stk.pop<Pointer>();
2471  const Pointer &LHS = S.Stk.pop<Pointer>();
2472  const Pointer &Dst = S.Stk.peek<Pointer>();
2473  for (unsigned I = 0; I != NumElems; ++I) {
2475      APSInt Elem1 = LHS.elem<T>(I).toAPSInt();
2476      APSInt Elem2 = RHS.elem<T>(I).toAPSInt();
2477      Dst.elem<T>(I) = static_cast<T>(APSInt(Fn(Elem1, Elem2), DestUnsigned));
2478    });
2479  }
2481
2482  return true;
2483}
2484
// x86-style vector "pack" helper: per 128-bit lane, narrows SrcPerLane
// elements of LHS into the low half of the destination lane and SrcPerLane
// elements of RHS into the high half, converting each element through
// \p PackFn. (The rest of the signature and the INT_TYPE_SWITCH invocation
// line are elided in this listing.)
2485static bool
2487                     llvm::function_ref<APInt(const APSInt &)> PackFn) {
2488  const auto *VT0 = E->getArg(0)->getType()->castAs<VectorType>();
2489  [[maybe_unused]] const auto *VT1 =
2490      E->getArg(1)->getType()->castAs<VectorType>();
2491  assert(VT0 && VT1 && "pack builtin VT0 and VT1 must be VectorType");
2492  assert(VT0->getElementType() == VT1->getElementType() &&
2493         VT0->getNumElements() == VT1->getNumElements() &&
2494         "pack builtin VT0 and VT1 ElementType must be same");
2495
2496  const Pointer &RHS = S.Stk.pop<Pointer>();
2497  const Pointer &LHS = S.Stk.pop<Pointer>();
2498  const Pointer &Dst = S.Stk.peek<Pointer>();
2499
2500  const ASTContext &ASTCtx = S.getASTContext();
2501  unsigned SrcBits = ASTCtx.getIntWidth(VT0->getElementType());
2502  unsigned LHSVecLen = VT0->getNumElements();
  // Lane geometry: x86 pack instructions operate on 128-bit lanes.
2503  unsigned SrcPerLane = 128 / SrcBits;
2504  unsigned Lanes = LHSVecLen * SrcBits / 128;
2505
2506  PrimType SrcT = *S.getContext().classify(VT0->getElementType());
2507  PrimType DstT = *S.getContext().classify(getElemType(Dst));
2508  bool IsUnsigend = getElemType(Dst)->isUnsignedIntegerType();
2509
2510  for (unsigned Lane = 0; Lane != Lanes; ++Lane) {
2511    unsigned BaseSrc = Lane * SrcPerLane;
    // Each dest lane holds twice as many (narrower) elements.
2512    unsigned BaseDst = Lane * (2 * SrcPerLane);
2513
2514    for (unsigned I = 0; I != SrcPerLane; ++I) {
2516        APSInt A = LHS.elem<T>(BaseSrc + I).toAPSInt();
2517        APSInt B = RHS.elem<T>(BaseSrc + I).toAPSInt();
2518
        // LHS fills the lane's low half, RHS the high half.
2519        assignInteger(S, Dst.atIndex(BaseDst + I), DstT,
2520                      APSInt(PackFn(A), IsUnsigend));
2521        assignInteger(S, Dst.atIndex(BaseDst + SrcPerLane + I), DstT,
2522                      APSInt(PackFn(B), IsUnsigend));
2523      });
2524    }
2525  }
2526
2527  Dst.initializeAllElements();
2528  return true;
2529}
2530
// Evaluator for __builtin_elementwise_max / __builtin_elementwise_min on
// integer scalars or integer vectors (floating-point support is TODO).
// (The signature's first line and some INT_TYPE_SWITCH invocation lines are
// elided in this listing.)
2532                                        const CallExpr *Call,
2533                                        unsigned BuiltinID) {
2534  assert(Call->getNumArgs() == 2);
2535
2536  QualType Arg0Type = Call->getArg(0)->getType();
2537
2538  // TODO: Support floating-point types.
2539  if (!(Arg0Type->isIntegerType() ||
2540        (Arg0Type->isVectorType() &&
2541         Arg0Type->castAs<VectorType>()->getElementType()->isIntegerType())))
2542    return false;
2543
  // Scalar case.
2544  if (!Arg0Type->isVectorType()) {
2545    assert(!Call->getArg(1)->getType()->isVectorType());
2546    APSInt RHS = popToAPSInt(S, Call->getArg(1));
2547    APSInt LHS = popToAPSInt(S, Arg0Type);
2548    APInt Result;
2549    if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
2550      Result = std::max(LHS, RHS);
2551    } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
2552      Result = std::min(LHS, RHS);
2553    } else {
2554      llvm_unreachable("Wrong builtin ID");
2555    }
2556
2557    pushInteger(S, APSInt(Result, !LHS.isSigned()), Call->getType());
2558    return true;
2559  }
2560
2561  // Vector case.
2562  assert(Call->getArg(0)->getType()->isVectorType() &&
2563         Call->getArg(1)->getType()->isVectorType());
2564  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2565  assert(VT->getElementType() ==
2566         Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
2567  assert(VT->getNumElements() ==
2568         Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
2569  assert(VT->getElementType()->isIntegralOrEnumerationType());
2570
2571  const Pointer &RHS = S.Stk.pop<Pointer>();
2572  const Pointer &LHS = S.Stk.pop<Pointer>();
2573  const Pointer &Dst = S.Stk.peek<Pointer>();
2574  PrimType ElemT = *S.getContext().classify(VT->getElementType());
2575  unsigned NumElems = VT->getNumElements();
2576  for (unsigned I = 0; I != NumElems; ++I) {
2577    APSInt Elem1;
2578    APSInt Elem2;
2580      Elem1 = LHS.elem<T>(I).toAPSInt();
2581      Elem2 = RHS.elem<T>(I).toAPSInt();
2582    });
2583
2584    APSInt Result;
2585    if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
2586      Result = APSInt(std::max(Elem1, Elem2),
2587                      Call->getType()->isUnsignedIntegerOrEnumerationType());
2588    } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
2589      Result = APSInt(std::min(Elem1, Elem2),
2590                      Call->getType()->isUnsignedIntegerOrEnumerationType());
2591    } else {
2592      llvm_unreachable("Wrong builtin ID");
2593    }
2594
2596        { Dst.elem<T>(I) = static_cast<T>(Result); });
2597  }
2598  Dst.initializeAllElements();
2599
2600  return true;
2601}
2602
// Combines each adjacent element pair (I, I+1) of LHS and RHS into a single
// destination element via the four-operand callback \p Fn — producing a
// destination with half as many (typically wider) elements. Presumably used
// for pmaddwd-style multiply-and-add builtins — confirm at the call sites.
// (The signature's first line and an INT_TYPE_SWITCH invocation line are
// elided in this listing.)
2604    InterpState &S, CodePtr OpPC, const CallExpr *Call,
2605    llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &,
2606                             const APSInt &)>
2607        Fn) {
2608  assert(Call->getArg(0)->getType()->isVectorType() &&
2609         Call->getArg(1)->getType()->isVectorType());
2610  const Pointer &RHS = S.Stk.pop<Pointer>();
2611  const Pointer &LHS = S.Stk.pop<Pointer>();
2612  const Pointer &Dst = S.Stk.peek<Pointer>();
2613
2614  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2615  PrimType ElemT = *S.getContext().classify(VT->getElementType());
2616  unsigned NumElems = VT->getNumElements();
2617  const auto *DestVT = Call->getType()->castAs<VectorType>();
2618  PrimType DestElemT = *S.getContext().classify(DestVT->getElementType());
2619  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2620
  // One destination element per source pair; assumes an even element count.
2621  unsigned DstElem = 0;
2622  for (unsigned I = 0; I != NumElems; I += 2) {
2623    APSInt Result;
2625      APSInt LoLHS = LHS.elem<T>(I).toAPSInt();
2626      APSInt HiLHS = LHS.elem<T>(I + 1).toAPSInt();
2627      APSInt LoRHS = RHS.elem<T>(I).toAPSInt();
2628      APSInt HiRHS = RHS.elem<T>(I + 1).toAPSInt();
2629      Result = APSInt(Fn(LoLHS, HiLHS, LoRHS, HiRHS), DestUnsigned);
2630    });
2631
2632    INT_TYPE_SWITCH_NO_BOOL(DestElemT,
2633                            { Dst.elem<T>(DstElem) = static_cast<T>(Result); });
2634    ++DstElem;
2635  }
2636
2637  Dst.initializeAllElements();
2638  return true;
2639}
2640
// Horizontal integer binop per 128-bit lane (phadd/phsub pattern): within
// each lane, adjacent pairs of LHS fill the lane's low half of Dst and
// adjacent pairs of RHS fill its high half, each combined via \p Fn.
// (The signature's first line and INT_TYPE_SWITCH invocation lines are
// elided in this listing.)
2642    InterpState &S, CodePtr OpPC, const CallExpr *Call,
2643    llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
2644  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2645  PrimType ElemT = *S.getContext().classify(VT->getElementType());
2646  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2647
2648  const Pointer &RHS = S.Stk.pop<Pointer>();
2649  const Pointer &LHS = S.Stk.pop<Pointer>();
2650  const Pointer &Dst = S.Stk.peek<Pointer>();
2651  unsigned NumElts = VT->getNumElements();
2652  unsigned EltBits = S.getASTContext().getIntWidth(VT->getElementType());
2653  unsigned EltsPerLane = 128 / EltBits;
2654  unsigned Lanes = NumElts * EltBits / 128;
2655  unsigned DestIndex = 0;
2656
2657  for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2658    unsigned LaneStart = Lane * EltsPerLane;
    // Pairs from LHS -> low half of the destination lane.
2659    for (unsigned I = 0; I < EltsPerLane; I += 2) {
2661        APSInt Elem1 = LHS.elem<T>(LaneStart + I).toAPSInt();
2662        APSInt Elem2 = LHS.elem<T>(LaneStart + I + 1).toAPSInt();
2663        APSInt ResL = APSInt(Fn(Elem1, Elem2), DestUnsigned);
2664        Dst.elem<T>(DestIndex++) = static_cast<T>(ResL);
2665      });
2666    }
2667
    // Pairs from RHS -> high half of the destination lane.
2668    for (unsigned I = 0; I < EltsPerLane; I += 2) {
2670        APSInt Elem1 = RHS.elem<T>(LaneStart + I).toAPSInt();
2671        APSInt Elem2 = RHS.elem<T>(LaneStart + I + 1).toAPSInt();
2672        APSInt ResR = APSInt(Fn(Elem1, Elem2), DestUnsigned);
2673        Dst.elem<T>(DestIndex++) = static_cast<T>(ResR);
2674      });
2675    }
2676  }
2677  Dst.initializeAllElements();
2678  return true;
2679}
2680
// Floating-point counterpart of the horizontal lane binop (haddps/hsubps
// pattern): per 128-bit lane, adjacent pairs of LHS fill the lane's low
// half and pairs of RHS its high half, combined via \p Fn using the call's
// rounding mode. (The signature's first line is elided in this listing.)
2682    InterpState &S, CodePtr OpPC, const CallExpr *Call,
2683    llvm::function_ref<APFloat(const APFloat &, const APFloat &,
2684                               llvm::RoundingMode)>
2685        Fn) {
2686  const Pointer &RHS = S.Stk.pop<Pointer>();
2687  const Pointer &LHS = S.Stk.pop<Pointer>();
2688  const Pointer &Dst = S.Stk.peek<Pointer>();
  // Rounding mode comes from the FP options in effect at the call site.
2689  FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
2690  llvm::RoundingMode RM = getRoundingMode(FPO);
2691  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2692
2693  unsigned NumElts = VT->getNumElements();
2694  unsigned EltBits = S.getASTContext().getTypeSize(VT->getElementType());
2695  unsigned NumLanes = NumElts * EltBits / 128;
2696  unsigned NumElemsPerLane = NumElts / NumLanes;
2697  unsigned HalfElemsPerLane = NumElemsPerLane / 2;
2698
2699  for (unsigned L = 0; L != NumElts; L += NumElemsPerLane) {
2700    using T = PrimConv<PT_Float>::T;
2701    for (unsigned E = 0; E != HalfElemsPerLane; ++E) {
2702      APFloat Elem1 = LHS.elem<T>(L + (2 * E) + 0).getAPFloat();
2703      APFloat Elem2 = LHS.elem<T>(L + (2 * E) + 1).getAPFloat();
2704      Dst.elem<T>(L + E) = static_cast<T>(Fn(Elem1, Elem2, RM));
2705    }
2706    for (unsigned E = 0; E != HalfElemsPerLane; ++E) {
2707      APFloat Elem1 = RHS.elem<T>(L + (2 * E) + 0).getAPFloat();
2708      APFloat Elem2 = RHS.elem<T>(L + (2 * E) + 1).getAPFloat();
2709      Dst.elem<T>(L + E + HalfElemsPerLane) =
2710          static_cast<T>(Fn(Elem1, Elem2, RM));
2711    }
2712  }
2713  Dst.initializeAllElements();
2714  return true;
2715}
2716
// Three-operand floating-point op (fma-style callback taking X, Y, Z and a
// rounding mode): handles both the scalar Floating form and the elementwise
// vector form. (The signature's first line and a Dst-initialization line
// are elided in this listing.)
2718    InterpState &S, CodePtr OpPC, const CallExpr *Call,
2719    llvm::function_ref<APFloat(const APFloat &, const APFloat &,
2720                               const APFloat &, llvm::RoundingMode)>
2721        Fn) {
2722  assert(Call->getNumArgs() == 3);
2723
2724  FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
2725  llvm::RoundingMode RM = getRoundingMode(FPO);
2726  QualType Arg1Type = Call->getArg(0)->getType();
2727  QualType Arg2Type = Call->getArg(1)->getType();
2728  QualType Arg3Type = Call->getArg(2)->getType();
2729
2730  // Non-vector floating point types.
2731  if (!Arg1Type->isVectorType()) {
2732    assert(!Arg2Type->isVectorType());
2733    assert(!Arg3Type->isVectorType());
2734    (void)Arg2Type;
2735    (void)Arg3Type;
2736
    // Pop in reverse of push order: Z, Y, then X.
2737    const Floating &Z = S.Stk.pop<Floating>();
2738    const Floating &Y = S.Stk.pop<Floating>();
2739    const Floating &X = S.Stk.pop<Floating>();
2740    APFloat F = Fn(X.getAPFloat(), Y.getAPFloat(), Z.getAPFloat(), RM);
2741    Floating Result = S.allocFloat(X.getSemantics());
2742    Result.copy(F);
2743    S.Stk.push<Floating>(Result);
2744    return true;
2745  }
2746
2747  // Vector type.
2748  assert(Arg1Type->isVectorType() && Arg2Type->isVectorType() &&
2749         Arg3Type->isVectorType());
2750
2751  const VectorType *VecTy = Arg1Type->castAs<VectorType>();
2752  QualType ElemQT = VecTy->getElementType();
2753  unsigned NumElems = VecTy->getNumElements();
2754
2755  assert(ElemQT == Arg2Type->castAs<VectorType>()->getElementType() &&
2756         ElemQT == Arg3Type->castAs<VectorType>()->getElementType());
2757  assert(NumElems == Arg2Type->castAs<VectorType>()->getNumElements() &&
2758         NumElems == Arg3Type->castAs<VectorType>()->getNumElements());
2759  assert(ElemQT->isRealFloatingType());
2760  (void)ElemQT;
2761
2762  const Pointer &VZ = S.Stk.pop<Pointer>();
2763  const Pointer &VY = S.Stk.pop<Pointer>();
2764  const Pointer &VX = S.Stk.pop<Pointer>();
2765  const Pointer &Dst = S.Stk.peek<Pointer>();
2766  for (unsigned I = 0; I != NumElems; ++I) {
2767    using T = PrimConv<PT_Float>::T;
2768    APFloat X = VX.elem<T>(I).getAPFloat();
2769    APFloat Y = VY.elem<T>(I).getAPFloat();
2770    APFloat Z = VZ.elem<T>(I).getAPFloat();
2771    APFloat F = Fn(X, Y, Z, RM);
2772    Dst.elem<Floating>(I) = Floating(F);
2773  }
2775  return true;
2776}
2777
2778/// AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
///
/// Mask bit I selects element I from LHS, otherwise from RHS. Float and
/// integer element types are handled separately (integers may also convert
/// to a different destination element type).
/// (The signature's first line and a Dst-initialization line are elided in
/// this listing.)
2780                                     const CallExpr *Call) {
2781  const Pointer &RHS = S.Stk.pop<Pointer>();
2782  const Pointer &LHS = S.Stk.pop<Pointer>();
2783  APSInt Mask = popToAPSInt(S, Call->getArg(0));
2784  const Pointer &Dst = S.Stk.peek<Pointer>();
2785
2786  assert(LHS.getNumElems() == RHS.getNumElems());
2787  assert(LHS.getNumElems() == Dst.getNumElems());
2788  unsigned NumElems = LHS.getNumElems();
2789  PrimType ElemT = LHS.getFieldDesc()->getPrimType();
2790  PrimType DstElemT = Dst.getFieldDesc()->getPrimType();
2791
2792  for (unsigned I = 0; I != NumElems; ++I) {
2793    if (ElemT == PT_Float) {
2794      assert(DstElemT == PT_Float);
2795      Dst.elem<Floating>(I) =
2796          Mask[I] ? LHS.elem<Floating>(I) : RHS.elem<Floating>(I);
2797    } else {
2798      APSInt Elem;
2799      INT_TYPE_SWITCH(ElemT, {
2800        Elem = Mask[I] ? LHS.elem<T>(I).toAPSInt() : RHS.elem<T>(I).toAPSInt();
2801      });
2802      INT_TYPE_SWITCH_NO_BOOL(DstElemT,
2803                              { Dst.elem<T>(I) = static_cast<T>(Elem); });
2804    }
2805  }
2807
2808  return true;
2809}
2810
// Blend with an immediate mask: bit (I % 8) of Mask chooses TrueVec over
// FalseVec for element I — the 8-bit immediate repeats for vectors longer
// than eight elements. (The signature's first line is elided in this
// listing.)
2812                              const CallExpr *Call) {
2813  APSInt Mask = popToAPSInt(S, Call->getArg(2));
2814  const Pointer &TrueVec = S.Stk.pop<Pointer>();
2815  const Pointer &FalseVec = S.Stk.pop<Pointer>();
2816  const Pointer &Dst = S.Stk.peek<Pointer>();
2817
2818  assert(FalseVec.getNumElems() == TrueVec.getNumElems());
2819  assert(FalseVec.getNumElems() == Dst.getNumElems());
2820  unsigned NumElems = FalseVec.getNumElems();
2821  PrimType ElemT = FalseVec.getFieldDesc()->getPrimType();
2822  PrimType DstElemT = Dst.getFieldDesc()->getPrimType();
2823
2824  for (unsigned I = 0; I != NumElems; ++I) {
    // Immediate has 8 mask bits; wrap for wider vectors.
2825    bool MaskBit = Mask[I % 8];
2826    if (ElemT == PT_Float) {
2827      assert(DstElemT == PT_Float);
2828      Dst.elem<Floating>(I) =
2829          MaskBit ? TrueVec.elem<Floating>(I) : FalseVec.elem<Floating>(I);
2830    } else {
2831      assert(DstElemT == ElemT);
2832      INT_TYPE_SWITCH_NO_BOOL(DstElemT, {
2833        Dst.elem<T>(I) =
2834            static_cast<T>(MaskBit ? TrueVec.elem<T>(I).toAPSInt()
2835                                   : FalseVec.elem<T>(I).toAPSInt());
2836      });
2837    }
2838  }
2839  Dst.initializeAllElements();
2840
2841  return true;
2842}
2843
// pshufb-style byte shuffle: for each destination byte, bit 7 of the
// control byte zeroes the result; otherwise the control's low nibble
// selects a source byte within the same 16-byte lane. (The signature's
// first line is elided in this listing.)
2845                               const CallExpr *Call) {
2846  assert(Call->getNumArgs() == 2 && "masked forms handled via select*");
2847  const Pointer &Control = S.Stk.pop<Pointer>();
2848  const Pointer &Src = S.Stk.pop<Pointer>();
2849  const Pointer &Dst = S.Stk.peek<Pointer>();
2850
2851  unsigned NumElems = Dst.getNumElems();
2852  assert(NumElems == Control.getNumElems());
2853  assert(NumElems == Dst.getNumElems());
2854
2855  for (unsigned Idx = 0; Idx != NumElems; ++Idx) {
2856    uint8_t Ctlb = static_cast<uint8_t>(Control.elem<int8_t>(Idx));
2857
    // High bit set -> zero this destination byte.
2858    if (Ctlb & 0x80) {
2859      Dst.elem<int8_t>(Idx) = 0;
2860    } else {
      // Low nibble indexes within the current 16-byte lane.
2861      unsigned LaneBase = (Idx / 16) * 16;
2862      unsigned SrcOffset = Ctlb & 0x0F;
2863      unsigned SrcIdx = LaneBase + SrcOffset;
2864
2865      Dst.elem<int8_t>(Idx) = Src.elem<int8_t>(SrcIdx);
2866    }
2867  }
2868  Dst.initializeAllElements();
2869  return true;
2870}
2871
// pshufd/pshuflw/pshufhw-style shuffle driven by an 8-bit immediate: each
// element takes a 2-bit selector from the immediate, applied per 128-bit
// lane. For 16-bit elements only the low half of each lane is shuffled
// (or the high half when IsShufHW); the other half is copied through
// unchanged. (The signature's first line is elided in this listing.)
2873                             const CallExpr *Call, bool IsShufHW) {
2874  assert(Call->getNumArgs() == 2 && "masked forms handled via select*");
2875  APSInt ControlImm = popToAPSInt(S, Call->getArg(1));
2876  const Pointer &Src = S.Stk.pop<Pointer>();
2877  const Pointer &Dst = S.Stk.peek<Pointer>();
2878
2879  unsigned NumElems = Dst.getNumElems();
2880  PrimType ElemT = Dst.getFieldDesc()->getPrimType();
2881
  // Only 16- and 32-bit element shuffles are supported here.
2882  unsigned ElemBits = static_cast<unsigned>(primSize(ElemT) * 8);
2883  if (ElemBits != 16 && ElemBits != 32)
2884    return false;
2885
2886  unsigned LaneElts = 128u / ElemBits;
2887  assert(LaneElts && (NumElems % LaneElts == 0));
2888
2889  uint8_t Ctl = static_cast<uint8_t>(ControlImm.getZExtValue());
2890
2891  for (unsigned Idx = 0; Idx != NumElems; Idx++) {
2892    unsigned LaneBase = (Idx / LaneElts) * LaneElts;
2893    unsigned LaneIdx = Idx % LaneElts;
    // Default: copy-through (used by the untouched half in the 16-bit case).
2894    unsigned SrcIdx = Idx;
2895    unsigned Sel = (Ctl >> (2 * (LaneIdx & 0x3))) & 0x3;
2896    if (ElemBits == 32) {
2897      SrcIdx = LaneBase + Sel;
2898    } else {
      // 16-bit: a lane holds 8 elements; shuffle only the selected half.
2899      constexpr unsigned HalfSize = 4;
2900      bool InHigh = LaneIdx >= HalfSize;
2901      if (!IsShufHW && !InHigh) {
2902        SrcIdx = LaneBase + Sel;
2903      } else if (IsShufHW && InHigh) {
2904        SrcIdx = LaneBase + HalfSize + Sel;
2905      }
2906    }
2907
2908    INT_TYPE_SWITCH_NO_BOOL(ElemT, { Dst.elem<T>(Idx) = Src.elem<T>(SrcIdx); });
2909  }
2910  Dst.initializeAllElements();
2911  return true;
2912}
2913
// Concatenates all elements of each vector operand into one wide APInt (for
// floating-point elements only the sign bit is recorded) and applies the
// predicate \p Fn to the two wide values, pushing the boolean result.
// Presumably backs ptest/testz-style builtins — confirm at the call sites.
// (The signature's first line is elided in this listing.)
2915    InterpState &S, CodePtr OpPC, const CallExpr *Call,
2916    llvm::function_ref<bool(const APInt &A, const APInt &B)> Fn) {
2917  const Pointer &RHS = S.Stk.pop<Pointer>();
2918  const Pointer &LHS = S.Stk.pop<Pointer>();
2919
2920  assert(LHS.getNumElems() == RHS.getNumElems());
2921
2922  unsigned SourceLen = LHS.getNumElems();
2923  QualType ElemQT = getElemType(LHS);
2924  OptPrimType ElemPT = S.getContext().classify(ElemQT);
2925  unsigned LaneWidth = S.getASTContext().getTypeSize(ElemQT);
2926
2927  APInt AWide(LaneWidth * SourceLen, 0);
2928  APInt BWide(LaneWidth * SourceLen, 0);
2929
2930  for (unsigned I = 0; I != SourceLen; ++I) {
2931    APInt ALane;
2932    APInt BLane;
2933
2934    if (ElemQT->isIntegerType()) { // Get value.
2935      INT_TYPE_SWITCH_NO_BOOL(*ElemPT, {
2936        ALane = LHS.elem<T>(I).toAPSInt();
2937        BLane = RHS.elem<T>(I).toAPSInt();
2938      });
2939    } else if (ElemQT->isFloatingType()) { // Get only sign bit.
2940      using T = PrimConv<PT_Float>::T;
2941      ALane = LHS.elem<T>(I).getAPFloat().bitcastToAPInt().isNegative();
2942      BLane = RHS.elem<T>(I).getAPFloat().bitcastToAPInt().isNegative();
2943    } else { // Must be integer or floating type.
2944      return false;
2945    }
    // Place this element's bits at its lane position in the wide value.
2946    AWide.insertBits(ALane, I * LaneWidth);
2947    BWide.insertBits(BLane, I * LaneWidth);
2948  }
2949  pushInteger(S, Fn(AWide, BWide), Call->getType());
2950  return true;
2951}
2952
// movmsk-style reduction: sets bit I of the integer result to the sign bit
// of vector element I (integer elements by value, float elements by their
// bit pattern). (The signature's first line is elided in this listing.)
2954                                    const CallExpr *Call) {
2955  assert(Call->getNumArgs() == 1);
2956
2957  const Pointer &Source = S.Stk.pop<Pointer>();
2958
2959  unsigned SourceLen = Source.getNumElems();
2960  QualType ElemQT = getElemType(Source);
2961  OptPrimType ElemT = S.getContext().classify(ElemQT);
2962  unsigned ResultLen =
2963      S.getASTContext().getTypeSize(Call->getType()); // Always 32-bit integer.
2964  APInt Result(ResultLen, 0);
2965
2966  for (unsigned I = 0; I != SourceLen; ++I) {
2967    APInt Elem;
2968    if (ElemQT->isIntegerType()) {
2969      INT_TYPE_SWITCH_NO_BOOL(*ElemT, { Elem = Source.elem<T>(I).toAPSInt(); });
2970    } else if (ElemQT->isRealFloatingType()) {
      // Bitcast so the float's sign bit becomes the APInt's top bit.
2971      using T = PrimConv<PT_Float>::T;
2972      Elem = Source.elem<T>(I).getAPFloat().bitcastToAPInt();
2973    } else {
2974      return false;
2975    }
2976    Result.setBitVal(I, Elem.isNegative());
2977  }
2978  pushInteger(S, Result, Call->getType());
2979  return true;
2980}
2981
// Applies the three-operand integer callback \p Fn in one of three shapes:
// scalar/scalar/scalar, vector/vector/scalar (scalar third operand
// broadcast), and vector/vector/vector (fully elementwise).
// (The signature's first line and some INT_TYPE_SWITCH invocation lines are
// elided in this listing.)
2983    InterpState &S, CodePtr OpPC, const CallExpr *Call,
2984    llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &)>
2985        Fn) {
2986  assert(Call->getNumArgs() == 3);
2987
2988  QualType Arg0Type = Call->getArg(0)->getType();
2989  QualType Arg2Type = Call->getArg(2)->getType();
2990  // Non-vector integer types.
2991  if (!Arg0Type->isVectorType()) {
    // Pop in reverse of push order: third operand first.
2992    const APSInt &Op2 = popToAPSInt(S, Arg2Type);
2993    const APSInt &Op1 = popToAPSInt(S, Call->getArg(1));
2994    const APSInt &Op0 = popToAPSInt(S, Arg0Type);
2995    APSInt Result = APSInt(Fn(Op0, Op1, Op2), Op0.isUnsigned());
2996    pushInteger(S, Result, Call->getType());
2997    return true;
2998  }
2999
3000  const auto *VecT = Arg0Type->castAs<VectorType>();
3001  PrimType ElemT = *S.getContext().classify(VecT->getElementType());
3002  unsigned NumElems = VecT->getNumElements();
3003  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
3004
3005  // Vector + Vector + Scalar case.
3006  if (!Arg2Type->isVectorType()) {
3007    APSInt Op2 = popToAPSInt(S, Arg2Type);
3008
3009    const Pointer &Op1 = S.Stk.pop<Pointer>();
3010    const Pointer &Op0 = S.Stk.pop<Pointer>();
3011    const Pointer &Dst = S.Stk.peek<Pointer>();
3012    for (unsigned I = 0; I != NumElems; ++I) {
3014        Dst.elem<T>(I) = static_cast<T>(APSInt(
3015            Fn(Op0.elem<T>(I).toAPSInt(), Op1.elem<T>(I).toAPSInt(), Op2),
3016            DestUnsigned));
3017      });
3018    }
3020
3021    return true;
3022  }
3023
3024  // Vector type.
3025  const Pointer &Op2 = S.Stk.pop<Pointer>();
3026  const Pointer &Op1 = S.Stk.pop<Pointer>();
3027  const Pointer &Op0 = S.Stk.pop<Pointer>();
3028  const Pointer &Dst = S.Stk.peek<Pointer>();
3029  for (unsigned I = 0; I != NumElems; ++I) {
3030    APSInt Val0, Val1, Val2;
3032      Val0 = Op0.elem<T>(I).toAPSInt();
3033      Val1 = Op1.elem<T>(I).toAPSInt();
3034      Val2 = Op2.elem<T>(I).toAPSInt();
3035    });
3036    APSInt Result = APSInt(Fn(Val0, Val1, Val2), Val0.isUnsigned());
3038                            { Dst.elem<T>(I) = static_cast<T>(Result); });
3039  }
3041
3042  return true;
3043}
3044
3046 const CallExpr *Call,
3047 unsigned ID) {
3048 assert(Call->getNumArgs() == 2);
3049
3050 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
3051 uint64_t Index = ImmAPS.getZExtValue();
3052
3053 const Pointer &Src = S.Stk.pop<Pointer>();
3054 if (!Src.getFieldDesc()->isPrimitiveArray())
3055 return false;
3056
3057 const Pointer &Dst = S.Stk.peek<Pointer>();
3058 if (!Dst.getFieldDesc()->isPrimitiveArray())
3059 return false;
3060
3061 unsigned SrcElems = Src.getNumElems();
3062 unsigned DstElems = Dst.getNumElems();
3063
3064 unsigned NumLanes = SrcElems / DstElems;
3065 unsigned Lane = static_cast<unsigned>(Index % NumLanes);
3066 unsigned ExtractPos = Lane * DstElems;
3067
3068 PrimType ElemT = Src.getFieldDesc()->getPrimType();
3069
3070 TYPE_SWITCH(ElemT, {
3071 for (unsigned I = 0; I != DstElems; ++I) {
3072 Dst.elem<T>(I) = Src.elem<T>(ExtractPos + I);
3073 }
3074 });
3075
3077 return true;
3078}
3079
3081 CodePtr OpPC,
3082 const CallExpr *Call,
3083 unsigned ID) {
3084 assert(Call->getNumArgs() == 4);
3085
3086 APSInt MaskAPS = popToAPSInt(S, Call->getArg(3));
3087 const Pointer &Merge = S.Stk.pop<Pointer>();
3088 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
3089 const Pointer &Src = S.Stk.pop<Pointer>();
3090
3091 if (!Src.getFieldDesc()->isPrimitiveArray() ||
3092 !Merge.getFieldDesc()->isPrimitiveArray())
3093 return false;
3094
3095 const Pointer &Dst = S.Stk.peek<Pointer>();
3096 if (!Dst.getFieldDesc()->isPrimitiveArray())
3097 return false;
3098
3099 unsigned SrcElems = Src.getNumElems();
3100 unsigned DstElems = Dst.getNumElems();
3101
3102 unsigned NumLanes = SrcElems / DstElems;
3103 unsigned Lane = static_cast<unsigned>(ImmAPS.getZExtValue() % NumLanes);
3104 unsigned Base = Lane * DstElems;
3105
3106 PrimType ElemT = Src.getFieldDesc()->getPrimType();
3107
3108 TYPE_SWITCH(ElemT, {
3109 for (unsigned I = 0; I != DstElems; ++I) {
3110 if (MaskAPS[I])
3111 Dst.elem<T>(I) = Src.elem<T>(Base + I);
3112 else
3113 Dst.elem<T>(I) = Merge.elem<T>(I);
3114 }
3115 });
3116
3118 return true;
3119}
3120
3122 const CallExpr *Call,
3123 unsigned ID) {
3124 assert(Call->getNumArgs() == 3);
3125
3126 APSInt ImmAPS = popToAPSInt(S, Call->getArg(2));
3127 uint64_t Index = ImmAPS.getZExtValue();
3128
3129 const Pointer &SubVec = S.Stk.pop<Pointer>();
3130 if (!SubVec.getFieldDesc()->isPrimitiveArray())
3131 return false;
3132
3133 const Pointer &BaseVec = S.Stk.pop<Pointer>();
3134 if (!BaseVec.getFieldDesc()->isPrimitiveArray())
3135 return false;
3136
3137 const Pointer &Dst = S.Stk.peek<Pointer>();
3138
3139 unsigned BaseElements = BaseVec.getNumElems();
3140 unsigned SubElements = SubVec.getNumElems();
3141
3142 assert(SubElements != 0 && BaseElements != 0 &&
3143 (BaseElements % SubElements) == 0);
3144
3145 unsigned NumLanes = BaseElements / SubElements;
3146 unsigned Lane = static_cast<unsigned>(Index % NumLanes);
3147 unsigned InsertPos = Lane * SubElements;
3148
3149 PrimType ElemT = BaseVec.getFieldDesc()->getPrimType();
3150
3151 TYPE_SWITCH(ElemT, {
3152 for (unsigned I = 0; I != BaseElements; ++I)
3153 Dst.elem<T>(I) = BaseVec.elem<T>(I);
3154 for (unsigned I = 0; I != SubElements; ++I)
3155 Dst.elem<T>(InsertPos + I) = SubVec.elem<T>(I);
3156 });
3157
3159 return true;
3160}
3161
3163 const CallExpr *Call) {
3164 assert(Call->getNumArgs() == 1);
3165
3166 const Pointer &Source = S.Stk.pop<Pointer>();
3167 const Pointer &Dest = S.Stk.peek<Pointer>();
3168
3169 unsigned SourceLen = Source.getNumElems();
3170 QualType ElemQT = getElemType(Source);
3171 OptPrimType ElemT = S.getContext().classify(ElemQT);
3172 unsigned ElemBitWidth = S.getASTContext().getTypeSize(ElemQT);
3173
3174 bool DestUnsigned = Call->getCallReturnType(S.getASTContext())
3175 ->castAs<VectorType>()
3176 ->getElementType()
3178
3179 INT_TYPE_SWITCH_NO_BOOL(*ElemT, {
3180 APSInt MinIndex(ElemBitWidth, DestUnsigned);
3181 APSInt MinVal = Source.elem<T>(0).toAPSInt();
3182
3183 for (unsigned I = 1; I != SourceLen; ++I) {
3184 APSInt Val = Source.elem<T>(I).toAPSInt();
3185 if (MinVal.ugt(Val)) {
3186 MinVal = Val;
3187 MinIndex = I;
3188 }
3189 }
3190
3191 Dest.elem<T>(0) = static_cast<T>(MinVal);
3192 Dest.elem<T>(1) = static_cast<T>(MinIndex);
3193 for (unsigned I = 2; I != SourceLen; ++I) {
3194 Dest.elem<T>(I) = static_cast<T>(APSInt(ElemBitWidth, DestUnsigned));
3195 }
3196 });
3197 Dest.initializeAllElements();
3198 return true;
3199}
3200
3202 const CallExpr *Call, bool MaskZ) {
3203 assert(Call->getNumArgs() == 5);
3204
3205 APInt U = popToAPSInt(S, Call->getArg(4)); // Lane mask
3206 APInt Imm = popToAPSInt(S, Call->getArg(3)); // Ternary truth table
3207 const Pointer &C = S.Stk.pop<Pointer>();
3208 const Pointer &B = S.Stk.pop<Pointer>();
3209 const Pointer &A = S.Stk.pop<Pointer>();
3210 const Pointer &Dst = S.Stk.peek<Pointer>();
3211
3212 unsigned DstLen = A.getNumElems();
3213 QualType ElemQT = getElemType(A);
3214 OptPrimType ElemT = S.getContext().classify(ElemQT);
3215 unsigned LaneWidth = S.getASTContext().getTypeSize(ElemQT);
3216 bool DstUnsigned = ElemQT->isUnsignedIntegerOrEnumerationType();
3217
3218 INT_TYPE_SWITCH_NO_BOOL(*ElemT, {
3219 for (unsigned I = 0; I != DstLen; ++I) {
3220 APInt ALane = A.elem<T>(I).toAPSInt();
3221 APInt BLane = B.elem<T>(I).toAPSInt();
3222 APInt CLane = C.elem<T>(I).toAPSInt();
3223 APInt RLane(LaneWidth, 0);
3224 if (U[I]) { // If lane not masked, compute ternary logic.
3225 for (unsigned Bit = 0; Bit != LaneWidth; ++Bit) {
3226 unsigned ABit = ALane[Bit];
3227 unsigned BBit = BLane[Bit];
3228 unsigned CBit = CLane[Bit];
3229 unsigned Idx = (ABit << 2) | (BBit << 1) | (CBit);
3230 RLane.setBitVal(Bit, Imm[Idx]);
3231 }
3232 Dst.elem<T>(I) = static_cast<T>(APSInt(RLane, DstUnsigned));
3233 } else if (MaskZ) { // If zero masked, zero the lane.
3234 Dst.elem<T>(I) = static_cast<T>(APSInt(RLane, DstUnsigned));
3235 } else { // Just masked, put in A lane.
3236 Dst.elem<T>(I) = static_cast<T>(APSInt(ALane, DstUnsigned));
3237 }
3238 }
3239 });
3240 Dst.initializeAllElements();
3241 return true;
3242}
3243
3245 const CallExpr *Call, unsigned ID) {
3246 assert(Call->getNumArgs() == 2);
3247
3248 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
3249 const Pointer &Vec = S.Stk.pop<Pointer>();
3250 if (!Vec.getFieldDesc()->isPrimitiveArray())
3251 return false;
3252
3253 unsigned NumElems = Vec.getNumElems();
3254 unsigned Index =
3255 static_cast<unsigned>(ImmAPS.getZExtValue() & (NumElems - 1));
3256
3257 PrimType ElemT = Vec.getFieldDesc()->getPrimType();
3258 // FIXME(#161685): Replace float+int split with a numeric-only type switch
3259 if (ElemT == PT_Float) {
3260 S.Stk.push<Floating>(Vec.elem<Floating>(Index));
3261 return true;
3262 }
3264 APSInt V = Vec.elem<T>(Index).toAPSInt();
3265 pushInteger(S, V, Call->getType());
3266 });
3267
3268 return true;
3269}
3270
3272 const CallExpr *Call, unsigned ID) {
3273 assert(Call->getNumArgs() == 3);
3274
3275 APSInt ImmAPS = popToAPSInt(S, Call->getArg(2));
3276 APSInt ValAPS = popToAPSInt(S, Call->getArg(1));
3277
3278 const Pointer &Base = S.Stk.pop<Pointer>();
3279 if (!Base.getFieldDesc()->isPrimitiveArray())
3280 return false;
3281
3282 const Pointer &Dst = S.Stk.peek<Pointer>();
3283
3284 unsigned NumElems = Base.getNumElems();
3285 unsigned Index =
3286 static_cast<unsigned>(ImmAPS.getZExtValue() & (NumElems - 1));
3287
3288 PrimType ElemT = Base.getFieldDesc()->getPrimType();
3290 for (unsigned I = 0; I != NumElems; ++I)
3291 Dst.elem<T>(I) = Base.elem<T>(I);
3292 Dst.elem<T>(Index) = static_cast<T>(ValAPS);
3293 });
3294
3296 return true;
3297}
3298
3299static bool evalICmpImm(uint8_t Imm, const APSInt &A, const APSInt &B,
3300 bool IsUnsigned) {
3301 switch (Imm & 0x7) {
3302 case 0x00: // _MM_CMPINT_EQ
3303 return (A == B);
3304 case 0x01: // _MM_CMPINT_LT
3305 return IsUnsigned ? A.ult(B) : A.slt(B);
3306 case 0x02: // _MM_CMPINT_LE
3307 return IsUnsigned ? A.ule(B) : A.sle(B);
3308 case 0x03: // _MM_CMPINT_FALSE
3309 return false;
3310 case 0x04: // _MM_CMPINT_NE
3311 return (A != B);
3312 case 0x05: // _MM_CMPINT_NLT
3313 return IsUnsigned ? A.ugt(B) : A.sgt(B);
3314 case 0x06: // _MM_CMPINT_NLE
3315 return IsUnsigned ? A.uge(B) : A.sge(B);
3316 case 0x07: // _MM_CMPINT_TRUE
3317 return true;
3318 default:
3319 llvm_unreachable("Invalid Op");
3320 }
3321}
3322
3324 const CallExpr *Call, unsigned ID,
3325 bool IsUnsigned) {
3326 assert(Call->getNumArgs() == 4);
3327
3328 APSInt Mask = popToAPSInt(S, Call->getArg(3));
3329 APSInt Opcode = popToAPSInt(S, Call->getArg(2));
3330 unsigned CmpOp = static_cast<unsigned>(Opcode.getZExtValue());
3331 const Pointer &RHS = S.Stk.pop<Pointer>();
3332 const Pointer &LHS = S.Stk.pop<Pointer>();
3333
3334 assert(LHS.getNumElems() == RHS.getNumElems());
3335
3336 APInt RetMask = APInt::getZero(LHS.getNumElems());
3337 unsigned VectorLen = LHS.getNumElems();
3338 PrimType ElemT = LHS.getFieldDesc()->getPrimType();
3339
3340 for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
3341 APSInt A, B;
3343 A = LHS.elem<T>(ElemNum).toAPSInt();
3344 B = RHS.elem<T>(ElemNum).toAPSInt();
3345 });
3346 RetMask.setBitVal(ElemNum,
3347 Mask[ElemNum] && evalICmpImm(CmpOp, A, B, IsUnsigned));
3348 }
3349 pushInteger(S, RetMask, Call->getType());
3350 return true;
3351}
3352
3354 const CallExpr *Call) {
3355 assert(Call->getNumArgs() == 1);
3356
3357 QualType Arg0Type = Call->getArg(0)->getType();
3358 const auto *VecT = Arg0Type->castAs<VectorType>();
3359 PrimType ElemT = *S.getContext().classify(VecT->getElementType());
3360 unsigned NumElems = VecT->getNumElements();
3361 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
3362 const Pointer &Src = S.Stk.pop<Pointer>();
3363 const Pointer &Dst = S.Stk.peek<Pointer>();
3364
3365 for (unsigned I = 0; I != NumElems; ++I) {
3367 APSInt ElemI = Src.elem<T>(I).toAPSInt();
3368 APInt ConflictMask(ElemI.getBitWidth(), 0);
3369 for (unsigned J = 0; J != I; ++J) {
3370 APSInt ElemJ = Src.elem<T>(J).toAPSInt();
3371 ConflictMask.setBitVal(J, ElemI == ElemJ);
3372 }
3373 Dst.elem<T>(I) = static_cast<T>(APSInt(ConflictMask, DestUnsigned));
3374 });
3375 }
3377 return true;
3378}
3379
3381 InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID,
3382 llvm::function_ref<APInt(const Pointer &, unsigned Lane, unsigned I,
3383 unsigned Shift)>
3384 Fn) {
3385 assert(Call->getNumArgs() == 2);
3386
3387 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
3388 uint64_t Shift = ImmAPS.getZExtValue() & 0xff;
3389
3390 const Pointer &Src = S.Stk.pop<Pointer>();
3391 if (!Src.getFieldDesc()->isPrimitiveArray())
3392 return false;
3393
3394 unsigned NumElems = Src.getNumElems();
3395 const Pointer &Dst = S.Stk.peek<Pointer>();
3396 PrimType ElemT = Src.getFieldDesc()->getPrimType();
3397
3398 for (unsigned Lane = 0; Lane != NumElems; Lane += 16) {
3399 for (unsigned I = 0; I != 16; ++I) {
3400 unsigned Base = Lane + I;
3401 APSInt Result = APSInt(Fn(Src, Lane, I, Shift));
3403 { Dst.elem<T>(Base) = static_cast<T>(Result); });
3404 }
3405 }
3406
3408
3409 return true;
3410}
3411
3413 InterpState &S, CodePtr OpPC, const CallExpr *Call,
3414 llvm::function_ref<std::pair<unsigned, int>(unsigned, unsigned)>
3415 GetSourceIndex) {
3416
3417 assert(Call->getNumArgs() == 3);
3418
3419 unsigned ShuffleMask = 0;
3420 Pointer A, MaskVector, B;
3421
3422 QualType Arg2Type = Call->getArg(2)->getType();
3423 bool IsVectorMask = false;
3424 if (Arg2Type->isVectorType()) {
3425 IsVectorMask = true;
3426 B = S.Stk.pop<Pointer>();
3427 MaskVector = S.Stk.pop<Pointer>();
3428 A = S.Stk.pop<Pointer>();
3429 } else if (Arg2Type->isIntegerType()) {
3430 ShuffleMask = popToAPSInt(S, Call->getArg(2)).getZExtValue();
3431 B = S.Stk.pop<Pointer>();
3432 A = S.Stk.pop<Pointer>();
3433 } else {
3434 return false;
3435 }
3436
3437 QualType Arg0Type = Call->getArg(0)->getType();
3438 const auto *VecT = Arg0Type->castAs<VectorType>();
3439 PrimType ElemT = *S.getContext().classify(VecT->getElementType());
3440 unsigned NumElems = VecT->getNumElements();
3441
3442 const Pointer &Dst = S.Stk.peek<Pointer>();
3443
3444 PrimType MaskElemT = PT_Uint32;
3445 if (IsVectorMask) {
3446 QualType Arg1Type = Call->getArg(1)->getType();
3447 const auto *MaskVecT = Arg1Type->castAs<VectorType>();
3448 QualType MaskElemType = MaskVecT->getElementType();
3449 MaskElemT = *S.getContext().classify(MaskElemType);
3450 }
3451
3452 for (unsigned DstIdx = 0; DstIdx != NumElems; ++DstIdx) {
3453 if (IsVectorMask) {
3454 INT_TYPE_SWITCH(MaskElemT, {
3455 ShuffleMask = static_cast<unsigned>(MaskVector.elem<T>(DstIdx));
3456 });
3457 }
3458 auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);
3459
3460 if (SrcIdx < 0) {
3461 // Zero out this element
3462 if (ElemT == PT_Float) {
3463 Dst.elem<Floating>(DstIdx) = Floating(
3464 S.getASTContext().getFloatTypeSemantics(VecT->getElementType()));
3465 } else {
3466 INT_TYPE_SWITCH_NO_BOOL(ElemT, { Dst.elem<T>(DstIdx) = T::from(0); });
3467 }
3468 } else {
3469 const Pointer &Src = (SrcVecIdx == 0) ? A : B;
3470 TYPE_SWITCH(ElemT, { Dst.elem<T>(DstIdx) = Src.elem<T>(SrcIdx); });
3471 }
3472 }
3474
3475 return true;
3476}
3477
3479 uint32_t BuiltinID) {
3480 if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
3481 return Invalid(S, OpPC);
3482
3483 const InterpFrame *Frame = S.Current;
3484 switch (BuiltinID) {
3485 case Builtin::BI__builtin_is_constant_evaluated:
3487
3488 case Builtin::BI__builtin_assume:
3489 case Builtin::BI__assume:
3490 return interp__builtin_assume(S, OpPC, Frame, Call);
3491
3492 case Builtin::BI__builtin_strcmp:
3493 case Builtin::BIstrcmp:
3494 case Builtin::BI__builtin_strncmp:
3495 case Builtin::BIstrncmp:
3496 case Builtin::BI__builtin_wcsncmp:
3497 case Builtin::BIwcsncmp:
3498 case Builtin::BI__builtin_wcscmp:
3499 case Builtin::BIwcscmp:
3500 return interp__builtin_strcmp(S, OpPC, Frame, Call, BuiltinID);
3501
3502 case Builtin::BI__builtin_strlen:
3503 case Builtin::BIstrlen:
3504 case Builtin::BI__builtin_wcslen:
3505 case Builtin::BIwcslen:
3506 return interp__builtin_strlen(S, OpPC, Frame, Call, BuiltinID);
3507
3508 case Builtin::BI__builtin_nan:
3509 case Builtin::BI__builtin_nanf:
3510 case Builtin::BI__builtin_nanl:
3511 case Builtin::BI__builtin_nanf16:
3512 case Builtin::BI__builtin_nanf128:
3513 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/false);
3514
3515 case Builtin::BI__builtin_nans:
3516 case Builtin::BI__builtin_nansf:
3517 case Builtin::BI__builtin_nansl:
3518 case Builtin::BI__builtin_nansf16:
3519 case Builtin::BI__builtin_nansf128:
3520 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/true);
3521
3522 case Builtin::BI__builtin_huge_val:
3523 case Builtin::BI__builtin_huge_valf:
3524 case Builtin::BI__builtin_huge_vall:
3525 case Builtin::BI__builtin_huge_valf16:
3526 case Builtin::BI__builtin_huge_valf128:
3527 case Builtin::BI__builtin_inf:
3528 case Builtin::BI__builtin_inff:
3529 case Builtin::BI__builtin_infl:
3530 case Builtin::BI__builtin_inff16:
3531 case Builtin::BI__builtin_inff128:
3532 return interp__builtin_inf(S, OpPC, Frame, Call);
3533
3534 case Builtin::BI__builtin_copysign:
3535 case Builtin::BI__builtin_copysignf:
3536 case Builtin::BI__builtin_copysignl:
3537 case Builtin::BI__builtin_copysignf128:
3538 return interp__builtin_copysign(S, OpPC, Frame);
3539
3540 case Builtin::BI__builtin_fmin:
3541 case Builtin::BI__builtin_fminf:
3542 case Builtin::BI__builtin_fminl:
3543 case Builtin::BI__builtin_fminf16:
3544 case Builtin::BI__builtin_fminf128:
3545 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/false);
3546
3547 case Builtin::BI__builtin_fminimum_num:
3548 case Builtin::BI__builtin_fminimum_numf:
3549 case Builtin::BI__builtin_fminimum_numl:
3550 case Builtin::BI__builtin_fminimum_numf16:
3551 case Builtin::BI__builtin_fminimum_numf128:
3552 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/true);
3553
3554 case Builtin::BI__builtin_fmax:
3555 case Builtin::BI__builtin_fmaxf:
3556 case Builtin::BI__builtin_fmaxl:
3557 case Builtin::BI__builtin_fmaxf16:
3558 case Builtin::BI__builtin_fmaxf128:
3559 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/false);
3560
3561 case Builtin::BI__builtin_fmaximum_num:
3562 case Builtin::BI__builtin_fmaximum_numf:
3563 case Builtin::BI__builtin_fmaximum_numl:
3564 case Builtin::BI__builtin_fmaximum_numf16:
3565 case Builtin::BI__builtin_fmaximum_numf128:
3566 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/true);
3567
3568 case Builtin::BI__builtin_isnan:
3569 return interp__builtin_isnan(S, OpPC, Frame, Call);
3570
3571 case Builtin::BI__builtin_issignaling:
3572 return interp__builtin_issignaling(S, OpPC, Frame, Call);
3573
3574 case Builtin::BI__builtin_isinf:
3575 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/false, Call);
3576
3577 case Builtin::BI__builtin_isinf_sign:
3578 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/true, Call);
3579
3580 case Builtin::BI__builtin_isfinite:
3581 return interp__builtin_isfinite(S, OpPC, Frame, Call);
3582
3583 case Builtin::BI__builtin_isnormal:
3584 return interp__builtin_isnormal(S, OpPC, Frame, Call);
3585
3586 case Builtin::BI__builtin_issubnormal:
3587 return interp__builtin_issubnormal(S, OpPC, Frame, Call);
3588
3589 case Builtin::BI__builtin_iszero:
3590 return interp__builtin_iszero(S, OpPC, Frame, Call);
3591
3592 case Builtin::BI__builtin_signbit:
3593 case Builtin::BI__builtin_signbitf:
3594 case Builtin::BI__builtin_signbitl:
3595 return interp__builtin_signbit(S, OpPC, Frame, Call);
3596
3597 case Builtin::BI__builtin_isgreater:
3598 case Builtin::BI__builtin_isgreaterequal:
3599 case Builtin::BI__builtin_isless:
3600 case Builtin::BI__builtin_islessequal:
3601 case Builtin::BI__builtin_islessgreater:
3602 case Builtin::BI__builtin_isunordered:
3603 return interp_floating_comparison(S, OpPC, Call, BuiltinID);
3604
3605 case Builtin::BI__builtin_isfpclass:
3606 return interp__builtin_isfpclass(S, OpPC, Frame, Call);
3607
3608 case Builtin::BI__builtin_fpclassify:
3609 return interp__builtin_fpclassify(S, OpPC, Frame, Call);
3610
3611 case Builtin::BI__builtin_fabs:
3612 case Builtin::BI__builtin_fabsf:
3613 case Builtin::BI__builtin_fabsl:
3614 case Builtin::BI__builtin_fabsf128:
3615 return interp__builtin_fabs(S, OpPC, Frame);
3616
3617 case Builtin::BI__builtin_abs:
3618 case Builtin::BI__builtin_labs:
3619 case Builtin::BI__builtin_llabs:
3620 return interp__builtin_abs(S, OpPC, Frame, Call);
3621
3622 case Builtin::BI__builtin_popcount:
3623 case Builtin::BI__builtin_popcountl:
3624 case Builtin::BI__builtin_popcountll:
3625 case Builtin::BI__builtin_popcountg:
3626 case Builtin::BI__popcnt16: // Microsoft variants of popcount
3627 case Builtin::BI__popcnt:
3628 case Builtin::BI__popcnt64:
3629 return interp__builtin_popcount(S, OpPC, Frame, Call);
3630
3631 case Builtin::BI__builtin_parity:
3632 case Builtin::BI__builtin_parityl:
3633 case Builtin::BI__builtin_parityll:
3635 S, OpPC, Call, [](const APSInt &Val) {
3636 return APInt(Val.getBitWidth(), Val.popcount() % 2);
3637 });
3638 case Builtin::BI__builtin_clrsb:
3639 case Builtin::BI__builtin_clrsbl:
3640 case Builtin::BI__builtin_clrsbll:
3642 S, OpPC, Call, [](const APSInt &Val) {
3643 return APInt(Val.getBitWidth(),
3644 Val.getBitWidth() - Val.getSignificantBits());
3645 });
3646 case Builtin::BI__builtin_bitreverse8:
3647 case Builtin::BI__builtin_bitreverse16:
3648 case Builtin::BI__builtin_bitreverse32:
3649 case Builtin::BI__builtin_bitreverse64:
3651 S, OpPC, Call, [](const APSInt &Val) { return Val.reverseBits(); });
3652
3653 case Builtin::BI__builtin_classify_type:
3654 return interp__builtin_classify_type(S, OpPC, Frame, Call);
3655
3656 case Builtin::BI__builtin_expect:
3657 case Builtin::BI__builtin_expect_with_probability:
3658 return interp__builtin_expect(S, OpPC, Frame, Call);
3659
3660 case Builtin::BI__builtin_rotateleft8:
3661 case Builtin::BI__builtin_rotateleft16:
3662 case Builtin::BI__builtin_rotateleft32:
3663 case Builtin::BI__builtin_rotateleft64:
3664 case Builtin::BI_rotl8: // Microsoft variants of rotate left
3665 case Builtin::BI_rotl16:
3666 case Builtin::BI_rotl:
3667 case Builtin::BI_lrotl:
3668 case Builtin::BI_rotl64:
3670 S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) {
3671 return Value.rotl(Amount);
3672 });
3673
3674 case Builtin::BI__builtin_rotateright8:
3675 case Builtin::BI__builtin_rotateright16:
3676 case Builtin::BI__builtin_rotateright32:
3677 case Builtin::BI__builtin_rotateright64:
3678 case Builtin::BI_rotr8: // Microsoft variants of rotate right
3679 case Builtin::BI_rotr16:
3680 case Builtin::BI_rotr:
3681 case Builtin::BI_lrotr:
3682 case Builtin::BI_rotr64:
3684 S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) {
3685 return Value.rotr(Amount);
3686 });
3687
3688 case Builtin::BI__builtin_ffs:
3689 case Builtin::BI__builtin_ffsl:
3690 case Builtin::BI__builtin_ffsll:
3692 S, OpPC, Call, [](const APSInt &Val) {
3693 return APInt(Val.getBitWidth(),
3694 Val.isZero() ? 0u : Val.countTrailingZeros() + 1u);
3695 });
3696
3697 case Builtin::BIaddressof:
3698 case Builtin::BI__addressof:
3699 case Builtin::BI__builtin_addressof:
3700 assert(isNoopBuiltin(BuiltinID));
3701 return interp__builtin_addressof(S, OpPC, Frame, Call);
3702
3703 case Builtin::BIas_const:
3704 case Builtin::BIforward:
3705 case Builtin::BIforward_like:
3706 case Builtin::BImove:
3707 case Builtin::BImove_if_noexcept:
3708 assert(isNoopBuiltin(BuiltinID));
3709 return interp__builtin_move(S, OpPC, Frame, Call);
3710
3711 case Builtin::BI__builtin_eh_return_data_regno:
3713
3714 case Builtin::BI__builtin_launder:
3715 assert(isNoopBuiltin(BuiltinID));
3716 return true;
3717
3718 case Builtin::BI__builtin_add_overflow:
3719 case Builtin::BI__builtin_sub_overflow:
3720 case Builtin::BI__builtin_mul_overflow:
3721 case Builtin::BI__builtin_sadd_overflow:
3722 case Builtin::BI__builtin_uadd_overflow:
3723 case Builtin::BI__builtin_uaddl_overflow:
3724 case Builtin::BI__builtin_uaddll_overflow:
3725 case Builtin::BI__builtin_usub_overflow:
3726 case Builtin::BI__builtin_usubl_overflow:
3727 case Builtin::BI__builtin_usubll_overflow:
3728 case Builtin::BI__builtin_umul_overflow:
3729 case Builtin::BI__builtin_umull_overflow:
3730 case Builtin::BI__builtin_umulll_overflow:
3731 case Builtin::BI__builtin_saddl_overflow:
3732 case Builtin::BI__builtin_saddll_overflow:
3733 case Builtin::BI__builtin_ssub_overflow:
3734 case Builtin::BI__builtin_ssubl_overflow:
3735 case Builtin::BI__builtin_ssubll_overflow:
3736 case Builtin::BI__builtin_smul_overflow:
3737 case Builtin::BI__builtin_smull_overflow:
3738 case Builtin::BI__builtin_smulll_overflow:
3739 return interp__builtin_overflowop(S, OpPC, Call, BuiltinID);
3740
3741 case Builtin::BI__builtin_addcb:
3742 case Builtin::BI__builtin_addcs:
3743 case Builtin::BI__builtin_addc:
3744 case Builtin::BI__builtin_addcl:
3745 case Builtin::BI__builtin_addcll:
3746 case Builtin::BI__builtin_subcb:
3747 case Builtin::BI__builtin_subcs:
3748 case Builtin::BI__builtin_subc:
3749 case Builtin::BI__builtin_subcl:
3750 case Builtin::BI__builtin_subcll:
3751 return interp__builtin_carryop(S, OpPC, Frame, Call, BuiltinID);
3752
3753 case Builtin::BI__builtin_clz:
3754 case Builtin::BI__builtin_clzl:
3755 case Builtin::BI__builtin_clzll:
3756 case Builtin::BI__builtin_clzs:
3757 case Builtin::BI__builtin_clzg:
3758 case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
3759 case Builtin::BI__lzcnt:
3760 case Builtin::BI__lzcnt64:
3761 return interp__builtin_clz(S, OpPC, Frame, Call, BuiltinID);
3762
3763 case Builtin::BI__builtin_ctz:
3764 case Builtin::BI__builtin_ctzl:
3765 case Builtin::BI__builtin_ctzll:
3766 case Builtin::BI__builtin_ctzs:
3767 case Builtin::BI__builtin_ctzg:
3768 return interp__builtin_ctz(S, OpPC, Frame, Call, BuiltinID);
3769
3770 case Builtin::BI__builtin_elementwise_clzg:
3771 case Builtin::BI__builtin_elementwise_ctzg:
3773 BuiltinID);
3774
3775 case Builtin::BI__builtin_bswap16:
3776 case Builtin::BI__builtin_bswap32:
3777 case Builtin::BI__builtin_bswap64:
3778 return interp__builtin_bswap(S, OpPC, Frame, Call);
3779
3780 case Builtin::BI__atomic_always_lock_free:
3781 case Builtin::BI__atomic_is_lock_free:
3782 return interp__builtin_atomic_lock_free(S, OpPC, Frame, Call, BuiltinID);
3783
3784 case Builtin::BI__c11_atomic_is_lock_free:
3786
3787 case Builtin::BI__builtin_complex:
3788 return interp__builtin_complex(S, OpPC, Frame, Call);
3789
3790 case Builtin::BI__builtin_is_aligned:
3791 case Builtin::BI__builtin_align_up:
3792 case Builtin::BI__builtin_align_down:
3793 return interp__builtin_is_aligned_up_down(S, OpPC, Frame, Call, BuiltinID);
3794
3795 case Builtin::BI__builtin_assume_aligned:
3796 return interp__builtin_assume_aligned(S, OpPC, Frame, Call);
3797
3798 case clang::X86::BI__builtin_ia32_bextr_u32:
3799 case clang::X86::BI__builtin_ia32_bextr_u64:
3800 case clang::X86::BI__builtin_ia32_bextri_u32:
3801 case clang::X86::BI__builtin_ia32_bextri_u64:
3803 S, OpPC, Call, [](const APSInt &Val, const APSInt &Idx) {
3804 unsigned BitWidth = Val.getBitWidth();
3805 uint64_t Shift = Idx.extractBitsAsZExtValue(8, 0);
3806 uint64_t Length = Idx.extractBitsAsZExtValue(8, 8);
3807 if (Length > BitWidth) {
3808 Length = BitWidth;
3809 }
3810
3811 // Handle out of bounds cases.
3812 if (Length == 0 || Shift >= BitWidth)
3813 return APInt(BitWidth, 0);
3814
3815 uint64_t Result = Val.getZExtValue() >> Shift;
3816 Result &= llvm::maskTrailingOnes<uint64_t>(Length);
3817 return APInt(BitWidth, Result);
3818 });
3819
3820 case clang::X86::BI__builtin_ia32_bzhi_si:
3821 case clang::X86::BI__builtin_ia32_bzhi_di:
3823 S, OpPC, Call, [](const APSInt &Val, const APSInt &Idx) {
3824 unsigned BitWidth = Val.getBitWidth();
3825 uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);
3826 APSInt Result = Val;
3827
3828 if (Index < BitWidth)
3829 Result.clearHighBits(BitWidth - Index);
3830
3831 return Result;
3832 });
3833
3834 case clang::X86::BI__builtin_ia32_lzcnt_u16:
3835 case clang::X86::BI__builtin_ia32_lzcnt_u32:
3836 case clang::X86::BI__builtin_ia32_lzcnt_u64:
3838 S, OpPC, Call, [](const APSInt &Src) {
3839 return APInt(Src.getBitWidth(), Src.countLeadingZeros());
3840 });
3841
3842 case clang::X86::BI__builtin_ia32_tzcnt_u16:
3843 case clang::X86::BI__builtin_ia32_tzcnt_u32:
3844 case clang::X86::BI__builtin_ia32_tzcnt_u64:
3846 S, OpPC, Call, [](const APSInt &Src) {
3847 return APInt(Src.getBitWidth(), Src.countTrailingZeros());
3848 });
3849
3850 case clang::X86::BI__builtin_ia32_pdep_si:
3851 case clang::X86::BI__builtin_ia32_pdep_di:
3853 S, OpPC, Call, [](const APSInt &Val, const APSInt &Mask) {
3854 unsigned BitWidth = Val.getBitWidth();
3855 APInt Result = APInt::getZero(BitWidth);
3856
3857 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
3858 if (Mask[I])
3859 Result.setBitVal(I, Val[P++]);
3860 }
3861
3862 return Result;
3863 });
3864
3865 case clang::X86::BI__builtin_ia32_pext_si:
3866 case clang::X86::BI__builtin_ia32_pext_di:
3868 S, OpPC, Call, [](const APSInt &Val, const APSInt &Mask) {
3869 unsigned BitWidth = Val.getBitWidth();
3870 APInt Result = APInt::getZero(BitWidth);
3871
3872 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
3873 if (Mask[I])
3874 Result.setBitVal(P++, Val[I]);
3875 }
3876
3877 return Result;
3878 });
3879
3880 case clang::X86::BI__builtin_ia32_addcarryx_u32:
3881 case clang::X86::BI__builtin_ia32_addcarryx_u64:
3882 case clang::X86::BI__builtin_ia32_subborrow_u32:
3883 case clang::X86::BI__builtin_ia32_subborrow_u64:
3885 BuiltinID);
3886
3887 case Builtin::BI__builtin_os_log_format_buffer_size:
3889
3890 case Builtin::BI__builtin_ptrauth_string_discriminator:
3892
3893 case Builtin::BI__builtin_infer_alloc_token:
3895
3896 case Builtin::BI__noop:
3897 pushInteger(S, 0, Call->getType());
3898 return true;
3899
3900 case Builtin::BI__builtin_operator_new:
3901 return interp__builtin_operator_new(S, OpPC, Frame, Call);
3902
3903 case Builtin::BI__builtin_operator_delete:
3904 return interp__builtin_operator_delete(S, OpPC, Frame, Call);
3905
3906 case Builtin::BI__arithmetic_fence:
3908
3909 case Builtin::BI__builtin_reduce_add:
3910 case Builtin::BI__builtin_reduce_mul:
3911 case Builtin::BI__builtin_reduce_and:
3912 case Builtin::BI__builtin_reduce_or:
3913 case Builtin::BI__builtin_reduce_xor:
3914 case Builtin::BI__builtin_reduce_min:
3915 case Builtin::BI__builtin_reduce_max:
3916 return interp__builtin_vector_reduce(S, OpPC, Call, BuiltinID);
3917
3918 case Builtin::BI__builtin_elementwise_popcount:
3919 case Builtin::BI__builtin_elementwise_bitreverse:
3921 BuiltinID);
3922
3923 case Builtin::BI__builtin_elementwise_abs:
3924 return interp__builtin_elementwise_abs(S, OpPC, Frame, Call, BuiltinID);
3925
3926 case Builtin::BI__builtin_memcpy:
3927 case Builtin::BImemcpy:
3928 case Builtin::BI__builtin_wmemcpy:
3929 case Builtin::BIwmemcpy:
3930 case Builtin::BI__builtin_memmove:
3931 case Builtin::BImemmove:
3932 case Builtin::BI__builtin_wmemmove:
3933 case Builtin::BIwmemmove:
3934 return interp__builtin_memcpy(S, OpPC, Frame, Call, BuiltinID);
3935
3936 case Builtin::BI__builtin_memcmp:
3937 case Builtin::BImemcmp:
3938 case Builtin::BI__builtin_bcmp:
3939 case Builtin::BIbcmp:
3940 case Builtin::BI__builtin_wmemcmp:
3941 case Builtin::BIwmemcmp:
3942 return interp__builtin_memcmp(S, OpPC, Frame, Call, BuiltinID);
3943
3944 case Builtin::BImemchr:
3945 case Builtin::BI__builtin_memchr:
3946 case Builtin::BIstrchr:
3947 case Builtin::BI__builtin_strchr:
3948 case Builtin::BIwmemchr:
3949 case Builtin::BI__builtin_wmemchr:
3950 case Builtin::BIwcschr:
3951 case Builtin::BI__builtin_wcschr:
3952 case Builtin::BI__builtin_char_memchr:
3953 return interp__builtin_memchr(S, OpPC, Call, BuiltinID);
3954
3955 case Builtin::BI__builtin_object_size:
3956 case Builtin::BI__builtin_dynamic_object_size:
3957 return interp__builtin_object_size(S, OpPC, Frame, Call);
3958
3959 case Builtin::BI__builtin_is_within_lifetime:
3961
3962 case Builtin::BI__builtin_elementwise_add_sat:
3964 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
3965 return LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS);
3966 });
3967
3968 case Builtin::BI__builtin_elementwise_sub_sat:
3970 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
3971 return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
3972 });
3973 case X86::BI__builtin_ia32_extract128i256:
3974 case X86::BI__builtin_ia32_vextractf128_pd256:
3975 case X86::BI__builtin_ia32_vextractf128_ps256:
3976 case X86::BI__builtin_ia32_vextractf128_si256:
3977 return interp__builtin_x86_extract_vector(S, OpPC, Call, BuiltinID);
3978
3979 case X86::BI__builtin_ia32_extractf32x4_256_mask:
3980 case X86::BI__builtin_ia32_extractf32x4_mask:
3981 case X86::BI__builtin_ia32_extractf32x8_mask:
3982 case X86::BI__builtin_ia32_extractf64x2_256_mask:
3983 case X86::BI__builtin_ia32_extractf64x2_512_mask:
3984 case X86::BI__builtin_ia32_extractf64x4_mask:
3985 case X86::BI__builtin_ia32_extracti32x4_256_mask:
3986 case X86::BI__builtin_ia32_extracti32x4_mask:
3987 case X86::BI__builtin_ia32_extracti32x8_mask:
3988 case X86::BI__builtin_ia32_extracti64x2_256_mask:
3989 case X86::BI__builtin_ia32_extracti64x2_512_mask:
3990 case X86::BI__builtin_ia32_extracti64x4_mask:
3991 return interp__builtin_x86_extract_vector_masked(S, OpPC, Call, BuiltinID);
3992
3993 case clang::X86::BI__builtin_ia32_pmulhrsw128:
3994 case clang::X86::BI__builtin_ia32_pmulhrsw256:
3995 case clang::X86::BI__builtin_ia32_pmulhrsw512:
3997 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
3998 return (llvm::APIntOps::mulsExtended(LHS, RHS).ashr(14) + 1)
3999 .extractBits(16, 1);
4000 });
4001
4002 case clang::X86::BI__builtin_ia32_movmskps:
4003 case clang::X86::BI__builtin_ia32_movmskpd:
4004 case clang::X86::BI__builtin_ia32_pmovmskb128:
4005 case clang::X86::BI__builtin_ia32_pmovmskb256:
4006 case clang::X86::BI__builtin_ia32_movmskps256:
4007 case clang::X86::BI__builtin_ia32_movmskpd256: {
4008 return interp__builtin_ia32_movmsk_op(S, OpPC, Call);
4009 }
4010
4011 case X86::BI__builtin_ia32_psignb128:
4012 case X86::BI__builtin_ia32_psignb256:
4013 case X86::BI__builtin_ia32_psignw128:
4014 case X86::BI__builtin_ia32_psignw256:
4015 case X86::BI__builtin_ia32_psignd128:
4016 case X86::BI__builtin_ia32_psignd256:
4018 S, OpPC, Call, [](const APInt &AElem, const APInt &BElem) {
4019 if (BElem.isZero())
4020 return APInt::getZero(AElem.getBitWidth());
4021 if (BElem.isNegative())
4022 return -AElem;
4023 return AElem;
4024 });
4025
4026 case clang::X86::BI__builtin_ia32_pavgb128:
4027 case clang::X86::BI__builtin_ia32_pavgw128:
4028 case clang::X86::BI__builtin_ia32_pavgb256:
4029 case clang::X86::BI__builtin_ia32_pavgw256:
4030 case clang::X86::BI__builtin_ia32_pavgb512:
4031 case clang::X86::BI__builtin_ia32_pavgw512:
4033 llvm::APIntOps::avgCeilU);
4034
4035 case clang::X86::BI__builtin_ia32_pmaddubsw128:
4036 case clang::X86::BI__builtin_ia32_pmaddubsw256:
4037 case clang::X86::BI__builtin_ia32_pmaddubsw512:
4039 S, OpPC, Call,
4040 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4041 const APSInt &HiRHS) {
4042 unsigned BitWidth = 2 * LoLHS.getBitWidth();
4043 return (LoLHS.zext(BitWidth) * LoRHS.sext(BitWidth))
4044 .sadd_sat((HiLHS.zext(BitWidth) * HiRHS.sext(BitWidth)));
4045 });
4046
4047 case clang::X86::BI__builtin_ia32_pmaddwd128:
4048 case clang::X86::BI__builtin_ia32_pmaddwd256:
4049 case clang::X86::BI__builtin_ia32_pmaddwd512:
4051 S, OpPC, Call,
4052 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4053 const APSInt &HiRHS) {
4054 unsigned BitWidth = 2 * LoLHS.getBitWidth();
4055 return (LoLHS.sext(BitWidth) * LoRHS.sext(BitWidth)) +
4056 (HiLHS.sext(BitWidth) * HiRHS.sext(BitWidth));
4057 });
4058
4059 case clang::X86::BI__builtin_ia32_pmulhuw128:
4060 case clang::X86::BI__builtin_ia32_pmulhuw256:
4061 case clang::X86::BI__builtin_ia32_pmulhuw512:
4063 llvm::APIntOps::mulhu);
4064
4065 case clang::X86::BI__builtin_ia32_pmulhw128:
4066 case clang::X86::BI__builtin_ia32_pmulhw256:
4067 case clang::X86::BI__builtin_ia32_pmulhw512:
4069 llvm::APIntOps::mulhs);
4070
4071 case clang::X86::BI__builtin_ia32_psllv2di:
4072 case clang::X86::BI__builtin_ia32_psllv4di:
4073 case clang::X86::BI__builtin_ia32_psllv4si:
4074 case clang::X86::BI__builtin_ia32_psllv8di:
4075 case clang::X86::BI__builtin_ia32_psllv8hi:
4076 case clang::X86::BI__builtin_ia32_psllv8si:
4077 case clang::X86::BI__builtin_ia32_psllv16hi:
4078 case clang::X86::BI__builtin_ia32_psllv16si:
4079 case clang::X86::BI__builtin_ia32_psllv32hi:
4080 case clang::X86::BI__builtin_ia32_psllwi128:
4081 case clang::X86::BI__builtin_ia32_psllwi256:
4082 case clang::X86::BI__builtin_ia32_psllwi512:
4083 case clang::X86::BI__builtin_ia32_pslldi128:
4084 case clang::X86::BI__builtin_ia32_pslldi256:
4085 case clang::X86::BI__builtin_ia32_pslldi512:
4086 case clang::X86::BI__builtin_ia32_psllqi128:
4087 case clang::X86::BI__builtin_ia32_psllqi256:
4088 case clang::X86::BI__builtin_ia32_psllqi512:
4090 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4091 if (RHS.uge(LHS.getBitWidth())) {
4092 return APInt::getZero(LHS.getBitWidth());
4093 }
4094 return LHS.shl(RHS.getZExtValue());
4095 });
4096
4097 case clang::X86::BI__builtin_ia32_psrav4si:
4098 case clang::X86::BI__builtin_ia32_psrav8di:
4099 case clang::X86::BI__builtin_ia32_psrav8hi:
4100 case clang::X86::BI__builtin_ia32_psrav8si:
4101 case clang::X86::BI__builtin_ia32_psrav16hi:
4102 case clang::X86::BI__builtin_ia32_psrav16si:
4103 case clang::X86::BI__builtin_ia32_psrav32hi:
4104 case clang::X86::BI__builtin_ia32_psravq128:
4105 case clang::X86::BI__builtin_ia32_psravq256:
4106 case clang::X86::BI__builtin_ia32_psrawi128:
4107 case clang::X86::BI__builtin_ia32_psrawi256:
4108 case clang::X86::BI__builtin_ia32_psrawi512:
4109 case clang::X86::BI__builtin_ia32_psradi128:
4110 case clang::X86::BI__builtin_ia32_psradi256:
4111 case clang::X86::BI__builtin_ia32_psradi512:
4112 case clang::X86::BI__builtin_ia32_psraqi128:
4113 case clang::X86::BI__builtin_ia32_psraqi256:
4114 case clang::X86::BI__builtin_ia32_psraqi512:
4116 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4117 if (RHS.uge(LHS.getBitWidth())) {
4118 return LHS.ashr(LHS.getBitWidth() - 1);
4119 }
4120 return LHS.ashr(RHS.getZExtValue());
4121 });
4122
4123 case clang::X86::BI__builtin_ia32_psrlv2di:
4124 case clang::X86::BI__builtin_ia32_psrlv4di:
4125 case clang::X86::BI__builtin_ia32_psrlv4si:
4126 case clang::X86::BI__builtin_ia32_psrlv8di:
4127 case clang::X86::BI__builtin_ia32_psrlv8hi:
4128 case clang::X86::BI__builtin_ia32_psrlv8si:
4129 case clang::X86::BI__builtin_ia32_psrlv16hi:
4130 case clang::X86::BI__builtin_ia32_psrlv16si:
4131 case clang::X86::BI__builtin_ia32_psrlv32hi:
4132 case clang::X86::BI__builtin_ia32_psrlwi128:
4133 case clang::X86::BI__builtin_ia32_psrlwi256:
4134 case clang::X86::BI__builtin_ia32_psrlwi512:
4135 case clang::X86::BI__builtin_ia32_psrldi128:
4136 case clang::X86::BI__builtin_ia32_psrldi256:
4137 case clang::X86::BI__builtin_ia32_psrldi512:
4138 case clang::X86::BI__builtin_ia32_psrlqi128:
4139 case clang::X86::BI__builtin_ia32_psrlqi256:
4140 case clang::X86::BI__builtin_ia32_psrlqi512:
4142 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4143 if (RHS.uge(LHS.getBitWidth())) {
4144 return APInt::getZero(LHS.getBitWidth());
4145 }
4146 return LHS.lshr(RHS.getZExtValue());
4147 });
4148 case clang::X86::BI__builtin_ia32_packsswb128:
4149 case clang::X86::BI__builtin_ia32_packsswb256:
4150 case clang::X86::BI__builtin_ia32_packsswb512:
4151 case clang::X86::BI__builtin_ia32_packssdw128:
4152 case clang::X86::BI__builtin_ia32_packssdw256:
4153 case clang::X86::BI__builtin_ia32_packssdw512:
4154 return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) {
4155 return APInt(Src).truncSSat(Src.getBitWidth() / 2);
4156 });
4157 case clang::X86::BI__builtin_ia32_packusdw128:
4158 case clang::X86::BI__builtin_ia32_packusdw256:
4159 case clang::X86::BI__builtin_ia32_packusdw512:
4160 case clang::X86::BI__builtin_ia32_packuswb128:
4161 case clang::X86::BI__builtin_ia32_packuswb256:
4162 case clang::X86::BI__builtin_ia32_packuswb512:
4163 return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) {
4164 unsigned DstBits = Src.getBitWidth() / 2;
4165 if (Src.isNegative())
4166 return APInt::getZero(DstBits);
4167 if (Src.isIntN(DstBits))
4168 return APInt(Src).trunc(DstBits);
4169 return APInt::getAllOnes(DstBits);
4170 });
4171
4172 case clang::X86::BI__builtin_ia32_vprotbi:
4173 case clang::X86::BI__builtin_ia32_vprotdi:
4174 case clang::X86::BI__builtin_ia32_vprotqi:
4175 case clang::X86::BI__builtin_ia32_vprotwi:
4176 case clang::X86::BI__builtin_ia32_prold128:
4177 case clang::X86::BI__builtin_ia32_prold256:
4178 case clang::X86::BI__builtin_ia32_prold512:
4179 case clang::X86::BI__builtin_ia32_prolq128:
4180 case clang::X86::BI__builtin_ia32_prolq256:
4181 case clang::X86::BI__builtin_ia32_prolq512:
4183 S, OpPC, Call,
4184 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotl(RHS); });
4185
4186 case clang::X86::BI__builtin_ia32_prord128:
4187 case clang::X86::BI__builtin_ia32_prord256:
4188 case clang::X86::BI__builtin_ia32_prord512:
4189 case clang::X86::BI__builtin_ia32_prorq128:
4190 case clang::X86::BI__builtin_ia32_prorq256:
4191 case clang::X86::BI__builtin_ia32_prorq512:
4193 S, OpPC, Call,
4194 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotr(RHS); });
4195
4196 case Builtin::BI__builtin_elementwise_max:
4197 case Builtin::BI__builtin_elementwise_min:
4198 return interp__builtin_elementwise_maxmin(S, OpPC, Call, BuiltinID);
4199
4200 case clang::X86::BI__builtin_ia32_phaddw128:
4201 case clang::X86::BI__builtin_ia32_phaddw256:
4202 case clang::X86::BI__builtin_ia32_phaddd128:
4203 case clang::X86::BI__builtin_ia32_phaddd256:
4205 S, OpPC, Call,
4206 [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
4207 case clang::X86::BI__builtin_ia32_phaddsw128:
4208 case clang::X86::BI__builtin_ia32_phaddsw256:
4210 S, OpPC, Call,
4211 [](const APSInt &LHS, const APSInt &RHS) { return LHS.sadd_sat(RHS); });
4212 case clang::X86::BI__builtin_ia32_phsubw128:
4213 case clang::X86::BI__builtin_ia32_phsubw256:
4214 case clang::X86::BI__builtin_ia32_phsubd128:
4215 case clang::X86::BI__builtin_ia32_phsubd256:
4217 S, OpPC, Call,
4218 [](const APSInt &LHS, const APSInt &RHS) { return LHS - RHS; });
4219 case clang::X86::BI__builtin_ia32_phsubsw128:
4220 case clang::X86::BI__builtin_ia32_phsubsw256:
4222 S, OpPC, Call,
4223 [](const APSInt &LHS, const APSInt &RHS) { return LHS.ssub_sat(RHS); });
4224 case clang::X86::BI__builtin_ia32_haddpd:
4225 case clang::X86::BI__builtin_ia32_haddps:
4226 case clang::X86::BI__builtin_ia32_haddpd256:
4227 case clang::X86::BI__builtin_ia32_haddps256:
4229 S, OpPC, Call,
4230 [](const APFloat &LHS, const APFloat &RHS, llvm::RoundingMode RM) {
4231 APFloat F = LHS;
4232 F.add(RHS, RM);
4233 return F;
4234 });
4235 case clang::X86::BI__builtin_ia32_hsubpd:
4236 case clang::X86::BI__builtin_ia32_hsubps:
4237 case clang::X86::BI__builtin_ia32_hsubpd256:
4238 case clang::X86::BI__builtin_ia32_hsubps256:
4240 S, OpPC, Call,
4241 [](const APFloat &LHS, const APFloat &RHS, llvm::RoundingMode RM) {
4242 APFloat F = LHS;
4243 F.subtract(RHS, RM);
4244 return F;
4245 });
4246
4247 case clang::X86::BI__builtin_ia32_pmuldq128:
4248 case clang::X86::BI__builtin_ia32_pmuldq256:
4249 case clang::X86::BI__builtin_ia32_pmuldq512:
4251 S, OpPC, Call,
4252 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4253 const APSInt &HiRHS) {
4254 return llvm::APIntOps::mulsExtended(LoLHS, LoRHS);
4255 });
4256
4257 case clang::X86::BI__builtin_ia32_pmuludq128:
4258 case clang::X86::BI__builtin_ia32_pmuludq256:
4259 case clang::X86::BI__builtin_ia32_pmuludq512:
4261 S, OpPC, Call,
4262 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4263 const APSInt &HiRHS) {
4264 return llvm::APIntOps::muluExtended(LoLHS, LoRHS);
4265 });
4266
4267 case Builtin::BI__builtin_elementwise_fma:
4269 S, OpPC, Call,
4270 [](const APFloat &X, const APFloat &Y, const APFloat &Z,
4271 llvm::RoundingMode RM) {
4272 APFloat F = X;
4273 F.fusedMultiplyAdd(Y, Z, RM);
4274 return F;
4275 });
4276
4277 case X86::BI__builtin_ia32_vpmadd52luq128:
4278 case X86::BI__builtin_ia32_vpmadd52luq256:
4279 case X86::BI__builtin_ia32_vpmadd52luq512:
4281 S, OpPC, Call, [](const APSInt &A, const APSInt &B, const APSInt &C) {
4282 return A + (B.trunc(52) * C.trunc(52)).zext(64);
4283 });
4284 case X86::BI__builtin_ia32_vpmadd52huq128:
4285 case X86::BI__builtin_ia32_vpmadd52huq256:
4286 case X86::BI__builtin_ia32_vpmadd52huq512:
4288 S, OpPC, Call, [](const APSInt &A, const APSInt &B, const APSInt &C) {
4289 return A + llvm::APIntOps::mulhu(B.trunc(52), C.trunc(52)).zext(64);
4290 });
4291
4292 case X86::BI__builtin_ia32_vpshldd128:
4293 case X86::BI__builtin_ia32_vpshldd256:
4294 case X86::BI__builtin_ia32_vpshldd512:
4295 case X86::BI__builtin_ia32_vpshldq128:
4296 case X86::BI__builtin_ia32_vpshldq256:
4297 case X86::BI__builtin_ia32_vpshldq512:
4298 case X86::BI__builtin_ia32_vpshldw128:
4299 case X86::BI__builtin_ia32_vpshldw256:
4300 case X86::BI__builtin_ia32_vpshldw512:
4302 S, OpPC, Call,
4303 [](const APSInt &Hi, const APSInt &Lo, const APSInt &Amt) {
4304 return llvm::APIntOps::fshl(Hi, Lo, Amt);
4305 });
4306
4307 case X86::BI__builtin_ia32_vpshrdd128:
4308 case X86::BI__builtin_ia32_vpshrdd256:
4309 case X86::BI__builtin_ia32_vpshrdd512:
4310 case X86::BI__builtin_ia32_vpshrdq128:
4311 case X86::BI__builtin_ia32_vpshrdq256:
4312 case X86::BI__builtin_ia32_vpshrdq512:
4313 case X86::BI__builtin_ia32_vpshrdw128:
4314 case X86::BI__builtin_ia32_vpshrdw256:
4315 case X86::BI__builtin_ia32_vpshrdw512:
4316 // NOTE: Reversed Hi/Lo operands.
4318 S, OpPC, Call,
4319 [](const APSInt &Lo, const APSInt &Hi, const APSInt &Amt) {
4320 return llvm::APIntOps::fshr(Hi, Lo, Amt);
4321 });
4322 case X86::BI__builtin_ia32_vpconflictsi_128:
4323 case X86::BI__builtin_ia32_vpconflictsi_256:
4324 case X86::BI__builtin_ia32_vpconflictsi_512:
4325 case X86::BI__builtin_ia32_vpconflictdi_128:
4326 case X86::BI__builtin_ia32_vpconflictdi_256:
4327 case X86::BI__builtin_ia32_vpconflictdi_512:
4328 return interp__builtin_ia32_vpconflict(S, OpPC, Call);
4329 case clang::X86::BI__builtin_ia32_blendpd:
4330 case clang::X86::BI__builtin_ia32_blendpd256:
4331 case clang::X86::BI__builtin_ia32_blendps:
4332 case clang::X86::BI__builtin_ia32_blendps256:
4333 case clang::X86::BI__builtin_ia32_pblendw128:
4334 case clang::X86::BI__builtin_ia32_pblendw256:
4335 case clang::X86::BI__builtin_ia32_pblendd128:
4336 case clang::X86::BI__builtin_ia32_pblendd256:
4337 return interp__builtin_blend(S, OpPC, Call);
4338
4339 case clang::X86::BI__builtin_ia32_blendvpd:
4340 case clang::X86::BI__builtin_ia32_blendvpd256:
4341 case clang::X86::BI__builtin_ia32_blendvps:
4342 case clang::X86::BI__builtin_ia32_blendvps256:
4344 S, OpPC, Call,
4345 [](const APFloat &F, const APFloat &T, const APFloat &C,
4346 llvm::RoundingMode) { return C.isNegative() ? T : F; });
4347
4348 case clang::X86::BI__builtin_ia32_pblendvb128:
4349 case clang::X86::BI__builtin_ia32_pblendvb256:
4351 S, OpPC, Call, [](const APSInt &F, const APSInt &T, const APSInt &C) {
4352 return ((APInt)C).isNegative() ? T : F;
4353 });
4354 case X86::BI__builtin_ia32_ptestz128:
4355 case X86::BI__builtin_ia32_ptestz256:
4356 case X86::BI__builtin_ia32_vtestzps:
4357 case X86::BI__builtin_ia32_vtestzps256:
4358 case X86::BI__builtin_ia32_vtestzpd:
4359 case X86::BI__builtin_ia32_vtestzpd256:
4361 S, OpPC, Call,
4362 [](const APInt &A, const APInt &B) { return (A & B) == 0; });
4363 case X86::BI__builtin_ia32_ptestc128:
4364 case X86::BI__builtin_ia32_ptestc256:
4365 case X86::BI__builtin_ia32_vtestcps:
4366 case X86::BI__builtin_ia32_vtestcps256:
4367 case X86::BI__builtin_ia32_vtestcpd:
4368 case X86::BI__builtin_ia32_vtestcpd256:
4370 S, OpPC, Call,
4371 [](const APInt &A, const APInt &B) { return (~A & B) == 0; });
4372 case X86::BI__builtin_ia32_ptestnzc128:
4373 case X86::BI__builtin_ia32_ptestnzc256:
4374 case X86::BI__builtin_ia32_vtestnzcps:
4375 case X86::BI__builtin_ia32_vtestnzcps256:
4376 case X86::BI__builtin_ia32_vtestnzcpd:
4377 case X86::BI__builtin_ia32_vtestnzcpd256:
4379 S, OpPC, Call, [](const APInt &A, const APInt &B) {
4380 return ((A & B) != 0) && ((~A & B) != 0);
4381 });
4382 case X86::BI__builtin_ia32_selectb_128:
4383 case X86::BI__builtin_ia32_selectb_256:
4384 case X86::BI__builtin_ia32_selectb_512:
4385 case X86::BI__builtin_ia32_selectw_128:
4386 case X86::BI__builtin_ia32_selectw_256:
4387 case X86::BI__builtin_ia32_selectw_512:
4388 case X86::BI__builtin_ia32_selectd_128:
4389 case X86::BI__builtin_ia32_selectd_256:
4390 case X86::BI__builtin_ia32_selectd_512:
4391 case X86::BI__builtin_ia32_selectq_128:
4392 case X86::BI__builtin_ia32_selectq_256:
4393 case X86::BI__builtin_ia32_selectq_512:
4394 case X86::BI__builtin_ia32_selectph_128:
4395 case X86::BI__builtin_ia32_selectph_256:
4396 case X86::BI__builtin_ia32_selectph_512:
4397 case X86::BI__builtin_ia32_selectpbf_128:
4398 case X86::BI__builtin_ia32_selectpbf_256:
4399 case X86::BI__builtin_ia32_selectpbf_512:
4400 case X86::BI__builtin_ia32_selectps_128:
4401 case X86::BI__builtin_ia32_selectps_256:
4402 case X86::BI__builtin_ia32_selectps_512:
4403 case X86::BI__builtin_ia32_selectpd_128:
4404 case X86::BI__builtin_ia32_selectpd_256:
4405 case X86::BI__builtin_ia32_selectpd_512:
4406 return interp__builtin_select(S, OpPC, Call);
4407
4408 case X86::BI__builtin_ia32_shufps:
4409 case X86::BI__builtin_ia32_shufps256:
4410 case X86::BI__builtin_ia32_shufps512:
4412 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4413 unsigned NumElemPerLane = 4;
4414 unsigned NumSelectableElems = NumElemPerLane / 2;
4415 unsigned BitsPerElem = 2;
4416 unsigned IndexMask = 0x3;
4417 unsigned MaskBits = 8;
4418 unsigned Lane = DstIdx / NumElemPerLane;
4419 unsigned ElemInLane = DstIdx % NumElemPerLane;
4420 unsigned LaneOffset = Lane * NumElemPerLane;
4421 unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
4422 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
4423 unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
4424 return std::pair<unsigned, int>{SrcIdx,
4425 static_cast<int>(LaneOffset + Index)};
4426 });
4427 case X86::BI__builtin_ia32_shufpd:
4428 case X86::BI__builtin_ia32_shufpd256:
4429 case X86::BI__builtin_ia32_shufpd512:
4431 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4432 unsigned NumElemPerLane = 2;
4433 unsigned NumSelectableElems = NumElemPerLane / 2;
4434 unsigned BitsPerElem = 1;
4435 unsigned IndexMask = 0x1;
4436 unsigned MaskBits = 8;
4437 unsigned Lane = DstIdx / NumElemPerLane;
4438 unsigned ElemInLane = DstIdx % NumElemPerLane;
4439 unsigned LaneOffset = Lane * NumElemPerLane;
4440 unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
4441 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
4442 unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
4443 return std::pair<unsigned, int>{SrcIdx,
4444 static_cast<int>(LaneOffset + Index)};
4445 });
4446 case X86::BI__builtin_ia32_insertps128:
4448 S, OpPC, Call, [](unsigned DstIdx, unsigned Mask) {
4449 // Bits [3:0]: zero mask - if bit is set, zero this element
4450 if ((Mask & (1 << DstIdx)) != 0) {
4451 return std::pair<unsigned, int>{0, -1};
4452 }
4453 // Bits [7:6]: select element from source vector Y (0-3)
4454 // Bits [5:4]: select destination position (0-3)
4455 unsigned SrcElem = (Mask >> 6) & 0x3;
4456 unsigned DstElem = (Mask >> 4) & 0x3;
4457 if (DstIdx == DstElem) {
4458 // Insert element from source vector (B) at this position
4459 return std::pair<unsigned, int>{1, static_cast<int>(SrcElem)};
4460 } else {
4461 // Copy from destination vector (A)
4462 return std::pair<unsigned, int>{0, static_cast<int>(DstIdx)};
4463 }
4464 });
4465 case X86::BI__builtin_ia32_vpermi2varq128:
4466 case X86::BI__builtin_ia32_vpermi2varpd128:
4468 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4469 int Offset = ShuffleMask & 0x1;
4470 unsigned SrcIdx = (ShuffleMask >> 1) & 0x1;
4471 return std::pair<unsigned, int>{SrcIdx, Offset};
4472 });
4473 case X86::BI__builtin_ia32_vpermi2vard128:
4474 case X86::BI__builtin_ia32_vpermi2varps128:
4475 case X86::BI__builtin_ia32_vpermi2varq256:
4476 case X86::BI__builtin_ia32_vpermi2varpd256:
4478 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4479 int Offset = ShuffleMask & 0x3;
4480 unsigned SrcIdx = (ShuffleMask >> 2) & 0x1;
4481 return std::pair<unsigned, int>{SrcIdx, Offset};
4482 });
4483 case X86::BI__builtin_ia32_vpermi2varhi128:
4484 case X86::BI__builtin_ia32_vpermi2vard256:
4485 case X86::BI__builtin_ia32_vpermi2varps256:
4486 case X86::BI__builtin_ia32_vpermi2varq512:
4487 case X86::BI__builtin_ia32_vpermi2varpd512:
4489 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4490 int Offset = ShuffleMask & 0x7;
4491 unsigned SrcIdx = (ShuffleMask >> 3) & 0x1;
4492 return std::pair<unsigned, int>{SrcIdx, Offset};
4493 });
4494 case X86::BI__builtin_ia32_vpermi2varqi128:
4495 case X86::BI__builtin_ia32_vpermi2varhi256:
4496 case X86::BI__builtin_ia32_vpermi2vard512:
4497 case X86::BI__builtin_ia32_vpermi2varps512:
4499 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4500 int Offset = ShuffleMask & 0xF;
4501 unsigned SrcIdx = (ShuffleMask >> 4) & 0x1;
4502 return std::pair<unsigned, int>{SrcIdx, Offset};
4503 });
4504 case X86::BI__builtin_ia32_vpermi2varqi256:
4505 case X86::BI__builtin_ia32_vpermi2varhi512:
4507 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4508 int Offset = ShuffleMask & 0x1F;
4509 unsigned SrcIdx = (ShuffleMask >> 5) & 0x1;
4510 return std::pair<unsigned, int>{SrcIdx, Offset};
4511 });
4512 case X86::BI__builtin_ia32_vpermi2varqi512:
4514 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4515 int Offset = ShuffleMask & 0x3F;
4516 unsigned SrcIdx = (ShuffleMask >> 6) & 0x1;
4517 return std::pair<unsigned, int>{SrcIdx, Offset};
4518 });
4519 case X86::BI__builtin_ia32_pshufb128:
4520 case X86::BI__builtin_ia32_pshufb256:
4521 case X86::BI__builtin_ia32_pshufb512:
4522 return interp__builtin_ia32_pshufb(S, OpPC, Call);
4523
4524 case X86::BI__builtin_ia32_pshuflw:
4525 case X86::BI__builtin_ia32_pshuflw256:
4526 case X86::BI__builtin_ia32_pshuflw512:
4527 return interp__builtin_ia32_pshuf(S, OpPC, Call, false);
4528
4529 case X86::BI__builtin_ia32_pshufhw:
4530 case X86::BI__builtin_ia32_pshufhw256:
4531 case X86::BI__builtin_ia32_pshufhw512:
4532 return interp__builtin_ia32_pshuf(S, OpPC, Call, true);
4533
4534 case X86::BI__builtin_ia32_pshufd:
4535 case X86::BI__builtin_ia32_pshufd256:
4536 case X86::BI__builtin_ia32_pshufd512:
4537 return interp__builtin_ia32_pshuf(S, OpPC, Call, false);
4538
4539 case X86::BI__builtin_ia32_kandqi:
4540 case X86::BI__builtin_ia32_kandhi:
4541 case X86::BI__builtin_ia32_kandsi:
4542 case X86::BI__builtin_ia32_kanddi:
4544 S, OpPC, Call,
4545 [](const APSInt &LHS, const APSInt &RHS) { return LHS & RHS; });
4546
4547 case X86::BI__builtin_ia32_kandnqi:
4548 case X86::BI__builtin_ia32_kandnhi:
4549 case X86::BI__builtin_ia32_kandnsi:
4550 case X86::BI__builtin_ia32_kandndi:
4552 S, OpPC, Call,
4553 [](const APSInt &LHS, const APSInt &RHS) { return ~LHS & RHS; });
4554
4555 case X86::BI__builtin_ia32_korqi:
4556 case X86::BI__builtin_ia32_korhi:
4557 case X86::BI__builtin_ia32_korsi:
4558 case X86::BI__builtin_ia32_kordi:
4560 S, OpPC, Call,
4561 [](const APSInt &LHS, const APSInt &RHS) { return LHS | RHS; });
4562
4563 case X86::BI__builtin_ia32_kxnorqi:
4564 case X86::BI__builtin_ia32_kxnorhi:
4565 case X86::BI__builtin_ia32_kxnorsi:
4566 case X86::BI__builtin_ia32_kxnordi:
4568 S, OpPC, Call,
4569 [](const APSInt &LHS, const APSInt &RHS) { return ~(LHS ^ RHS); });
4570
4571 case X86::BI__builtin_ia32_kxorqi:
4572 case X86::BI__builtin_ia32_kxorhi:
4573 case X86::BI__builtin_ia32_kxorsi:
4574 case X86::BI__builtin_ia32_kxordi:
4576 S, OpPC, Call,
4577 [](const APSInt &LHS, const APSInt &RHS) { return LHS ^ RHS; });
4578
4579 case X86::BI__builtin_ia32_knotqi:
4580 case X86::BI__builtin_ia32_knothi:
4581 case X86::BI__builtin_ia32_knotsi:
4582 case X86::BI__builtin_ia32_knotdi:
4584 S, OpPC, Call, [](const APSInt &Src) { return ~Src; });
4585
4586 case X86::BI__builtin_ia32_kaddqi:
4587 case X86::BI__builtin_ia32_kaddhi:
4588 case X86::BI__builtin_ia32_kaddsi:
4589 case X86::BI__builtin_ia32_kadddi:
4591 S, OpPC, Call,
4592 [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
4593
4594 case X86::BI__builtin_ia32_phminposuw128:
4595 return interp__builtin_ia32_phminposuw(S, OpPC, Call);
4596
4597 case X86::BI__builtin_ia32_pternlogd128_mask:
4598 case X86::BI__builtin_ia32_pternlogd256_mask:
4599 case X86::BI__builtin_ia32_pternlogd512_mask:
4600 case X86::BI__builtin_ia32_pternlogq128_mask:
4601 case X86::BI__builtin_ia32_pternlogq256_mask:
4602 case X86::BI__builtin_ia32_pternlogq512_mask:
4603 return interp__builtin_ia32_pternlog(S, OpPC, Call, /*MaskZ=*/false);
4604 case X86::BI__builtin_ia32_pternlogd128_maskz:
4605 case X86::BI__builtin_ia32_pternlogd256_maskz:
4606 case X86::BI__builtin_ia32_pternlogd512_maskz:
4607 case X86::BI__builtin_ia32_pternlogq128_maskz:
4608 case X86::BI__builtin_ia32_pternlogq256_maskz:
4609 case X86::BI__builtin_ia32_pternlogq512_maskz:
4610 return interp__builtin_ia32_pternlog(S, OpPC, Call, /*MaskZ=*/true);
4611 case Builtin::BI__builtin_elementwise_fshl:
4613 llvm::APIntOps::fshl);
4614 case Builtin::BI__builtin_elementwise_fshr:
4616 llvm::APIntOps::fshr);
4617
4618 case X86::BI__builtin_ia32_insertf32x4_256:
4619 case X86::BI__builtin_ia32_inserti32x4_256:
4620 case X86::BI__builtin_ia32_insertf64x2_256:
4621 case X86::BI__builtin_ia32_inserti64x2_256:
4622 case X86::BI__builtin_ia32_insertf32x4:
4623 case X86::BI__builtin_ia32_inserti32x4:
4624 case X86::BI__builtin_ia32_insertf64x2_512:
4625 case X86::BI__builtin_ia32_inserti64x2_512:
4626 case X86::BI__builtin_ia32_insertf32x8:
4627 case X86::BI__builtin_ia32_inserti32x8:
4628 case X86::BI__builtin_ia32_insertf64x4:
4629 case X86::BI__builtin_ia32_inserti64x4:
4630 case X86::BI__builtin_ia32_vinsertf128_ps256:
4631 case X86::BI__builtin_ia32_vinsertf128_pd256:
4632 case X86::BI__builtin_ia32_vinsertf128_si256:
4633 case X86::BI__builtin_ia32_insert128i256:
4634 return interp__builtin_x86_insert_subvector(S, OpPC, Call, BuiltinID);
4635
4636 case X86::BI__builtin_ia32_vec_ext_v4hi:
4637 case X86::BI__builtin_ia32_vec_ext_v16qi:
4638 case X86::BI__builtin_ia32_vec_ext_v8hi:
4639 case X86::BI__builtin_ia32_vec_ext_v4si:
4640 case X86::BI__builtin_ia32_vec_ext_v2di:
4641 case X86::BI__builtin_ia32_vec_ext_v32qi:
4642 case X86::BI__builtin_ia32_vec_ext_v16hi:
4643 case X86::BI__builtin_ia32_vec_ext_v8si:
4644 case X86::BI__builtin_ia32_vec_ext_v4di:
4645 case X86::BI__builtin_ia32_vec_ext_v4sf:
4646 return interp__builtin_vec_ext(S, OpPC, Call, BuiltinID);
4647
4648 case X86::BI__builtin_ia32_vec_set_v4hi:
4649 case X86::BI__builtin_ia32_vec_set_v16qi:
4650 case X86::BI__builtin_ia32_vec_set_v8hi:
4651 case X86::BI__builtin_ia32_vec_set_v4si:
4652 case X86::BI__builtin_ia32_vec_set_v2di:
4653 case X86::BI__builtin_ia32_vec_set_v32qi:
4654 case X86::BI__builtin_ia32_vec_set_v16hi:
4655 case X86::BI__builtin_ia32_vec_set_v8si:
4656 case X86::BI__builtin_ia32_vec_set_v4di:
4657 return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
4658
4659 case X86::BI__builtin_ia32_cmpb128_mask:
4660 case X86::BI__builtin_ia32_cmpw128_mask:
4661 case X86::BI__builtin_ia32_cmpd128_mask:
4662 case X86::BI__builtin_ia32_cmpq128_mask:
4663 case X86::BI__builtin_ia32_cmpb256_mask:
4664 case X86::BI__builtin_ia32_cmpw256_mask:
4665 case X86::BI__builtin_ia32_cmpd256_mask:
4666 case X86::BI__builtin_ia32_cmpq256_mask:
4667 case X86::BI__builtin_ia32_cmpb512_mask:
4668 case X86::BI__builtin_ia32_cmpw512_mask:
4669 case X86::BI__builtin_ia32_cmpd512_mask:
4670 case X86::BI__builtin_ia32_cmpq512_mask:
4671 return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
4672 /*IsUnsigned=*/false);
4673
4674 case X86::BI__builtin_ia32_ucmpb128_mask:
4675 case X86::BI__builtin_ia32_ucmpw128_mask:
4676 case X86::BI__builtin_ia32_ucmpd128_mask:
4677 case X86::BI__builtin_ia32_ucmpq128_mask:
4678 case X86::BI__builtin_ia32_ucmpb256_mask:
4679 case X86::BI__builtin_ia32_ucmpw256_mask:
4680 case X86::BI__builtin_ia32_ucmpd256_mask:
4681 case X86::BI__builtin_ia32_ucmpq256_mask:
4682 case X86::BI__builtin_ia32_ucmpb512_mask:
4683 case X86::BI__builtin_ia32_ucmpw512_mask:
4684 case X86::BI__builtin_ia32_ucmpd512_mask:
4685 case X86::BI__builtin_ia32_ucmpq512_mask:
4686 return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
4687 /*IsUnsigned=*/true);
4688 case X86::BI__builtin_ia32_pslldqi128_byteshift:
4689 case X86::BI__builtin_ia32_pslldqi256_byteshift:
4690 case X86::BI__builtin_ia32_pslldqi512_byteshift:
4691 // These SLLDQ intrinsics always operate on byte elements (8 bits).
4692 // The lane width is hardcoded to 16 to match the SIMD register size,
4693 // but the algorithm processes one byte per iteration,
4694 // so APInt(8, ...) is correct and intentional.
4696 S, OpPC, Call, BuiltinID,
4697 [](const Pointer &Src, unsigned Lane, unsigned I, unsigned Shift) {
4698 if (I < Shift) {
4699 return APInt(8, 0);
4700 }
4701 return APInt(8, Src.elem<uint8_t>(Lane + I - Shift));
4702 });
4703
4704 case X86::BI__builtin_ia32_psrldqi128_byteshift:
4705 case X86::BI__builtin_ia32_psrldqi256_byteshift:
4706 case X86::BI__builtin_ia32_psrldqi512_byteshift:
4707 // These SRLDQ intrinsics always operate on byte elements (8 bits).
4708 // The lane width is hardcoded to 16 to match the SIMD register size,
4709 // but the algorithm processes one byte per iteration,
4710 // so APInt(8, ...) is correct and intentional.
4712 S, OpPC, Call, BuiltinID,
4713 [](const Pointer &Src, unsigned Lane, unsigned I, unsigned Shift) {
4714 if (I + Shift < 16) {
4715 return APInt(8, Src.elem<uint8_t>(Lane + I + Shift));
4716 }
4717
4718 return APInt(8, 0);
4719 });
4720
4721 default:
4722 S.FFDiag(S.Current->getLocation(OpPC),
4723 diag::note_invalid_subexpr_in_const_expr)
4724 << S.Current->getRange(OpPC);
4725
4726 return false;
4727 }
4728
4729 llvm_unreachable("Unhandled builtin ID");
4730}
4731
4733 ArrayRef<int64_t> ArrayIndices, int64_t &IntResult) {
4735 unsigned N = E->getNumComponents();
4736 assert(N > 0);
4737
4738 unsigned ArrayIndex = 0;
4739 QualType CurrentType = E->getTypeSourceInfo()->getType();
4740 for (unsigned I = 0; I != N; ++I) {
4741 const OffsetOfNode &Node = E->getComponent(I);
4742 switch (Node.getKind()) {
4743 case OffsetOfNode::Field: {
4744 const FieldDecl *MemberDecl = Node.getField();
4745 const auto *RD = CurrentType->getAsRecordDecl();
4746 if (!RD || RD->isInvalidDecl())
4747 return false;
4749 unsigned FieldIndex = MemberDecl->getFieldIndex();
4750 assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
4751 Result +=
4753 CurrentType = MemberDecl->getType().getNonReferenceType();
4754 break;
4755 }
4756 case OffsetOfNode::Array: {
4757 // When generating bytecode, we put all the index expressions as Sint64 on
4758 // the stack.
4759 int64_t Index = ArrayIndices[ArrayIndex];
4760 const ArrayType *AT = S.getASTContext().getAsArrayType(CurrentType);
4761 if (!AT)
4762 return false;
4763 CurrentType = AT->getElementType();
4764 CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(CurrentType);
4765 Result += Index * ElementSize;
4766 ++ArrayIndex;
4767 break;
4768 }
4769 case OffsetOfNode::Base: {
4770 const CXXBaseSpecifier *BaseSpec = Node.getBase();
4771 if (BaseSpec->isVirtual())
4772 return false;
4773
4774 // Find the layout of the class whose base we are looking into.
4775 const auto *RD = CurrentType->getAsCXXRecordDecl();
4776 if (!RD || RD->isInvalidDecl())
4777 return false;
4779
4780 // Find the base class itself.
4781 CurrentType = BaseSpec->getType();
4782 const auto *BaseRD = CurrentType->getAsCXXRecordDecl();
4783 if (!BaseRD)
4784 return false;
4785
4786 // Add the offset to the base.
4787 Result += RL.getBaseClassOffset(BaseRD);
4788 break;
4789 }
4791 llvm_unreachable("Dependent OffsetOfExpr?");
4792 }
4793 }
4794
4795 IntResult = Result.getQuantity();
4796
4797 return true;
4798}
4799
4801 const Pointer &Ptr, const APSInt &IntValue) {
4802
4803 const Record *R = Ptr.getRecord();
4804 assert(R);
4805 assert(R->getNumFields() == 1);
4806
4807 unsigned FieldOffset = R->getField(0u)->Offset;
4808 const Pointer &FieldPtr = Ptr.atField(FieldOffset);
4809 PrimType FieldT = *S.getContext().classify(FieldPtr.getType());
4810
4811 INT_TYPE_SWITCH(FieldT,
4812 FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
4813 FieldPtr.initialize();
4814 return true;
4815}
4816
/// Resets the object \p Dest points to back to a value-initialized state,
/// recursing into records and arrays. Used by copyRecord() to clear the
/// non-active fields of a destination union.
static void zeroAll(Pointer &Dest) {
  const Descriptor *Desc = Dest.getFieldDesc();

  // Primitive value: destroy in place, then placement-new a
  // value-initialized replacement of the same primitive type.
  if (Desc->isPrimitive()) {
    TYPE_SWITCH(Desc->getPrimType(), {
      Dest.deref<T>().~T();
      new (&Dest.deref<T>()) T();
    });
    return;
  }

  // Record: recurse into every field.
  if (Desc->isRecord()) {
    const Record *R = Desc->ElemRecord;
    for (const Record::Field &F : R->fields()) {
      Pointer FieldPtr = Dest.atField(F.Offset);
      zeroAll(FieldPtr);
    }
    return;
  }

  // Array of primitives.
  if (Desc->isPrimitiveArray()) {
    for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
      // NOTE(review): the body derefs Dest without atIndex(I) — presumably
      // deref<T>() resolves to the element at the pointer's current index;
      // confirm every element (not just one) is actually reset.
      TYPE_SWITCH(Desc->getPrimType(), {
        Dest.deref<T>().~T();
        new (&Dest.deref<T>()) T();
      });
    }
    return;
  }

  // Array of composites: recurse element by element through a pointer
  // narrowed to the element's own scope.
  if (Desc->isCompositeArray()) {
    for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
      Pointer ElemPtr = Dest.atIndex(I).narrow();
      zeroAll(ElemPtr);
    }
    return;
  }
}
4855
4856static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
4857 Pointer &Dest, bool Activate);
/// Copies a record (struct/class/union) value from \p Src to \p Dest,
/// field by field and base by base. When \p Activate is set, copied fields
/// are additionally marked active (required when the destination is a
/// union member). Returns false on failure, e.g. when an inactive union
/// field fails the mutability check or a nested composite can't be copied.
static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
                       Pointer &Dest, bool Activate = false) {
  [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
  const Descriptor *DestDesc = Dest.getFieldDesc();

  // Copy one field: primitives by direct assignment (propagating the
  // initialized/active state), composites by recursing via copyComposite().
  auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
    Pointer DestField = Dest.atField(F.Offset);
    if (OptPrimType FT = S.Ctx.classify(F.Decl->getType())) {
      TYPE_SWITCH(*FT, {
        DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
        if (Src.atField(F.Offset).isInitialized())
          DestField.initialize();
        if (Activate)
          DestField.activate();
      });
      return true;
    }
    // Composite field.
    return copyComposite(S, OpPC, Src.atField(F.Offset), DestField, Activate);
  };

  // Source and destination must share the same record layout.
  assert(SrcDesc->isRecord());
  assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
  const Record *R = DestDesc->ElemRecord;
  for (const Record::Field &F : R->fields()) {
    if (R->isUnion()) {
      // For unions, only copy the active field. Zero all others.
      const Pointer &SrcField = Src.atField(F.Offset);
      if (SrcField.isActive()) {
        if (!copyField(F, /*Activate=*/true))
          return false;
      } else {
        if (!CheckMutable(S, OpPC, Src.atField(F.Offset)))
          return false;
        Pointer DestField = Dest.atField(F.Offset);
        zeroAll(DestField);
      }
    } else {
      if (!copyField(F, Activate))
        return false;
    }
  }

  // Copy all base-class subobjects as well.
  for (const Record::Base &B : R->bases()) {
    Pointer DestBase = Dest.atField(B.Offset);
    if (!copyRecord(S, OpPC, Src.atField(B.Offset), DestBase, Activate))
      return false;
  }

  Dest.initialize();
  return true;
}
4910
/// Copies a composite (array or record) value from \p Src to \p Dest.
/// Dispatches on the destination's descriptor: primitive arrays are copied
/// element-wise, composite arrays recurse per element, and records are
/// delegated to copyRecord(). Anything else is rejected via Invalid().
/// \p Activate is forwarded so union members get activated on copy.
static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
                          Pointer &Dest, bool Activate = false) {
  assert(Src.isLive() && Dest.isLive());

  [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
  const Descriptor *DestDesc = Dest.getFieldDesc();

  // Primitive copies are handled by the caller; both sides are composite.
  assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());

  if (DestDesc->isPrimitiveArray()) {
    // Element-by-element copy; both arrays must have identical shape.
    assert(SrcDesc->isPrimitiveArray());
    assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
    PrimType ET = DestDesc->getPrimType();
    for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
      Pointer DestElem = Dest.atIndex(I);
      TYPE_SWITCH(ET, {
        DestElem.deref<T>() = Src.elem<T>(I);
        DestElem.initialize();
      });
    }
    return true;
  }

  if (DestDesc->isCompositeArray()) {
    assert(SrcDesc->isCompositeArray());
    assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
    for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
      // Narrow both pointers so each recursion sees a single element.
      const Pointer &SrcElem = Src.atIndex(I).narrow();
      Pointer DestElem = Dest.atIndex(I).narrow();
      if (!copyComposite(S, OpPC, SrcElem, DestElem, Activate))
        return false;
    }
    return true;
  }

  if (DestDesc->isRecord())
    return copyRecord(S, OpPC, Src, Dest, Activate);
  // Unknown composite shape: diagnose as an invalid operation.
  return Invalid(S, OpPC);
}
4950
4951bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
4952 return copyComposite(S, OpPC, Src, Dest);
4953}
4954
4955} // namespace interp
4956} // namespace clang
#define V(N, I)
Defines enum values for all the target-independent builtin functions.
llvm::APSInt APSInt
Definition Compiler.cpp:23
GCCTypeClass
Values returned by __builtin_classify_type, chosen to match the values produced by GCC's builtin.
CharUnits GetAlignOfExpr(const ASTContext &Ctx, const Expr *E, UnaryExprOrTypeTrait ExprKind)
GCCTypeClass EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts)
EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way as GCC.
static bool isOneByteCharacterType(QualType T)
static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal)
Attempts to detect a user writing into a piece of memory that's impossible to figure out the size of ...
TokenType getType() const
Returns the token's type, e.g.
#define X(type, name)
Definition Value.h:97
static DiagnosticBuilder Diag(DiagnosticsEngine *Diags, const LangOptions &Features, FullSourceLoc TokLoc, const char *TokBegin, const char *TokRangeBegin, const char *TokRangeEnd, unsigned DiagID)
Produce a diagnostic highlighting some portion of a literal.
#define INT_TYPE_SWITCH_NO_BOOL(Expr, B)
Definition PrimType.h:251
#define INT_TYPE_SWITCH(Expr, B)
Definition PrimType.h:232
#define TYPE_SWITCH(Expr, B)
Definition PrimType.h:211
static std::string toString(const clang::SanitizerSet &Sanitizers)
Produce a string containing comma-separated names of sanitizers in Sanitizers set.
static QualType getPointeeType(const MemRegion *R)
Enumerates target-specific builtins in their own namespaces within namespace clang.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition APValue.h:122
CharUnits & getLValueOffset()
Definition APValue.cpp:993
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
Builtin::Context & BuiltinInfo
Definition ASTContext.h:774
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
const LangOptions & getLangOpts() const
Definition ASTContext.h:926
CanQualType CharTy
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
QualType getWCharType() const
Return the unique wchar_t type available in C++ (and available as __wchar_t as a Microsoft extension)...
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:891
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
CanQualType getCanonicalTagType(const TagDecl *TD) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getVBaseClassOffset(const CXXRecordDecl *VBase) const
getVBaseClassOffset - Get the offset, in chars, for the given base class.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3722
QualType getElementType() const
Definition TypeBase.h:3734
std::string getQuotedName(unsigned ID) const
Return the identifier name for the specified builtin inside single quotes for a diagnostic,...
Definition Builtins.cpp:85
bool isConstantEvaluated(unsigned ID) const
Return true if this function can be constant evaluated by Clang frontend.
Definition Builtins.h:431
Represents a base class of a C++ class.
Definition DeclCXX.h:146
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition DeclCXX.h:249
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3081
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Definition CharUnits.h:201
static unsigned getMaxSizeBits(const ASTContext &Context)
Determine the maximum number of active bits that an array's size can require, which limits the maximu...
Definition Type.cpp:254
This represents one expression.
Definition Expr.h:112
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3160
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
Represents a function declaration or definition.
Definition Decl.h:2000
One of these records is kept for each identifier that is lexed.
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
std::optional< llvm::AllocTokenMode > AllocTokenMode
The allocation token mode.
std::optional< uint64_t > AllocTokenMax
Maximum number of allocation tokens (0 = no max), nullopt if none set (use target default).
OffsetOfExpr - [C99 7.17] - This represents an expression of the form offsetof(record-type,...
Definition Expr.h:2527
const OffsetOfNode & getComponent(unsigned Idx) const
Definition Expr.h:2574
TypeSourceInfo * getTypeSourceInfo() const
Definition Expr.h:2567
unsigned getNumComponents() const
Definition Expr.h:2582
Helper class for OffsetOfExpr.
Definition Expr.h:2421
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition Expr.h:2485
@ Array
An index into an array.
Definition Expr.h:2426
@ Identifier
A field in a dependent type, known only by its name.
Definition Expr.h:2430
@ Field
A field.
Definition Expr.h:2428
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition Expr.h:2433
Kind getKind() const
Determine what kind of offsetof node this is.
Definition Expr.h:2475
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition Expr.h:2495
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2866
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8278
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8463
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
unsigned getMaxAtomicInlineWidth() const
Return the maximum width lock-free atomic operation which can be inlined given the supported features...
Definition TargetInfo.h:853
bool isBigEndian() const
virtual int getEHDataRegisterNumber(unsigned RegNo) const
Return the register number that __builtin_eh_return_regno would return with the specified argument.
virtual bool isNan2008() const
Returns true if NaN encoding is IEEE 754-2008.
QualType getType() const
Return the type wrapped by this type source info.
Definition TypeBase.h:8260
bool isBooleanType() const
Definition TypeBase.h:9001
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2225
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition Type.cpp:2273
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isPointerType() const
Definition TypeBase.h:8515
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8915
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9158
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition Type.cpp:2435
bool isVectorType() const
Definition TypeBase.h:8654
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2320
bool isFloatingType() const
Definition Type.cpp:2304
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9091
QualType getType() const
Definition Decl.h:723
Represents a GCC generic vector type.
Definition TypeBase.h:4175
unsigned getNumElements() const
Definition TypeBase.h:4190
QualType getElementType() const
Definition TypeBase.h:4189
A memory block, either on the stack or in the heap.
Definition InterpBlock.h:44
const Descriptor * getDescriptor() const
Returns the block's descriptor.
Definition InterpBlock.h:73
bool isDynamic() const
Definition InterpBlock.h:83
Wrapper around boolean types.
Definition Boolean.h:25
static Boolean from(T Value)
Definition Boolean.h:97
Pointer into the code segment.
Definition Source.h:30
const LangOptions & getLangOpts() const
Returns the language options.
Definition Context.cpp:329
OptPrimType classify(QualType T) const
Classifies a type.
Definition Context.cpp:363
unsigned getEvalID() const
Definition Context.h:145
Manages dynamic memory allocations done during bytecode interpretation.
bool deallocate(const Expr *Source, const Block *BlockToDelete, InterpState &S)
Deallocate the given source+block combination.
std::optional< Form > getAllocationForm(const Expr *Source) const
Checks whether the allocation done at the given source is an array allocation.
Block * allocate(const Descriptor *D, unsigned EvalID, Form AllocForm)
Allocate ONE element of the given descriptor.
If a Floating is constructed from Memory, it DOES NOT OWN THAT MEMORY.
Definition Floating.h:35
void copy(const APFloat &F)
Definition Floating.h:123
llvm::FPClassTest classify() const
Definition Floating.h:154
bool isSignaling() const
Definition Floating.h:149
bool isNormal() const
Definition Floating.h:152
ComparisonCategoryResult compare(const Floating &RHS) const
Definition Floating.h:157
bool isZero() const
Definition Floating.h:144
bool isNegative() const
Definition Floating.h:143
bool isFinite() const
Definition Floating.h:151
bool isDenormal() const
Definition Floating.h:153
APFloat::fltCategory getCategory() const
Definition Floating.h:155
APFloat getAPFloat() const
Definition Floating.h:64
Base class for stack frames, shared between VM and walker.
Definition Frame.h:25
virtual const FunctionDecl * getCallee() const =0
Returns the called function's declaration.
If an IntegralAP is constructed from Memory, it DOES NOT OWN THAT MEMORY.
Definition IntegralAP.h:36
Frame storing local variables.
Definition InterpFrame.h:27
const Expr * getExpr(CodePtr PC) const
InterpFrame * Caller
The frame of the previous function.
Definition InterpFrame.h:30
SourceInfo getSource(CodePtr PC) const
Map a location to a source.
CodePtr getRetPC() const
Returns the return address of the frame.
SourceLocation getLocation(CodePtr PC) const
SourceRange getRange(CodePtr PC) const
unsigned getDepth() const
const FunctionDecl * getCallee() const override
Returns the called function's declaration.
Stack frame storing temporaries and parameters.
Definition InterpStack.h:25
T pop()
Returns the value from the top of the stack and removes it.
Definition InterpStack.h:39
void push(Tys &&...Args)
Constructs a value in place on the top of the stack.
Definition InterpStack.h:33
void discard()
Discards the top value from the stack.
Definition InterpStack.h:50
T & peek() const
Returns a reference to the value on the top of the stack.
Definition InterpStack.h:62
Interpreter context.
Definition InterpState.h:43
Expr::EvalStatus & getEvalStatus() const override
Definition InterpState.h:67
Context & getContext() const
DynamicAllocator & getAllocator()
Context & Ctx
Interpreter Context.
Floating allocFloat(const llvm::fltSemantics &Sem)
llvm::SmallVector< const Block * > InitializingBlocks
List of blocks we're currently running either constructors or destructors for.
ASTContext & getASTContext() const override
Definition InterpState.h:70
InterpStack & Stk
Temporary stack.
const VarDecl * EvaluatingDecl
Declaration we're initializing/evaluating, if any.
InterpFrame * Current
The current frame.
T allocAP(unsigned BitWidth)
const LangOptions & getLangOpts() const
Definition InterpState.h:71
StdAllocatorCaller getStdAllocatorCaller(StringRef Name) const
Program & P
Reference to the module containing all bytecode.
PrimType value_or(PrimType PT) const
Definition PrimType.h:68
A pointer to a memory block, live or dead.
Definition Pointer.h:91
Pointer narrow() const
Restricts the scope of an array element pointer.
Definition Pointer.h:188
bool isInitialized() const
Checks if an object was initialized.
Definition Pointer.cpp:440
Pointer atIndex(uint64_t Idx) const
Offsets a pointer inside an array.
Definition Pointer.h:156
bool isDummy() const
Checks if the pointer points to a dummy value.
Definition Pointer.h:547
int64_t getIndex() const
Returns the index into an array.
Definition Pointer.h:612
bool isActive() const
Checks if the object is active.
Definition Pointer.h:536
Pointer atField(unsigned Off) const
Creates a pointer to a field.
Definition Pointer.h:173
T & deref() const
Dereferences the pointer, if it's live.
Definition Pointer.h:663
unsigned getNumElems() const
Returns the number of elements.
Definition Pointer.h:596
Pointer getArray() const
Returns the parent array.
Definition Pointer.h:316
bool isUnknownSizeArray() const
Checks if the structure is an array of unknown size.
Definition Pointer.h:415
void activate() const
Activates a field.
Definition Pointer.cpp:576
bool isIntegralPointer() const
Definition Pointer.h:469
QualType getType() const
Returns the type of the innermost field.
Definition Pointer.h:336
bool isArrayElement() const
Checks if the pointer points to an array.
Definition Pointer.h:421
void initializeAllElements() const
Initialize all elements of a primitive array at once.
Definition Pointer.cpp:545
bool isLive() const
Checks if the pointer is live.
Definition Pointer.h:268
bool inArray() const
Checks if the innermost field is an array.
Definition Pointer.h:397
T & elem(unsigned I) const
Dereferences the element at index I.
Definition Pointer.h:679
Pointer getBase() const
Returns a pointer to the object of which this pointer is a field.
Definition Pointer.h:307
std::string toDiagnosticString(const ASTContext &Ctx) const
Converts the pointer to a string usable in diagnostics.
Definition Pointer.cpp:427
bool isZero() const
Checks if the pointer is null.
Definition Pointer.h:254
bool isRoot() const
Pointer points directly to a block.
Definition Pointer.h:437
const Descriptor * getDeclDesc() const
Accessor for information about the declaration site.
Definition Pointer.h:282
static bool pointToSameBlock(const Pointer &A, const Pointer &B)
Checks if both given pointers point to the same block.
Definition Pointer.cpp:652
APValue toAPValue(const ASTContext &ASTCtx) const
Converts the pointer to an APValue.
Definition Pointer.cpp:171
bool isOnePastEnd() const
Checks if the index is one past end.
Definition Pointer.h:629
uint64_t getIntegerRepresentation() const
Definition Pointer.h:143
const FieldDecl * getField() const
Returns the field information.
Definition Pointer.h:481
Pointer expand() const
Expands a pointer to the containing array, undoing narrowing.
Definition Pointer.h:221
bool isBlockPointer() const
Definition Pointer.h:468
const Block * block() const
Definition Pointer.h:602
const Descriptor * getFieldDesc() const
Accessors for information about the innermost field.
Definition Pointer.h:326
bool isVirtualBaseClass() const
Definition Pointer.h:543
bool isBaseClass() const
Checks if a structure is a base class.
Definition Pointer.h:542
size_t elemSize() const
Returns the element size of the innermost field.
Definition Pointer.h:358
bool canBeInitialized() const
If this pointer has an InlineDescriptor we can use to initialize.
Definition Pointer.h:444
Lifetime getLifetime() const
Definition Pointer.h:724
void initialize() const
Initializes a field.
Definition Pointer.cpp:493
bool isField() const
Checks if the item is a field in an object.
Definition Pointer.h:274
const Record * getRecord() const
Returns the record descriptor of a class.
Definition Pointer.h:474
Descriptor * createDescriptor(const DeclTy &D, PrimType T, const Type *SourceTy=nullptr, Descriptor::MetadataSize MDSize=std::nullopt, bool IsConst=false, bool IsTemporary=false, bool IsMutable=false, bool IsVolatile=false)
Creates a descriptor for a primitive type.
Definition Program.h:119
Structure/Class descriptor.
Definition Record.h:25
const RecordDecl * getDecl() const
Returns the underlying declaration.
Definition Record.h:53
bool isUnion() const
Checks if the record is a union.
Definition Record.h:57
const Field * getField(const FieldDecl *FD) const
Returns a field.
Definition Record.cpp:47
llvm::iterator_range< const_base_iter > bases() const
Definition Record.h:92
unsigned getNumFields() const
Definition Record.h:88
llvm::iterator_range< const_field_iter > fields() const
Definition Record.h:84
Describes the statement/declaration an opcode was generated from.
Definition Source.h:73
OptionalDiagnostic Note(SourceLocation Loc, diag::kind DiagId)
Add a note to a prior diagnostic.
Definition State.cpp:63
DiagnosticBuilder report(SourceLocation Loc, diag::kind DiagId)
Directly reports a diagnostic message.
Definition State.cpp:74
OptionalDiagnostic FFDiag(SourceLocation Loc, diag::kind DiagId=diag::note_invalid_subexpr_in_const_expr, unsigned ExtraNotes=0)
Diagnose that the evaluation could not be folded (FF => FoldFailure)
Definition State.cpp:21
OptionalDiagnostic CCEDiag(SourceLocation Loc, diag::kind DiagId=diag::note_invalid_subexpr_in_const_expr, unsigned ExtraNotes=0)
Diagnose that the evaluation does not produce a C++11 core constant expression.
Definition State.cpp:42
bool checkingPotentialConstantExpression() const
Are we checking whether the expression is a potential constant expression?
Definition State.h:99
Defines the clang::TargetInfo interface.
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition OSLog.cpp:192
std::optional< llvm::AllocTokenMetadata > getAllocTokenMetadata(QualType T, const ASTContext &Ctx)
Get the information required for construction of an allocation token ID.
QualType inferPossibleType(const CallExpr *E, const ASTContext &Ctx, const CastExpr *CastE)
Infer the possible allocated type from an allocation call expression.
static bool isNoopBuiltin(unsigned ID)
static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_ia32_shuffle_generic(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< std::pair< unsigned, int >(unsigned, unsigned)> GetSourceIndex)
static bool interp__builtin_ia32_phminposuw(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT, const APSInt &Value)
bool readPointerToBuffer(const Context &Ctx, const Pointer &FromPtr, BitcastBuffer &Buffer, bool ReturnOnUninit)
static Floating abs(InterpState &S, const Floating &In)
static bool interp__builtin_x86_extract_vector(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool IsNumBuiltin)
static bool interp__builtin_elementwise_maxmin(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_elementwise_triop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_assume(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC, DynamicAllocator::Form AllocForm, DynamicAllocator::Form DeleteForm, const Descriptor *D, const Expr *NewExpr)
Diagnose mismatched new[]/delete or new/delete[] pairs.
Definition Interp.cpp:1114
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
Defined as __builtin_isnan(...), to accommodate the fact that it can take a float,...
static llvm::RoundingMode getRoundingMode(FPOptions FPO)
static bool interp__builtin_elementwise_countzeroes(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
Can be called with an integer or vector as the first and only parameter.
static bool interp__builtin_ia32_pshufb(InterpState &S, CodePtr OpPC, const CallExpr *Call)
bool Call(InterpState &S, CodePtr OpPC, const Function *Func, uint32_t VarArgSize)
Definition Interp.cpp:1561
static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool IsNumBuiltin)
static bool interp__builtin_blend(InterpState &S, CodePtr OpPC, const CallExpr *Call)
bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC, const Pointer &Ptr, const APSInt &IntValue)
Sets the given integral value to the pointer, which is of a std::{weak,partial,strong}...
static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame)
static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
bool __atomic_always_lock_free(size_t, void const volatile*) bool __atomic_is_lock_free(size_t,...
static llvm::APSInt convertBoolVectorToInt(const Pointer &Val)
static bool interp__builtin_move(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_clz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if a pointer points to a mutable field.
Definition Interp.cpp:594
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
__builtin_is_aligned() __builtin_align_up() __builtin_align_down() The first parameter is either an i...
static bool Activate(InterpState &S, CodePtr OpPC)
Definition Interp.h:1963
static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
(CarryIn, LHS, RHS, Result)
static bool isOneByteCharacterType(QualType T)
Determine if T is a character type for which we guarantee that sizeof(T) == 1.
static unsigned computePointerOffset(const ASTContext &ASTCtx, const Pointer &Ptr)
Compute the byte offset of Ptr in the full declaration.
static bool interp__builtin_x86_byteshift(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID, llvm::function_ref< APInt(const Pointer &, unsigned Lane, unsigned I, unsigned Shift)> Fn)
static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a value can be loaded from a block.
Definition Interp.cpp:793
static bool interp__builtin_ia32_cmp_mask(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID, bool IsUnsigned)
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinOp)
static bool interp__builtin_inf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_vec_ext(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_test_op(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< bool(const APInt &A, const APInt &B)> Fn)
static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool CheckSign, const CallExpr *Call)
static bool interp__builtin_os_log_format_buffer_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E, ArrayRef< int64_t > ArrayIndices, int64_t &IntResult)
Interpret an offsetof operation.
static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is in range.
Definition Interp.cpp:519
static bool pointsToLastObject(const Pointer &Ptr)
Does Ptr point to the last subobject?
static bool interp__builtin_select(InterpState &S, CodePtr OpPC, const CallExpr *Call)
AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
llvm::APFloat APFloat
Definition Floating.h:27
static void discard(InterpStack &Stk, PrimType T)
bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is live and accessible.
Definition Interp.cpp:414
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
Five int values followed by one floating value.
static bool interp__builtin_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp_floating_comparison(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool handleOverflow(InterpState &S, CodePtr OpPC, const T &SrcValue)
llvm::APInt APInt
Definition FixedPoint.h:19
static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate)
static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate=false)
static bool interp__builtin_c11_atomic_is_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool __c11_atomic_is_lock_free(size_t)
static void zeroAll(Pointer &Dest)
static bool interp__builtin_elementwise_int_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_x86_extract_vector_masked(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
PrimType
Enumeration of the primitive types of the VM.
Definition PrimType.h:34
static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, uint32_t BuiltinID)
Interpret a builtin function.
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_vec_set(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
__builtin_complex(float A, float B);
static bool evalICmpImm(uint8_t Imm, const APSInt &A, const APSInt &B, bool IsUnsigned)
static bool interp__builtin_ia32_pshuf(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool IsShufHW)
bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK)
Checks if a pointer is a dummy pointer.
Definition Interp.cpp:1165
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
__builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
static bool interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_pmul(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &, const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_x86_pack(InterpState &S, CodePtr, const CallExpr *E, llvm::function_ref< APInt(const APSInt &)> PackFn)
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT)
Pushes Val on the stack as the type given by QT.
static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if the array is offsetable.
Definition Interp.cpp:406
static bool interp__builtin_elementwise_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC, const InterpFrame *Frame)
static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
Can be called with an integer or vector as the first and only parameter.
static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool Error(InterpState &S, CodePtr OpPC)
Do nothing and just abort execution.
Definition Interp.h:3286
static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_movmsk_op(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
static bool interp_builtin_horizontal_fp_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APFloat(const APFloat &, const APFloat &, llvm::RoundingMode)> Fn)
static bool interp__builtin_elementwise_triop_fp(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APFloat(const APFloat &, const APFloat &, const APFloat &, llvm::RoundingMode)> Fn)
size_t primSize(PrimType Type)
Returns the size of a primitive type in bytes.
Definition PrimType.cpp:23
static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems)
static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static APSInt popToAPSInt(InterpStack &Stk, PrimType T)
static std::optional< unsigned > computeFullDescSize(const ASTContext &ASTCtx, const Descriptor *Desc)
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
First parameter to __builtin_isfpclass is the floating value, the second one is an integral value.
static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_nan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, bool Signaling)
bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest)
Copy the contents of Src into Dest.
static bool interp__builtin_elementwise_int_unaryop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &)> Fn)
constexpr bool isIntegralType(PrimType T)
Definition PrimType.h:128
static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_infer_alloc_token(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp_builtin_horizontal_int_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &)> Fn)
static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC, unsigned ID)
llvm::APSInt APSInt
Definition FixedPoint.h:20
static QualType getElemType(const Pointer &P)
static bool interp__builtin_ia32_pternlog(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool MaskZ)
static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static void swapBytes(std::byte *M, size_t N)
The JSON file list parser is used to communicate input to InstallAPI.
if(T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T -> getSizeExpr())))
ComparisonCategoryResult
An enumeration representing the possible results of a three-way comparison.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
@ AK_Read
Definition State.h:27
const FunctionProtoType * T
U cast(CodeGen::Address addr)
Definition Address.h:327
SmallVectorImpl< PartialDiagnosticAt > * Diag
Diag - If this is non-null, it will be filled in with a stack of notes indicating why evaluation failed.
Definition Expr.h:633
Track what bits have been initialized to known values and which ones have indeterminate value.
std::unique_ptr< std::byte[]> Data
A quantity in bits.
A quantity in bytes.
size_t getQuantity() const
Describes a memory block created by an allocation site.
Definition Descriptor.h:122
unsigned getNumElems() const
Returns the number of elements stored in the block.
Definition Descriptor.h:249
bool isPrimitive() const
Checks if the descriptor is of a primitive.
Definition Descriptor.h:263
QualType getElemQualType() const
bool isCompositeArray() const
Checks if the descriptor is of an array of composites.
Definition Descriptor.h:256
const ValueDecl * asValueDecl() const
Definition Descriptor.h:214
static constexpr unsigned MaxArrayElemBytes
Maximum number of bytes to be used for array elements.
Definition Descriptor.h:148
QualType getType() const
const Decl * asDecl() const
Definition Descriptor.h:210
static constexpr MetadataSize InlineDescMD
Definition Descriptor.h:144
unsigned getElemSize() const
returns the size of an element when the structure is viewed as an array.
Definition Descriptor.h:244
bool isPrimitiveArray() const
Checks if the descriptor is of an array of primitives.
Definition Descriptor.h:254
const VarDecl * asVarDecl() const
Definition Descriptor.h:218
PrimType getPrimType() const
Definition Descriptor.h:236
bool isRecord() const
Checks if the descriptor is of a record.
Definition Descriptor.h:268
const Record *const ElemRecord
Pointer to the record, if block contains records.
Definition Descriptor.h:153
const Expr * asExpr() const
Definition Descriptor.h:211
bool isArray() const
Checks if the descriptor is of an array.
Definition Descriptor.h:266
Mapping from primitive types to their representation.
Definition PrimType.h:138